Magellan Linux

Annotation of /trunk/kernel-magellan/patches-4.13/0104-4.13.5-all-fixes.patch



Revision 2997
Mon Oct 9 08:50:17 2017 UTC by niro
File size: 151755 bytes
-linux-4.13.5
diff --git a/Makefile b/Makefile
index 159901979dec..189f1a748e4c 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 13
-SUBLEVEL = 4
+SUBLEVEL = 5
EXTRAVERSION =
NAME = Fearless Coyote

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 6eae342ced6b..8d9832870ff4 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -412,7 +412,7 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
/* Find an entry in the third-level page table. */
#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

-#define pte_offset_phys(dir,addr) (pmd_page_paddr(*(dir)) + pte_index(addr) * sizeof(pte_t))
+#define pte_offset_phys(dir,addr) (pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))
#define pte_offset_kernel(dir,addr) ((pte_t *)__va(pte_offset_phys((dir), (addr))))

#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
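
The change to pte_offset_phys() makes the walker read the pmd slot through READ_ONCE(), so the compiler must load the live entry exactly once instead of possibly re-reading (or tearing) a value another CPU is concurrently rewriting. A minimal userspace sketch of the idiom; the READ_ONCE macro below is a simplified stand-in for the kernel's, and entry/walker are invented names:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified READ_ONCE: force a single, untorn load via volatile. */
    #define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

    static uint64_t entry; /* stands in for a live pmd slot updated concurrently */

    static void walker(void)
    {
        /* Snapshot the entry once; every later test uses the same value,
         * instead of re-reading a slot another CPU may be rewriting. */
        uint64_t e = READ_ONCE(entry);

        if (e & 1)
            printf("valid entry: %#llx\n", (unsigned long long)e);
    }

    int main(void)
    {
        entry = 0x1001;
        walker();
        return 0;
    }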
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index adb0910b88f5..8c908829d3c4 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -381,6 +381,7 @@ ENTRY(kimage_vaddr)
* booted in EL1 or EL2 respectively.
*/
ENTRY(el2_setup)
+ msr SPsel, #1 // We want to use SP_EL{1,2}
mrs x0, CurrentEL
cmp x0, #CurrentEL_EL2
b.eq 1f
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 1f22a41565a3..92f3bc3bc74e 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -614,7 +614,7 @@ static const struct fault_info fault_info[] = {
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 0 translation fault" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" },
- { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
+ { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
{ do_bad, SIGBUS, 0, "unknown 8" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 9e6c74bf66c4..6668f67a61c3 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -618,8 +618,7 @@ static int mipspmu_event_init(struct perf_event *event)
return -ENOENT;
}

- if ((unsigned int)event->cpu >= nr_cpumask_bits ||
- (event->cpu >= 0 && !cpu_online(event->cpu)))
+ if (event->cpu >= 0 && !cpu_online(event->cpu))
return -ENODEV;

if (!atomic_inc_not_zero(&active_events)) {
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 63992b2d8e15..f27eecd5ec7f 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -1018,6 +1018,10 @@ int eeh_init(void)
} else if ((ret = eeh_ops->init()))
return ret;

+ /* Initialize PHB PEs */
+ list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
+ eeh_dev_phb_init_dynamic(hose);
+
/* Initialize EEH event */
ret = eeh_event_init();
if (ret)
diff --git a/arch/powerpc/kernel/eeh_dev.c b/arch/powerpc/kernel/eeh_dev.c
index d6b2ca70d14d..0820b73288c0 100644
--- a/arch/powerpc/kernel/eeh_dev.c
+++ b/arch/powerpc/kernel/eeh_dev.c
@@ -83,21 +83,3 @@ void eeh_dev_phb_init_dynamic(struct pci_controller *phb)
/* EEH PE for PHB */
eeh_phb_pe_create(phb);
}
-
-/**
- * eeh_dev_phb_init - Create EEH devices for devices included in existing PHBs
- *
- * Scan all the existing PHBs and create EEH devices for their OF
- * nodes and their children OF nodes
- */
-static int __init eeh_dev_phb_init(void)
-{
- struct pci_controller *phb, *tmp;
-
- list_for_each_entry_safe(phb, tmp, &hose_list, list_node)
- eeh_dev_phb_init_dynamic(phb);
-
- return 0;
-}
-
-core_initcall(eeh_dev_phb_init);
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 660ed39e9c9a..b8d4f07f332c 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -131,7 +131,7 @@ static void flush_tmregs_to_thread(struct task_struct *tsk)
* in the appropriate thread structures from live.
*/

- if (tsk != current)
+ if ((!cpu_has_feature(CPU_FTR_TM)) || (tsk != current))
return;

if (MSR_TM_SUSPENDED(mfmsr())) {
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 359c79cdf0cc..9ecd9aea0b54 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -4187,11 +4187,13 @@ static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
if ((cfg->process_table & PRTS_MASK) > 24)
return -EINVAL;

+ mutex_lock(&kvm->lock);
kvm->arch.process_table = cfg->process_table;
kvmppc_setup_partition_table(kvm);

lpcr = (cfg->flags & KVM_PPC_MMUV3_GTSE) ? LPCR_GTSE : 0;
kvmppc_update_lpcr(kvm, lpcr, LPCR_GTSE);
+ mutex_unlock(&kvm->lock);

return 0;
}
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xive.c b/arch/powerpc/kvm/book3s_hv_rm_xive.c
index abf5f01b6eb1..5b81a807d742 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xive.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xive.c
@@ -38,7 +38,6 @@ static inline void __iomem *get_tima_phys(void)
#define __x_tima get_tima_phys()
#define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_page))
#define __x_trig_page(xd) ((void __iomem *)((xd)->trig_page))
-#define __x_readb __raw_rm_readb
#define __x_writeb __raw_rm_writeb
#define __x_readw __raw_rm_readw
#define __x_readq __raw_rm_readq
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 9c9c983b864f..dc58c2a560f9 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -765,6 +765,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
+ /*
+ * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
+ */
bl kvmppc_restore_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
@@ -1623,6 +1626,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
+ /*
+ * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
+ */
bl kvmppc_save_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
@@ -1742,7 +1748,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
/*
* Are we running hash or radix ?
*/
- beq cr2,3f
+ ld r5, VCPU_KVM(r9)
+ lbz r0, KVM_RADIX(r5)
+ cmpwi cr2, r0, 0
+ beq cr2, 3f

/* Radix: Handle the case where the guest used an illegal PID */
LOAD_REG_ADDR(r4, mmu_base_pid)
@@ -2459,6 +2468,9 @@ _GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
+ /*
+ * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
+ */
ld r9, HSTATE_KVM_VCPU(r13)
bl kvmppc_save_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
@@ -2569,6 +2581,9 @@ kvm_end_cede:

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
+ /*
+ * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
+ */
bl kvmppc_restore_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index 08b200a0bbce..13304622ab1c 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -48,7 +48,6 @@
#define __x_tima xive_tima
#define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_mmio))
#define __x_trig_page(xd) ((void __iomem *)((xd)->trig_mmio))
-#define __x_readb __raw_readb
#define __x_writeb __raw_writeb
#define __x_readw __raw_readw
#define __x_readq __raw_readq
diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c
index d1ed2c41b5d2..c7a5deadd1cc 100644
--- a/arch/powerpc/kvm/book3s_xive_template.c
+++ b/arch/powerpc/kvm/book3s_xive_template.c
@@ -28,7 +28,8 @@ static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
* bit.
*/
if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
- u8 pipr = __x_readb(__x_tima + TM_QW1_OS + TM_PIPR);
+ __be64 qw1 = __x_readq(__x_tima + TM_QW1_OS);
+ u8 pipr = be64_to_cpu(qw1) & 0xff;
if (pipr >= xc->hw_cppr)
return;
}
@@ -336,7 +337,6 @@ X_STATIC unsigned long GLUE(X_PFX,h_ipoll)(struct kvm_vcpu *vcpu, unsigned long
struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
u8 pending = xc->pending;
u32 hirq;
- u8 pipr;

pr_devel("H_IPOLL(server=%ld)\n", server);

@@ -353,7 +353,8 @@ X_STATIC unsigned long GLUE(X_PFX,h_ipoll)(struct kvm_vcpu *vcpu, unsigned long
pending = 0xff;
} else {
/* Grab pending interrupt if any */
- pipr = __x_readb(__x_tima + TM_QW1_OS + TM_PIPR);
+ __be64 qw1 = __x_readq(__x_tima + TM_QW1_OS);
+ u8 pipr = be64_to_cpu(qw1) & 0xff;
if (pipr < 8)
pending |= 1 << pipr;
}
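
Both xive_template.c hunks stop using a byte load (__x_readb) for the PIPR field and instead fetch the whole 64-bit TM_QW1_OS word and mask out the low byte, evidently because the XIVE thread-management area cannot be read reliably with byte-sized accesses at these offsets on POWER9 DD1. A small userspace sketch of just the extract step, assuming a big-endian register image with PIPR in the lowest-order byte:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Raw bytes of the OS queue word as the hardware presents them
         * (big-endian); the PIPR field sits in the lowest-order byte. */
        uint8_t raw[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf7 };

        uint64_t qw1 = 0;
        for (int i = 0; i < 8; i++)          /* be64_to_cpu by hand */
            qw1 = (qw1 << 8) | raw[i];

        uint8_t pipr = qw1 & 0xff;           /* extract PIPR */
        printf("pipr = 0x%02x\n", pipr);
        return 0;
    }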
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index 2da4851eff99..37f622444a04 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -226,8 +226,10 @@ static int add_dt_node(__be32 parent_phandle, __be32 drc_index)
return -ENOENT;

dn = dlpar_configure_connector(drc_index, parent_dn);
- if (!dn)
+ if (!dn) {
+ of_node_put(parent_dn);
return -ENOENT;
+ }

rc = dlpar_attach_node(dn);
if (rc)
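
The mobility.c hunk fixes a device-node reference leak: add_dt_node() holds a reference on parent_dn, and the early return for a failed dlpar_configure_connector() skipped the of_node_put(). A generic userspace sketch of the rule that every exit path must drop what it acquired; refget/refput and struct node are invented stand-ins for of_node_get/of_node_put:

    #include <stdio.h>

    struct node { int refs; };

    static struct node *refget(struct node *n) { n->refs++; return n; }
    static void refput(struct node *n) { n->refs--; }

    static int add_child(struct node *parent)
    {
        struct node *parentref = refget(parent); /* reference we now own */
        struct node *child = NULL;               /* pretend the lookup failed */

        if (!child) {
            refput(parentref);  /* the fix: drop our reference on every exit */
            return -1;
        }

        /* ... attach child, then drop parentref on the success path too ... */
        refput(parentref);
        return 0;
    }

    int main(void)
    {
        struct node parent = { .refs = 1 };
        add_child(&parent);
        printf("refs after failed add: %d\n", parent.refs); /* still 1 */
        return 0;
    }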
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 65ab11d654e1..80c1583d033f 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1462,7 +1462,9 @@ static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
static inline void pmdp_invalidate(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp)
{
- pmdp_xchg_direct(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
+ pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
+
+ pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index c1bf75ffb875..7e1e40323b78 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -823,9 +823,12 @@ static int cpumsf_pmu_event_init(struct perf_event *event)
}

/* Check online status of the CPU to which the event is pinned */
- if ((unsigned int)event->cpu >= nr_cpumask_bits ||
- (event->cpu >= 0 && !cpu_online(event->cpu)))
- return -ENODEV;
+ if (event->cpu >= 0) {
+ if ((unsigned int)event->cpu >= nr_cpumask_bits)
+ return -ENODEV;
+ if (!cpu_online(event->cpu))
+ return -ENODEV;
+ }

/* Force reset of idle/hv excludes regardless of what the
* user requested.
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 8ecc25e760fa..98ffe3ee9411 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -56,13 +56,12 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
unsigned long end, int write, struct page **pages, int *nr)
{
- unsigned long mask, result;
struct page *head, *page;
+ unsigned long mask;
int refs;

- result = write ? 0 : _SEGMENT_ENTRY_PROTECT;
- mask = result | _SEGMENT_ENTRY_INVALID;
- if ((pmd_val(pmd) & mask) != result)
+ mask = (write ? _SEGMENT_ENTRY_PROTECT : 0) | _SEGMENT_ENTRY_INVALID;
+ if ((pmd_val(pmd) & mask) != 0)
return 0;
VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));

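The s390 gup_huge_pmd() rewrite fixes the permission test. The old (pmd_val(pmd) & mask) != result form effectively required the protection bit to be *set* for read-only gets, so an ordinary writable huge page failed a read lookup. The new test only rejects invalid entries, plus write-protected entries when a write is requested. A sketch with invented bit values:

    #include <stdio.h>

    #define ENTRY_INVALID  0x1UL
    #define ENTRY_PROTECT  0x2UL   /* write-protected */

    /* An entry is usable unless it is invalid, or we want to write and it
     * is write-protected. A writable entry now also passes a read-only
     * lookup, which the old (pmd & mask) == result test refused. */
    static int entry_usable(unsigned long pmd, int write)
    {
        unsigned long mask = (write ? ENTRY_PROTECT : 0) | ENTRY_INVALID;
        return (pmd & mask) == 0;
    }

    int main(void)
    {
        unsigned long writable = 0x1000;                 /* neither bit set */
        unsigned long readonly = 0x1000 | ENTRY_PROTECT;

        printf("read  from writable: %d\n", entry_usable(writable, 0)); /* 1 */
        printf("read  from readonly: %d\n", entry_usable(readonly, 0)); /* 1 */
        printf("write to  readonly:  %d\n", entry_usable(readonly, 1)); /* 0 */
        return 0;
    }
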
diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c
index b188b16841e3..8ab1a1f4d1c1 100644
--- a/arch/x86/kernel/fpu/regset.c
+++ b/arch/x86/kernel/fpu/regset.c
@@ -131,11 +131,16 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,

fpu__activate_fpstate_write(fpu);

- if (boot_cpu_has(X86_FEATURE_XSAVES))
+ if (boot_cpu_has(X86_FEATURE_XSAVES)) {
ret = copyin_to_xsaves(kbuf, ubuf, xsave);
- else
+ } else {
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);

+ /* xcomp_bv must be 0 when using uncompacted format */
+ if (!ret && xsave->header.xcomp_bv)
+ ret = -EINVAL;
+ }
+
/*
* In case of failure, mark all states as init:
*/
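
The regset.c hunk (and the matching signal.c hunk just below) validate the user-supplied XSAVE header: when the kernel uses the standard (uncompacted) format, header.xcomp_bv must be zero, otherwise XRSTOR would interpret the buffer as compacted, which a malicious caller could use to corrupt or leak FPU state. A userspace sketch of the check; the header layout follows the SDM, with bit 63 of xcomp_bv flagging a compacted buffer:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* XSAVE header layout per the SDM: xstate_bv, xcomp_bv, reserved. */
    struct xstate_header {
        uint64_t xstate_bv;
        uint64_t xcomp_bv;
        uint64_t reserved[6];
    };

    /* Accept a user-supplied standard-format buffer only if xcomp_bv == 0. */
    static int xstate_header_ok(const struct xstate_header *hdr)
    {
        return hdr->xcomp_bv == 0;
    }

    int main(void)
    {
        struct xstate_header hdr;
        memset(&hdr, 0, sizeof(hdr));
        printf("standard buffer: %s\n", xstate_header_ok(&hdr) ? "ok" : "reject");

        hdr.xcomp_bv = 1ULL << 63; /* claims compacted format */
        printf("compacted claim: %s\n", xstate_header_ok(&hdr) ? "ok" : "reject");
        return 0;
    }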
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index 83c23c230b4c..3a9318610c4d 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -329,6 +329,10 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
} else {
err = __copy_from_user(&fpu->state.xsave,
buf_fx, state_size);
+
+ /* xcomp_bv must be 0 when using uncompacted format */
+ if (!err && state_size > offsetof(struct xregs_state, header) && fpu->state.xsave.header.xcomp_bv)
+ err = -EINVAL;
}

if (err || __copy_from_user(&env, buf, sizeof(env))) {
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index d04e30e3c0ff..58590a698a1a 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -140,7 +140,8 @@ void kvm_async_pf_task_wait(u32 token)

n.token = token;
n.cpu = smp_processor_id();
- n.halted = is_idle_task(current) || preempt_count() > 1;
+ n.halted = is_idle_task(current) || preempt_count() > 1 ||
+ rcu_preempt_depth();
init_swait_queue_head(&n.wq);
hlist_add_head(&n.link, &b->list);
raw_spin_unlock(&b->lock);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c6ef2940119b..95796e2efc38 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -200,6 +200,8 @@ struct loaded_vmcs {
int cpu;
bool launched;
bool nmi_known_unmasked;
+ unsigned long vmcs_host_cr3; /* May not match real cr3 */
+ unsigned long vmcs_host_cr4; /* May not match real cr4 */
struct list_head loaded_vmcss_on_cpu_link;
};

@@ -595,8 +597,6 @@ struct vcpu_vmx {
int gs_ldt_reload_needed;
int fs_reload_needed;
u64 msr_host_bndcfgs;
- unsigned long vmcs_host_cr3; /* May not match real cr3 */
- unsigned long vmcs_host_cr4; /* May not match real cr4 */
} host_state;
struct {
int vm86_active;
@@ -2187,46 +2187,44 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
struct pi_desc old, new;
unsigned int dest;

- if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
- !irq_remapping_cap(IRQ_POSTING_CAP) ||
- !kvm_vcpu_apicv_active(vcpu))
+ /*
+ * In case of hot-plug or hot-unplug, we may have to undo
+ * vmx_vcpu_pi_put even if there is no assigned device. And we
+ * always keep PI.NDST up to date for simplicity: it makes the
+ * code easier, and CPU migration is not a fast path.
+ */
+ if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
+ return;
+
+ /*
+ * First handle the simple case where no cmpxchg is necessary; just
+ * allow posting non-urgent interrupts.
+ *
+ * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
+ * PI.NDST: pi_post_block will do it for us and the wakeup_handler
+ * expects the VCPU to be on the blocked_vcpu_list that matches
+ * PI.NDST.
+ */
+ if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR ||
+ vcpu->cpu == cpu) {
+ pi_clear_sn(pi_desc);
return;
+ }

+ /* The full case. */
do {
old.control = new.control = pi_desc->control;

- /*
- * If 'nv' field is POSTED_INTR_WAKEUP_VECTOR, there
- * are two possible cases:
- * 1. After running 'pre_block', context switch
- * happened. For this case, 'sn' was set in
- * vmx_vcpu_put(), so we need to clear it here.
- * 2. After running 'pre_block', we were blocked,
- * and woken up by some other guy. For this case,
- * we don't need to do anything, 'pi_post_block'
- * will do everything for us. However, we cannot
- * check whether it is case #1 or case #2 here
- * (maybe, not needed), so we also clear sn here,
- * I think it is not a big deal.
- */
- if (pi_desc->nv != POSTED_INTR_WAKEUP_VECTOR) {
- if (vcpu->cpu != cpu) {
- dest = cpu_physical_id(cpu);
-
- if (x2apic_enabled())
- new.ndst = dest;
- else
- new.ndst = (dest << 8) & 0xFF00;
- }
+ dest = cpu_physical_id(cpu);

- /* set 'NV' to 'notification vector' */
- new.nv = POSTED_INTR_VECTOR;
- }
+ if (x2apic_enabled())
+ new.ndst = dest;
+ else
+ new.ndst = (dest << 8) & 0xFF00;

- /* Allow posting non-urgent interrupts */
new.sn = 0;
- } while (cmpxchg(&pi_desc->control, old.control,
- new.control) != old.control);
+ } while (cmpxchg64(&pi_desc->control, old.control,
+ new.control) != old.control);
}

static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
@@ -5048,21 +5046,30 @@ static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
int pi_vec = nested ? POSTED_INTR_NESTED_VECTOR : POSTED_INTR_VECTOR;

if (vcpu->mode == IN_GUEST_MODE) {
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
/*
- * Currently, we don't support urgent interrupt,
- * all interrupts are recognized as non-urgent
- * interrupt, so we cannot post interrupts when
- * 'SN' is set.
+ * The vector of interrupt to be delivered to vcpu had
+ * been set in PIR before this function.
*
- * If the vcpu is in guest mode, it means it is
- * running instead of being scheduled out and
- * waiting in the run queue, and that's the only
- * case when 'SN' is set currently, warning if
- * 'SN' is set.
+ * Following cases will be reached in this block, and
+ * we always send a notification event in all cases as
+ * explained below.
+ *
+ * Case 1: vcpu keeps in non-root mode. Sending a
+ * notification event posts the interrupt to vcpu.
+ *
+ * Case 2: vcpu exits to root mode and is still
+ * runnable. PIR will be synced to vIRR before the
+ * next vcpu entry. Sending a notification event in
+ * this case has no effect, as vcpu is not in root
+ * mode.
+ *
+ * Case 3: vcpu exits to root mode and is blocked.
+ * vcpu_block() has already synced PIR to vIRR and
+ * never blocks vcpu if vIRR is not cleared. Therefore,
+ * a blocked vcpu here does not wait for any requested
+ * interrupts in PIR, and sending a notification event
+ * which has no effect is safe here.
*/
- WARN_ON_ONCE(pi_test_sn(&vmx->pi_desc));

apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
return true;
@@ -5140,12 +5147,12 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
*/
cr3 = __read_cr3();
vmcs_writel(HOST_CR3, cr3); /* 22.2.3 FIXME: shadow tables */
- vmx->host_state.vmcs_host_cr3 = cr3;
+ vmx->loaded_vmcs->vmcs_host_cr3 = cr3;

/* Save the most likely value for this task's CR4 in the VMCS. */
cr4 = cr4_read_shadow();
vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */
- vmx->host_state.vmcs_host_cr4 = cr4;
+ vmx->loaded_vmcs->vmcs_host_cr4 = cr4;

vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
#ifdef CONFIG_X86_64
@@ -8994,15 +9001,15 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);

cr3 = __get_current_cr3_fast();
- if (unlikely(cr3 != vmx->host_state.vmcs_host_cr3)) {
+ if (unlikely(cr3 != vmx->loaded_vmcs->vmcs_host_cr3)) {
vmcs_writel(HOST_CR3, cr3);
- vmx->host_state.vmcs_host_cr3 = cr3;
+ vmx->loaded_vmcs->vmcs_host_cr3 = cr3;
}

cr4 = cr4_read_shadow();
- if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) {
+ if (unlikely(cr4 != vmx->loaded_vmcs->vmcs_host_cr4)) {
vmcs_writel(HOST_CR4, cr4);
- vmx->host_state.vmcs_host_cr4 = cr4;
+ vmx->loaded_vmcs->vmcs_host_cr4 = cr4;
}

/* When single-stepping over STI and MOV SS, we must clear the
@@ -9310,6 +9317,13 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)

vmx->msr_ia32_feature_control_valid_bits = FEATURE_CONTROL_LOCKED;

+ /*
+ * Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR
+ * or POSTED_INTR_WAKEUP_VECTOR.
+ */
+ vmx->pi_desc.nv = POSTED_INTR_VECTOR;
+ vmx->pi_desc.sn = 1;
+
return &vmx->vcpu;

free_vmcs:
@@ -10266,6 +10280,11 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
if (exec_control & CPU_BASED_TPR_SHADOW) {
vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull);
vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
+ } else {
+#ifdef CONFIG_X86_64
+ exec_control |= CPU_BASED_CR8_LOAD_EXITING |
+ CPU_BASED_CR8_STORE_EXITING;
+#endif
}

/*
@@ -11389,6 +11408,37 @@ static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask);
}

+static void __pi_post_block(struct kvm_vcpu *vcpu)
+{
+ struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
+ struct pi_desc old, new;
+ unsigned int dest;
+
+ do {
+ old.control = new.control = pi_desc->control;
+ WARN(old.nv != POSTED_INTR_WAKEUP_VECTOR,
+ "Wakeup handler not enabled while the VCPU is blocked\n");
+
+ dest = cpu_physical_id(vcpu->cpu);
+
+ if (x2apic_enabled())
+ new.ndst = dest;
+ else
+ new.ndst = (dest << 8) & 0xFF00;
+
+ /* set 'NV' to 'notification vector' */
+ new.nv = POSTED_INTR_VECTOR;
+ } while (cmpxchg64(&pi_desc->control, old.control,
+ new.control) != old.control);
+
+ if (!WARN_ON_ONCE(vcpu->pre_pcpu == -1)) {
+ spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
+ list_del(&vcpu->blocked_vcpu_list);
+ spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
+ vcpu->pre_pcpu = -1;
+ }
+}
+
/*
* This routine does the following things for vCPU which is going
* to be blocked if VT-d PI is enabled.
@@ -11404,7 +11454,6 @@ static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
*/
static int pi_pre_block(struct kvm_vcpu *vcpu)
{
- unsigned long flags;
unsigned int dest;
struct pi_desc old, new;
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
@@ -11414,34 +11463,20 @@ static int pi_pre_block(struct kvm_vcpu *vcpu)
!kvm_vcpu_apicv_active(vcpu))
return 0;

- vcpu->pre_pcpu = vcpu->cpu;
- spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock,
- vcpu->pre_pcpu), flags);
- list_add_tail(&vcpu->blocked_vcpu_list,
- &per_cpu(blocked_vcpu_on_cpu,
- vcpu->pre_pcpu));
- spin_unlock_irqrestore(&per_cpu(blocked_vcpu_on_cpu_lock,
- vcpu->pre_pcpu), flags);
+ WARN_ON(irqs_disabled());
+ local_irq_disable();
+ if (!WARN_ON_ONCE(vcpu->pre_pcpu != -1)) {
+ vcpu->pre_pcpu = vcpu->cpu;
+ spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
+ list_add_tail(&vcpu->blocked_vcpu_list,
+ &per_cpu(blocked_vcpu_on_cpu,
+ vcpu->pre_pcpu));
+ spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
+ }

do {
old.control = new.control = pi_desc->control;

- /*
- * We should not block the vCPU if
- * an interrupt is posted for it.
- */
- if (pi_test_on(pi_desc) == 1) {
- spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock,
- vcpu->pre_pcpu), flags);
- list_del(&vcpu->blocked_vcpu_list);
- spin_unlock_irqrestore(
- &per_cpu(blocked_vcpu_on_cpu_lock,
- vcpu->pre_pcpu), flags);
- vcpu->pre_pcpu = -1;
-
- return 1;
- }
-
WARN((pi_desc->sn == 1),
"Warning: SN field of posted-interrupts "
"is set before blocking\n");
@@ -11463,10 +11498,15 @@ static int pi_pre_block(struct kvm_vcpu *vcpu)

/* set 'NV' to 'wakeup vector' */
new.nv = POSTED_INTR_WAKEUP_VECTOR;
- } while (cmpxchg(&pi_desc->control, old.control,
- new.control) != old.control);
+ } while (cmpxchg64(&pi_desc->control, old.control,
+ new.control) != old.control);

- return 0;
+ /* We should not block the vCPU if an interrupt is posted for it. */
+ if (pi_test_on(pi_desc) == 1)
+ __pi_post_block(vcpu);
+
+ local_irq_enable();
+ return (vcpu->pre_pcpu == -1);
}

static int vmx_pre_block(struct kvm_vcpu *vcpu)
@@ -11482,44 +11522,13 @@ static int vmx_pre_block(struct kvm_vcpu *vcpu)

static void pi_post_block(struct kvm_vcpu *vcpu)
{
- struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
- struct pi_desc old, new;
- unsigned int dest;
- unsigned long flags;
-
- if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
- !irq_remapping_cap(IRQ_POSTING_CAP) ||
- !kvm_vcpu_apicv_active(vcpu))
+ if (vcpu->pre_pcpu == -1)
return;

- do {
- old.control = new.control = pi_desc->control;
-
- dest = cpu_physical_id(vcpu->cpu);
-
- if (x2apic_enabled())
- new.ndst = dest;
- else
- new.ndst = (dest << 8) & 0xFF00;
-
- /* Allow posting non-urgent interrupts */
- new.sn = 0;
-
- /* set 'NV' to 'notification vector' */
- new.nv = POSTED_INTR_VECTOR;
- } while (cmpxchg(&pi_desc->control, old.control,
- new.control) != old.control);
-
- if(vcpu->pre_pcpu != -1) {
- spin_lock_irqsave(
- &per_cpu(blocked_vcpu_on_cpu_lock,
- vcpu->pre_pcpu), flags);
- list_del(&vcpu->blocked_vcpu_list);
- spin_unlock_irqrestore(
- &per_cpu(blocked_vcpu_on_cpu_lock,
- vcpu->pre_pcpu), flags);
- vcpu->pre_pcpu = -1;
- }
+ WARN_ON(irqs_disabled());
+ local_irq_disable();
+ __pi_post_block(vcpu);
+ local_irq_enable();
}

static void vmx_post_block(struct kvm_vcpu *vcpu)
@@ -11547,7 +11556,7 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
struct kvm_lapic_irq irq;
struct kvm_vcpu *vcpu;
struct vcpu_data vcpu_info;
- int idx, ret = -EINVAL;
+ int idx, ret = 0;

if (!kvm_arch_has_assigned_device(kvm) ||
!irq_remapping_cap(IRQ_POSTING_CAP) ||
@@ -11556,7 +11565,12 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,

idx = srcu_read_lock(&kvm->irq_srcu);
irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
- BUG_ON(guest_irq >= irq_rt->nr_rt_entries);
+ if (guest_irq >= irq_rt->nr_rt_entries ||
+ hlist_empty(&irq_rt->map[guest_irq])) {
+ pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n",
+ guest_irq, irq_rt->nr_rt_entries);
+ goto out;
+ }

hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
if (e->type != KVM_IRQ_ROUTING_MSI)
@@ -11599,12 +11613,8 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,

if (set)
ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
- else {
- /* suppress notification event before unposting */
- pi_set_sn(vcpu_to_pi_desc(vcpu));
+ else
ret = irq_set_vcpu_affinity(host_irq, NULL);
- pi_clear_sn(vcpu_to_pi_desc(vcpu));
- }

if (ret < 0) {
printk(KERN_INFO "%s: failed to update PI IRTE\n",
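
Several of the vmx.c hunks above keep the same update shape for the 64-bit posted-interrupt control word: snapshot it, compute the new value, and retry with a compare-and-swap (now cmpxchg64) until no concurrent writer has raced in between. A minimal userspace sketch of that loop using C11 atomics in place of the kernel's cmpxchg64(); the field layout here is invented for illustration:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Invented 64-bit control word: low 32 bits "ndst", bit 32 "sn". */
    static _Atomic uint64_t control;

    static void pi_set_ndst(uint32_t dest)
    {
        uint64_t old, new;

        old = atomic_load(&control);
        do {
            new = (old & ~0x1FFFFFFFFULL)   /* keep unrelated high bits */
                | dest;                     /* new destination, sn cleared */
        } while (!atomic_compare_exchange_weak(&control, &old, new));
        /* On failure, old is refreshed with the current value and we retry. */
    }

    int main(void)
    {
        atomic_store(&control, (1ULL << 32) | 7); /* sn set, ndst = 7 */
        pi_set_ndst(42);
        printf("control = %#llx\n",
               (unsigned long long)atomic_load(&control));
        return 0;
    }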
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 2a1fa10c6a98..955be01dd9cc 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -192,8 +192,7 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
* 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really
* faulted on a pte with its pkey=4.
*/
-static void fill_sig_info_pkey(int si_code, siginfo_t *info,
- struct vm_area_struct *vma)
+static void fill_sig_info_pkey(int si_code, siginfo_t *info, u32 *pkey)
{
/* This is effectively an #ifdef */
if (!boot_cpu_has(X86_FEATURE_OSPKE))
@@ -209,7 +208,7 @@ static void fill_sig_info_pkey(int si_code, siginfo_t *info,
* valid VMA, so we should never reach this without a
* valid VMA.
*/
- if (!vma) {
+ if (!pkey) {
WARN_ONCE(1, "PKU fault with no VMA passed in");
info->si_pkey = 0;
return;
@@ -219,13 +218,12 @@ static void fill_sig_info_pkey(int si_code, siginfo_t *info,
* absolutely guranteed to be 100% accurate because of
* the race explained above.
*/
- info->si_pkey = vma_pkey(vma);
+ info->si_pkey = *pkey;
}

static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
- struct task_struct *tsk, struct vm_area_struct *vma,
- int fault)
+ struct task_struct *tsk, u32 *pkey, int fault)
{
unsigned lsb = 0;
siginfo_t info;
@@ -240,7 +238,7 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
lsb = PAGE_SHIFT;
info.si_addr_lsb = lsb;

- fill_sig_info_pkey(si_code, &info, vma);
+ fill_sig_info_pkey(si_code, &info, pkey);

force_sig_info(si_signo, &info, tsk);
}
@@ -758,8 +756,6 @@ no_context(struct pt_regs *regs, unsigned long error_code,
struct task_struct *tsk = current;
unsigned long flags;
int sig;
- /* No context means no VMA to pass down */
- struct vm_area_struct *vma = NULL;

/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs, X86_TRAP_PF)) {
@@ -784,7 +780,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,

/* XXX: hwpoison faults will set the wrong code. */
force_sig_info_fault(signal, si_code, address,
- tsk, vma, 0);
+ tsk, NULL, 0);
}

/*
@@ -893,8 +889,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code,

static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
- unsigned long address, struct vm_area_struct *vma,
- int si_code)
+ unsigned long address, u32 *pkey, int si_code)
{
struct task_struct *tsk = current;

@@ -942,7 +937,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
tsk->thread.error_code = error_code;
tsk->thread.trap_nr = X86_TRAP_PF;

- force_sig_info_fault(SIGSEGV, si_code, address, tsk, vma, 0);
+ force_sig_info_fault(SIGSEGV, si_code, address, tsk, pkey, 0);

return;
}
@@ -955,9 +950,9 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
- unsigned long address, struct vm_area_struct *vma)
+ unsigned long address, u32 *pkey)
{
- __bad_area_nosemaphore(regs, error_code, address, vma, SEGV_MAPERR);
+ __bad_area_nosemaphore(regs, error_code, address, pkey, SEGV_MAPERR);
}

static void
@@ -965,6 +960,10 @@ __bad_area(struct pt_regs *regs, unsigned long error_code,
unsigned long address, struct vm_area_struct *vma, int si_code)
{
struct mm_struct *mm = current->mm;
+ u32 pkey;
+
+ if (vma)
+ pkey = vma_pkey(vma);

/*
* Something tried to access memory that isn't in our memory map..
@@ -972,7 +971,8 @@ __bad_area(struct pt_regs *regs, unsigned long error_code,
*/
up_read(&mm->mmap_sem);

- __bad_area_nosemaphore(regs, error_code, address, vma, si_code);
+ __bad_area_nosemaphore(regs, error_code, address,
+ (vma) ? &pkey : NULL, si_code);
}

static noinline void
@@ -1015,7 +1015,7 @@ bad_area_access_error(struct pt_regs *regs, unsigned long error_code,

static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
- struct vm_area_struct *vma, unsigned int fault)
+ u32 *pkey, unsigned int fault)
{
struct task_struct *tsk = current;
int code = BUS_ADRERR;
@@ -1042,13 +1042,12 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
code = BUS_MCEERR_AR;
}
#endif
- force_sig_info_fault(SIGBUS, code, address, tsk, vma, fault);
+ force_sig_info_fault(SIGBUS, code, address, tsk, pkey, fault);
}

static noinline void
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
- unsigned long address, struct vm_area_struct *vma,
- unsigned int fault)
+ unsigned long address, u32 *pkey, unsigned int fault)
{
if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
no_context(regs, error_code, address, 0, 0);
@@ -1072,9 +1071,9 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
} else {
if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
VM_FAULT_HWPOISON_LARGE))
- do_sigbus(regs, error_code, address, vma, fault);
+ do_sigbus(regs, error_code, address, pkey, fault);
else if (fault & VM_FAULT_SIGSEGV)
- bad_area_nosemaphore(regs, error_code, address, vma);
+ bad_area_nosemaphore(regs, error_code, address, pkey);
else
BUG();
}
@@ -1268,6 +1267,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
struct mm_struct *mm;
int fault, major = 0;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ u32 pkey;

tsk = current;
mm = tsk->mm;
@@ -1468,9 +1468,10 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
return;
}

+ pkey = vma_pkey(vma);
up_read(&mm->mmap_sem);
if (unlikely(fault & VM_FAULT_ERROR)) {
- mm_fault_error(regs, error_code, address, vma, fault);
+ mm_fault_error(regs, error_code, address, &pkey, fault);
return;
}

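The whole fault.c series above exists because a vm_area_struct pointer is only guaranteed valid while mmap_sem is held; the signal paths run after up_read(), so they now receive a u32 pkey snapshot taken under the lock instead of the vma pointer itself. A generic userspace sketch of the snapshot-before-unlock pattern with pthreads; the lock and structure names are illustrative:

    #include <pthread.h>
    #include <stdio.h>

    struct region { int pkey; };

    static pthread_rwlock_t maplock = PTHREAD_RWLOCK_INITIALIZER;
    static struct region shared = { .pkey = 4 };

    static void report(int pkey)
    {
        printf("fault in region with pkey %d\n", pkey);
    }

    static void handle_fault(void)
    {
        pthread_rwlock_rdlock(&maplock);
        struct region *r = &shared;   /* only valid while the lock is held */
        int pkey = r->pkey;           /* snapshot what we need... */
        pthread_rwlock_unlock(&maplock);
        /* ...so nothing dereferences r after the unlock. */
        report(pkey);
    }

    int main(void)
    {
        handle_fault();
        return 0;
    }
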
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index dd56d7460cb9..c587c71d78af 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -154,7 +154,6 @@ static int bsg_prepare_job(struct device *dev, struct request *req)
failjob_rls_rqst_payload:
kfree(job->request_payload.sg_list);
failjob_rls_job:
- kfree(job);
return -ENOMEM;
}

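The bsg-lib hunk drops kfree(job) from the error path: the job structure is allocated as part of the request itself, so freeing it separately produced a double free once the request was torn down. A small userspace sketch of the ownership rule for embedded objects; all names here are invented:

    #include <stdlib.h>

    struct request { char pdu[64]; };          /* the job lives inside this */
    struct job { void *sg_list; };

    static int prepare_job(struct request *rq)
    {
        struct job *job = (struct job *)rq->pdu; /* embedded, not malloc'd */

        job->sg_list = malloc(16);
        if (!job->sg_list)
            return -1;   /* free only what we allocated; never free(job),
                          * since it is owned by (and freed with) rq */
        return 0;
    }

    int main(void)
    {
        struct request *rq = calloc(1, sizeof(*rq));
        if (rq && prepare_job(rq) == 0)
            free(((struct job *)rq->pdu)->sg_list);
        free(rq);        /* releases the embedded job too */
        return 0;
    }
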
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 633a88e93ab0..70018397e59a 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1133,10 +1133,10 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg)
{
if (!drbg)
return;
- kzfree(drbg->V);
- drbg->Vbuf = NULL;
- kzfree(drbg->C);
- drbg->Cbuf = NULL;
+ kzfree(drbg->Vbuf);
+ drbg->V = NULL;
+ kzfree(drbg->Cbuf);
+ drbg->C = NULL;
kzfree(drbg->scratchpadbuf);
drbg->scratchpadbuf = NULL;
drbg->reseed_ctr = 0;
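
In the DRBG, V and C are aligned aliases into the larger Vbuf and Cbuf allocations; passing the alias to kzfree() frees an address the allocator never handed out. The fix frees the *buf pointers and merely NULLs the aliases. A userspace sketch of the same aligned-alias idiom:

    #include <stdint.h>
    #include <stdlib.h>

    #define ALIGN_UP(p, a) \
        ((void *)(((uintptr_t)(p) + (a) - 1) & ~(uintptr_t)((a) - 1)))

    struct state {
        unsigned char *Vbuf; /* what malloc returned: the only freeable pointer */
        unsigned char *V;    /* Vbuf rounded up to a 64-byte boundary */
    };

    int main(void)
    {
        struct state s;

        s.Vbuf = malloc(128 + 63);
        if (!s.Vbuf)
            return 1;
        s.V = ALIGN_UP(s.Vbuf, 64);

        /* ... use s.V ... */

        free(s.Vbuf);   /* correct: free the allocation, not the alias */
        s.V = NULL;     /* the alias just gets cleared */
        return 0;
    }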
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index c99f8730de82..6ce97fc6d22c 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1835,10 +1835,13 @@ void device_pm_check_callbacks(struct device *dev)
{
spin_lock_irq(&dev->power.lock);
dev->power.no_pm_callbacks =
- (!dev->bus || pm_ops_is_empty(dev->bus->pm)) &&
- (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
+ (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
+ !dev->bus->suspend && !dev->bus->resume)) &&
+ (!dev->class || (pm_ops_is_empty(dev->class->pm) &&
+ !dev->class->suspend && !dev->class->resume)) &&
(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
- (!dev->driver || pm_ops_is_empty(dev->driver->pm));
+ (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
+ !dev->driver->suspend && !dev->driver->resume));
spin_unlock_irq(&dev->power.lock);
}
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index a8cc14fd8ae4..a6de32530693 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -1581,6 +1581,9 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,

opp->available = availability_req;

+ dev_pm_opp_get(opp);
+ mutex_unlock(&opp_table->lock);
+
/* Notify the change of the OPP availability */
if (availability_req)
blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE,
@@ -1589,8 +1592,12 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
blocking_notifier_call_chain(&opp_table->head,
OPP_EVENT_DISABLE, opp);

+ dev_pm_opp_put(opp);
+ goto put_table;
+
unlock:
mutex_unlock(&opp_table->lock);
+put_table:
dev_pm_opp_put_opp_table(opp_table);
return r;
}
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 104b71c0490d..b7dce4e3f5ff 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -339,7 +339,7 @@ static long __brd_direct_access(struct brd_device *brd, pgoff_t pgoff,

if (!brd)
return -ENODEV;
- page = brd_insert_page(brd, PFN_PHYS(pgoff) / 512);
+ page = brd_insert_page(brd, (sector_t)pgoff << PAGE_SECTORS_SHIFT);
if (!page)
return -ENOSPC;
*kaddr = page_address(page);
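
The brd change computes the sector number as (sector_t)pgoff << PAGE_SECTORS_SHIFT instead of PFN_PHYS(pgoff) / 512, widening the page offset to 64 bits before shifting so that large offsets cannot overflow the intermediate type. A sketch of the arithmetic, assuming 4 KiB pages and 512-byte sectors:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT          12
    #define SECTOR_SHIFT        9
    #define PAGE_SECTORS_SHIFT  (PAGE_SHIFT - SECTOR_SHIFT)  /* 8 sectors per page */

    /* Widen before shifting so a large 32-bit page offset cannot overflow. */
    static uint64_t page_to_sector(uint32_t pgoff)
    {
        return (uint64_t)pgoff << PAGE_SECTORS_SHIFT;
    }

    int main(void)
    {
        uint32_t pgoff = 0x00ffffff;   /* roughly 64 GiB into the device */
        printf("sector = %llu\n",
               (unsigned long long)page_to_sector(pgoff));
        return 0;
    }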
1040     diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
1041     index 79791c690858..dff88838dce7 100644
1042     --- a/drivers/crypto/talitos.c
1043     +++ b/drivers/crypto/talitos.c
1044     @@ -1756,9 +1756,9 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1045     req_ctx->swinit = 0;
1046     } else {
1047     desc->ptr[1] = zero_entry;
1048     - /* Indicate next op is not the first. */
1049     - req_ctx->first = 0;
1050     }
1051     + /* Indicate next op is not the first. */
1052     + req_ctx->first = 0;
1053    
1054     /* HMAC key */
1055     if (ctx->keylen)
1056     @@ -1769,7 +1769,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1057    
1058     sg_count = edesc->src_nents ?: 1;
1059     if (is_sec1 && sg_count > 1)
1060     - sg_copy_to_buffer(areq->src, sg_count, edesc->buf, length);
1061     + sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
1062     else
1063     sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
1064     DMA_TO_DEVICE);
1065     @@ -3057,7 +3057,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
1066     t_alg->algt.alg.hash.final = ahash_final;
1067     t_alg->algt.alg.hash.finup = ahash_finup;
1068     t_alg->algt.alg.hash.digest = ahash_digest;
1069     - t_alg->algt.alg.hash.setkey = ahash_setkey;
1070     + if (!strncmp(alg->cra_name, "hmac", 4))
1071     + t_alg->algt.alg.hash.setkey = ahash_setkey;
1072     t_alg->algt.alg.hash.import = ahash_import;
1073     t_alg->algt.alg.hash.export = ahash_export;
1074    
1075     diff --git a/drivers/dax/super.c b/drivers/dax/super.c
1076     index 938eb4868f7f..8b458f1b30c7 100644
1077     --- a/drivers/dax/super.c
1078     +++ b/drivers/dax/super.c
1079     @@ -189,8 +189,10 @@ static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n)
1080     if (!dax_dev)
1081     return 0;
1082    
1083     - if (a == &dev_attr_write_cache.attr && !dax_dev->ops->flush)
1084     +#ifndef CONFIG_ARCH_HAS_PMEM_API
1085     + if (a == &dev_attr_write_cache.attr)
1086     return 0;
1087     +#endif
1088     return a->mode;
1089     }
1090    
1091     @@ -255,18 +257,23 @@ size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
1092     }
1093     EXPORT_SYMBOL_GPL(dax_copy_from_iter);
1094    
1095     -void dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
1096     - size_t size)
1097     +#ifdef CONFIG_ARCH_HAS_PMEM_API
1098     +void arch_wb_cache_pmem(void *addr, size_t size);
1099     +void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
1100     {
1101     - if (!dax_alive(dax_dev))
1102     + if (unlikely(!dax_alive(dax_dev)))
1103     return;
1104    
1105     - if (!test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags))
1106     + if (unlikely(!test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags)))
1107     return;
1108    
1109     - if (dax_dev->ops->flush)
1110     - dax_dev->ops->flush(dax_dev, pgoff, addr, size);
1111     + arch_wb_cache_pmem(addr, size);
1112     }
1113     +#else
1114     +void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
1115     +{
1116     +}
1117     +#endif
1118     EXPORT_SYMBOL_GPL(dax_flush);
1119    
1120     void dax_write_cache(struct dax_device *dax_dev, bool wc)
1121     diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
1122     index 5173ca1fd159..5e371abf3633 100644
1123     --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
1124     +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
1125     @@ -636,7 +636,194 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
1126     NUM_BANKS(ADDR_SURF_2_BANK);
1127     for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
1128     WREG32(mmGB_TILE_MODE0 + reg_offset, tilemode[reg_offset]);
1129     - } else if (adev->asic_type == CHIP_OLAND || adev->asic_type == CHIP_HAINAN) {
1130     + } else if (adev->asic_type == CHIP_OLAND) {
1131     + tilemode[0] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
1132     + ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1133     + PIPE_CONFIG(ADDR_SURF_P4_8x16) |
1134     + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
1135     + NUM_BANKS(ADDR_SURF_16_BANK) |
1136     + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1137     + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1138     + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
1139     + tilemode[1] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
1140     + ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1141     + PIPE_CONFIG(ADDR_SURF_P4_8x16) |
1142     + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
1143     + NUM_BANKS(ADDR_SURF_16_BANK) |
1144     + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1145     + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1146     + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
1147     + tilemode[2] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
1148     + ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1149     + PIPE_CONFIG(ADDR_SURF_P4_8x16) |
1150     + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
1151     + NUM_BANKS(ADDR_SURF_16_BANK) |
1152     + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1153     + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1154     + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
1155     + tilemode[3] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
1156     + ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1157     + PIPE_CONFIG(ADDR_SURF_P4_8x16) |
1158     + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
1159     + NUM_BANKS(ADDR_SURF_16_BANK) |
1160     + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1161     + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1162     + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
1163     + tilemode[4] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
1164     + ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1165     + PIPE_CONFIG(ADDR_SURF_P4_8x16) |
1166     + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
1167     + NUM_BANKS(ADDR_SURF_16_BANK) |
1168     + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1169     + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1170     + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
1171     + tilemode[5] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
1172     + ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1173     + PIPE_CONFIG(ADDR_SURF_P4_8x16) |
1174     + TILE_SPLIT(split_equal_to_row_size) |
1175     + NUM_BANKS(ADDR_SURF_16_BANK) |
1176     + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1177     + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1178     + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
1179     + tilemode[6] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
1180     + ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1181     + PIPE_CONFIG(ADDR_SURF_P4_8x16) |
1182     + TILE_SPLIT(split_equal_to_row_size) |
1183     + NUM_BANKS(ADDR_SURF_16_BANK) |
1184     + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1185     + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1186     + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
1187     + tilemode[7] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
1188     + ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1189     + PIPE_CONFIG(ADDR_SURF_P4_8x16) |
1190     + TILE_SPLIT(split_equal_to_row_size) |
1191     + NUM_BANKS(ADDR_SURF_16_BANK) |
1192     + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1193     + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1194     + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
1195     + tilemode[8] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
1196     + ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
1197     + PIPE_CONFIG(ADDR_SURF_P4_8x16) |
1198     + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
1199     + NUM_BANKS(ADDR_SURF_16_BANK) |
1200     + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1201     + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1202     + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
1203     + tilemode[9] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
1204     + ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1205     + PIPE_CONFIG(ADDR_SURF_P4_8x16) |
1206     + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
1207     + NUM_BANKS(ADDR_SURF_16_BANK) |
1208     + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1209     + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1210     + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
1211     + tilemode[10] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
1212     + ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1213     + PIPE_CONFIG(ADDR_SURF_P4_8x16) |
1214     + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
1215     + NUM_BANKS(ADDR_SURF_16_BANK) |
1216     + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1217     + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1218     + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
1219     + tilemode[11] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
1220     + ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1221     + PIPE_CONFIG(ADDR_SURF_P4_8x16) |
1222     + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
1223     + NUM_BANKS(ADDR_SURF_16_BANK) |
1224     + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1225     + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1226     + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
1227     + tilemode[12] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
1228     + ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1229     + PIPE_CONFIG(ADDR_SURF_P4_8x16) |
1230     + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
1231     + NUM_BANKS(ADDR_SURF_16_BANK) |
1232     + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1233     + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1234     + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
1235     + tilemode[13] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
1236     + ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
1237     + PIPE_CONFIG(ADDR_SURF_P4_8x16) |
1238     + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
1239     + NUM_BANKS(ADDR_SURF_16_BANK) |
1240     + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1241     + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1242     + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
1243     + tilemode[14] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
1244     + ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1245     + PIPE_CONFIG(ADDR_SURF_P4_8x16) |
1246     + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
1247     + NUM_BANKS(ADDR_SURF_16_BANK) |
1248     + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1249     + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1250     + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
1251     + tilemode[15] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
1252     + ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1253     + PIPE_CONFIG(ADDR_SURF_P4_8x16) |
1254     + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
1255     + NUM_BANKS(ADDR_SURF_16_BANK) |
1256     + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1257     + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1258     + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
1259     + tilemode[16] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
1260     + ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1261     + PIPE_CONFIG(ADDR_SURF_P4_8x16) |
1262     + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
1263     + NUM_BANKS(ADDR_SURF_16_BANK) |
1264     + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1265     + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1266     + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
1267     + tilemode[17] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
1268     + ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1269     + PIPE_CONFIG(ADDR_SURF_P4_8x16) |
1270     + TILE_SPLIT(split_equal_to_row_size) |
1271     + NUM_BANKS(ADDR_SURF_16_BANK) |
1272     + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1273     + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1274     + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
1275     + tilemode[21] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
1276     + ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1277     + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
1278     + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
1279     + NUM_BANKS(ADDR_SURF_16_BANK) |
1280     + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
1281     + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1282     + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
1283     + tilemode[22] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
1284     + ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1285     + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
1286     + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
1287     + NUM_BANKS(ADDR_SURF_16_BANK) |
1288     + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1289     + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
1290     + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
1291     + tilemode[23] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
1292     + ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1293     + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
1294     + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
1295     + NUM_BANKS(ADDR_SURF_16_BANK) |
1296     + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1297     + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
1298     + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
1299     + tilemode[24] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
1300     + ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1301     + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
1302     + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
1303     + NUM_BANKS(ADDR_SURF_16_BANK) |
1304     + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1305     + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1306     + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
1307     + tilemode[25] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
1308     + ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1309     + PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
1310     + TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
1311     + NUM_BANKS(ADDR_SURF_8_BANK) |
1312     + BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
1313     + BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
1314     + MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1);
1315     + for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
1316     + WREG32(mmGB_TILE_MODE0 + reg_offset, tilemode[reg_offset]);
1317     + } else if (adev->asic_type == CHIP_HAINAN) {
1318     tilemode[0] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
1319     ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
1320     PIPE_CONFIG(ADDR_SURF_P2) |
1321     diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
1322     index 9a3bea738330..87b95eeedd9e 100644
1323     --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
1324     +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
1325     @@ -551,12 +551,15 @@ static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
1326     void etnaviv_gem_free_object(struct drm_gem_object *obj)
1327     {
1328     struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
1329     + struct etnaviv_drm_private *priv = obj->dev->dev_private;
1330     struct etnaviv_vram_mapping *mapping, *tmp;
1331    
1332     /* object should not be active */
1333     WARN_ON(is_active(etnaviv_obj));
1334    
1335     + mutex_lock(&priv->gem_lock);
1336     list_del(&etnaviv_obj->gem_node);
1337     + mutex_unlock(&priv->gem_lock);
1338    
1339     list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
1340     obj_node) {
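
Note: the etnaviv hunk above takes priv->gem_lock around list_del() so the
free path cannot race with other walkers of the GEM object list. A minimal
userspace sketch of the same discipline, using pthread stand-ins rather
than the kernel primitives:

    #include <pthread.h>

    struct node { struct node *prev, *next; };

    static struct node gem_list = { &gem_list, &gem_list };
    static pthread_mutex_t gem_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Unlink under the same lock every list walker takes, so nobody
     * can observe a half-removed node. */
    void free_object(struct node *n)
    {
            pthread_mutex_lock(&gem_lock);
            n->prev->next = n->next;
            n->next->prev = n->prev;
            pthread_mutex_unlock(&gem_lock);
    }

    int main(void)
    {
            struct node n = { &gem_list, &gem_list };

            gem_list.next = gem_list.prev = &n;  /* two-node ring */
            free_object(&n);                     /* list is empty again */
            return 0;
    }
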
1341     diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
1342     index 6463fc2c736f..b95362186f9c 100644
1343     --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
1344     +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
1345     @@ -445,8 +445,10 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
1346     cmdbuf->user_size = ALIGN(args->stream_size, 8);
1347    
1348     ret = etnaviv_gpu_submit(gpu, submit, cmdbuf);
1349     - if (ret == 0)
1350     - cmdbuf = NULL;
1351     + if (ret)
1352     + goto out;
1353     +
1354     + cmdbuf = NULL;
1355    
1356     if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
1357     /*
1358     diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
1359     index 242bd50faa26..bcc94e559cd0 100644
1360     --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
1361     +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
1362     @@ -176,6 +176,7 @@ static int exynos_drm_suspend(struct device *dev)
1363     if (pm_runtime_suspended(dev) || !drm_dev)
1364     return 0;
1365    
1366     + drm_modeset_lock_all(drm_dev);
1367     drm_connector_list_iter_begin(drm_dev, &conn_iter);
1368     drm_for_each_connector_iter(connector, &conn_iter) {
1369     int old_dpms = connector->dpms;
1370     @@ -187,6 +188,7 @@ static int exynos_drm_suspend(struct device *dev)
1371     connector->dpms = old_dpms;
1372     }
1373     drm_connector_list_iter_end(&conn_iter);
1374     + drm_modeset_unlock_all(drm_dev);
1375    
1376     return 0;
1377     }
1378     @@ -200,6 +202,7 @@ static int exynos_drm_resume(struct device *dev)
1379     if (pm_runtime_suspended(dev) || !drm_dev)
1380     return 0;
1381    
1382     + drm_modeset_lock_all(drm_dev);
1383     drm_connector_list_iter_begin(drm_dev, &conn_iter);
1384     drm_for_each_connector_iter(connector, &conn_iter) {
1385     if (connector->funcs->dpms) {
1386     @@ -210,6 +213,7 @@ static int exynos_drm_resume(struct device *dev)
1387     }
1388     }
1389     drm_connector_list_iter_end(&conn_iter);
1390     + drm_modeset_unlock_all(drm_dev);
1391    
1392     return 0;
1393     }
1394     diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
1395     index 40af17ec6312..ff3154fe6588 100644
1396     --- a/drivers/gpu/drm/i915/gvt/cfg_space.c
1397     +++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
1398     @@ -197,78 +197,65 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
1399     static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
1400     void *p_data, unsigned int bytes)
1401     {
1402     - unsigned int bar_index =
1403     - (rounddown(offset, 8) % PCI_BASE_ADDRESS_0) / 8;
1404     u32 new = *(u32 *)(p_data);
1405     bool lo = IS_ALIGNED(offset, 8);
1406     u64 size;
1407     int ret = 0;
1408     bool mmio_enabled =
1409     vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY;
1410     + struct intel_vgpu_pci_bar *bars = vgpu->cfg_space.bar;
1411    
1412     - if (WARN_ON(bar_index >= INTEL_GVT_PCI_BAR_MAX))
1413     - return -EINVAL;
1414     -
1415     + /*
1416     + * Power-up software can determine how much address
1417     + * space the device requires by writing a value of
1418     + * all 1's to the register and then reading the value
1419     + * back. The device will return 0's in all don't-care
1420     + * address bits.
1421     + */
1422     if (new == 0xffffffff) {
1423     - /*
1424     - * Power-up software can determine how much address
1425     - * space the device requires by writing a value of
1426     - * all 1's to the register and then reading the value
1427     - * back. The device will return 0's in all don't-care
1428     - * address bits.
1429     - */
1430     - size = vgpu->cfg_space.bar[bar_index].size;
1431     - if (lo) {
1432     - new = rounddown(new, size);
1433     - } else {
1434     - u32 val = vgpu_cfg_space(vgpu)[rounddown(offset, 8)];
1435     - /* for 32bit mode bar it returns all-0 in upper 32
1436     - * bit, for 64bit mode bar it will calculate the
1437     - * size with lower 32bit and return the corresponding
1438     - * value
1439     + switch (offset) {
1440     + case PCI_BASE_ADDRESS_0:
1441     + case PCI_BASE_ADDRESS_1:
1442     + size = ~(bars[INTEL_GVT_PCI_BAR_GTTMMIO].size - 1);
1443     + intel_vgpu_write_pci_bar(vgpu, offset,
1444     + size >> (lo ? 0 : 32), lo);
1445     + /*
1446     + * Untrap the BAR, since guest hasn't configured a
1447     + * valid GPA
1448     */
1449     - if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
1450     - new &= (~(size-1)) >> 32;
1451     - else
1452     - new = 0;
1453     - }
1454     - /*
1455     - * Unmapp & untrap the BAR, since guest hasn't configured a
1456     - * valid GPA
1457     - */
1458     - switch (bar_index) {
1459     - case INTEL_GVT_PCI_BAR_GTTMMIO:
1460     ret = trap_gttmmio(vgpu, false);
1461     break;
1462     - case INTEL_GVT_PCI_BAR_APERTURE:
1463     + case PCI_BASE_ADDRESS_2:
1464     + case PCI_BASE_ADDRESS_3:
1465     + size = ~(bars[INTEL_GVT_PCI_BAR_APERTURE].size - 1);
1466     + intel_vgpu_write_pci_bar(vgpu, offset,
1467     + size >> (lo ? 0 : 32), lo);
1468     ret = map_aperture(vgpu, false);
1469     break;
1470     + default:
1471     + /* Unimplemented BARs */
1472     + intel_vgpu_write_pci_bar(vgpu, offset, 0x0, false);
1473     }
1474     - intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
1475     } else {
1476     - /*
1477     - * Unmapp & untrap the old BAR first, since guest has
1478     - * re-configured the BAR
1479     - */
1480     - switch (bar_index) {
1481     - case INTEL_GVT_PCI_BAR_GTTMMIO:
1482     - ret = trap_gttmmio(vgpu, false);
1483     + switch (offset) {
1484     + case PCI_BASE_ADDRESS_0:
1485     + case PCI_BASE_ADDRESS_1:
1486     + /*
1487     + * Untrap the old BAR first, since guest has
1488     + * re-configured the BAR
1489     + */
1490     + trap_gttmmio(vgpu, false);
1491     + intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
1492     + ret = trap_gttmmio(vgpu, mmio_enabled);
1493     break;
1494     - case INTEL_GVT_PCI_BAR_APERTURE:
1495     - ret = map_aperture(vgpu, false);
1496     + case PCI_BASE_ADDRESS_2:
1497     + case PCI_BASE_ADDRESS_3:
1498     + map_aperture(vgpu, false);
1499     + intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
1500     + ret = map_aperture(vgpu, mmio_enabled);
1501     break;
1502     - }
1503     - intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
1504     - /* Track the new BAR */
1505     - if (mmio_enabled) {
1506     - switch (bar_index) {
1507     - case INTEL_GVT_PCI_BAR_GTTMMIO:
1508     - ret = trap_gttmmio(vgpu, true);
1509     - break;
1510     - case INTEL_GVT_PCI_BAR_APERTURE:
1511     - ret = map_aperture(vgpu, true);
1512     - break;
1513     - }
1514     + default:
1515     + intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
1516     }
1517     }
1518     return ret;
1519     @@ -299,10 +286,7 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
1520     }
1521    
1522     switch (rounddown(offset, 4)) {
1523     - case PCI_BASE_ADDRESS_0:
1524     - case PCI_BASE_ADDRESS_1:
1525     - case PCI_BASE_ADDRESS_2:
1526     - case PCI_BASE_ADDRESS_3:
1527     + case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5:
1528     if (WARN_ON(!IS_ALIGNED(offset, 4)))
1529     return -EINVAL;
1530     return emulate_pci_bar_write(vgpu, offset, p_data, bytes);
1531     @@ -344,7 +328,6 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
1532     struct intel_gvt *gvt = vgpu->gvt;
1533     const struct intel_gvt_device_info *info = &gvt->device_info;
1534     u16 *gmch_ctl;
1535     - int i;
1536    
1537     memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
1538     info->cfg_space_size);
1539     @@ -371,13 +354,13 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
1540     */
1541     memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
1542     memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
1543     + memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_4, 0, 8);
1544     memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
1545    
1546     - for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
1547     - vgpu->cfg_space.bar[i].size = pci_resource_len(
1548     - gvt->dev_priv->drm.pdev, i * 2);
1549     - vgpu->cfg_space.bar[i].tracked = false;
1550     - }
1551     + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size =
1552     + pci_resource_len(gvt->dev_priv->drm.pdev, 0);
1553     + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
1554     + pci_resource_len(gvt->dev_priv->drm.pdev, 2);
1555     }
1556    
1557     /**
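
Note: the rewritten emulate_pci_bar_write() above implements the standard
PCI sizing probe: the guest writes all 1's to a BAR and the device must
answer with ~(size - 1). A standalone sketch of that arithmetic (the low
BAR flag bits real hardware also reports are ignored here):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t bar_size = 16ull << 20;     /* assume a 16 MiB BAR */
            uint64_t readback = ~(bar_size - 1); /* device reply to the all-1s write */
            uint64_t probed   = ~readback + 1;   /* guest recovers the size */

            printf("readback mask %#llx -> size %llu MiB\n",
                   (unsigned long long)readback,
                   (unsigned long long)(probed >> 20));
            return 0;
    }
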
1558     diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
1559     index 50ec836da8b1..4b8f6e070b5f 100644
1560     --- a/drivers/gpu/drm/i915/intel_dsi.c
1561     +++ b/drivers/gpu/drm/i915/intel_dsi.c
1562     @@ -892,8 +892,6 @@ static void intel_dsi_disable(struct intel_encoder *encoder,
1563     struct intel_crtc_state *old_crtc_state,
1564     struct drm_connector_state *old_conn_state)
1565     {
1566     - struct drm_device *dev = encoder->base.dev;
1567     - struct drm_i915_private *dev_priv = dev->dev_private;
1568     struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
1569     enum port port;
1570    
1571     @@ -902,15 +900,6 @@ static void intel_dsi_disable(struct intel_encoder *encoder,
1572     intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
1573     intel_panel_disable_backlight(old_conn_state);
1574    
1575     - /*
1576     - * Disable Device ready before the port shutdown in order
1577     - * to avoid split screen
1578     - */
1579     - if (IS_BROXTON(dev_priv)) {
1580     - for_each_dsi_port(port, intel_dsi->ports)
1581     - I915_WRITE(MIPI_DEVICE_READY(port), 0);
1582     - }
1583     -
1584     /*
1585     * According to the spec we should send SHUTDOWN before
1586     * MIPI_SEQ_DISPLAY_OFF only for v3+ VBTs, but field testing
1587     diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
1588     index 997131d58c7f..ffc10cadcf34 100644
1589     --- a/drivers/gpu/drm/radeon/radeon_device.c
1590     +++ b/drivers/gpu/drm/radeon/radeon_device.c
1591     @@ -1663,7 +1663,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
1592     radeon_agp_suspend(rdev);
1593    
1594     pci_save_state(dev->pdev);
1595     - if (freeze && rdev->family >= CHIP_CEDAR) {
1596     + if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) {
1597     rdev->asic->asic_reset(rdev, true);
1598     pci_restore_state(dev->pdev);
1599     } else if (suspend) {
1600     diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
1601     index e49b34c3b136..ca846fbe16c4 100644
1602     --- a/drivers/infiniband/hw/cxgb4/cm.c
1603     +++ b/drivers/infiniband/hw/cxgb4/cm.c
1604     @@ -2333,9 +2333,14 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1605     unsigned int stid = GET_TID(rpl);
1606     struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
1607    
1608     + if (!ep) {
1609     + pr_debug("%s stid %d lookup failure!\n", __func__, stid);
1610     + goto out;
1611     + }
1612     pr_debug("%s ep %p\n", __func__, ep);
1613     c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
1614     c4iw_put_ep(&ep->com);
1615     +out:
1616     return 0;
1617     }
1618    
1619     @@ -2594,9 +2599,9 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
1620     c4iw_put_ep(&child_ep->com);
1621     reject:
1622     reject_cr(dev, hwtid, skb);
1623     +out:
1624     if (parent_ep)
1625     c4iw_put_ep(&parent_ep->com);
1626     -out:
1627     return 0;
1628     }
1629    
1630     @@ -3458,7 +3463,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
1631     cm_id->provider_data = ep;
1632     goto out;
1633     }
1634     -
1635     + remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
1636     cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
1637     ep->com.local_addr.ss_family);
1638     fail2:
1639     diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
1640     index 2e075377242e..6cd61638b441 100644
1641     --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
1642     +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
1643     @@ -1000,19 +1000,6 @@ static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
1644     */
1645     priv->dev->broadcast[8] = priv->pkey >> 8;
1646     priv->dev->broadcast[9] = priv->pkey & 0xff;
1647     -
1648     - /*
1649     - * Update the broadcast address in the priv->broadcast object,
1650     - * in case it already exists, otherwise no one will do that.
1651     - */
1652     - if (priv->broadcast) {
1653     - spin_lock_irq(&priv->lock);
1654     - memcpy(priv->broadcast->mcmember.mgid.raw,
1655     - priv->dev->broadcast + 4,
1656     - sizeof(union ib_gid));
1657     - spin_unlock_irq(&priv->lock);
1658     - }
1659     -
1660     return 0;
1661     }
1662    
1663     diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
1664     index 3acce09bba35..240941eb3f68 100644
1665     --- a/drivers/md/dm-integrity.c
1666     +++ b/drivers/md/dm-integrity.c
1667     @@ -1697,7 +1697,11 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
1668    
1669     if (need_sync_io) {
1670     wait_for_completion_io(&read_comp);
1671     - integrity_metadata(&dio->work);
1672     + if (likely(!bio->bi_status))
1673     + integrity_metadata(&dio->work);
1674     + else
1675     + dec_in_flight(dio);
1676     +
1677     } else {
1678     INIT_WORK(&dio->work, integrity_metadata);
1679     queue_work(ic->metadata_wq, &dio->work);
1680     diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
1681     index 41971a090e34..208800610af8 100644
1682     --- a/drivers/md/dm-linear.c
1683     +++ b/drivers/md/dm-linear.c
1684     @@ -184,20 +184,6 @@ static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
1685     return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
1686     }
1687    
1688     -static void linear_dax_flush(struct dm_target *ti, pgoff_t pgoff, void *addr,
1689     - size_t size)
1690     -{
1691     - struct linear_c *lc = ti->private;
1692     - struct block_device *bdev = lc->dev->bdev;
1693     - struct dax_device *dax_dev = lc->dev->dax_dev;
1694     - sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
1695     -
1696     - dev_sector = linear_map_sector(ti, sector);
1697     - if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(size, PAGE_SIZE), &pgoff))
1698     - return;
1699     - dax_flush(dax_dev, pgoff, addr, size);
1700     -}
1701     -
1702     static struct target_type linear_target = {
1703     .name = "linear",
1704     .version = {1, 4, 0},
1705     @@ -212,7 +198,6 @@ static struct target_type linear_target = {
1706     .iterate_devices = linear_iterate_devices,
1707     .direct_access = linear_dax_direct_access,
1708     .dax_copy_from_iter = linear_dax_copy_from_iter,
1709     - .dax_flush = linear_dax_flush,
1710     };
1711    
1712     int __init dm_linear_init(void)
1713     diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
1714     index a0375530b07f..1690bb299b3f 100644
1715     --- a/drivers/md/dm-stripe.c
1716     +++ b/drivers/md/dm-stripe.c
1717     @@ -351,25 +351,6 @@ static size_t stripe_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
1718     return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
1719     }
1720    
1721     -static void stripe_dax_flush(struct dm_target *ti, pgoff_t pgoff, void *addr,
1722     - size_t size)
1723     -{
1724     - sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
1725     - struct stripe_c *sc = ti->private;
1726     - struct dax_device *dax_dev;
1727     - struct block_device *bdev;
1728     - uint32_t stripe;
1729     -
1730     - stripe_map_sector(sc, sector, &stripe, &dev_sector);
1731     - dev_sector += sc->stripe[stripe].physical_start;
1732     - dax_dev = sc->stripe[stripe].dev->dax_dev;
1733     - bdev = sc->stripe[stripe].dev->bdev;
1734     -
1735     - if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(size, PAGE_SIZE), &pgoff))
1736     - return;
1737     - dax_flush(dax_dev, pgoff, addr, size);
1738     -}
1739     -
1740     /*
1741     * Stripe status:
1742     *
1743     @@ -491,7 +472,6 @@ static struct target_type stripe_target = {
1744     .io_hints = stripe_io_hints,
1745     .direct_access = stripe_dax_direct_access,
1746     .dax_copy_from_iter = stripe_dax_copy_from_iter,
1747     - .dax_flush = stripe_dax_flush,
1748     };
1749    
1750     int __init dm_stripe_init(void)
1751     diff --git a/drivers/md/dm.c b/drivers/md/dm.c
1752     index d669fddd9290..825eaffc24da 100644
1753     --- a/drivers/md/dm.c
1754     +++ b/drivers/md/dm.c
1755     @@ -987,24 +987,6 @@ static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
1756     return ret;
1757     }
1758    
1759     -static void dm_dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
1760     - size_t size)
1761     -{
1762     - struct mapped_device *md = dax_get_private(dax_dev);
1763     - sector_t sector = pgoff * PAGE_SECTORS;
1764     - struct dm_target *ti;
1765     - int srcu_idx;
1766     -
1767     - ti = dm_dax_get_live_target(md, sector, &srcu_idx);
1768     -
1769     - if (!ti)
1770     - goto out;
1771     - if (ti->type->dax_flush)
1772     - ti->type->dax_flush(ti, pgoff, addr, size);
1773     - out:
1774     - dm_put_live_table(md, srcu_idx);
1775     -}
1776     -
1777     /*
1778     * A target may call dm_accept_partial_bio only from the map routine. It is
1779     * allowed for all bio types except REQ_PREFLUSH.
1780     @@ -2992,7 +2974,6 @@ static const struct block_device_operations dm_blk_dops = {
1781     static const struct dax_operations dm_dax_ops = {
1782     .direct_access = dm_dax_direct_access,
1783     .copy_from_iter = dm_dax_copy_from_iter,
1784     - .flush = dm_dax_flush,
1785     };
1786    
1787     /*
1788     diff --git a/drivers/md/md.c b/drivers/md/md.c
1789     index b01e458d31e9..0d993ea63043 100644
1790     --- a/drivers/md/md.c
1791     +++ b/drivers/md/md.c
1792     @@ -266,6 +266,37 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
1793     * call has finished, the bio has been linked into some internal structure
1794     * and so is visible to ->quiesce(), so we don't need the refcount any more.
1795     */
1796     +void md_handle_request(struct mddev *mddev, struct bio *bio)
1797     +{
1798     +check_suspended:
1799     + rcu_read_lock();
1800     + if (mddev->suspended) {
1801     + DEFINE_WAIT(__wait);
1802     + for (;;) {
1803     + prepare_to_wait(&mddev->sb_wait, &__wait,
1804     + TASK_UNINTERRUPTIBLE);
1805     + if (!mddev->suspended)
1806     + break;
1807     + rcu_read_unlock();
1808     + schedule();
1809     + rcu_read_lock();
1810     + }
1811     + finish_wait(&mddev->sb_wait, &__wait);
1812     + }
1813     + atomic_inc(&mddev->active_io);
1814     + rcu_read_unlock();
1815     +
1816     + if (!mddev->pers->make_request(mddev, bio)) {
1817     + atomic_dec(&mddev->active_io);
1818     + wake_up(&mddev->sb_wait);
1819     + goto check_suspended;
1820     + }
1821     +
1822     + if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
1823     + wake_up(&mddev->sb_wait);
1824     +}
1825     +EXPORT_SYMBOL(md_handle_request);
1826     +
1827     static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
1828     {
1829     const int rw = bio_data_dir(bio);
1830     @@ -285,23 +316,6 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
1831     bio_endio(bio);
1832     return BLK_QC_T_NONE;
1833     }
1834     -check_suspended:
1835     - rcu_read_lock();
1836     - if (mddev->suspended) {
1837     - DEFINE_WAIT(__wait);
1838     - for (;;) {
1839     - prepare_to_wait(&mddev->sb_wait, &__wait,
1840     - TASK_UNINTERRUPTIBLE);
1841     - if (!mddev->suspended)
1842     - break;
1843     - rcu_read_unlock();
1844     - schedule();
1845     - rcu_read_lock();
1846     - }
1847     - finish_wait(&mddev->sb_wait, &__wait);
1848     - }
1849     - atomic_inc(&mddev->active_io);
1850     - rcu_read_unlock();
1851    
1852     /*
1853     * save the sectors now since our bio can
1854     @@ -310,20 +324,14 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
1855     sectors = bio_sectors(bio);
1856     /* bio could be mergeable after passing to underlayer */
1857     bio->bi_opf &= ~REQ_NOMERGE;
1858     - if (!mddev->pers->make_request(mddev, bio)) {
1859     - atomic_dec(&mddev->active_io);
1860     - wake_up(&mddev->sb_wait);
1861     - goto check_suspended;
1862     - }
1863     +
1864     + md_handle_request(mddev, bio);
1865    
1866     cpu = part_stat_lock();
1867     part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
1868     part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
1869     part_stat_unlock();
1870    
1871     - if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
1872     - wake_up(&mddev->sb_wait);
1873     -
1874     return BLK_QC_T_NONE;
1875     }
1876    
1877     @@ -439,16 +447,22 @@ static void md_submit_flush_data(struct work_struct *ws)
1878     struct mddev *mddev = container_of(ws, struct mddev, flush_work);
1879     struct bio *bio = mddev->flush_bio;
1880    
1881     + /*
1882     + * must reset flush_bio before calling into md_handle_request to avoid
1883     + * a deadlock: other bios that already passed the suspend check in
1884     + * md_handle_request may be waiting on this flush, while the call to
1885     + * md_handle_request below may in turn wait on those bios
1886     + */
1887     + mddev->flush_bio = NULL;
1888     + wake_up(&mddev->sb_wait);
1889     +
1890     if (bio->bi_iter.bi_size == 0)
1891     /* an empty barrier - all done */
1892     bio_endio(bio);
1893     else {
1894     bio->bi_opf &= ~REQ_PREFLUSH;
1895     - mddev->pers->make_request(mddev, bio);
1896     + md_handle_request(mddev, bio);
1897     }
1898     -
1899     - mddev->flush_bio = NULL;
1900     - wake_up(&mddev->sb_wait);
1901     }
1902    
1903     void md_flush_request(struct mddev *mddev, struct bio *bio)
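
Note: md_handle_request() gates each bio on mddev->suspended and retries a
refused submission, and md_submit_flush_data() must clear flush_bio before
re-entering it so two waiters cannot block on each other. A hedged
userspace analogue of the gate-and-retry loop, where try_submit() stands
in for pers->make_request():

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t md_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  md_resumed = PTHREAD_COND_INITIALIZER;
    static bool suspended;
    static int  active_io;

    static bool try_submit(void) { return true; }  /* toy stand-in */

    void handle_request(void)
    {
            pthread_mutex_lock(&md_lock);
            for (;;) {
                    while (suspended)                    /* wait out the suspend */
                            pthread_cond_wait(&md_resumed, &md_lock);
                    active_io++;                         /* count the I/O in */
                    pthread_mutex_unlock(&md_lock);

                    if (try_submit())
                            return;                      /* completion drops active_io */

                    pthread_mutex_lock(&md_lock);
                    active_io--;                         /* back out, re-check suspend */
            }
    }
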
1904     diff --git a/drivers/md/md.h b/drivers/md/md.h
1905     index 09db03455801..60b09bea886b 100644
1906     --- a/drivers/md/md.h
1907     +++ b/drivers/md/md.h
1908     @@ -686,6 +686,7 @@ extern void md_stop_writes(struct mddev *mddev);
1909     extern int md_rdev_init(struct md_rdev *rdev);
1910     extern void md_rdev_clear(struct md_rdev *rdev);
1911    
1912     +extern void md_handle_request(struct mddev *mddev, struct bio *bio);
1913     extern void mddev_suspend(struct mddev *mddev);
1914     extern void mddev_resume(struct mddev *mddev);
1915     extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
1916     diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
1917     index e13a8ce7f589..fc48813eaa08 100644
1918     --- a/drivers/md/raid5.c
1919     +++ b/drivers/md/raid5.c
1920     @@ -812,6 +812,14 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
1921     spin_unlock(&head->batch_head->batch_lock);
1922     goto unlock_out;
1923     }
1924     + /*
1925     + * We must assign batch_head of this stripe within the
1926     + * batch_lock; otherwise clear_batch_ready() on the batch
1927     + * head stripe could clear this stripe's BATCH_READY bit
1928     + * before its batch_head is assigned, which would confuse
1929     + * clear_batch_ready() for this stripe.
1930     + */
1931     + sh->batch_head = head->batch_head;
1932    
1933     /*
1934     * at this point, head's BATCH_READY could be cleared, but we
1935     @@ -819,8 +827,6 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
1936     */
1937     list_add(&sh->batch_list, &head->batch_list);
1938     spin_unlock(&head->batch_head->batch_lock);
1939     -
1940     - sh->batch_head = head->batch_head;
1941     } else {
1942     head->batch_head = head;
1943     sh->batch_head = head->batch_head;
1944     @@ -4608,7 +4614,8 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
1945    
1946     set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
1947     (1 << STRIPE_PREREAD_ACTIVE) |
1948     - (1 << STRIPE_DEGRADED)),
1949     + (1 << STRIPE_DEGRADED) |
1950     + (1 << STRIPE_ON_UNPLUG_LIST)),
1951     head_sh->state & (1 << STRIPE_INSYNC));
1952    
1953     sh->check_state = head_sh->check_state;
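
Note: the raid5 change publishes sh->batch_head inside batch_lock so that
clear_batch_ready() can never observe the stripe linked into a batch while
its batch_head is still NULL. The pattern, reduced to a sketch with
illustrative names:

    #include <pthread.h>

    struct stripe { struct stripe *batch_head; };

    void add_to_batch(struct stripe *sh, struct stripe *head,
                      pthread_mutex_t *batch_lock)
    {
            pthread_mutex_lock(batch_lock);
            sh->batch_head = head->batch_head;  /* publish before anyone
                                                   can find sh in the batch */
            /* ... link sh into head's batch list here ... */
            pthread_mutex_unlock(batch_lock);
    }
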
1954     diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
1955     index affa7370ba82..74c663b1c0a7 100644
1956     --- a/drivers/mmc/core/queue.c
1957     +++ b/drivers/mmc/core/queue.c
1958     @@ -242,6 +242,12 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
1959     if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
1960     limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
1961    
1962     + /*
1963     + * mmc_init_request() depends on card->bouncesz so it must be calculated
1964     + * before blk_init_allocated_queue() starts allocating requests.
1965     + */
1966     + card->bouncesz = mmc_queue_calc_bouncesz(host);
1967     +
1968     mq->card = card;
1969     mq->queue = blk_alloc_queue(GFP_KERNEL);
1970     if (!mq->queue)
1971     @@ -265,7 +271,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
1972     if (mmc_can_erase(card))
1973     mmc_queue_setup_discard(mq->queue, card);
1974    
1975     - card->bouncesz = mmc_queue_calc_bouncesz(host);
1976     if (card->bouncesz) {
1977     blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512);
1978     blk_queue_max_segments(mq->queue, card->bouncesz / 512);
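
Note: the mmc change is purely an ordering fix: card->bouncesz is read by
mmc_init_request(), which the queue setup may invoke while allocating
requests, so it has to be computed beforehand. A reduced sketch with
hypothetical names:

    struct card { unsigned int bouncesz; };

    static int init_request(struct card *c)
    {
            return c->bouncesz ? 0 : -1;   /* consumer of bouncesz */
    }

    int init_queue(struct card *c)
    {
            c->bouncesz = 64 * 1024;       /* 1) compute the dependency ...    */
            return init_request(c);        /* 2) ... before allocation reads it */
    }
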
1979     diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
1980     index e1721ac37919..ba8a0f58fe08 100644
1981     --- a/drivers/mmc/host/sdhci-pci-core.c
1982     +++ b/drivers/mmc/host/sdhci-pci-core.c
1983     @@ -393,6 +393,7 @@ static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = {
1984    
1985     enum {
1986     INTEL_DSM_FNS = 0,
1987     + INTEL_DSM_V18_SWITCH = 3,
1988     INTEL_DSM_DRV_STRENGTH = 9,
1989     INTEL_DSM_D3_RETUNE = 10,
1990     };
1991     @@ -558,6 +559,19 @@ static void intel_hs400_enhanced_strobe(struct mmc_host *mmc,
1992     sdhci_writel(host, val, INTEL_HS400_ES_REG);
1993     }
1994    
1995     +static void sdhci_intel_voltage_switch(struct sdhci_host *host)
1996     +{
1997     + struct sdhci_pci_slot *slot = sdhci_priv(host);
1998     + struct intel_host *intel_host = sdhci_pci_priv(slot);
1999     + struct device *dev = &slot->chip->pdev->dev;
2000     + u32 result = 0;
2001     + int err;
2002     +
2003     + err = intel_dsm(intel_host, dev, INTEL_DSM_V18_SWITCH, &result);
2004     + pr_debug("%s: %s DSM error %d result %u\n",
2005     + mmc_hostname(host->mmc), __func__, err, result);
2006     +}
2007     +
2008     static const struct sdhci_ops sdhci_intel_byt_ops = {
2009     .set_clock = sdhci_set_clock,
2010     .set_power = sdhci_intel_set_power,
2011     @@ -566,6 +580,7 @@ static const struct sdhci_ops sdhci_intel_byt_ops = {
2012     .reset = sdhci_reset,
2013     .set_uhs_signaling = sdhci_set_uhs_signaling,
2014     .hw_reset = sdhci_pci_hw_reset,
2015     + .voltage_switch = sdhci_intel_voltage_switch,
2016     };
2017    
2018     static void byt_read_dsm(struct sdhci_pci_slot *slot)
2019     diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
2020     index 5736b0c90b33..a308e707392d 100644
2021     --- a/drivers/mtd/mtdpart.c
2022     +++ b/drivers/mtd/mtdpart.c
2023     @@ -581,6 +581,14 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent,
2024     slave->mtd.erasesize = parent->erasesize;
2025     }
2026    
2027     + /*
2028     + * Slave erasesize might differ from the master one if the master
2029     + * exposes several regions with different erasesize. Adjust
2030     + * wr_alignment accordingly.
2031     + */
2032     + if (!(slave->mtd.flags & MTD_NO_ERASE))
2033     + wr_alignment = slave->mtd.erasesize;
2034     +
2035     tmp = slave->offset;
2036     remainder = do_div(tmp, wr_alignment);
2037     if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
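
Note: mtdpart now widens wr_alignment to the slave's own erase size before
the remainder checks that follow the hunk. The test itself is plain
modular arithmetic, for example:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t offset = 3ull * 128 * 1024;   /* assume 128 KiB erase blocks */
            uint32_t wr_alignment = 128 * 1024;

            printf("remainder %llu -> %s\n",
                   (unsigned long long)(offset % wr_alignment),
                   (offset % wr_alignment) ? "misaligned" : "erase-aligned");
            return 0;
    }
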
2038     diff --git a/drivers/mtd/nand/atmel/pmecc.c b/drivers/mtd/nand/atmel/pmecc.c
2039     index 8c210a5776bc..24be19fb9591 100644
2040     --- a/drivers/mtd/nand/atmel/pmecc.c
2041     +++ b/drivers/mtd/nand/atmel/pmecc.c
2042     @@ -363,7 +363,7 @@ atmel_pmecc_create_user(struct atmel_pmecc *pmecc,
2043     size += (req->ecc.strength + 1) * sizeof(u16);
2044     /* Reserve space for mu, dmu and delta. */
2045     size = ALIGN(size, sizeof(s32));
2046     - size += (req->ecc.strength + 1) * sizeof(s32);
2047     + size += (req->ecc.strength + 1) * sizeof(s32) * 3;
2048    
2049     user = kzalloc(size, GFP_KERNEL);
2050     if (!user)
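
Note: the pmecc fix sizes the scratch buffer for three s32 tables (mu, dmu
and delta, per the nearby comment), each holding strength + 1 entries,
where the old code reserved room for only one. The arithmetic, worked for
an assumed t=24 configuration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int strength = 24;   /* e.g. a t=24 PMECC setup */
            size_t per_table = (strength + 1) * sizeof(int32_t);

            printf("one table %zu bytes, mu+dmu+delta %zu bytes\n",
                   per_table, 3 * per_table);
            return 0;
    }
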
2051     diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
2052     index c8852acc1462..6467ffac9811 100644
2053     --- a/drivers/net/wireless/mac80211_hwsim.c
2054     +++ b/drivers/net/wireless/mac80211_hwsim.c
2055     @@ -1362,8 +1362,6 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
2056     txi->control.rates,
2057     ARRAY_SIZE(txi->control.rates));
2058    
2059     - txi->rate_driver_data[0] = channel;
2060     -
2061     if (skb->len >= 24 + 8 &&
2062     ieee80211_is_probe_resp(hdr->frame_control)) {
2063     /* fake header transmission time */
2064     diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
2065     index 5f1c6756e57c..f49a29abb11f 100644
2066     --- a/drivers/nvdimm/namespace_devs.c
2067     +++ b/drivers/nvdimm/namespace_devs.c
2068     @@ -1417,6 +1417,15 @@ static int btt_claim_class(struct device *dev)
2069     struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
2070     struct nd_namespace_index *nsindex;
2071    
2072     + /*
2073     + * If any of the DIMMs do not support labels the only
2074     + * possible BTT format is v1.
2075     + */
2076     + if (!ndd) {
2077     + loop_bitmask = 0;
2078     + break;
2079     + }
2080     +
2081     nsindex = to_namespace_index(ndd, ndd->ns_current);
2082     if (nsindex == NULL)
2083     loop_bitmask |= 1;
2084     diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
2085     index f7099adaabc0..88c128258760 100644
2086     --- a/drivers/nvdimm/pmem.c
2087     +++ b/drivers/nvdimm/pmem.c
2088     @@ -243,16 +243,9 @@ static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
2089     return copy_from_iter_flushcache(addr, bytes, i);
2090     }
2091    
2092     -static void pmem_dax_flush(struct dax_device *dax_dev, pgoff_t pgoff,
2093     - void *addr, size_t size)
2094     -{
2095     - arch_wb_cache_pmem(addr, size);
2096     -}
2097     -
2098     static const struct dax_operations pmem_dax_ops = {
2099     .direct_access = pmem_dax_direct_access,
2100     .copy_from_iter = pmem_copy_from_iter,
2101     - .flush = pmem_dax_flush,
2102     };
2103    
2104     static const struct attribute_group *pmem_attribute_groups[] = {
2105     diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
2106     index ea892e732268..cdf4c0e471b9 100644
2107     --- a/drivers/nvme/host/pci.c
2108     +++ b/drivers/nvme/host/pci.c
2109     @@ -1609,18 +1609,16 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
2110     dev->host_mem_descs = NULL;
2111     }
2112    
2113     -static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
2114     +static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
2115     + u32 chunk_size)
2116     {
2117     struct nvme_host_mem_buf_desc *descs;
2118     - u32 chunk_size, max_entries, len;
2119     + u32 max_entries, len;
2120     dma_addr_t descs_dma;
2121     int i = 0;
2122     void **bufs;
2123     u64 size = 0, tmp;
2124    
2125     - /* start big and work our way down */
2126     - chunk_size = min(preferred, (u64)PAGE_SIZE << MAX_ORDER);
2127     -retry:
2128     tmp = (preferred + chunk_size - 1);
2129     do_div(tmp, chunk_size);
2130     max_entries = tmp;
2131     @@ -1647,15 +1645,9 @@ static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
2132     i++;
2133     }
2134    
2135     - if (!size || (min && size < min)) {
2136     - dev_warn(dev->ctrl.device,
2137     - "failed to allocate host memory buffer.\n");
2138     + if (!size)
2139     goto out_free_bufs;
2140     - }
2141    
2142     - dev_info(dev->ctrl.device,
2143     - "allocated %lld MiB host memory buffer.\n",
2144     - size >> ilog2(SZ_1M));
2145     dev->nr_host_mem_descs = i;
2146     dev->host_mem_size = size;
2147     dev->host_mem_descs = descs;
2148     @@ -1676,21 +1668,35 @@ static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
2149     dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs,
2150     descs_dma);
2151     out:
2152     - /* try a smaller chunk size if we failed early */
2153     - if (chunk_size >= PAGE_SIZE * 2 && (i == 0 || size < min)) {
2154     - chunk_size /= 2;
2155     - goto retry;
2156     - }
2157     dev->host_mem_descs = NULL;
2158     return -ENOMEM;
2159     }
2160    
2161     -static void nvme_setup_host_mem(struct nvme_dev *dev)
2162     +static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
2163     +{
2164     + u32 chunk_size;
2165     +
2166     + /* start big and work our way down */
2167     + for (chunk_size = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES);
2168     + chunk_size >= PAGE_SIZE * 2;
2169     + chunk_size /= 2) {
2170     + if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) {
2171     + if (!min || dev->host_mem_size >= min)
2172     + return 0;
2173     + nvme_free_host_mem(dev);
2174     + }
2175     + }
2176     +
2177     + return -ENOMEM;
2178     +}
2179     +
2180     +static int nvme_setup_host_mem(struct nvme_dev *dev)
2181     {
2182     u64 max = (u64)max_host_mem_size_mb * SZ_1M;
2183     u64 preferred = (u64)dev->ctrl.hmpre * 4096;
2184     u64 min = (u64)dev->ctrl.hmmin * 4096;
2185     u32 enable_bits = NVME_HOST_MEM_ENABLE;
2186     + int ret = 0;
2187    
2188     preferred = min(preferred, max);
2189     if (min > max) {
2190     @@ -1698,7 +1704,7 @@ static void nvme_setup_host_mem(struct nvme_dev *dev)
2191     "min host memory (%lld MiB) above limit (%d MiB).\n",
2192     min >> ilog2(SZ_1M), max_host_mem_size_mb);
2193     nvme_free_host_mem(dev);
2194     - return;
2195     + return 0;
2196     }
2197    
2198     /*
2199     @@ -1712,12 +1718,21 @@ static void nvme_setup_host_mem(struct nvme_dev *dev)
2200     }
2201    
2202     if (!dev->host_mem_descs) {
2203     - if (nvme_alloc_host_mem(dev, min, preferred))
2204     - return;
2205     + if (nvme_alloc_host_mem(dev, min, preferred)) {
2206     + dev_warn(dev->ctrl.device,
2207     + "failed to allocate host memory buffer.\n");
2208     + return 0; /* controller must work without HMB */
2209     + }
2210     +
2211     + dev_info(dev->ctrl.device,
2212     + "allocated %lld MiB host memory buffer.\n",
2213     + dev->host_mem_size >> ilog2(SZ_1M));
2214     }
2215    
2216     - if (nvme_set_host_mem(dev, enable_bits))
2217     + ret = nvme_set_host_mem(dev, enable_bits);
2218     + if (ret)
2219     nvme_free_host_mem(dev);
2220     + return ret;
2221     }
2222    
2223     static int nvme_setup_io_queues(struct nvme_dev *dev)
2224     @@ -2161,8 +2176,11 @@ static void nvme_reset_work(struct work_struct *work)
2225     "unable to allocate dma for dbbuf\n");
2226     }
2227    
2228     - if (dev->ctrl.hmpre)
2229     - nvme_setup_host_mem(dev);
2230     + if (dev->ctrl.hmpre) {
2231     + result = nvme_setup_host_mem(dev);
2232     + if (result < 0)
2233     + goto out;
2234     + }
2235    
2236     result = nvme_setup_io_queues(dev);
2237     if (result)
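
Note: the nvme rework separates mechanism from policy: __nvme_alloc_host_mem()
attempts one chunk size, and nvme_alloc_host_mem() starts large and halves the
chunk size until the result covers at least 'min' bytes. A toy model of that
loop, where alloc_chunks() is a stand-in that pretends large chunks always
fail:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u

    /* Toy stand-in for __nvme_alloc_host_mem(): pretend chunks above
     * 64 pages cannot be allocated, and that success covers 'preferred'. */
    static int alloc_chunks(uint64_t preferred, uint32_t chunk, uint64_t *got)
    {
            if (chunk > 64 * PAGE_SIZE)
                    return -1;
            *got = preferred;
            return 0;
    }

    static int alloc_host_mem(uint64_t min, uint64_t preferred, uint32_t max_chunk)
    {
            uint64_t got;
            uint32_t chunk;

            for (chunk = max_chunk; chunk >= 2 * PAGE_SIZE; chunk /= 2) {
                    if (alloc_chunks(preferred, chunk, &got) == 0) {
                            if (!min || got >= min)
                                    return 0;   /* large enough: keep it */
                            /* too small: a real caller frees and retries */
                    }
            }
            return -1;
    }

    int main(void)
    {
            printf("%s\n", alloc_host_mem(1 << 20, 8 << 20, 1 << 22) ?
                   "failed" : "allocated");
            return 0;
    }
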
2238     diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
2239     index 2f3780b50723..6337bce27c36 100644
2240     --- a/drivers/pci/pci-sysfs.c
2241     +++ b/drivers/pci/pci-sysfs.c
2242     @@ -686,7 +686,7 @@ static ssize_t driver_override_store(struct device *dev,
2243     const char *buf, size_t count)
2244     {
2245     struct pci_dev *pdev = to_pci_dev(dev);
2246     - char *driver_override, *old = pdev->driver_override, *cp;
2247     + char *driver_override, *old, *cp;
2248    
2249     /* We need to keep extra room for a newline */
2250     if (count >= (PAGE_SIZE - 1))
2251     @@ -700,12 +700,15 @@ static ssize_t driver_override_store(struct device *dev,
2252     if (cp)
2253     *cp = '\0';
2254    
2255     + device_lock(dev);
2256     + old = pdev->driver_override;
2257     if (strlen(driver_override)) {
2258     pdev->driver_override = driver_override;
2259     } else {
2260     kfree(driver_override);
2261     pdev->driver_override = NULL;
2262     }
2263     + device_unlock(dev);
2264    
2265     kfree(old);
2266    
2267     @@ -716,8 +719,12 @@ static ssize_t driver_override_show(struct device *dev,
2268     struct device_attribute *attr, char *buf)
2269     {
2270     struct pci_dev *pdev = to_pci_dev(dev);
2271     + ssize_t len;
2272    
2273     - return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
2274     + device_lock(dev);
2275     + len = snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
2276     + device_unlock(dev);
2277     + return len;
2278     }
2279     static DEVICE_ATTR_RW(driver_override);
2280    
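
Note: the pci-sysfs hunks take device_lock() around both the store and the
show of driver_override; freeing the old string after unlock is safe
precisely because any reader serializes on the same lock and so cannot
still hold the stale pointer. A userspace analogue with illustrative
names:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
    static char *override;

    void store_override(const char *buf)
    {
            char *new = buf[0] ? strdup(buf) : NULL;
            char *old;

            pthread_mutex_lock(&dev_lock);
            old = override;             /* swap under the lock */
            override = new;
            pthread_mutex_unlock(&dev_lock);

            free(old);                  /* safe: show takes the same lock,
                                           so no reader still uses 'old' */
    }

    void show_override(char *out, size_t len)
    {
            pthread_mutex_lock(&dev_lock);
            snprintf(out, len, "%s\n", override ? override : "(null)");
            pthread_mutex_unlock(&dev_lock);
    }
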
2281     diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
2282     index 85de30f93a9c..56a8195096a2 100644
2283     --- a/drivers/platform/x86/fujitsu-laptop.c
2284     +++ b/drivers/platform/x86/fujitsu-laptop.c
2285     @@ -254,10 +254,12 @@ static int bl_update_status(struct backlight_device *b)
2286     {
2287     struct acpi_device *device = bl_get_data(b);
2288    
2289     - if (b->props.power == FB_BLANK_POWERDOWN)
2290     - call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x3);
2291     - else
2292     - call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x0);
2293     + if (fext) {
2294     + if (b->props.power == FB_BLANK_POWERDOWN)
2295     + call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x3);
2296     + else
2297     + call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x0);
2298     + }
2299    
2300     return set_lcd_level(device, b->props.brightness);
2301     }
2302     diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
2303     index b051d97af468..e431ad40b533 100644
2304     --- a/drivers/scsi/aacraid/aachba.c
2305     +++ b/drivers/scsi/aacraid/aachba.c
2306     @@ -699,13 +699,13 @@ static void _aac_probe_container1(void * context, struct fib * fibptr)
2307     int status;
2308    
2309     dresp = (struct aac_mount *) fib_data(fibptr);
2310     - if (!(fibptr->dev->supplement_adapter_info.supported_options2 &
2311     - AAC_OPTION_VARIABLE_BLOCK_SIZE))
2312     + if (!aac_supports_2T(fibptr->dev)) {
2313     dresp->mnt[0].capacityhigh = 0;
2314     - if ((le32_to_cpu(dresp->status) != ST_OK) ||
2315     - (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
2316     - _aac_probe_container2(context, fibptr);
2317     - return;
2318     + if ((le32_to_cpu(dresp->status) == ST_OK) &&
2319     + (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
2320     + _aac_probe_container2(context, fibptr);
2321     + return;
2322     + }
2323     }
2324     scsicmd = (struct scsi_cmnd *) context;
2325    
2326     diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
2327     index ee2667e20e42..c9e2170fa22d 100644
2328     --- a/drivers/scsi/aacraid/aacraid.h
2329     +++ b/drivers/scsi/aacraid/aacraid.h
2330     @@ -2700,6 +2700,11 @@ static inline int aac_is_src(struct aac_dev *dev)
2331     return 0;
2332     }
2333    
2334     +static inline int aac_supports_2T(struct aac_dev *dev)
2335     +{
2336     + return (dev->adapter_info.options & AAC_OPT_NEW_COMM_64);
2337     +}
2338     +
2339     char * get_container_type(unsigned type);
2340     extern int numacb;
2341     extern char aac_driver_version[];
2342     diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
2343     index 48c2b2b34b72..0c9361c87ec8 100644
2344     --- a/drivers/scsi/aacraid/src.c
2345     +++ b/drivers/scsi/aacraid/src.c
2346     @@ -740,6 +740,8 @@ static void aac_send_iop_reset(struct aac_dev *dev)
2347     aac_set_intx_mode(dev);
2348    
2349     src_writel(dev, MUnit.IDR, IOP_SRC_RESET_MASK);
2350     +
2351     + msleep(5000);
2352     }
2353    
2354     static void aac_send_hardware_soft_reset(struct aac_dev *dev)
2355     diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
2356     index 892fbd9800d9..bea06de60827 100644
2357     --- a/drivers/scsi/scsi_transport_fc.c
2358     +++ b/drivers/scsi/scsi_transport_fc.c
2359     @@ -3550,7 +3550,7 @@ fc_vport_sched_delete(struct work_struct *work)
2360     static enum blk_eh_timer_return
2361     fc_bsg_job_timeout(struct request *req)
2362     {
2363     - struct bsg_job *job = (void *) req->special;
2364     + struct bsg_job *job = blk_mq_rq_to_pdu(req);
2365     struct Scsi_Host *shost = fc_bsg_to_shost(job);
2366     struct fc_rport *rport = fc_bsg_to_rport(job);
2367     struct fc_internal *i = to_fc_internal(shost->transportt);
2368     diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
2369     index a424eaeafeb0..c55c6f3147ae 100644
2370     --- a/drivers/scsi/scsi_transport_iscsi.c
2371     +++ b/drivers/scsi/scsi_transport_iscsi.c
2372     @@ -3689,7 +3689,7 @@ iscsi_if_rx(struct sk_buff *skb)
2373     uint32_t group;
2374    
2375     nlh = nlmsg_hdr(skb);
2376     - if (nlh->nlmsg_len < sizeof(*nlh) ||
2377     + if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) ||
2378     skb->len < nlh->nlmsg_len) {
2379     break;
2380     }
2381     diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
2382     index b55fdac9c9f5..e4c91d748732 100644
2383     --- a/drivers/video/fbdev/aty/atyfb_base.c
2384     +++ b/drivers/video/fbdev/aty/atyfb_base.c
2385     @@ -1855,7 +1855,7 @@ static int atyfb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
2386     #if defined(DEBUG) && defined(CONFIG_FB_ATY_CT)
2387     case ATYIO_CLKR:
2388     if (M64_HAS(INTEGRATED)) {
2389     - struct atyclk clk;
2390     + struct atyclk clk = { 0 };
2391     union aty_pll *pll = &par->pll;
2392     u32 dsp_config = pll->ct.dsp_config;
2393     u32 dsp_on_off = pll->ct.dsp_on_off;
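
Note: the tiny atyfb change is a real fix: 'clk' is later copied out to
user space, and without '= { 0 }' any field the driver never sets would
leak kernel stack bytes. A generic illustration (the struct is chosen so
it has no padding):

    #include <stdint.h>
    #include <string.h>

    struct reply { uint32_t a; uint32_t b; };

    void fill_reply(struct reply *out)
    {
            struct reply tmp = { 0 };       /* every member starts as zero */

            tmp.a = 1;                      /* only 'a' is computed here */
            memcpy(out, &tmp, sizeof(tmp)); /* 'b' is 0, not stale stack data */
    }
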
2394     diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
2395     index 24bcd5cd9cf2..df77ba89acbe 100644
2396     --- a/fs/btrfs/inode.c
2397     +++ b/fs/btrfs/inode.c
2398     @@ -135,6 +135,18 @@ static inline void btrfs_cleanup_ordered_extents(struct inode *inode,
2399     const u64 offset,
2400     const u64 bytes)
2401     {
2402     + unsigned long index = offset >> PAGE_SHIFT;
2403     + unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
2404     + struct page *page;
2405     +
2406     + while (index <= end_index) {
2407     + page = find_get_page(inode->i_mapping, index);
2408     + index++;
2409     + if (!page)
2410     + continue;
2411     + ClearPagePrivate2(page);
2412     + put_page(page);
2413     + }
2414     return __endio_write_update_ordered(inode, offset + PAGE_SIZE,
2415     bytes - PAGE_SIZE, false);
2416     }
2417     @@ -8297,6 +8309,7 @@ static void __endio_write_update_ordered(struct inode *inode,
2418     btrfs_work_func_t func;
2419     u64 ordered_offset = offset;
2420     u64 ordered_bytes = bytes;
2421     + u64 last_offset;
2422     int ret;
2423    
2424     if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
2425     @@ -8308,6 +8321,7 @@ static void __endio_write_update_ordered(struct inode *inode,
2426     }
2427    
2428     again:
2429     + last_offset = ordered_offset;
2430     ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
2431     &ordered_offset,
2432     ordered_bytes,
2433     @@ -8318,6 +8332,12 @@ static void __endio_write_update_ordered(struct inode *inode,
2434     btrfs_init_work(&ordered->work, func, finish_ordered_fn, NULL, NULL);
2435     btrfs_queue_work(wq, &ordered->work);
2436     out_test:
2437     + /*
2438     + * If btrfs_dec_test_first_ordered_pending does not find any ordered
2439     + * extent in the range, we can exit.
2440     + */
2441     + if (ordered_offset == last_offset)
2442     + return;
2443     /*
2444     * our bio might span multiple ordered extents. If we haven't
2445     * completed the accounting for the whole dio, go back and try again
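
Note: the btrfs hunk terminates the retry loop in
__endio_write_update_ordered() on the first pass that makes no progress,
by capturing last_offset before each attempt. The shape of that pattern,
with advance() as a hypothetical worker that returns the old offset when
nothing is found:

    #include <stdint.h>

    static uint64_t advance(uint64_t offset, uint64_t end)
    {
            /* toy worker: consume 4 KiB at a time until 'end' */
            return offset + 4096 <= end ? offset + 4096 : offset;
    }

    void drain(uint64_t offset, uint64_t end)
    {
            uint64_t last;

            do {
                    last = offset;
                    offset = advance(offset, end);
            } while (offset != last);   /* stop on the first no-progress pass */
    }
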
2446     diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
2447     index fa1b78cf25f6..9afd08539519 100644
2448     --- a/fs/btrfs/ioctl.c
2449     +++ b/fs/btrfs/ioctl.c
2450     @@ -3063,7 +3063,7 @@ static int btrfs_cmp_data_prepare(struct inode *src, u64 loff,
2451     out:
2452     if (ret)
2453     btrfs_cmp_data_free(cmp);
2454     - return 0;
2455     + return ret;
2456     }
2457    
2458     static int btrfs_cmp_data(u64 len, struct cmp_pages *cmp)
2459     @@ -4072,6 +4072,10 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
2460     ret = PTR_ERR(new_root);
2461     goto out;
2462     }
2463     + if (!is_fstree(new_root->objectid)) {
2464     + ret = -ENOENT;
2465     + goto out;
2466     + }
2467    
2468     path = btrfs_alloc_path();
2469     if (!path) {
2470     diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
2471     index 65661d1aae4e..6445de8e9ece 100644
2472     --- a/fs/btrfs/relocation.c
2473     +++ b/fs/btrfs/relocation.c
2474     @@ -2393,11 +2393,11 @@ void free_reloc_roots(struct list_head *list)
2475     while (!list_empty(list)) {
2476     reloc_root = list_entry(list->next, struct btrfs_root,
2477     root_list);
2478     + __del_reloc_root(reloc_root);
2479     free_extent_buffer(reloc_root->node);
2480     free_extent_buffer(reloc_root->commit_root);
2481     reloc_root->node = NULL;
2482     reloc_root->commit_root = NULL;
2483     - __del_reloc_root(reloc_root);
2484     }
2485     }
2486    
2487     diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
2488     index 180b3356ff86..a92bdb89bde3 100644
2489     --- a/fs/cifs/cifsfs.c
2490     +++ b/fs/cifs/cifsfs.c
2491     @@ -1447,7 +1447,7 @@ exit_cifs(void)
2492     exit_cifs_idmap();
2493     #endif
2494     #ifdef CONFIG_CIFS_UPCALL
2495     - unregister_key_type(&cifs_spnego_key_type);
2496     + exit_cifs_spnego();
2497     #endif
2498     cifs_destroy_request_bufs();
2499     cifs_destroy_mids();
2500     diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
2501     index 221693fe49ec..03b6eae0ae28 100644
2502     --- a/fs/cifs/cifsglob.h
2503     +++ b/fs/cifs/cifsglob.h
2504     @@ -188,6 +188,8 @@ enum smb_version {
2505     #ifdef CONFIG_CIFS_SMB311
2506     Smb_311,
2507     #endif /* SMB311 */
2508     + Smb_3any,
2509     + Smb_default,
2510     Smb_version_err
2511     };
2512    
2513     @@ -1701,6 +1703,10 @@ extern struct smb_version_values smb20_values;
2514     #define SMB21_VERSION_STRING "2.1"
2515     extern struct smb_version_operations smb21_operations;
2516     extern struct smb_version_values smb21_values;
2517     +#define SMBDEFAULT_VERSION_STRING "default"
2518     +extern struct smb_version_values smbdefault_values;
2519     +#define SMB3ANY_VERSION_STRING "3"
2520     +extern struct smb_version_values smb3any_values;
2521     #define SMB30_VERSION_STRING "3.0"
2522     extern struct smb_version_operations smb30_operations;
2523     extern struct smb_version_values smb30_values;
2524     diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
2525     index 83a8f52cd879..9e12679ffef5 100644
2526     --- a/fs/cifs/connect.c
2527     +++ b/fs/cifs/connect.c
2528     @@ -301,6 +301,8 @@ static const match_table_t cifs_smb_version_tokens = {
2529     { Smb_311, SMB311_VERSION_STRING },
2530     { Smb_311, ALT_SMB311_VERSION_STRING },
2531     #endif /* SMB311 */
2532     + { Smb_3any, SMB3ANY_VERSION_STRING },
2533     + { Smb_default, SMBDEFAULT_VERSION_STRING },
2534     { Smb_version_err, NULL }
2535     };
2536    
2537     @@ -1147,6 +1149,14 @@ cifs_parse_smb_version(char *value, struct smb_vol *vol)
2538     vol->vals = &smb311_values;
2539     break;
2540     #endif /* SMB311 */
2541     + case Smb_3any:
2542     + vol->ops = &smb30_operations; /* currently identical with 3.0 */
2543     + vol->vals = &smb3any_values;
2544     + break;
2545     + case Smb_default:
2546     + vol->ops = &smb30_operations; /* currently identical with 3.0 */
2547     + vol->vals = &smbdefault_values;
2548     + break;
2549     default:
2550     cifs_dbg(VFS, "Unknown vers= option specified: %s\n", value);
2551     return 1;
2552     @@ -1273,9 +1283,9 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
2553    
2554     vol->actimeo = CIFS_DEF_ACTIMEO;
2555    
2556     - /* FIXME: add autonegotiation for SMB3 or later rather than just SMB3 */
2557     - vol->ops = &smb30_operations; /* both secure and accepted widely */
2558     - vol->vals = &smb30_values;
2559     + /* offer SMB2.1 and later (SMB3 etc). Secure and widely accepted */
2560     + vol->ops = &smb30_operations;
2561     + vol->vals = &smbdefault_values;
2562    
2563     vol->echo_interval = SMB_ECHO_INTERVAL_DEFAULT;
2564    
2565     @@ -1987,11 +1997,10 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
2566    
2567     if (got_version == false)
2568     pr_warn("No dialect specified on mount. Default has changed to "
2569     - "a more secure dialect, SMB3 (vers=3.0), from CIFS "
2570     + "a more secure dialect, SMB2.1 or later (e.g. SMB3), from CIFS "
2571     "(SMB1). To use the less secure SMB1 dialect to access "
2572     - "old servers which do not support SMB3 specify vers=1.0"
2573     - " on mount. For somewhat newer servers such as Windows "
2574     - "7 try vers=2.1.\n");
2575     + "old servers which do not support SMB3 (or SMB2.1) specify vers=1.0"
2576     + " on mount.\n");
2577    
2578     kfree(mountdata_copy);
2579     return 0;
2580     @@ -2132,6 +2141,7 @@ static int match_server(struct TCP_Server_Info *server, struct smb_vol *vol)
2581     if (vol->nosharesock)
2582     return 0;
2583    
2584     + /* BB update this for smb3any and default case */
2585     if ((server->vals != vol->vals) || (server->ops != vol->ops))
2586     return 0;
2587    
2588     @@ -4143,6 +4153,14 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
2589     cifs_dbg(FYI, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d\n",
2590     server->sec_mode, server->capabilities, server->timeAdj);
2591    
2592     + if (ses->auth_key.response) {
2593     + cifs_dbg(VFS, "Free previous auth_key.response = %p\n",
2594     + ses->auth_key.response);
2595     + kfree(ses->auth_key.response);
2596     + ses->auth_key.response = NULL;
2597     + ses->auth_key.len = 0;
2598     + }
2599     +
2600     if (server->ops->sess_setup)
2601     rc = server->ops->sess_setup(xid, ses, nls_info);
2602    
2603     diff --git a/fs/cifs/file.c b/fs/cifs/file.c
2604     index bc09df6b473a..c3bf300e7c47 100644
2605     --- a/fs/cifs/file.c
2606     +++ b/fs/cifs/file.c
2607     @@ -224,6 +224,13 @@ cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
2608     if (backup_cred(cifs_sb))
2609     create_options |= CREATE_OPEN_BACKUP_INTENT;
2610    
2611     + /* O_SYNC also has bit for O_DSYNC so following check picks up either */
2612     + if (f_flags & O_SYNC)
2613     + create_options |= CREATE_WRITE_THROUGH;
2614     +
2615     + if (f_flags & O_DIRECT)
2616     + create_options |= CREATE_NO_BUFFER;
2617     +
2618     oparms.tcon = tcon;
2619     oparms.cifs_sb = cifs_sb;
2620     oparms.desired_access = desired_access;
2621     diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
2622     index a8693632235f..7c732cb44164 100644
2623     --- a/fs/cifs/inode.c
2624     +++ b/fs/cifs/inode.c
2625     @@ -234,6 +234,8 @@ cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, FILE_UNIX_BASIC_INFO *info,
2626     fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime);
2627     fattr->cf_mtime = cifs_NTtimeToUnix(info->LastModificationTime);
2628     fattr->cf_ctime = cifs_NTtimeToUnix(info->LastStatusChange);
2629     + /* old POSIX extensions don't get create time */
2630     +
2631     fattr->cf_mode = le64_to_cpu(info->Permissions);
2632    
2633     /*
2634     @@ -2024,6 +2026,19 @@ int cifs_getattr(const struct path *path, struct kstat *stat,
2635     stat->blksize = CIFS_MAX_MSGSIZE;
2636     stat->ino = CIFS_I(inode)->uniqueid;
2637    
2638     + /* old CIFS Unix Extensions doesn't return create time */
2639     + if (CIFS_I(inode)->createtime) {
2640     + stat->result_mask |= STATX_BTIME;
2641     + stat->btime =
2642     + cifs_NTtimeToUnix(cpu_to_le64(CIFS_I(inode)->createtime));
2643     + }
2644     +
2645     + stat->attributes_mask |= (STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED);
2646     + if (CIFS_I(inode)->cifsAttrs & FILE_ATTRIBUTE_COMPRESSED)
2647     + stat->attributes |= STATX_ATTR_COMPRESSED;
2648     + if (CIFS_I(inode)->cifsAttrs & FILE_ATTRIBUTE_ENCRYPTED)
2649     + stat->attributes |= STATX_ATTR_ENCRYPTED;
2650     +
2651     /*
2652     * If on a multiuser mount without unix extensions or cifsacl being
2653     * enabled, and the admin hasn't overridden them, set the ownership
2654     diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
2655     index cfacf2c97e94..a6c94812cfa3 100644
2656     --- a/fs/cifs/smb2ops.c
2657     +++ b/fs/cifs/smb2ops.c
2658     @@ -2906,6 +2906,46 @@ struct smb_version_values smb21_values = {
2659     .create_lease_size = sizeof(struct create_lease),
2660     };
2661    
2662     +struct smb_version_values smb3any_values = {
2663     + .version_string = SMB3ANY_VERSION_STRING,
2664     + .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
2665     + .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
2666     + .large_lock_type = 0,
2667     + .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
2668     + .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
2669     + .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
2670     + .header_size = sizeof(struct smb2_hdr),
2671     + .max_header_size = MAX_SMB2_HDR_SIZE,
2672     + .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
2673     + .lock_cmd = SMB2_LOCK,
2674     + .cap_unix = 0,
2675     + .cap_nt_find = SMB2_NT_FIND,
2676     + .cap_large_files = SMB2_LARGE_FILES,
2677     + .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
2678     + .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
2679     + .create_lease_size = sizeof(struct create_lease_v2),
2680     +};
2681     +
2682     +struct smb_version_values smbdefault_values = {
2683     + .version_string = SMBDEFAULT_VERSION_STRING,
2684     + .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
2685     + .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
2686     + .large_lock_type = 0,
2687     + .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
2688     + .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
2689     + .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
2690     + .header_size = sizeof(struct smb2_hdr),
2691     + .max_header_size = MAX_SMB2_HDR_SIZE,
2692     + .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
2693     + .lock_cmd = SMB2_LOCK,
2694     + .cap_unix = 0,
2695     + .cap_nt_find = SMB2_NT_FIND,
2696     + .cap_large_files = SMB2_LARGE_FILES,
2697     + .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
2698     + .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
2699     + .create_lease_size = sizeof(struct create_lease_v2),
2700     +};
2701     +
2702     struct smb_version_values smb30_values = {
2703     .version_string = SMB30_VERSION_STRING,
2704     .protocol_id = SMB30_PROT_ID,
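
Note: the two new value tables above let the negotiate request carry a
dialect array instead of a single version. A sketch of what the "default"
table ends up offering on the wire (the struct layout is illustrative; the
real request is built in the smb2pdu.c hunk that follows, with
cpu_to_le16() around each ID):

    #include <stdint.h>

    #define SMB21_PROT_ID  0x0210   /* SMB2 wire dialect IDs */
    #define SMB30_PROT_ID  0x0300
    #define SMB302_PROT_ID 0x0302

    struct neg_req {                /* illustrative layout, not the real PDU */
            uint16_t dialect_count;
            uint16_t dialects[3];
    };

    void fill_default_dialects(struct neg_req *req)
    {
            req->dialects[0] = SMB21_PROT_ID;   /* offer 2.1, 3.0 and 3.02 */
            req->dialects[1] = SMB30_PROT_ID;
            req->dialects[2] = SMB302_PROT_ID;
            req->dialect_count = 3;
    }
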
2705     diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
2706     index 7aa67206f6da..ddc633ef6064 100644
2707     --- a/fs/cifs/smb2pdu.c
2708     +++ b/fs/cifs/smb2pdu.c
2709     @@ -427,7 +427,7 @@ assemble_neg_contexts(struct smb2_negotiate_req *req)
2710     build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt);
2711     req->NegotiateContextOffset = cpu_to_le32(OFFSET_OF_NEG_CONTEXT);
2712     req->NegotiateContextCount = cpu_to_le16(2);
2713     - inc_rfc1001_len(req, 4 + sizeof(struct smb2_preauth_neg_context) + 2
2714     + inc_rfc1001_len(req, 4 + sizeof(struct smb2_preauth_neg_context)
2715     + sizeof(struct smb2_encryption_neg_context)); /* calculate hash */
2716     }
2717     #else
2718     @@ -479,10 +479,25 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
2719    
2720     req->hdr.sync_hdr.SessionId = 0;
2721    
2722     - req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id);
2723     -
2724     - req->DialectCount = cpu_to_le16(1); /* One vers= at a time for now */
2725     - inc_rfc1001_len(req, 2);
2726     + if (strcmp(ses->server->vals->version_string,
2727     + SMB3ANY_VERSION_STRING) == 0) {
2728     + req->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
2729     + req->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
2730     + req->DialectCount = cpu_to_le16(2);
2731     + inc_rfc1001_len(req, 4);
2732     + } else if (strcmp(ses->server->vals->version_string,
2733     + SMBDEFAULT_VERSION_STRING) == 0) {
2734     + req->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
2735     + req->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
2736     + req->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
2737     + req->DialectCount = cpu_to_le16(3);
2738     + inc_rfc1001_len(req, 6);
2739     + } else {
2740     + /* otherwise send specific dialect */
2741     + req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id);
2742     + req->DialectCount = cpu_to_le16(1);
2743     + inc_rfc1001_len(req, 2);
2744     + }
2745    
2746     /* only one of SMB2 signing flags may be set in SMB2 request */
2747     if (ses->sign)
2748     @@ -516,16 +531,43 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
2749     */
2750     if (rc == -EOPNOTSUPP) {
2751     cifs_dbg(VFS, "Dialect not supported by server. Consider "
2752     - "specifying vers=1.0 or vers=2.1 on mount for accessing"
2753     + "specifying vers=1.0 or vers=2.0 on mount for accessing"
2754     " older servers\n");
2755     goto neg_exit;
2756     } else if (rc != 0)
2757     goto neg_exit;
2758    
2759     + if (strcmp(ses->server->vals->version_string,
2760     + SMB3ANY_VERSION_STRING) == 0) {
2761     + if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
2762     + cifs_dbg(VFS,
2763     + "SMB2 dialect returned but not requested\n");
2764     + return -EIO;
2765     + } else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
2766     + cifs_dbg(VFS,
2767     + "SMB2.1 dialect returned but not requested\n");
2768     + return -EIO;
2769     + }
2770     + } else if (strcmp(ses->server->vals->version_string,
2771     + SMBDEFAULT_VERSION_STRING) == 0) {
2772     + if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
2773     + cifs_dbg(VFS,
2774     + "SMB2 dialect returned but not requested\n");
2775     + return -EIO;
2776     + } else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
2777     + /* ops were set to 3.0 by default, so update them for 2.1 */
2778     + ses->server->ops = &smb21_operations;
2779     + }
2780     + } else if (le16_to_cpu(rsp->DialectRevision) !=
2781     + ses->server->vals->protocol_id) {
2782     + /* if a single dialect was requested, ensure the returned one matches */
2783     + cifs_dbg(VFS, "Illegal 0x%x dialect returned: not requested\n",
2784     + le16_to_cpu(rsp->DialectRevision));
2785     + return -EIO;
2786     + }
2787     +
2788     cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode);
2789    
2790     - /* BB we may eventually want to match the negotiated vs. requested
2791     - dialect, even though we are only requesting one at a time */
2792     if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID))
2793     cifs_dbg(FYI, "negotiated smb2.0 dialect\n");
2794     else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID))
2795     @@ -546,6 +588,8 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
2796     }
2797     server->dialect = le16_to_cpu(rsp->DialectRevision);
2798    
2799     + /* BB: add check that dialect was valid given dialect(s) we asked for */
2800     +
2801     /* SMB2 only has an extended negflavor */
2802     server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
2803     /* set it to the maximum buffer size value we can send with 1 credit */
2804     @@ -594,20 +638,28 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
2805     struct validate_negotiate_info_req vneg_inbuf;
2806     struct validate_negotiate_info_rsp *pneg_rsp;
2807     u32 rsplen;
2808     + u32 inbuflen; /* max of 4 dialects */
2809    
2810     cifs_dbg(FYI, "validate negotiate\n");
2811    
2812     /*
2813     * validation ioctl must be signed, so no point sending this if we
2814     - * can not sign it. We could eventually change this to selectively
2815     + * cannot sign it (i.e. we are not a known user). Even if signing is not
2816     + * required (enabled but not negotiated), in those cases we selectively
2817     * sign just this, the first and only signed request on a connection.
2818     - * This is good enough for now since a user who wants better security
2819     - * would also enable signing on the mount. Having validation of
2820     - * negotiate info for signed connections helps reduce attack vectors
2821     + * Having validation of negotiate info helps reduce attack vectors.
2822     */
2823     - if (tcon->ses->server->sign == false)
2824     + if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST)
2825     return 0; /* validation requires signing */
2826    
2827     + if (tcon->ses->user_name == NULL) {
2828     + cifs_dbg(FYI, "Can't validate negotiate: null user mount\n");
2829     + return 0; /* validation requires signing */
2830     + }
2831     +
2832     + if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_NULL)
2833     + cifs_dbg(VFS, "Unexpected null user (anonymous) auth flag sent by server\n");
2834     +
2835     vneg_inbuf.Capabilities =
2836     cpu_to_le32(tcon->ses->server->vals->req_capabilities);
2837     memcpy(vneg_inbuf.Guid, tcon->ses->server->client_guid,
2838     @@ -622,9 +674,30 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
2839     else
2840     vneg_inbuf.SecurityMode = 0;
2841    
2842     - vneg_inbuf.DialectCount = cpu_to_le16(1);
2843     - vneg_inbuf.Dialects[0] =
2844     - cpu_to_le16(tcon->ses->server->vals->protocol_id);
2845     +
2846     + if (strcmp(tcon->ses->server->vals->version_string,
2847     + SMB3ANY_VERSION_STRING) == 0) {
2848     + vneg_inbuf.Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
2849     + vneg_inbuf.Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
2850     + vneg_inbuf.DialectCount = cpu_to_le16(2);
2851     + /* structure is big enough for 3 dialects, sending only 2 */
2852     + inbuflen = sizeof(struct validate_negotiate_info_req) - 2;
2853     + } else if (strcmp(tcon->ses->server->vals->version_string,
2854     + SMBDEFAULT_VERSION_STRING) == 0) {
2855     + vneg_inbuf.Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
2856     + vneg_inbuf.Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
2857     + vneg_inbuf.Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
2858     + vneg_inbuf.DialectCount = cpu_to_le16(3);
2859     + /* structure is big enough for 3 dialects */
2860     + inbuflen = sizeof(struct validate_negotiate_info_req);
2861     + } else {
2862     + /* otherwise specific dialect was requested */
2863     + vneg_inbuf.Dialects[0] =
2864     + cpu_to_le16(tcon->ses->server->vals->protocol_id);
2865     + vneg_inbuf.DialectCount = cpu_to_le16(1);
2866     + /* structure is big enough for 3 dialects, sending only 1 */
2867     + inbuflen = sizeof(struct validate_negotiate_info_req) - 4;
2868     + }
2869    
2870     rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
2871     FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
2872     @@ -1098,6 +1171,8 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
2873     while (sess_data->func)
2874     sess_data->func(sess_data);
2875    
2876     + if ((ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST) && (ses->sign))
2877     + cifs_dbg(VFS, "signing requested but authenticated as guest\n");
2878     rc = sess_data->result;
2879     out:
2880     kfree(sess_data);
2881     @@ -1622,7 +1697,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
2882     struct cifs_tcon *tcon = oparms->tcon;
2883     struct cifs_ses *ses = tcon->ses;
2884     struct kvec iov[4];
2885     - struct kvec rsp_iov;
2886     + struct kvec rsp_iov = {NULL, 0};
2887     int resp_buftype;
2888     int uni_path_len;
2889     __le16 *copy_path = NULL;
2890     @@ -1751,7 +1826,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
2891    
2892     if (rc != 0) {
2893     cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
2894     - if (err_buf)
2895     + if (err_buf && rsp)
2896     *err_buf = kmemdup(rsp, get_rfc1002_length(rsp) + 4,
2897     GFP_KERNEL);
2898     goto creat_exit;
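
SMB2_negotiate now branches three ways on the configured version string: SMB3ANY sends the 3.0 and 3.0.2 dialects, SMBDEFAULT sends 2.1, 3.0 and 3.0.2, and anything else keeps the old single-dialect request; each dialect is one 2-byte little-endian word, hence inc_rfc1001_len of 4, 6 or 2. A minimal sketch of that array construction (the mode strings and dialect IDs are stand-ins, and byte-swapping is omitted):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* toy dialect identifiers; the real values live in smb2pdu.h */
    #define SMB21_PROT_ID  0x0210
    #define SMB30_PROT_ID  0x0300
    #define SMB302_PROT_ID 0x0302

    /* Fill a dialect array the way SMB2_negotiate now does: one entry per
     * dialect, 2 bytes each, so the length added to the RFC1001 frame is
     * 2 * count.  Returns the number of dialects written. */
    static int fill_dialects(const char *vers, uint16_t *d)
    {
        if (strcmp(vers, "smb3any") == 0) {      /* stand-in string */
            d[0] = SMB30_PROT_ID;
            d[1] = SMB302_PROT_ID;
            return 2;
        }
        if (strcmp(vers, "default") == 0) {      /* stand-in string */
            d[0] = SMB21_PROT_ID;
            d[1] = SMB30_PROT_ID;
            d[2] = SMB302_PROT_ID;
            return 3;
        }
        d[0] = SMB302_PROT_ID;                   /* single requested dialect */
        return 1;
    }

    int main(void)
    {
        uint16_t dialects[3];
        int count = fill_dialects("default", dialects);

        printf("DialectCount=%d, extra rfc1001 len=%d\n", count, 2 * count);
        return 0;
    }
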
2899     diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
2900     index 2826882c81d1..46b6cbce9675 100644
2901     --- a/fs/cifs/smb2pdu.h
2902     +++ b/fs/cifs/smb2pdu.h
2903     @@ -716,7 +716,7 @@ struct validate_negotiate_info_req {
2904     __u8 Guid[SMB2_CLIENT_GUID_SIZE];
2905     __le16 SecurityMode;
2906     __le16 DialectCount;
2907     - __le16 Dialects[1]; /* dialect (someday maybe list) client asked for */
2908     + __le16 Dialects[3]; /* BB expand this if autonegotiate > 3 dialects */
2909     } __packed;
2910    
2911     struct validate_negotiate_info_rsp {
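
With Dialects[] widened to three entries, smb3_validate_negotiate trims the unused tail when fewer dialects are sent: the full struct for three, two bytes less for two, four bytes less for one. A small sketch of that bookkeeping, assuming a simplified packed layout:

    #include <stdio.h>
    #include <stdint.h>

    /* simplified stand-in for validate_negotiate_info_req */
    struct vneg_req {
        uint32_t capabilities;
        uint8_t  guid[16];
        uint16_t security_mode;
        uint16_t dialect_count;
        uint16_t dialects[3];   /* sized for the largest (SMBDEFAULT) case */
    } __attribute__((packed));

    /* bytes actually sent: trim 2 bytes per unused dialect slot */
    static size_t inbuflen(int count)
    {
        return sizeof(struct vneg_req) - 2 * (3 - count);
    }

    int main(void)
    {
        printf("1 dialect:  %zu bytes\n", inbuflen(1));  /* sizeof - 4 */
        printf("2 dialects: %zu bytes\n", inbuflen(2));  /* sizeof - 2 */
        printf("3 dialects: %zu bytes\n", inbuflen(3));  /* full struct */
        return 0;
    }
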
2912     diff --git a/fs/dax.c b/fs/dax.c
2913     index ab925dc6647a..ede5bc978db3 100644
2914     --- a/fs/dax.c
2915     +++ b/fs/dax.c
2916     @@ -786,7 +786,7 @@ static int dax_writeback_one(struct block_device *bdev,
2917     }
2918    
2919     dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(pfn));
2920     - dax_flush(dax_dev, pgoff, kaddr, size);
2921     + dax_flush(dax_dev, kaddr, size);
2922     /*
2923     * After we have flushed the cache, we can clear the dirty tag. There
2924     * cannot be new dirty data in the pfn after the flush has completed as
2925     @@ -981,7 +981,7 @@ int __dax_zero_page_range(struct block_device *bdev,
2926     return rc;
2927     }
2928     memset(kaddr + offset, 0, size);
2929     - dax_flush(dax_dev, pgoff, kaddr + offset, size);
2930     + dax_flush(dax_dev, kaddr + offset, size);
2931     dax_read_unlock(id);
2932     }
2933     return 0;
2934     diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
2935     index c38ab6c81898..410714c9eff7 100644
2936     --- a/fs/gfs2/glock.c
2937     +++ b/fs/gfs2/glock.c
2938     @@ -1863,13 +1863,9 @@ static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
2939     {
2940     struct gfs2_glock_iter *gi = seq->private;
2941     loff_t n = *pos;
2942     - int ret;
2943     -
2944     - if (gi->last_pos <= *pos)
2945     - n = (*pos - gi->last_pos);
2946    
2947     - ret = rhashtable_walk_start(&gi->hti);
2948     - if (ret)
2949     + rhashtable_walk_enter(&gl_hash_table, &gi->hti);
2950     + if (rhashtable_walk_start(&gi->hti) != 0)
2951     return NULL;
2952    
2953     do {
2954     @@ -1877,6 +1873,7 @@ static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
2955     } while (gi->gl && n--);
2956    
2957     gi->last_pos = *pos;
2958     +
2959     return gi->gl;
2960     }
2961    
2962     @@ -1888,6 +1885,7 @@ static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
2963     (*pos)++;
2964     gi->last_pos = *pos;
2965     gfs2_glock_iter_next(gi);
2966     +
2967     return gi->gl;
2968     }
2969    
2970     @@ -1897,6 +1895,7 @@ static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
2971    
2972     gi->gl = NULL;
2973     rhashtable_walk_stop(&gi->hti);
2974     + rhashtable_walk_exit(&gi->hti);
2975     }
2976    
2977     static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
2978     @@ -1959,12 +1958,10 @@ static int __gfs2_glocks_open(struct inode *inode, struct file *file,
2979     struct gfs2_glock_iter *gi = seq->private;
2980    
2981     gi->sdp = inode->i_private;
2982     - gi->last_pos = 0;
2983     seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
2984     if (seq->buf)
2985     seq->size = GFS2_SEQ_GOODSIZE;
2986     gi->gl = NULL;
2987     - rhashtable_walk_enter(&gl_hash_table, &gi->hti);
2988     }
2989     return ret;
2990     }
2991     @@ -1980,7 +1977,6 @@ static int gfs2_glocks_release(struct inode *inode, struct file *file)
2992     struct gfs2_glock_iter *gi = seq->private;
2993    
2994     gi->gl = NULL;
2995     - rhashtable_walk_exit(&gi->hti);
2996     return seq_release_private(inode, file);
2997     }
2998    
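
The gfs2 change moves the whole rhashtable-walker lifetime into the seq_file callbacks: ->start now does walk enter plus start, and ->stop does walk stop plus exit, so no walker stays registered between reads of the file. A schematic of that pairing, with stub functions standing in for the rhashtable API (which is kernel-only):

    #include <stdio.h>

    /* stubs for rhashtable_walk_{enter,start,stop,exit} */
    static void walk_enter(void) { puts("walk_enter"); }
    static int  walk_start(void) { puts("walk_start"); return 0; }
    static void walk_stop(void)  { puts("walk_stop");  }
    static void walk_exit(void)  { puts("walk_exit");  }

    /* seq_file-style callbacks: seq_stop is always called after seq_start,
     * so the walker never outlives a single read */
    static void *seq_start(void)
    {
        walk_enter();
        if (walk_start() != 0)
            return NULL;
        return (void *)1;       /* first object */
    }

    static void seq_stop(void)
    {
        walk_stop();
        walk_exit();
    }

    int main(void)
    {
        if (seq_start())
            puts("...iterate...");
        seq_stop();
        return 0;
    }
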
2999     diff --git a/fs/proc/array.c b/fs/proc/array.c
3000     index 88c355574aa0..525157ca25cb 100644
3001     --- a/fs/proc/array.c
3002     +++ b/fs/proc/array.c
3003     @@ -62,6 +62,7 @@
3004     #include <linux/mman.h>
3005     #include <linux/sched/mm.h>
3006     #include <linux/sched/numa_balancing.h>
3007     +#include <linux/sched/task_stack.h>
3008     #include <linux/sched/task.h>
3009     #include <linux/sched/cputime.h>
3010     #include <linux/proc_fs.h>
3011     @@ -421,7 +422,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
3012     * esp and eip are intentionally zeroed out. There is no
3013     * non-racy way to read them without freezing the task.
3014     * Programs that need reliable values can use ptrace(2).
3015     + *
3016     + * The only exception is when the task is core dumping, because
3017     + * a program cannot use ptrace(2) on it in that case. It is
3018     + * safe because the task has permanently stopped executing.
3019     */
3020     + if (permitted && (task->flags & PF_DUMPCORE)) {
3021     + eip = KSTK_EIP(task);
3022     + esp = KSTK_ESP(task);
3023     + }
3024     }
3025    
3026     get_task_comm(tcomm, task);
3027     diff --git a/fs/read_write.c b/fs/read_write.c
3028     index 0cc7033aa413..52872c1e57dd 100644
3029     --- a/fs/read_write.c
3030     +++ b/fs/read_write.c
3031     @@ -112,7 +112,7 @@ generic_file_llseek_size(struct file *file, loff_t offset, int whence,
3032     * In the generic case the entire file is data, so as long as
3033     * offset isn't at the end of the file then the offset is data.
3034     */
3035     - if (offset >= eof)
3036     + if ((unsigned long long)offset >= eof)
3037     return -ENXIO;
3038     break;
3039     case SEEK_HOLE:
3040     @@ -120,7 +120,7 @@ generic_file_llseek_size(struct file *file, loff_t offset, int whence,
3041     * There is a virtual hole at the end of the file, so as long as
3042     * offset isn't i_size or larger, return i_size.
3043     */
3044     - if (offset >= eof)
3045     + if ((unsigned long long)offset >= eof)
3046     return -ENXIO;
3047     offset = eof;
3048     break;
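
The added casts matter for negative offsets: loff_t is signed, so a negative SEEK_HOLE/SEEK_DATA offset compares below eof and would be treated as in range, while the unsigned comparison wraps it to a huge value and trips the -ENXIO check. A two-line demonstration, assuming a 64-bit long long:

    #include <stdio.h>

    int main(void)
    {
        long long offset = -1;          /* loff_t is a signed 64-bit type */
        long long eof = 4096;           /* end of file */

        /* signed compare: -1 < 4096, so a negative offset slips through */
        printf("signed:   offset >= eof -> %d\n", offset >= eof);

        /* unsigned compare: -1 wraps to 0xffff...ff, which is >= eof,
         * so the caller gets -ENXIO as intended */
        printf("unsigned: (unsigned long long)offset >= eof -> %d\n",
               (unsigned long long)offset >= (unsigned long long)eof);
        return 0;
    }
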
3049     diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
3050     index 5049e8ab6e30..aa75389be8cf 100644
3051     --- a/fs/xfs/xfs_ioctl.c
3052     +++ b/fs/xfs/xfs_ioctl.c
3053     @@ -1088,6 +1088,7 @@ xfs_ioctl_setattr_dax_invalidate(
3054     int *join_flags)
3055     {
3056     struct inode *inode = VFS_I(ip);
3057     + struct super_block *sb = inode->i_sb;
3058     int error;
3059    
3060     *join_flags = 0;
3061     @@ -1100,7 +1101,7 @@ xfs_ioctl_setattr_dax_invalidate(
3062     if (fa->fsx_xflags & FS_XFLAG_DAX) {
3063     if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)))
3064     return -EINVAL;
3065     - if (ip->i_mount->m_sb.sb_blocksize != PAGE_SIZE)
3066     + if (bdev_dax_supported(sb, sb->s_blocksize) < 0)
3067     return -EINVAL;
3068     }
3069    
3070     diff --git a/include/linux/dax.h b/include/linux/dax.h
3071     index df97b7af7e2c..0d8f35f6c53d 100644
3072     --- a/include/linux/dax.h
3073     +++ b/include/linux/dax.h
3074     @@ -19,8 +19,6 @@ struct dax_operations {
3075     /* copy_from_iter: required operation for fs-dax direct-i/o */
3076     size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
3077     struct iov_iter *);
3078     - /* flush: optional driver-specific cache management after writes */
3079     - void (*flush)(struct dax_device *, pgoff_t, void *, size_t);
3080     };
3081    
3082     extern struct attribute_group dax_attribute_group;
3083     @@ -84,8 +82,7 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
3084     void **kaddr, pfn_t *pfn);
3085     size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
3086     size_t bytes, struct iov_iter *i);
3087     -void dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
3088     - size_t size);
3089     +void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
3090     void dax_write_cache(struct dax_device *dax_dev, bool wc);
3091     bool dax_write_cache_enabled(struct dax_device *dax_dev);
3092    
3093     diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
3094     index 4f2b3b2076c4..17c378ecbbdd 100644
3095     --- a/include/linux/device-mapper.h
3096     +++ b/include/linux/device-mapper.h
3097     @@ -134,8 +134,6 @@ typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
3098     long nr_pages, void **kaddr, pfn_t *pfn);
3099     typedef size_t (*dm_dax_copy_from_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
3100     void *addr, size_t bytes, struct iov_iter *i);
3101     -typedef void (*dm_dax_flush_fn)(struct dm_target *ti, pgoff_t pgoff, void *addr,
3102     - size_t size);
3103     #define PAGE_SECTORS (PAGE_SIZE / 512)
3104    
3105     void dm_error(const char *message);
3106     @@ -186,7 +184,6 @@ struct target_type {
3107     dm_io_hints_fn io_hints;
3108     dm_dax_direct_access_fn direct_access;
3109     dm_dax_copy_from_iter_fn dax_copy_from_iter;
3110     - dm_dax_flush_fn dax_flush;
3111    
3112     /* For internal device-mapper use. */
3113     struct list_head list;
3114     diff --git a/include/linux/key.h b/include/linux/key.h
3115     index 044114185120..e315e16b6ff8 100644
3116     --- a/include/linux/key.h
3117     +++ b/include/linux/key.h
3118     @@ -187,6 +187,7 @@ struct key {
3119     #define KEY_FLAG_BUILTIN 8 /* set if key is built in to the kernel */
3120     #define KEY_FLAG_ROOT_CAN_INVAL 9 /* set if key can be invalidated by root without permission */
3121     #define KEY_FLAG_KEEP 10 /* set if key should not be removed */
3122     +#define KEY_FLAG_UID_KEYRING 11 /* set if key is a user or user session keyring */
3123    
3124     /* the key type and key description string
3125     * - the desc is used to match a key against search criteria
3126     @@ -243,6 +244,7 @@ extern struct key *key_alloc(struct key_type *type,
3127     #define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */
3128     #define KEY_ALLOC_BUILT_IN 0x0004 /* Key is built into kernel */
3129     #define KEY_ALLOC_BYPASS_RESTRICTION 0x0008 /* Override the check on restricted keyrings */
3130     +#define KEY_ALLOC_UID_KEYRING 0x0010 /* allocating a user or user session keyring */
3131    
3132     extern void key_revoke(struct key *key);
3133     extern void key_invalidate(struct key *key);
3134     diff --git a/include/net/mac80211.h b/include/net/mac80211.h
3135     index f8149ca192b4..885690fa39c8 100644
3136     --- a/include/net/mac80211.h
3137     +++ b/include/net/mac80211.h
3138     @@ -919,21 +919,10 @@ struct ieee80211_tx_info {
3139     unsigned long jiffies;
3140     };
3141     /* NB: vif can be NULL for injected frames */
3142     - union {
3143     - /* NB: vif can be NULL for injected frames */
3144     - struct ieee80211_vif *vif;
3145     -
3146     - /* When packets are enqueued on txq it's easy
3147     - * to re-construct the vif pointer. There's no
3148     - * more space in tx_info so it can be used to
3149     - * store the necessary enqueue time for packet
3150     - * sojourn time computation.
3151     - */
3152     - codel_time_t enqueue_time;
3153     - };
3154     + struct ieee80211_vif *vif;
3155     struct ieee80211_key_conf *hw_key;
3156     u32 flags;
3157     - /* 4 bytes free */
3158     + codel_time_t enqueue_time;
3159     } control;
3160     struct {
3161     u64 cookie;
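
The mac80211 hunk drops the union: previously vif and enqueue_time shared storage, so stamping the enqueue time on a queued frame clobbered the vif pointer. Giving enqueue_time the four spare bytes lets both values coexist. A simplified layout comparison (the field sets are trimmed from the real ieee80211_tx_info):

    #include <stdio.h>
    #include <stdint.h>

    struct vif;   /* opaque */

    /* old layout: vif and enqueue_time occupied the same bytes, so
     * writing one destroyed the other */
    union old_ctl {
        struct vif *vif;
        uint32_t enqueue_time;
    };

    /* new layout: each field has its own storage; the "4 bytes free"
     * slot is spent on enqueue_time */
    struct new_ctl {
        struct vif *vif;
        uint32_t flags;
        uint32_t enqueue_time;
    };

    int main(void)
    {
        printf("union: %zu bytes, struct: %zu bytes\n",
               sizeof(union old_ctl), sizeof(struct new_ctl));
        return 0;
    }
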
3162     diff --git a/kernel/exit.c b/kernel/exit.c
3163     index c5548faa9f37..6d31fc5ba50d 100644
3164     --- a/kernel/exit.c
3165     +++ b/kernel/exit.c
3166     @@ -1601,12 +1601,10 @@ SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
3167     struct waitid_info info = {.status = 0};
3168     long err = kernel_waitid(which, upid, &info, options, ru ? &r : NULL);
3169     int signo = 0;
3170     +
3171     if (err > 0) {
3172     signo = SIGCHLD;
3173     err = 0;
3174     - }
3175     -
3176     - if (!err) {
3177     if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
3178     return -EFAULT;
3179     }
3180     @@ -1724,16 +1722,15 @@ COMPAT_SYSCALL_DEFINE5(waitid,
3181     if (err > 0) {
3182     signo = SIGCHLD;
3183     err = 0;
3184     - }
3185     -
3186     - if (!err && uru) {
3187     - /* kernel_waitid() overwrites everything in ru */
3188     - if (COMPAT_USE_64BIT_TIME)
3189     - err = copy_to_user(uru, &ru, sizeof(ru));
3190     - else
3191     - err = put_compat_rusage(&ru, uru);
3192     - if (err)
3193     - return -EFAULT;
3194     + if (uru) {
3195     + /* kernel_waitid() overwrites everything in ru */
3196     + if (COMPAT_USE_64BIT_TIME)
3197     + err = copy_to_user(uru, &ru, sizeof(ru));
3198     + else
3199     + err = put_compat_rusage(&ru, uru);
3200     + if (err)
3201     + return -EFAULT;
3202     + }
3203     }
3204    
3205     if (!infop)
3206     diff --git a/kernel/extable.c b/kernel/extable.c
3207     index 38c2412401a1..9aa1cc41ecf7 100644
3208     --- a/kernel/extable.c
3209     +++ b/kernel/extable.c
3210     @@ -102,15 +102,7 @@ int core_kernel_data(unsigned long addr)
3211    
3212     int __kernel_text_address(unsigned long addr)
3213     {
3214     - if (core_kernel_text(addr))
3215     - return 1;
3216     - if (is_module_text_address(addr))
3217     - return 1;
3218     - if (is_ftrace_trampoline(addr))
3219     - return 1;
3220     - if (is_kprobe_optinsn_slot(addr) || is_kprobe_insn_slot(addr))
3221     - return 1;
3222     - if (is_bpf_text_address(addr))
3223     + if (kernel_text_address(addr))
3224     return 1;
3225     /*
3226     * There might be init symbols in saved stacktraces.
3227     @@ -127,17 +119,42 @@ int __kernel_text_address(unsigned long addr)
3228    
3229     int kernel_text_address(unsigned long addr)
3230     {
3231     + bool no_rcu;
3232     + int ret = 1;
3233     +
3234     if (core_kernel_text(addr))
3235     return 1;
3236     +
3237     + /*
3238     + * If a stack dump happens while RCU is not watching, then
3239     + * RCU needs to be notified that it must start
3240     + * watching again. This can happen either from tracing that
3241     + * triggers a stack trace, or from a WARN() issued while
3242     + * coming back from idle, or during CPU onlining or offlining.
3243     + *
3244     + * is_module_text_address() as well as the kprobe slots
3245     + * and is_bpf_text_address() require RCU to be watching.
3246     + */
3247     + no_rcu = !rcu_is_watching();
3248     +
3249     + /* Treat this like an NMI as it can happen anywhere */
3250     + if (no_rcu)
3251     + rcu_nmi_enter();
3252     +
3253     if (is_module_text_address(addr))
3254     - return 1;
3255     + goto out;
3256     if (is_ftrace_trampoline(addr))
3257     - return 1;
3258     + goto out;
3259     if (is_kprobe_optinsn_slot(addr) || is_kprobe_insn_slot(addr))
3260     - return 1;
3261     + goto out;
3262     if (is_bpf_text_address(addr))
3263     - return 1;
3264     - return 0;
3265     + goto out;
3266     + ret = 0;
3267     +out:
3268     + if (no_rcu)
3269     + rcu_nmi_exit();
3270     +
3271     + return ret;
3272     }
3273    
3274     /*
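
kernel_text_address() now brackets the RCU-dependent checks with rcu_nmi_enter()/rcu_nmi_exit() when RCU is not watching, and funnels every check through a single exit so the pairing cannot be missed on any path. A toy userspace rendering of that shape (ctx_enter/ctx_exit stand in for the RCU calls):

    #include <stdio.h>
    #include <stdbool.h>

    static bool watching = false;

    static void ctx_enter(void) { watching = true;  puts("ctx enter"); }
    static void ctx_exit(void)  { watching = false; puts("ctx exit"); }

    /* checks that are only safe while the context is held */
    static int check_a(int x) { return x == 1; }
    static int check_b(int x) { return x == 2; }

    static int lookup(int x)
    {
        bool need_ctx = !watching;  /* mirrors no_rcu = !rcu_is_watching() */
        int ret = 1;

        if (need_ctx)
            ctx_enter();            /* treated like NMI entry: safe anywhere */

        if (check_a(x))
            goto out;
        if (check_b(x))
            goto out;
        ret = 0;
    out:
        if (need_ctx)
            ctx_exit();             /* paired exit on every path */
        return ret;
    }

    int main(void)
    {
        printf("lookup(2) = %d\n", lookup(2));
        printf("lookup(9) = %d\n", lookup(9));
        return 0;
    }
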
3275     diff --git a/kernel/futex.c b/kernel/futex.c
3276     index f50b434756c1..bf57ab12ffe8 100644
3277     --- a/kernel/futex.c
3278     +++ b/kernel/futex.c
3279     @@ -821,8 +821,6 @@ static void get_pi_state(struct futex_pi_state *pi_state)
3280     /*
3281     * Drops a reference to the pi_state object and frees or caches it
3282     * when the last reference is gone.
3283     - *
3284     - * Must be called with the hb lock held.
3285     */
3286     static void put_pi_state(struct futex_pi_state *pi_state)
3287     {
3288     @@ -837,16 +835,22 @@ static void put_pi_state(struct futex_pi_state *pi_state)
3289     * and has cleaned up the pi_state already
3290     */
3291     if (pi_state->owner) {
3292     - raw_spin_lock_irq(&pi_state->owner->pi_lock);
3293     - list_del_init(&pi_state->list);
3294     - raw_spin_unlock_irq(&pi_state->owner->pi_lock);
3295     + struct task_struct *owner;
3296    
3297     - rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
3298     + raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
3299     + owner = pi_state->owner;
3300     + if (owner) {
3301     + raw_spin_lock(&owner->pi_lock);
3302     + list_del_init(&pi_state->list);
3303     + raw_spin_unlock(&owner->pi_lock);
3304     + }
3305     + rt_mutex_proxy_unlock(&pi_state->pi_mutex, owner);
3306     + raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
3307     }
3308    
3309     - if (current->pi_state_cache)
3310     + if (current->pi_state_cache) {
3311     kfree(pi_state);
3312     - else {
3313     + } else {
3314     /*
3315     * pi_state->list is already empty.
3316     * clear pi_state->owner.
3317     @@ -905,13 +909,14 @@ void exit_pi_state_list(struct task_struct *curr)
3318     raw_spin_unlock_irq(&curr->pi_lock);
3319    
3320     spin_lock(&hb->lock);
3321     -
3322     - raw_spin_lock_irq(&curr->pi_lock);
3323     + raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
3324     + raw_spin_lock(&curr->pi_lock);
3325     /*
3326     * We dropped the pi-lock, so re-check whether this
3327     * task still owns the PI-state:
3328     */
3329     if (head->next != next) {
3330     + raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
3331     spin_unlock(&hb->lock);
3332     continue;
3333     }
3334     @@ -920,9 +925,10 @@ void exit_pi_state_list(struct task_struct *curr)
3335     WARN_ON(list_empty(&pi_state->list));
3336     list_del_init(&pi_state->list);
3337     pi_state->owner = NULL;
3338     - raw_spin_unlock_irq(&curr->pi_lock);
3339     + raw_spin_unlock(&curr->pi_lock);
3340    
3341     get_pi_state(pi_state);
3342     + raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
3343     spin_unlock(&hb->lock);
3344    
3345     rt_mutex_futex_unlock(&pi_state->pi_mutex);
3346     @@ -1204,6 +1210,10 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
3347    
3348     WARN_ON(!list_empty(&pi_state->list));
3349     list_add(&pi_state->list, &p->pi_state_list);
3350     + /*
3351     + * Assignment without holding pi_state->pi_mutex.wait_lock is safe
3352     + * because there is no concurrency as the object is not published yet.
3353     + */
3354     pi_state->owner = p;
3355     raw_spin_unlock_irq(&p->pi_lock);
3356    
3357     @@ -2820,6 +2830,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
3358     raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
3359     spin_unlock(&hb->lock);
3360    
3361     + /* drops pi_state->pi_mutex.wait_lock */
3362     ret = wake_futex_pi(uaddr, uval, pi_state);
3363    
3364     put_pi_state(pi_state);
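
The futex changes re-nest the locks: pi_state->pi_mutex.wait_lock is now taken first and the owner's pi_lock inside it, both in put_pi_state() and in exit_pi_state_list(). A pthread sketch of that fixed ordering (toy lock names, not the futex code):

    #include <pthread.h>
    #include <stdio.h>

    /* toy stand-ins: wait_lock is the outer lock, pi_lock the inner one */
    static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t pi_lock   = PTHREAD_MUTEX_INITIALIZER;

    static void detach_owner(void)
    {
        pthread_mutex_lock(&wait_lock);     /* outer: serializes ->owner */
        pthread_mutex_lock(&pi_lock);       /* inner: protects the list */
        puts("list_del_init(&pi_state->list)");
        pthread_mutex_unlock(&pi_lock);
        puts("rt_mutex_proxy_unlock(...)"); /* still under wait_lock */
        pthread_mutex_unlock(&wait_lock);
    }

    int main(void)
    {
        detach_owner();
        return 0;
    }
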
3365     diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
3366     index 3675c6004f2a..75a70a267029 100644
3367     --- a/kernel/irq/chip.c
3368     +++ b/kernel/irq/chip.c
3369     @@ -202,7 +202,7 @@ __irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
3370    
3371     irqd_clr_managed_shutdown(d);
3372    
3373     - if (cpumask_any_and(aff, cpu_online_mask) > nr_cpu_ids) {
3374     + if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) {
3375     /*
3376     * Catch code which fiddles with enable_irq() on a managed
3377     * and potentially shutdown IRQ. Chained interrupt
3378     diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
3379     index f7086b78ad6e..5270a54b9fa4 100644
3380     --- a/kernel/irq/generic-chip.c
3381     +++ b/kernel/irq/generic-chip.c
3382     @@ -322,7 +322,6 @@ int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
3383     /* Calc pointer to the next generic chip */
3384     tmp += sizeof(*gc) + num_ct * sizeof(struct irq_chip_type);
3385     }
3386     - d->name = name;
3387     return 0;
3388     }
3389     EXPORT_SYMBOL_GPL(__irq_alloc_domain_generic_chips);
3390     diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
3391     index 73be2b3909bd..82afb7ed369f 100644
3392     --- a/kernel/irq/irqdesc.c
3393     +++ b/kernel/irq/irqdesc.c
3394     @@ -421,10 +421,8 @@ static void free_desc(unsigned int irq)
3395     * The sysfs entry must be serialized against a concurrent
3396     * irq_sysfs_init() as well.
3397     */
3398     - mutex_lock(&sparse_irq_lock);
3399     kobject_del(&desc->kobj);
3400     delete_irq_desc(irq);
3401     - mutex_unlock(&sparse_irq_lock);
3402    
3403     /*
3404     * We free the descriptor, masks and stat fields via RCU. That
3405     @@ -462,20 +460,15 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node,
3406     desc = alloc_desc(start + i, node, flags, mask, owner);
3407     if (!desc)
3408     goto err;
3409     - mutex_lock(&sparse_irq_lock);
3410     irq_insert_desc(start + i, desc);
3411     irq_sysfs_add(start + i, desc);
3412     - mutex_unlock(&sparse_irq_lock);
3413     }
3414     + bitmap_set(allocated_irqs, start, cnt);
3415     return start;
3416    
3417     err:
3418     for (i--; i >= 0; i--)
3419     free_desc(start + i);
3420     -
3421     - mutex_lock(&sparse_irq_lock);
3422     - bitmap_clear(allocated_irqs, start, cnt);
3423     - mutex_unlock(&sparse_irq_lock);
3424     return -ENOMEM;
3425     }
3426    
3427     @@ -575,6 +568,7 @@ static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
3428    
3429     desc->owner = owner;
3430     }
3431     + bitmap_set(allocated_irqs, start, cnt);
3432     return start;
3433     }
3434    
3435     @@ -670,10 +664,10 @@ void irq_free_descs(unsigned int from, unsigned int cnt)
3436     if (from >= nr_irqs || (from + cnt) > nr_irqs)
3437     return;
3438    
3439     + mutex_lock(&sparse_irq_lock);
3440     for (i = 0; i < cnt; i++)
3441     free_desc(from + i);
3442    
3443     - mutex_lock(&sparse_irq_lock);
3444     bitmap_clear(allocated_irqs, from, cnt);
3445     mutex_unlock(&sparse_irq_lock);
3446     }
3447     @@ -720,19 +714,15 @@ __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
3448     from, cnt, 0);
3449     ret = -EEXIST;
3450     if (irq >=0 && start != irq)
3451     - goto err;
3452     + goto unlock;
3453    
3454     if (start + cnt > nr_irqs) {
3455     ret = irq_expand_nr_irqs(start + cnt);
3456     if (ret)
3457     - goto err;
3458     + goto unlock;
3459     }
3460     -
3461     - bitmap_set(allocated_irqs, start, cnt);
3462     - mutex_unlock(&sparse_irq_lock);
3463     - return alloc_descs(start, cnt, node, affinity, owner);
3464     -
3465     -err:
3466     + ret = alloc_descs(start, cnt, node, affinity, owner);
3467     +unlock:
3468     mutex_unlock(&sparse_irq_lock);
3469     return ret;
3470     }
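
After this rework, alloc_descs() itself marks the allocated_irqs bitmap, and only once every descriptor has been allocated, so the error path has nothing to roll back; the caller already holds sparse_irq_lock around the whole sequence. A toy version of that claim-late pattern:

    #include <stdio.h>
    #include <stdint.h>
    #include <pthread.h>

    #define NR 64

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static uint64_t allocated;      /* toy allocation bitmap */

    static int alloc_one(unsigned int i)
    {
        return i < NR ? 0 : -1;     /* pretend descriptor allocation */
    }

    /* claim [start, start+cnt): mark the bitmap only after every
     * allocation succeeded, so an error path needs no bitmap_clear() */
    static int alloc_descs(unsigned int start, unsigned int cnt)
    {
        for (unsigned int i = 0; i < cnt; i++)
            if (alloc_one(start + i))
                return -12;         /* -ENOMEM, nothing to undo in bitmap */
        allocated |= ((1ULL << cnt) - 1) << start;   /* bitmap_set() */
        return start;
    }

    int main(void)
    {
        int ret;

        pthread_mutex_lock(&lock);  /* caller holds it, as __irq_alloc_descs does */
        ret = alloc_descs(4, 3);
        pthread_mutex_unlock(&lock);
        printf("ret=%d bitmap=%#llx\n", ret, (unsigned long long)allocated);
        return 0;
    }
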
3471     diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
3472     index 48eadf416c24..3fa4bd59f569 100644
3473     --- a/kernel/irq/msi.c
3474     +++ b/kernel/irq/msi.c
3475     @@ -315,11 +315,12 @@ int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
3476    
3477     ops->set_desc(arg, desc);
3478     /* Assumes the domain mutex is held! */
3479     - ret = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
3480     + ret = irq_domain_alloc_irqs_hierarchy(domain, desc->irq, 1,
3481     + arg);
3482     if (ret)
3483     break;
3484    
3485     - irq_set_msi_desc_off(virq, 0, desc);
3486     + irq_set_msi_desc_off(desc->irq, 0, desc);
3487     }
3488    
3489     if (ret) {
3490     diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
3491     index 51d4c3acf32d..63bee8e1b193 100644
3492     --- a/kernel/rcu/tree.c
3493     +++ b/kernel/rcu/tree.c
3494     @@ -888,6 +888,11 @@ void rcu_irq_exit(void)
3495    
3496     RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_exit() invoked with irqs enabled!!!");
3497     rdtp = this_cpu_ptr(&rcu_dynticks);
3498     +
3499     + /* Page faults can happen in NMI handlers, so check... */
3500     + if (READ_ONCE(rdtp->dynticks_nmi_nesting))
3501     + return;
3502     +
3503     WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
3504     rdtp->dynticks_nesting < 1);
3505     if (rdtp->dynticks_nesting <= 1) {
3506     @@ -1020,6 +1025,11 @@ void rcu_irq_enter(void)
3507    
3508     RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_enter() invoked with irqs enabled!!!");
3509     rdtp = this_cpu_ptr(&rcu_dynticks);
3510     +
3511     + /* Page faults can happen in NMI handlers, so check... */
3512     + if (READ_ONCE(rdtp->dynticks_nmi_nesting))
3513     + return;
3514     +
3515     oldval = rdtp->dynticks_nesting;
3516     rdtp->dynticks_nesting++;
3517     WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
3518     diff --git a/kernel/seccomp.c b/kernel/seccomp.c
3519     index 98b59b5db90b..f3218fec7f2d 100644
3520     --- a/kernel/seccomp.c
3521     +++ b/kernel/seccomp.c
3522     @@ -458,14 +458,19 @@ static long seccomp_attach_filter(unsigned int flags,
3523     return 0;
3524     }
3525    
3526     +void __get_seccomp_filter(struct seccomp_filter *filter)
3527     +{
3528     + /* Reference count is bounded by the number of total processes. */
3529     + refcount_inc(&filter->usage);
3530     +}
3531     +
3532     /* get_seccomp_filter - increments the reference count of the filter on @tsk */
3533     void get_seccomp_filter(struct task_struct *tsk)
3534     {
3535     struct seccomp_filter *orig = tsk->seccomp.filter;
3536     if (!orig)
3537     return;
3538     - /* Reference count is bounded by the number of total processes. */
3539     - refcount_inc(&orig->usage);
3540     + __get_seccomp_filter(orig);
3541     }
3542    
3543     static inline void seccomp_filter_free(struct seccomp_filter *filter)
3544     @@ -476,10 +481,8 @@ static inline void seccomp_filter_free(struct seccomp_filter *filter)
3545     }
3546     }
3547    
3548     -/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
3549     -void put_seccomp_filter(struct task_struct *tsk)
3550     +static void __put_seccomp_filter(struct seccomp_filter *orig)
3551     {
3552     - struct seccomp_filter *orig = tsk->seccomp.filter;
3553     /* Clean up single-reference branches iteratively. */
3554     while (orig && refcount_dec_and_test(&orig->usage)) {
3555     struct seccomp_filter *freeme = orig;
3556     @@ -488,6 +491,12 @@ void put_seccomp_filter(struct task_struct *tsk)
3557     }
3558     }
3559    
3560     +/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
3561     +void put_seccomp_filter(struct task_struct *tsk)
3562     +{
3563     + __put_seccomp_filter(tsk->seccomp.filter);
3564     +}
3565     +
3566     static void seccomp_init_siginfo(siginfo_t *info, int syscall, int reason)
3567     {
3568     memset(info, 0, sizeof(*info));
3569     @@ -908,13 +917,13 @@ long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
3570     if (!data)
3571     goto out;
3572    
3573     - get_seccomp_filter(task);
3574     + __get_seccomp_filter(filter);
3575     spin_unlock_irq(&task->sighand->siglock);
3576    
3577     if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
3578     ret = -EFAULT;
3579    
3580     - put_seccomp_filter(task);
3581     + __put_seccomp_filter(filter);
3582     return ret;
3583    
3584     out:
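
The seccomp fix splits the reference helpers so a caller that has already looked up a filter under the siglock can pin that exact filter, rather than re-deriving it from the task. A userspace sketch of the same layering, with C11 atomics standing in for refcount_t:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct filter {
        atomic_int usage;
        struct filter *prev;
    };

    struct task {
        struct filter *filter;
    };

    static void __get_filter(struct filter *f)
    {
        atomic_fetch_add(&f->usage, 1);     /* bounded by process count */
    }

    static void __put_filter(struct filter *orig)
    {
        /* clean up single-reference branches iteratively */
        while (orig && atomic_fetch_sub(&orig->usage, 1) == 1) {
            struct filter *freeme = orig;

            orig = orig->prev;
            free(freeme);
        }
    }

    /* task-level wrappers keep the old interface */
    static void get_filter(struct task *t) { if (t->filter) __get_filter(t->filter); }
    static void put_filter(struct task *t) { __put_filter(t->filter); }

    int main(void)
    {
        struct task t;

        t.filter = calloc(1, sizeof(*t.filter));
        atomic_store(&t.filter->usage, 1);

        get_filter(&t);     /* usage 2 */
        put_filter(&t);     /* usage 1 */
        put_filter(&t);     /* freed  */
        puts("done");
        return 0;
    }
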
3585     diff --git a/kernel/sysctl.c b/kernel/sysctl.c
3586     index 6648fbbb8157..423554ad3610 100644
3587     --- a/kernel/sysctl.c
3588     +++ b/kernel/sysctl.c
3589     @@ -367,7 +367,8 @@ static struct ctl_table kern_table[] = {
3590     .data = &sysctl_sched_time_avg,
3591     .maxlen = sizeof(unsigned int),
3592     .mode = 0644,
3593     - .proc_handler = proc_dointvec,
3594     + .proc_handler = proc_dointvec_minmax,
3595     + .extra1 = &one,
3596     },
3597     #ifdef CONFIG_SCHEDSTATS
3598     {
3599     diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
3600     index 5efb4b63174e..27a723480b13 100644
3601     --- a/kernel/trace/trace.c
3602     +++ b/kernel/trace/trace.c
3603     @@ -4017,11 +4017,17 @@ static int tracing_open(struct inode *inode, struct file *file)
3604     /* If this file was open for write, then erase contents */
3605     if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3606     int cpu = tracing_get_cpu(inode);
3607     + struct trace_buffer *trace_buf = &tr->trace_buffer;
3608     +
3609     +#ifdef CONFIG_TRACER_MAX_TRACE
3610     + if (tr->current_trace->print_max)
3611     + trace_buf = &tr->max_buffer;
3612     +#endif
3613    
3614     if (cpu == RING_BUFFER_ALL_CPUS)
3615     - tracing_reset_online_cpus(&tr->trace_buffer);
3616     + tracing_reset_online_cpus(trace_buf);
3617     else
3618     - tracing_reset(&tr->trace_buffer, cpu);
3619     + tracing_reset(trace_buf, cpu);
3620     }
3621    
3622     if (file->f_mode & FMODE_READ) {
3623     @@ -5664,7 +5670,7 @@ static int tracing_wait_pipe(struct file *filp)
3624     *
3625     * iter->pos will be 0 if we haven't read anything.
3626     */
3627     - if (!tracing_is_on() && iter->pos)
3628     + if (!tracer_tracing_is_on(iter->tr) && iter->pos)
3629     break;
3630    
3631     mutex_unlock(&iter->mutex);
3632     diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
3633     index a4df67cbc711..49cb41412eec 100644
3634     --- a/kernel/trace/trace_stack.c
3635     +++ b/kernel/trace/trace_stack.c
3636     @@ -96,23 +96,9 @@ check_stack(unsigned long ip, unsigned long *stack)
3637     if (in_nmi())
3638     return;
3639    
3640     - /*
3641     - * There's a slight chance that we are tracing inside the
3642     - * RCU infrastructure, and rcu_irq_enter() will not work
3643     - * as expected.
3644     - */
3645     - if (unlikely(rcu_irq_enter_disabled()))
3646     - return;
3647     -
3648     local_irq_save(flags);
3649     arch_spin_lock(&stack_trace_max_lock);
3650    
3651     - /*
3652     - * RCU may not be watching, make it see us.
3653     - * The stack trace code uses rcu_sched.
3654     - */
3655     - rcu_irq_enter();
3656     -
3657     /* In case another CPU set the tracer_frame on us */
3658     if (unlikely(!frame_size))
3659     this_size -= tracer_frame;
3660     @@ -205,7 +191,6 @@ check_stack(unsigned long ip, unsigned long *stack)
3661     }
3662    
3663     out:
3664     - rcu_irq_exit();
3665     arch_spin_unlock(&stack_trace_max_lock);
3666     local_irq_restore(flags);
3667     }
3668     diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
3669     index f358d0bfa76b..79d14d70b7ea 100644
3670     --- a/net/ceph/osdmap.c
3671     +++ b/net/ceph/osdmap.c
3672     @@ -2445,19 +2445,34 @@ static void apply_upmap(struct ceph_osdmap *osdmap,
3673    
3674     pg = lookup_pg_mapping(&osdmap->pg_upmap_items, pgid);
3675     if (pg) {
3676     - for (i = 0; i < raw->size; i++) {
3677     - for (j = 0; j < pg->pg_upmap_items.len; j++) {
3678     - int from = pg->pg_upmap_items.from_to[j][0];
3679     - int to = pg->pg_upmap_items.from_to[j][1];
3680     -
3681     - if (from == raw->osds[i]) {
3682     - if (!(to != CRUSH_ITEM_NONE &&
3683     - to < osdmap->max_osd &&
3684     - osdmap->osd_weight[to] == 0))
3685     - raw->osds[i] = to;
3686     + /*
3687     + * Note: this approach does not allow a bidirectional swap,
3688     + * e.g., [[1,2],[2,1]] applied to [0,1,2] -> [0,2,1].
3689     + */
3690     + for (i = 0; i < pg->pg_upmap_items.len; i++) {
3691     + int from = pg->pg_upmap_items.from_to[i][0];
3692     + int to = pg->pg_upmap_items.from_to[i][1];
3693     + int pos = -1;
3694     + bool exists = false;
3695     +
3696     + /* make sure replacement doesn't already appear */
3697     + for (j = 0; j < raw->size; j++) {
3698     + int osd = raw->osds[j];
3699     +
3700     + if (osd == to) {
3701     + exists = true;
3702     break;
3703     }
3704     + /* ignore mapping if target is marked out */
3705     + if (osd == from && pos < 0 &&
3706     + !(to != CRUSH_ITEM_NONE &&
3707     + to < osdmap->max_osd &&
3708     + osdmap->osd_weight[to] == 0)) {
3709     + pos = j;
3710     + }
3711     }
3712     + if (!exists && pos >= 0)
3713     + raw->osds[pos] = to;
3714     }
3715     }
3716     }
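
The rewritten apply_upmap() loop walks the from->to pairs instead of the OSD vector: it skips a pair whose target already appears, remembers where `from` sits, and substitutes only afterwards. A userspace rendering on plain int arrays (the CRUSH_ITEM_NONE/max_osd/osd_weight screening of the target is elided):

    #include <stdio.h>
    #include <stdbool.h>

    /* Apply (from -> to) remappings to raw[]: a mapping is ignored when
     * `to` is already present, and only the first occurrence of `from`
     * is replaced. */
    static void apply_upmap(int *raw, int size, const int (*ft)[2], int len)
    {
        for (int i = 0; i < len; i++) {
            int from = ft[i][0], to = ft[i][1];
            int pos = -1;
            bool exists = false;

            for (int j = 0; j < size; j++) {
                if (raw[j] == to) {
                    exists = true;
                    break;
                }
                if (raw[j] == from && pos < 0)
                    pos = j;
            }
            if (!exists && pos >= 0)
                raw[pos] = to;
        }
    }

    int main(void)
    {
        int raw[] = { 0, 1, 2 };
        int ft[][2] = { { 1, 2 }, { 2, 1 } };   /* attempted swap */

        apply_upmap(raw, 3, ft, 2);
        /* both pairs are skipped because each target already exists,
         * so raw stays [0,1,2]: exactly the "no bidirectional swap"
         * limitation the kernel comment notes */
        printf("%d %d %d\n", raw[0], raw[1], raw[2]);
        return 0;
    }
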
3717     diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
3718     index 2b36eff5d97e..2849a1fc41c5 100644
3719     --- a/net/mac80211/agg-rx.c
3720     +++ b/net/mac80211/agg-rx.c
3721     @@ -245,10 +245,10 @@ static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *d
3722     ieee80211_tx_skb(sdata, skb);
3723     }
3724    
3725     -void __ieee80211_start_rx_ba_session(struct sta_info *sta,
3726     - u8 dialog_token, u16 timeout,
3727     - u16 start_seq_num, u16 ba_policy, u16 tid,
3728     - u16 buf_size, bool tx, bool auto_seq)
3729     +void ___ieee80211_start_rx_ba_session(struct sta_info *sta,
3730     + u8 dialog_token, u16 timeout,
3731     + u16 start_seq_num, u16 ba_policy, u16 tid,
3732     + u16 buf_size, bool tx, bool auto_seq)
3733     {
3734     struct ieee80211_local *local = sta->sdata->local;
3735     struct tid_ampdu_rx *tid_agg_rx;
3736     @@ -267,7 +267,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
3737     ht_dbg(sta->sdata,
3738     "STA %pM requests BA session on unsupported tid %d\n",
3739     sta->sta.addr, tid);
3740     - goto end_no_lock;
3741     + goto end;
3742     }
3743    
3744     if (!sta->sta.ht_cap.ht_supported) {
3745     @@ -275,14 +275,14 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
3746     "STA %pM erroneously requests BA session on tid %d w/o QoS\n",
3747     sta->sta.addr, tid);
3748     /* send a response anyway, it's an error case if we get here */
3749     - goto end_no_lock;
3750     + goto end;
3751     }
3752    
3753     if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
3754     ht_dbg(sta->sdata,
3755     "Suspend in progress - Denying ADDBA request (%pM tid %d)\n",
3756     sta->sta.addr, tid);
3757     - goto end_no_lock;
3758     + goto end;
3759     }
3760    
3761     /* sanity check for incoming parameters:
3762     @@ -296,7 +296,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
3763     ht_dbg_ratelimited(sta->sdata,
3764     "AddBA Req with bad params from %pM on tid %u. policy %d, buffer size %d\n",
3765     sta->sta.addr, tid, ba_policy, buf_size);
3766     - goto end_no_lock;
3767     + goto end;
3768     }
3769     /* determine default buffer size */
3770     if (buf_size == 0)
3771     @@ -311,7 +311,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
3772     buf_size, sta->sta.addr);
3773    
3774     /* examine state machine */
3775     - mutex_lock(&sta->ampdu_mlme.mtx);
3776     + lockdep_assert_held(&sta->ampdu_mlme.mtx);
3777    
3778     if (test_bit(tid, sta->ampdu_mlme.agg_session_valid)) {
3779     if (sta->ampdu_mlme.tid_rx_token[tid] == dialog_token) {
3780     @@ -415,15 +415,25 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
3781     __clear_bit(tid, sta->ampdu_mlme.unexpected_agg);
3782     sta->ampdu_mlme.tid_rx_token[tid] = dialog_token;
3783     }
3784     - mutex_unlock(&sta->ampdu_mlme.mtx);
3785    
3786     -end_no_lock:
3787     if (tx)
3788     ieee80211_send_addba_resp(sta->sdata, sta->sta.addr, tid,
3789     dialog_token, status, 1, buf_size,
3790     timeout);
3791     }
3792    
3793     +void __ieee80211_start_rx_ba_session(struct sta_info *sta,
3794     + u8 dialog_token, u16 timeout,
3795     + u16 start_seq_num, u16 ba_policy, u16 tid,
3796     + u16 buf_size, bool tx, bool auto_seq)
3797     +{
3798     + mutex_lock(&sta->ampdu_mlme.mtx);
3799     + ___ieee80211_start_rx_ba_session(sta, dialog_token, timeout,
3800     + start_seq_num, ba_policy, tid,
3801     + buf_size, tx, auto_seq);
3802     + mutex_unlock(&sta->ampdu_mlme.mtx);
3803     +}
3804     +
3805     void ieee80211_process_addba_request(struct ieee80211_local *local,
3806     struct sta_info *sta,
3807     struct ieee80211_mgmt *mgmt,
3808     diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
3809     index c92df492e898..198b2d3e56fd 100644
3810     --- a/net/mac80211/ht.c
3811     +++ b/net/mac80211/ht.c
3812     @@ -333,9 +333,9 @@ void ieee80211_ba_session_work(struct work_struct *work)
3813    
3814     if (test_and_clear_bit(tid,
3815     sta->ampdu_mlme.tid_rx_manage_offl))
3816     - __ieee80211_start_rx_ba_session(sta, 0, 0, 0, 1, tid,
3817     - IEEE80211_MAX_AMPDU_BUF,
3818     - false, true);
3819     + ___ieee80211_start_rx_ba_session(sta, 0, 0, 0, 1, tid,
3820     + IEEE80211_MAX_AMPDU_BUF,
3821     + false, true);
3822    
3823     if (test_and_clear_bit(tid + IEEE80211_NUM_TIDS,
3824     sta->ampdu_mlme.tid_rx_manage_offl))
3825     diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
3826     index 2197c62a0a6e..9675814f64db 100644
3827     --- a/net/mac80211/ieee80211_i.h
3828     +++ b/net/mac80211/ieee80211_i.h
3829     @@ -1760,6 +1760,10 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
3830     u8 dialog_token, u16 timeout,
3831     u16 start_seq_num, u16 ba_policy, u16 tid,
3832     u16 buf_size, bool tx, bool auto_seq);
3833     +void ___ieee80211_start_rx_ba_session(struct sta_info *sta,
3834     + u8 dialog_token, u16 timeout,
3835     + u16 start_seq_num, u16 ba_policy, u16 tid,
3836     + u16 buf_size, bool tx, bool auto_seq);
3837     void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
3838     enum ieee80211_agg_stop_reason reason);
3839     void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
3840     diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
3841     index 9228ac73c429..44399322f356 100644
3842     --- a/net/mac80211/iface.c
3843     +++ b/net/mac80211/iface.c
3844     @@ -792,6 +792,7 @@ static int ieee80211_open(struct net_device *dev)
3845     static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
3846     bool going_down)
3847     {
3848     + struct ieee80211_sub_if_data *txq_sdata = sdata;
3849     struct ieee80211_local *local = sdata->local;
3850     struct fq *fq = &local->fq;
3851     unsigned long flags;
3852     @@ -937,6 +938,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
3853    
3854     switch (sdata->vif.type) {
3855     case NL80211_IFTYPE_AP_VLAN:
3856     + txq_sdata = container_of(sdata->bss,
3857     + struct ieee80211_sub_if_data, u.ap);
3858     +
3859     mutex_lock(&local->mtx);
3860     list_del(&sdata->u.vlan.list);
3861     mutex_unlock(&local->mtx);
3862     @@ -1007,8 +1011,17 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
3863     }
3864     spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
3865    
3866     - if (sdata->vif.txq) {
3867     - struct txq_info *txqi = to_txq_info(sdata->vif.txq);
3868     + if (txq_sdata->vif.txq) {
3869     + struct txq_info *txqi = to_txq_info(txq_sdata->vif.txq);
3870     +
3871     + /*
3872     + * FIXME FIXME
3873     + *
3874     + * We really shouldn't purge the *entire* txqi since that
3875     + * contains frames for the other AP_VLANs (and possibly
3876     + * the AP itself) as well, but FQ currently has no API
3877     + * that would let us filter them out.
3878     + */
3879    
3880     spin_lock_bh(&fq->lock);
3881     ieee80211_txq_purge(local, txqi);
3882     diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
3883     index f8e7a8bbc618..faf4f6055000 100644
3884     --- a/net/mac80211/offchannel.c
3885     +++ b/net/mac80211/offchannel.c
3886     @@ -707,6 +707,8 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,
3887     if (!cookie)
3888     return -ENOENT;
3889    
3890     + flush_work(&local->hw_roc_start);
3891     +
3892     mutex_lock(&local->mtx);
3893     list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
3894     if (!mgmt_tx && roc->cookie != cookie)
3895     diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
3896     index 8858f4f185e9..94826680cf2b 100644
3897     --- a/net/mac80211/tx.c
3898     +++ b/net/mac80211/tx.c
3899     @@ -1276,11 +1276,6 @@ static void ieee80211_set_skb_enqueue_time(struct sk_buff *skb)
3900     IEEE80211_SKB_CB(skb)->control.enqueue_time = codel_get_time();
3901     }
3902    
3903     -static void ieee80211_set_skb_vif(struct sk_buff *skb, struct txq_info *txqi)
3904     -{
3905     - IEEE80211_SKB_CB(skb)->control.vif = txqi->txq.vif;
3906     -}
3907     -
3908     static u32 codel_skb_len_func(const struct sk_buff *skb)
3909     {
3910     return skb->len;
3911     @@ -3414,6 +3409,7 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
3912     struct ieee80211_tx_info *info;
3913     struct ieee80211_tx_data tx;
3914     ieee80211_tx_result r;
3915     + struct ieee80211_vif *vif;
3916    
3917     spin_lock_bh(&fq->lock);
3918    
3919     @@ -3430,8 +3426,6 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
3920     if (!skb)
3921     goto out;
3922    
3923     - ieee80211_set_skb_vif(skb, txqi);
3924     -
3925     hdr = (struct ieee80211_hdr *)skb->data;
3926     info = IEEE80211_SKB_CB(skb);
3927    
3928     @@ -3488,6 +3482,34 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
3929     }
3930     }
3931    
3932     + switch (tx.sdata->vif.type) {
3933     + case NL80211_IFTYPE_MONITOR:
3934     + if (tx.sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) {
3935     + vif = &tx.sdata->vif;
3936     + break;
3937     + }
3938     + tx.sdata = rcu_dereference(local->monitor_sdata);
3939     + if (tx.sdata) {
3940     + vif = &tx.sdata->vif;
3941     + info->hw_queue =
3942     + vif->hw_queue[skb_get_queue_mapping(skb)];
3943     + } else if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) {
3944     + ieee80211_free_txskb(&local->hw, skb);
3945     + goto begin;
3946     + } else {
3947     + vif = NULL;
3948     + }
3949     + break;
3950     + case NL80211_IFTYPE_AP_VLAN:
3951     + tx.sdata = container_of(tx.sdata->bss,
3952     + struct ieee80211_sub_if_data, u.ap);
3953     + /* fall through */
3954     + default:
3955     + vif = &tx.sdata->vif;
3956     + break;
3957     + }
3958     +
3959     + IEEE80211_SKB_CB(skb)->control.vif = vif;
3960     out:
3961     spin_unlock_bh(&fq->lock);
3962    
3963     diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
3964     index 8ce85420ecb0..750ba5d24a49 100644
3965     --- a/net/wireless/nl80211.c
3966     +++ b/net/wireless/nl80211.c
3967     @@ -10903,6 +10903,9 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info)
3968     if (err)
3969     return err;
3970    
3971     + if (!tb[NL80211_REKEY_DATA_REPLAY_CTR] || !tb[NL80211_REKEY_DATA_KEK] ||
3972     + !tb[NL80211_REKEY_DATA_KCK])
3973     + return -EINVAL;
3974     if (nla_len(tb[NL80211_REKEY_DATA_REPLAY_CTR]) != NL80211_REPLAY_CTR_LEN)
3975     return -ERANGE;
3976     if (nla_len(tb[NL80211_REKEY_DATA_KEK]) != NL80211_KEK_LEN)
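
The nl80211 fix is one of ordering: attribute presence must be checked before nla_len(), because a missing attribute is a NULL slot in tb[] and taking its length dereferences NULL. A toy parse table showing the same guard (indices and lengths are illustrative, not the nl80211 values):

    #include <stdio.h>
    #include <stddef.h>

    struct attr { int len; const void *data; };

    #define REPLAY_CTR 0
    #define KEK        1
    #define KCK        2

    static int set_rekey_data(struct attr *tb[3])
    {
        /* presence first: a missing attribute is a NULL slot, and
         * taking its length would be a NULL dereference */
        if (!tb[REPLAY_CTR] || !tb[KEK] || !tb[KCK])
            return -22;                     /* -EINVAL */
        if (tb[REPLAY_CTR]->len != 8)
            return -34;                     /* -ERANGE */
        return 0;
    }

    int main(void)
    {
        struct attr ctr = { 8, "01234567" };
        struct attr *tb[3] = { &ctr, NULL, NULL }; /* KEK/KCK absent */

        printf("rc=%d\n", set_rekey_data(tb));     /* -22, no crash */
        return 0;
    }
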
3977     diff --git a/security/keys/Kconfig b/security/keys/Kconfig
3978     index a7a23b5541f8..91eafada3164 100644
3979     --- a/security/keys/Kconfig
3980     +++ b/security/keys/Kconfig
3981     @@ -45,10 +45,8 @@ config BIG_KEYS
3982     bool "Large payload keys"
3983     depends on KEYS
3984     depends on TMPFS
3985     - depends on (CRYPTO_ANSI_CPRNG = y || CRYPTO_DRBG = y)
3986     select CRYPTO_AES
3987     - select CRYPTO_ECB
3988     - select CRYPTO_RNG
3989     + select CRYPTO_GCM
3990     help
3991     This option provides support for holding large keys within the kernel
3992     (for example Kerberos ticket caches). The data may be stored out to
3993     diff --git a/security/keys/big_key.c b/security/keys/big_key.c
3994     index 835c1ab30d01..9c3b16ee1768 100644
3995     --- a/security/keys/big_key.c
3996     +++ b/security/keys/big_key.c
3997     @@ -1,5 +1,6 @@
3998     /* Large capacity key type
3999     *
4000     + * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
4001     * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
4002     * Written by David Howells (dhowells@redhat.com)
4003     *
4004     @@ -16,10 +17,10 @@
4005     #include <linux/shmem_fs.h>
4006     #include <linux/err.h>
4007     #include <linux/scatterlist.h>
4008     +#include <linux/random.h>
4009     #include <keys/user-type.h>
4010     #include <keys/big_key-type.h>
4011     -#include <crypto/rng.h>
4012     -#include <crypto/skcipher.h>
4013     +#include <crypto/aead.h>
4014    
4015     /*
4016     * Layout of key payload words.
4017     @@ -49,7 +50,12 @@ enum big_key_op {
4018     /*
4019     * Key size for big_key data encryption
4020     */
4021     -#define ENC_KEY_SIZE 16
4022     +#define ENC_KEY_SIZE 32
4023     +
4024     +/*
4025     + * Authentication tag length
4026     + */
4027     +#define ENC_AUTHTAG_SIZE 16
4028    
4029     /*
4030     * big_key defined keys take an arbitrary string as the description and an
4031     @@ -64,57 +70,62 @@ struct key_type key_type_big_key = {
4032     .destroy = big_key_destroy,
4033     .describe = big_key_describe,
4034     .read = big_key_read,
4035     + /* no ->update(); don't add it without changing big_key_crypt() nonce */
4036     };
4037    
4038     /*
4039     - * Crypto names for big_key data encryption
4040     + * Crypto names for big_key data authenticated encryption
4041     */
4042     -static const char big_key_rng_name[] = "stdrng";
4043     -static const char big_key_alg_name[] = "ecb(aes)";
4044     +static const char big_key_alg_name[] = "gcm(aes)";
4045    
4046     /*
4047     - * Crypto algorithms for big_key data encryption
4048     + * Crypto algorithms for big_key data authenticated encryption
4049     */
4050     -static struct crypto_rng *big_key_rng;
4051     -static struct crypto_skcipher *big_key_skcipher;
4052     +static struct crypto_aead *big_key_aead;
4053    
4054     /*
4055     - * Generate random key to encrypt big_key data
4056     + * Since changing the key affects the entire object, we need a mutex.
4057     */
4058     -static inline int big_key_gen_enckey(u8 *key)
4059     -{
4060     - return crypto_rng_get_bytes(big_key_rng, key, ENC_KEY_SIZE);
4061     -}
4062     +static DEFINE_MUTEX(big_key_aead_lock);
4063    
4064     /*
4065     * Encrypt/decrypt big_key data
4066     */
4067     static int big_key_crypt(enum big_key_op op, u8 *data, size_t datalen, u8 *key)
4068     {
4069     - int ret = -EINVAL;
4070     + int ret;
4071     struct scatterlist sgio;
4072     - SKCIPHER_REQUEST_ON_STACK(req, big_key_skcipher);
4073     -
4074     - if (crypto_skcipher_setkey(big_key_skcipher, key, ENC_KEY_SIZE)) {
4075     + struct aead_request *aead_req;
4076     + /* We always use a zero nonce. We can get away with this because
4077     + * we use a different randomly generated key for every encryption.
4078     + * Notably, too, key_type_big_key doesn't define
4079     + * an .update function, so there's no chance we'll wind up reusing the
4080     + * key to encrypt updated data. Simply put: one key, one encryption.
4081     + */
4082     + u8 zero_nonce[crypto_aead_ivsize(big_key_aead)];
4083     +
4084     + aead_req = aead_request_alloc(big_key_aead, GFP_KERNEL);
4085     + if (!aead_req)
4086     + return -ENOMEM;
4087     +
4088     + memset(zero_nonce, 0, sizeof(zero_nonce));
4089     + sg_init_one(&sgio, data, datalen + (op == BIG_KEY_ENC ? ENC_AUTHTAG_SIZE : 0));
4090     + aead_request_set_crypt(aead_req, &sgio, &sgio, datalen, zero_nonce);
4091     + aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
4092     + aead_request_set_ad(aead_req, 0);
4093     +
4094     + mutex_lock(&big_key_aead_lock);
4095     + if (crypto_aead_setkey(big_key_aead, key, ENC_KEY_SIZE)) {
4096     ret = -EAGAIN;
4097     goto error;
4098     }
4099     -
4100     - skcipher_request_set_tfm(req, big_key_skcipher);
4101     - skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
4102     - NULL, NULL);
4103     -
4104     - sg_init_one(&sgio, data, datalen);
4105     - skcipher_request_set_crypt(req, &sgio, &sgio, datalen, NULL);
4106     -
4107     if (op == BIG_KEY_ENC)
4108     - ret = crypto_skcipher_encrypt(req);
4109     + ret = crypto_aead_encrypt(aead_req);
4110     else
4111     - ret = crypto_skcipher_decrypt(req);
4112     -
4113     - skcipher_request_zero(req);
4114     -
4115     + ret = crypto_aead_decrypt(aead_req);
4116     error:
4117     + mutex_unlock(&big_key_aead_lock);
4118     + aead_request_free(aead_req);
4119     return ret;
4120     }
4121    
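[Reviewer note] The in-place scatterlist above is the subtle part: for encryption, cryptlen is the plaintext length but the single sg entry must span ENC_AUTHTAG_SIZE extra bytes, because gcm(aes) writes the ciphertext plus the appended tag back into the same buffer; for decryption the callers pass enclen (ciphertext plus tag) as datalen, so no extra room is needed. A minimal sketch of that length discipline, with hypothetical helper names (standalone C, compiles and runs as-is):

#include <assert.h>
#include <stddef.h>

#define ENC_AUTHTAG_SIZE 16

/* Hypothetical helpers, not from the patch: the sg length the in-place
 * AEAD call needs in each direction. */
static size_t sg_len_for_encrypt(size_t plainlen)
{
	/* output = ciphertext (same size as input) + appended tag */
	return plainlen + ENC_AUTHTAG_SIZE;
}

static size_t sg_len_for_decrypt(size_t enclen)
{
	/* input already ends with the tag; output shrinks by it */
	return enclen;
}

int main(void)
{
	assert(sg_len_for_encrypt(4096) == 4112);
	assert(sg_len_for_decrypt(4112) == 4112);
	return 0;
}
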
4122     @@ -146,15 +157,13 @@ int big_key_preparse(struct key_preparsed_payload *prep)
4123     *
4124     * File content is stored encrypted with randomly generated key.
4125     */
4126     - size_t enclen = ALIGN(datalen, crypto_skcipher_blocksize(big_key_skcipher));
4127     + size_t enclen = datalen + ENC_AUTHTAG_SIZE;
4128    
4129     - /* prepare aligned data to encrypt */
4130     data = kmalloc(enclen, GFP_KERNEL);
4131     if (!data)
4132     return -ENOMEM;
4133    
4134     memcpy(data, prep->data, datalen);
4135     - memset(data + datalen, 0x00, enclen - datalen);
4136    
4137     /* generate random key */
4138     enckey = kmalloc(ENC_KEY_SIZE, GFP_KERNEL);
4139     @@ -162,13 +171,12 @@ int big_key_preparse(struct key_preparsed_payload *prep)
4140     ret = -ENOMEM;
4141     goto error;
4142     }
4143     -
4144     - ret = big_key_gen_enckey(enckey);
4145     - if (ret)
4146     + ret = get_random_bytes_wait(enckey, ENC_KEY_SIZE);
4147     + if (unlikely(ret))
4148     goto err_enckey;
4149    
4150     /* encrypt the data */
4151     - ret = big_key_crypt(BIG_KEY_ENC, data, enclen, enckey);
4152     + ret = big_key_crypt(BIG_KEY_ENC, data, datalen, enckey);
4153     if (ret)
4154     goto err_enckey;
4155    
4156     @@ -194,7 +202,7 @@ int big_key_preparse(struct key_preparsed_payload *prep)
4157     *path = file->f_path;
4158     path_get(path);
4159     fput(file);
4160     - kfree(data);
4161     + kzfree(data);
4162     } else {
4163     /* Just store the data in a buffer */
4164     void *data = kmalloc(datalen, GFP_KERNEL);
4165     @@ -210,9 +218,9 @@ int big_key_preparse(struct key_preparsed_payload *prep)
4166     err_fput:
4167     fput(file);
4168     err_enckey:
4169     - kfree(enckey);
4170     + kzfree(enckey);
4171     error:
4172     - kfree(data);
4173     + kzfree(data);
4174     return ret;
4175     }
4176    
4177     @@ -226,7 +234,7 @@ void big_key_free_preparse(struct key_preparsed_payload *prep)
4178    
4179     path_put(path);
4180     }
4181     - kfree(prep->payload.data[big_key_data]);
4182     + kzfree(prep->payload.data[big_key_data]);
4183     }
4184    
4185     /*
4186     @@ -258,7 +266,7 @@ void big_key_destroy(struct key *key)
4187     path->mnt = NULL;
4188     path->dentry = NULL;
4189     }
4190     - kfree(key->payload.data[big_key_data]);
4191     + kzfree(key->payload.data[big_key_data]);
4192     key->payload.data[big_key_data] = NULL;
4193     }
4194    
4195     @@ -294,7 +302,7 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
4196     struct file *file;
4197     u8 *data;
4198     u8 *enckey = (u8 *)key->payload.data[big_key_data];
4199     - size_t enclen = ALIGN(datalen, crypto_skcipher_blocksize(big_key_skcipher));
4200     + size_t enclen = datalen + ENC_AUTHTAG_SIZE;
4201    
4202     data = kmalloc(enclen, GFP_KERNEL);
4203     if (!data)
4204     @@ -326,7 +334,7 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
4205     err_fput:
4206     fput(file);
4207     error:
4208     - kfree(data);
4209     + kzfree(data);
4210     } else {
4211     ret = datalen;
4212     if (copy_to_user(buffer, key->payload.data[big_key_data],
4213     @@ -342,47 +350,31 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
4214     */
4215     static int __init big_key_init(void)
4216     {
4217     - struct crypto_skcipher *cipher;
4218     - struct crypto_rng *rng;
4219     int ret;
4220    
4221     - rng = crypto_alloc_rng(big_key_rng_name, 0, 0);
4222     - if (IS_ERR(rng)) {
4223     - pr_err("Can't alloc rng: %ld\n", PTR_ERR(rng));
4224     - return PTR_ERR(rng);
4225     - }
4226     -
4227     - big_key_rng = rng;
4228     -
4229     - /* seed RNG */
4230     - ret = crypto_rng_reset(rng, NULL, crypto_rng_seedsize(rng));
4231     - if (ret) {
4232     - pr_err("Can't reset rng: %d\n", ret);
4233     - goto error_rng;
4234     - }
4235     -
4236     /* init AEAD cipher */
4237     - cipher = crypto_alloc_skcipher(big_key_alg_name, 0, CRYPTO_ALG_ASYNC);
4238     - if (IS_ERR(cipher)) {
4239     - ret = PTR_ERR(cipher);
4240     + big_key_aead = crypto_alloc_aead(big_key_alg_name, 0, CRYPTO_ALG_ASYNC);
4241     + if (IS_ERR(big_key_aead)) {
4242     + ret = PTR_ERR(big_key_aead);
4243     pr_err("Can't alloc crypto: %d\n", ret);
4244     - goto error_rng;
4245     + return ret;
4246     + }
4247     + ret = crypto_aead_setauthsize(big_key_aead, ENC_AUTHTAG_SIZE);
4248     + if (ret < 0) {
4249     + pr_err("Can't set crypto auth tag len: %d\n", ret);
4250     + goto free_aead;
4251     }
4252     -
4253     - big_key_skcipher = cipher;
4254    
4255     ret = register_key_type(&key_type_big_key);
4256     if (ret < 0) {
4257     pr_err("Can't register type: %d\n", ret);
4258     - goto error_cipher;
4259     + goto free_aead;
4260     }
4261    
4262     return 0;
4263    
4264     -error_cipher:
4265     - crypto_free_skcipher(big_key_skcipher);
4266     -error_rng:
4267     - crypto_free_rng(big_key_rng);
4268     +free_aead:
4269     + crypto_free_aead(big_key_aead);
4270     return ret;
4271     }
4272    
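[Reviewer note] Taken together, the big_key rewrite leaves the userspace contract unchanged: payloads are still added and read through the keyctl interface, only the at-rest format (AES-256-GCM with a trailing tag) differs. A quick smoke test, assuming a kernel built with CONFIG_BIG_KEYS and libkeyutils available (link with -lkeyutils; the description string is arbitrary):

#include <stdio.h>
#include <string.h>
#include <keyutils.h>

int main(void)
{
	/* large enough to force the encrypted, file-backed storage path */
	char payload[2048];
	char readback[2048];
	key_serial_t id;
	long n;

	memset(payload, 'A', sizeof(payload));
	id = add_key("big_key", "test:big", payload, sizeof(payload),
		     KEY_SPEC_SESSION_KEYRING);
	if (id < 0) {
		perror("add_key");
		return 1;
	}

	n = keyctl_read(id, readback, sizeof(readback));
	printf("stored %zu bytes, read back %ld bytes\n",
	       sizeof(payload), n);
	return (n == (long)sizeof(payload) &&
		memcmp(payload, readback, sizeof(payload)) == 0) ? 0 : 1;
}
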
4273     diff --git a/security/keys/internal.h b/security/keys/internal.h
4274     index 1c02c6547038..503adbae7b0d 100644
4275     --- a/security/keys/internal.h
4276     +++ b/security/keys/internal.h
4277     @@ -141,7 +141,7 @@ extern key_ref_t keyring_search_aux(key_ref_t keyring_ref,
4278     extern key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx);
4279     extern key_ref_t search_process_keyrings(struct keyring_search_context *ctx);
4280    
4281     -extern struct key *find_keyring_by_name(const char *name, bool skip_perm_check);
4282     +extern struct key *find_keyring_by_name(const char *name, bool uid_keyring);
4283    
4284     extern int install_user_keyrings(void);
4285     extern int install_thread_keyring_to_cred(struct cred *);
4286     diff --git a/security/keys/key.c b/security/keys/key.c
4287     index 83da68d98b40..e5c0896c3a8f 100644
4288     --- a/security/keys/key.c
4289     +++ b/security/keys/key.c
4290     @@ -302,6 +302,8 @@ struct key *key_alloc(struct key_type *type, const char *desc,
4291     key->flags |= 1 << KEY_FLAG_IN_QUOTA;
4292     if (flags & KEY_ALLOC_BUILT_IN)
4293     key->flags |= 1 << KEY_FLAG_BUILTIN;
4294     + if (flags & KEY_ALLOC_UID_KEYRING)
4295     + key->flags |= 1 << KEY_FLAG_UID_KEYRING;
4296    
4297     #ifdef KEY_DEBUGGING
4298     key->magic = KEY_DEBUG_MAGIC;
4299     diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
4300     index ab0b337c84b4..6a82090c7fc1 100644
4301     --- a/security/keys/keyctl.c
4302     +++ b/security/keys/keyctl.c
4303     @@ -766,6 +766,11 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
4304    
4305     key = key_ref_to_ptr(key_ref);
4306    
4307     + if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
4308     + ret = -ENOKEY;
4309     + goto error2;
4310     + }
4311     +
4312     /* see if we can read it directly */
4313     ret = key_permission(key_ref, KEY_NEED_READ);
4314     if (ret == 0)
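[Reviewer note] The added test_bit() closes the case where KEYCTL_READ reached a negatively instantiated key and invoked ->read() on payload data that was never set; userspace now reliably gets ENOKEY. Hitting that state on purpose requires racing key construction, so the following is a sketch of the contract rather than a reproducer (libkeyutils; assume neg_id ends up naming a negative key):

#include <errno.h>
#include <stdio.h>
#include <keyutils.h>

int main(void)
{
	char buf[64];
	/* Hypothetical: a failed request_key() with no upcall handler
	 * leaves a negatively instantiated key behind; assume neg_id
	 * refers to one such key. */
	key_serial_t neg_id = request_key("user", "no-such-key", NULL,
					  KEY_SPEC_SESSION_KEYRING);
	if (neg_id < 0) {
		perror("request_key");	/* expected: ENOKEY */
		return 0;
	}

	/* With this fix, reading a negative key fails cleanly. */
	if (keyctl_read(neg_id, buf, sizeof(buf)) < 0 && errno == ENOKEY)
		printf("KEYCTL_READ on negative key -> ENOKEY\n");
	return 0;
}
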
4315     diff --git a/security/keys/keyring.c b/security/keys/keyring.c
4316     index de81793f9920..4fa82a8a9c0e 100644
4317     --- a/security/keys/keyring.c
4318     +++ b/security/keys/keyring.c
4319     @@ -423,7 +423,7 @@ static void keyring_describe(const struct key *keyring, struct seq_file *m)
4320     }
4321    
4322     struct keyring_read_iterator_context {
4323     - size_t qty;
4324     + size_t buflen;
4325     size_t count;
4326     key_serial_t __user *buffer;
4327     };
4328     @@ -435,9 +435,9 @@ static int keyring_read_iterator(const void *object, void *data)
4329     int ret;
4330    
4331     kenter("{%s,%d},,{%zu/%zu}",
4332     - key->type->name, key->serial, ctx->count, ctx->qty);
4333     + key->type->name, key->serial, ctx->count, ctx->buflen);
4334    
4335     - if (ctx->count >= ctx->qty)
4336     + if (ctx->count >= ctx->buflen)
4337     return 1;
4338    
4339     ret = put_user(key->serial, ctx->buffer);
4340     @@ -472,16 +472,12 @@ static long keyring_read(const struct key *keyring,
4341     return 0;
4342    
4343     /* Calculate how much data we could return */
4344     - ctx.qty = nr_keys * sizeof(key_serial_t);
4345     -
4346     if (!buffer || !buflen)
4347     - return ctx.qty;
4348     -
4349     - if (buflen > ctx.qty)
4350     - ctx.qty = buflen;
4351     + return nr_keys * sizeof(key_serial_t);
4352    
4353     /* Copy the IDs of the subscribed keys into the buffer */
4354     ctx.buffer = (key_serial_t __user *)buffer;
4355     + ctx.buflen = buflen;
4356     ctx.count = 0;
4357     ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx);
4358     if (ret < 0) {
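[Reviewer note] The rewritten keyring_read() restores the documented two-step sizing idiom: a NULL buffer query returns the full size in bytes, and a short buffer is now filled only up to buflen instead of being overrun. The usual caller pattern, as a standalone sketch (libkeyutils, link with -lkeyutils):

#include <stdio.h>
#include <stdlib.h>
#include <keyutils.h>

int main(void)
{
	key_serial_t *ids;
	long size, i;

	/* Pass a NULL buffer first: the kernel answers with the number
	 * of bytes the full list of serials would need. */
	size = keyctl_read(KEY_SPEC_SESSION_KEYRING, NULL, 0);
	if (size < 0) {
		perror("keyctl_read (sizing)");
		return 1;
	}

	ids = malloc(size);
	if (!ids)
		return 1;

	/* Second call does the copy; a buffer shorter than `size` is now
	 * filled only up to its own length (pre-fix, it could be overrun). */
	size = keyctl_read(KEY_SPEC_SESSION_KEYRING, (char *)ids, size);
	if (size < 0) {
		perror("keyctl_read");
		free(ids);
		return 1;
	}

	for (i = 0; i < size / (long)sizeof(key_serial_t); i++)
		printf("key serial %d\n", ids[i]);
	free(ids);
	return 0;
}
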
4359     @@ -1101,15 +1097,15 @@ key_ref_t find_key_to_update(key_ref_t keyring_ref,
4360     /*
4361     * Find a keyring with the specified name.
4362     *
4363     - * All named keyrings in the current user namespace are searched, provided they
4364     - * grant Search permission directly to the caller (unless this check is
4365     - * skipped). Keyrings whose usage points have reached zero or who have been
4366     - * revoked are skipped.
4367     + * Only keyrings that have nonzero refcount, are not revoked, and are owned by a
4368     + * user in the current user namespace are considered. If @uid_keyring is %true,
4369     + * the keyring additionally must have been allocated as a user or user session
4370     + * keyring; otherwise, it must grant Search permission directly to the caller.
4371     *
4372     * Returns a pointer to the keyring with the keyring's refcount having been
4373     * incremented on success. -ENOKEY is returned if a key could not be found.
4374     */
4375     -struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
4376     +struct key *find_keyring_by_name(const char *name, bool uid_keyring)
4377     {
4378     struct key *keyring;
4379     int bucket;
4380     @@ -1137,10 +1133,15 @@ struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
4381     if (strcmp(keyring->description, name) != 0)
4382     continue;
4383    
4384     - if (!skip_perm_check &&
4385     - key_permission(make_key_ref(keyring, 0),
4386     - KEY_NEED_SEARCH) < 0)
4387     - continue;
4388     + if (uid_keyring) {
4389     + if (!test_bit(KEY_FLAG_UID_KEYRING,
4390     + &keyring->flags))
4391     + continue;
4392     + } else {
4393     + if (key_permission(make_key_ref(keyring, 0),
4394     + KEY_NEED_SEARCH) < 0)
4395     + continue;
4396     + }
4397    
4398     /* we've got a match but we might end up racing with
4399     * key_cleanup() if the keyring is currently 'dead'
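[Reviewer note] The effect of the @uid_keyring split is that the implicit "_uid.%d" and "_uid_ses.%d" keyrings are matched only by their new allocation flag, so an unprivileged user can no longer shadow another user's UID keyring by registering a keyring with the same description. Joining a named session keyring, by contrast, still goes through the ordinary Search-permission branch, e.g. (libkeyutils; the name is arbitrary):

#include <stdio.h>
#include <keyutils.h>

int main(void)
{
	/* Creates the named keyring if absent, otherwise joins it --
	 * subject to the KEY_NEED_SEARCH check in find_keyring_by_name(). */
	key_serial_t ring = keyctl_join_session_keyring("demo-session");

	if (ring < 0) {
		perror("keyctl_join_session_keyring");
		return 1;
	}
	printf("joined session keyring %d\n", ring);
	return 0;
}
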
4400     diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
4401     index 86bced9fdbdf..293d3598153b 100644
4402     --- a/security/keys/process_keys.c
4403     +++ b/security/keys/process_keys.c
4404     @@ -77,7 +77,8 @@ int install_user_keyrings(void)
4405     if (IS_ERR(uid_keyring)) {
4406     uid_keyring = keyring_alloc(buf, user->uid, INVALID_GID,
4407     cred, user_keyring_perm,
4408     - KEY_ALLOC_IN_QUOTA,
4409     + KEY_ALLOC_UID_KEYRING |
4410     + KEY_ALLOC_IN_QUOTA,
4411     NULL, NULL);
4412     if (IS_ERR(uid_keyring)) {
4413     ret = PTR_ERR(uid_keyring);
4414     @@ -94,7 +95,8 @@ int install_user_keyrings(void)
4415     session_keyring =
4416     keyring_alloc(buf, user->uid, INVALID_GID,
4417     cred, user_keyring_perm,
4418     - KEY_ALLOC_IN_QUOTA,
4419     + KEY_ALLOC_UID_KEYRING |
4420     + KEY_ALLOC_IN_QUOTA,
4421     NULL, NULL);
4422     if (IS_ERR(session_keyring)) {
4423     ret = PTR_ERR(session_keyring);
4424     diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
4425     index 73f5ea6778ce..9380c3fc7cfe 100644
4426     --- a/tools/testing/selftests/seccomp/seccomp_bpf.c
4427     +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
4428     @@ -6,10 +6,18 @@
4429     */
4430    
4431     #include <sys/types.h>
4432     -#include <asm/siginfo.h>
4433     -#define __have_siginfo_t 1
4434     -#define __have_sigval_t 1
4435     -#define __have_sigevent_t 1
4436     +
4437     +/*
4438     + * glibc 2.26 and later have SIGSYS in siginfo_t. Before that,
4439     + * we need to use the kernel's siginfo.h file and trick glibc
4440     + * into accepting it.
4441     + */
4442     +#if !__GLIBC_PREREQ(2, 26)
4443     +# include <asm/siginfo.h>
4444     +# define __have_siginfo_t 1
4445     +# define __have_sigval_t 1
4446     +# define __have_sigevent_t 1
4447     +#endif
4448    
4449     #include <errno.h>
4450     #include <linux/filter.h>
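[Reviewer note] __GLIBC_PREREQ() comes from <features.h>, which every glibc header drags in, so the guard above works because <sys/types.h> precedes it. A minimal standalone check of the same idiom (assumes a glibc toolchain):

#include <stdio.h>	/* any glibc header pulls in <features.h> */

int main(void)
{
#if __GLIBC_PREREQ(2, 26)	/* assumes glibc; undefined elsewhere */
	printf("glibc %d.%d: siginfo_t already carries the SIGSYS fields\n",
	       __GLIBC__, __GLIBC_MINOR__);
#else
	printf("older glibc: the <asm/siginfo.h> workaround above applies\n");
#endif
	return 0;
}
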
4451     @@ -676,7 +684,7 @@ TEST_F_SIGNAL(TRAP, ign, SIGSYS)
4452     syscall(__NR_getpid);
4453     }
4454    
4455     -static struct siginfo TRAP_info;
4456     +static siginfo_t TRAP_info;
4457     static volatile int TRAP_nr;
4458     static void TRAP_action(int nr, siginfo_t *info, void *void_context)
4459     {