Magellan Linux

Annotation of /trunk/kernel-alx-legacy/patches-4.9/0109-4.9.10-all-fixes.patch



Revision 3608
Fri Aug 14 07:34:29 2020 UTC by niro
File size: 73914 bytes
-added kernel-alx-legacy pkg
1 niro 3608 diff --git a/Makefile b/Makefile
2     index c0c41c9fac0c..d2fe757a979d 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 4
7     PATCHLEVEL = 9
8     -SUBLEVEL = 9
9     +SUBLEVEL = 10
10     EXTRAVERSION =
11     NAME = Roaring Lionus
12    
13     diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
14     index 91ebe382147f..5f69c3bd59bb 100644
15     --- a/arch/arc/kernel/unaligned.c
16     +++ b/arch/arc/kernel/unaligned.c
17     @@ -243,7 +243,7 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
18    
19     /* clear any remanants of delay slot */
20     if (delay_mode(regs)) {
21     - regs->ret = regs->bta ~1U;
22     + regs->ret = regs->bta & ~1U;
23     regs->status32 &= ~STATUS_DE_MASK;
24     } else {
25     regs->ret += state.instr_len;
26     diff --git a/arch/arm/boot/dts/imx6dl.dtsi b/arch/arm/boot/dts/imx6dl.dtsi
27     index 1ade1951e620..7aa120fbdc71 100644
28     --- a/arch/arm/boot/dts/imx6dl.dtsi
29     +++ b/arch/arm/boot/dts/imx6dl.dtsi
30     @@ -137,7 +137,7 @@
31     &gpio4 {
32     gpio-ranges = <&iomuxc 5 136 1>, <&iomuxc 6 145 1>, <&iomuxc 7 150 1>,
33     <&iomuxc 8 146 1>, <&iomuxc 9 151 1>, <&iomuxc 10 147 1>,
34     - <&iomuxc 11 151 1>, <&iomuxc 12 148 1>, <&iomuxc 13 153 1>,
35     + <&iomuxc 11 152 1>, <&iomuxc 12 148 1>, <&iomuxc 13 153 1>,
36     <&iomuxc 14 149 1>, <&iomuxc 15 154 1>, <&iomuxc 16 39 7>,
37     <&iomuxc 23 56 1>, <&iomuxc 24 61 7>, <&iomuxc 31 46 1>;
38     };
39     diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
40     index ce131ed5939d..ae738a6319f6 100644
41     --- a/arch/arm/kernel/ptrace.c
42     +++ b/arch/arm/kernel/ptrace.c
43     @@ -600,7 +600,7 @@ static int gpr_set(struct task_struct *target,
44     const void *kbuf, const void __user *ubuf)
45     {
46     int ret;
47     - struct pt_regs newregs;
48     + struct pt_regs newregs = *task_pt_regs(target);
49    
50     ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
51     &newregs,
52     diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
53     index 3a2e678b8d30..0122ad1a6027 100644
54     --- a/arch/arm/mm/fault.c
55     +++ b/arch/arm/mm/fault.c
56     @@ -610,9 +610,9 @@ static int __init early_abort_handler(unsigned long addr, unsigned int fsr,
57    
58     void __init early_abt_enable(void)
59     {
60     - fsr_info[22].fn = early_abort_handler;
61     + fsr_info[FSR_FS_AEA].fn = early_abort_handler;
62     local_abt_enable();
63     - fsr_info[22].fn = do_bad;
64     + fsr_info[FSR_FS_AEA].fn = do_bad;
65     }
66    
67     #ifndef CONFIG_ARM_LPAE
68     diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
69     index 67532f242271..afc1f84e763b 100644
70     --- a/arch/arm/mm/fault.h
71     +++ b/arch/arm/mm/fault.h
72     @@ -11,11 +11,15 @@
73     #define FSR_FS5_0 (0x3f)
74    
75     #ifdef CONFIG_ARM_LPAE
76     +#define FSR_FS_AEA 17
77     +
78     static inline int fsr_fs(unsigned int fsr)
79     {
80     return fsr & FSR_FS5_0;
81     }
82     #else
83     +#define FSR_FS_AEA 22
84     +
85     static inline int fsr_fs(unsigned int fsr)
86     {
87     return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
88     diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
89     index 9e1499f98def..13f5fad21066 100644
90     --- a/arch/powerpc/include/asm/reg.h
91     +++ b/arch/powerpc/include/asm/reg.h
92     @@ -641,9 +641,10 @@
93     #define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */
94     #define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */
95     #define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */
96     -#define SRR1_WAKEMASK_P8 0x003c0000 /* reason for wakeup on POWER8 */
97     +#define SRR1_WAKEMASK_P8 0x003c0000 /* reason for wakeup on POWER8 and 9 */
98     #define SRR1_WAKESYSERR 0x00300000 /* System error */
99     #define SRR1_WAKEEE 0x00200000 /* External interrupt */
100     +#define SRR1_WAKEHVI 0x00240000 /* Hypervisor Virtualization Interrupt (P9) */
101     #define SRR1_WAKEMT 0x00280000 /* mtctrl */
102     #define SRR1_WAKEHMI 0x00280000 /* Hypervisor maintenance */
103     #define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */
104     diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
105     index f0b238516e9b..e0b9e576905a 100644
106     --- a/arch/powerpc/include/asm/xics.h
107     +++ b/arch/powerpc/include/asm/xics.h
108     @@ -44,6 +44,7 @@ static inline int icp_hv_init(void) { return -ENODEV; }
109    
110     #ifdef CONFIG_PPC_POWERNV
111     extern int icp_opal_init(void);
112     +extern void icp_opal_flush_interrupt(void);
113     #else
114     static inline int icp_opal_init(void) { return -ENODEV; }
115     #endif
116     diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
117     index 3493cf4e0452..71697ff70879 100644
118     --- a/arch/powerpc/mm/tlb-radix.c
119     +++ b/arch/powerpc/mm/tlb-radix.c
120     @@ -50,9 +50,7 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
121     for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
122     __tlbiel_pid(pid, set, ric);
123     }
124     - if (cpu_has_feature(CPU_FTR_POWER9_DD1))
125     - asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
126     - return;
127     + asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
128     }
129    
130     static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
131     @@ -85,8 +83,6 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid,
132     asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
133     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
134     asm volatile("ptesync": : :"memory");
135     - if (cpu_has_feature(CPU_FTR_POWER9_DD1))
136     - asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
137     }
138    
139     static inline void _tlbie_va(unsigned long va, unsigned long pid,
140     diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
141     index c789258ae1e1..eec0e8d0454d 100644
142     --- a/arch/powerpc/platforms/powernv/smp.c
143     +++ b/arch/powerpc/platforms/powernv/smp.c
144     @@ -155,8 +155,10 @@ static void pnv_smp_cpu_kill_self(void)
145     wmask = SRR1_WAKEMASK_P8;
146    
147     idle_states = pnv_get_supported_cpuidle_states();
148     +
149     /* We don't want to take decrementer interrupts while we are offline,
150     - * so clear LPCR:PECE1. We keep PECE2 enabled.
151     + * so clear LPCR:PECE1. We keep PECE2 (and LPCR_PECE_HVEE on P9)
152     + * enabled as to let IPIs in.
153     */
154     mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
155    
156     @@ -206,8 +208,12 @@ static void pnv_smp_cpu_kill_self(void)
157     * contains 0.
158     */
159     if (((srr1 & wmask) == SRR1_WAKEEE) ||
160     + ((srr1 & wmask) == SRR1_WAKEHVI) ||
161     (local_paca->irq_happened & PACA_IRQ_EE)) {
162     - icp_native_flush_interrupt();
163     + if (cpu_has_feature(CPU_FTR_ARCH_300))
164     + icp_opal_flush_interrupt();
165     + else
166     + icp_native_flush_interrupt();
167     } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
168     unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
169     asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
170     @@ -221,6 +227,8 @@ static void pnv_smp_cpu_kill_self(void)
171     if (srr1 && !generic_check_cpu_restart(cpu))
172     DBG("CPU%d Unexpected exit while offline !\n", cpu);
173     }
174     +
175     + /* Re-enable decrementer interrupts */
176     mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);
177     DBG("CPU%d coming online...\n", cpu);
178     }
179     diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
180     index 60c57657c772..c96c0cb95d87 100644
181     --- a/arch/powerpc/sysdev/xics/icp-opal.c
182     +++ b/arch/powerpc/sysdev/xics/icp-opal.c
183     @@ -132,6 +132,35 @@ static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id)
184     return smp_ipi_demux();
185     }
186    
187     +/*
188     + * Called when an interrupt is received on an off-line CPU to
189     + * clear the interrupt, so that the CPU can go back to nap mode.
190     + */
191     +void icp_opal_flush_interrupt(void)
192     +{
193     + unsigned int xirr;
194     + unsigned int vec;
195     +
196     + do {
197     + xirr = icp_opal_get_xirr();
198     + vec = xirr & 0x00ffffff;
199     + if (vec == XICS_IRQ_SPURIOUS)
200     + break;
201     + if (vec == XICS_IPI) {
202     + /* Clear pending IPI */
203     + int cpu = smp_processor_id();
204     + kvmppc_set_host_ipi(cpu, 0);
205     + opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
206     + } else {
207     + pr_err("XICS: hw interrupt 0x%x to offline cpu, "
208     + "disabling\n", vec);
209     + xics_mask_unknown_vec(vec);
210     + }
211     +
212     + /* EOI the interrupt */
213     + } while (opal_int_eoi(xirr) > 0);
214     +}
215     +
216     #endif /* CONFIG_SMP */
217    
218     static const struct icp_ops icp_opal_ops = {
219     diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
220     index 984a7bf17f6a..83db0eae9979 100644
221     --- a/arch/x86/include/asm/processor.h
222     +++ b/arch/x86/include/asm/processor.h
223     @@ -104,6 +104,7 @@ struct cpuinfo_x86 {
224     __u8 x86_phys_bits;
225     /* CPUID returned core id bits: */
226     __u8 x86_coreid_bits;
227     + __u8 cu_id;
228     /* Max extended CPUID function supported: */
229     __u32 extended_cpuid_level;
230     /* Maximum supported CPUID level, -1=no CPUID: */
231     diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
232     index 7249f1500bcb..d1e25564b3c1 100644
233     --- a/arch/x86/kernel/apic/io_apic.c
234     +++ b/arch/x86/kernel/apic/io_apic.c
235     @@ -1876,7 +1876,6 @@ static struct irq_chip ioapic_chip __read_mostly = {
236     .irq_ack = irq_chip_ack_parent,
237     .irq_eoi = ioapic_ack_level,
238     .irq_set_affinity = ioapic_set_affinity,
239     - .irq_retrigger = irq_chip_retrigger_hierarchy,
240     .flags = IRQCHIP_SKIP_SET_WAKE,
241     };
242    
243     @@ -1888,7 +1887,6 @@ static struct irq_chip ioapic_ir_chip __read_mostly = {
244     .irq_ack = irq_chip_ack_parent,
245     .irq_eoi = ioapic_ir_ack_level,
246     .irq_set_affinity = ioapic_set_affinity,
247     - .irq_retrigger = irq_chip_retrigger_hierarchy,
248     .flags = IRQCHIP_SKIP_SET_WAKE,
249     };
250    
251     diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
252     index 1d3167269a67..2b4cf04239b6 100644
253     --- a/arch/x86/kernel/cpu/amd.c
254     +++ b/arch/x86/kernel/cpu/amd.c
255     @@ -309,8 +309,22 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
256    
257     /* get information required for multi-node processors */
258     if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
259     + u32 eax, ebx, ecx, edx;
260    
261     - node_id = cpuid_ecx(0x8000001e) & 7;
262     + cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
263     +
264     + node_id = ecx & 0xff;
265     + smp_num_siblings = ((ebx >> 8) & 0xff) + 1;
266     +
267     + if (c->x86 == 0x15)
268     + c->cu_id = ebx & 0xff;
269     +
270     + if (c->x86 >= 0x17) {
271     + c->cpu_core_id = ebx & 0xff;
272     +
273     + if (smp_num_siblings > 1)
274     + c->x86_max_cores /= smp_num_siblings;
275     + }
276    
277     /*
278     * We may have multiple LLCs if L3 caches exist, so check if we
279     diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
280     index 023c7bfa24df..4eece91ada37 100644
281     --- a/arch/x86/kernel/cpu/common.c
282     +++ b/arch/x86/kernel/cpu/common.c
283     @@ -1015,6 +1015,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
284     c->x86_model_id[0] = '\0'; /* Unset */
285     c->x86_max_cores = 1;
286     c->x86_coreid_bits = 0;
287     + c->cu_id = 0xff;
288     #ifdef CONFIG_X86_64
289     c->x86_clflush_size = 64;
290     c->x86_phys_bits = 36;
291     diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
292     index e9bbe02950ad..36171bcd91f8 100644
293     --- a/arch/x86/kernel/smpboot.c
294     +++ b/arch/x86/kernel/smpboot.c
295     @@ -423,9 +423,15 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
296     int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
297    
298     if (c->phys_proc_id == o->phys_proc_id &&
299     - per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) &&
300     - c->cpu_core_id == o->cpu_core_id)
301     - return topology_sane(c, o, "smt");
302     + per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) {
303     + if (c->cpu_core_id == o->cpu_core_id)
304     + return topology_sane(c, o, "smt");
305     +
306     + if ((c->cu_id != 0xff) &&
307     + (o->cu_id != 0xff) &&
308     + (c->cu_id == o->cu_id))
309     + return topology_sane(c, o, "smt");
310     + }
311    
312     } else if (c->phys_proc_id == o->phys_proc_id &&
313     c->cpu_core_id == o->cpu_core_id) {
314     diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
315     index ea9c49adaa1f..8aa6bea1cd6c 100644
316     --- a/arch/x86/mm/dump_pagetables.c
317     +++ b/arch/x86/mm/dump_pagetables.c
318     @@ -15,6 +15,7 @@
319     #include <linux/debugfs.h>
320     #include <linux/mm.h>
321     #include <linux/init.h>
322     +#include <linux/sched.h>
323     #include <linux/seq_file.h>
324    
325     #include <asm/pgtable.h>
326     @@ -406,6 +407,7 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
327     } else
328     note_page(m, &st, __pgprot(0), 1);
329    
330     + cond_resched();
331     start++;
332     }
333    
334     diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
335     index e9c0993b131d..e8817e2f0597 100644
336     --- a/crypto/algif_aead.c
337     +++ b/crypto/algif_aead.c
338     @@ -671,9 +671,9 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
339     unlock:
340     list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) {
341     af_alg_free_sg(&rsgl->sgl);
342     + list_del(&rsgl->list);
343     if (rsgl != &ctx->first_rsgl)
344     sock_kfree_s(sk, rsgl, sizeof(*rsgl));
345     - list_del(&rsgl->list);
346     }
347     INIT_LIST_HEAD(&ctx->list);
348     aead_wmem_wakeup(sk);
349     diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
350     index 312c4b4dc363..6eb6733a7a5c 100644
351     --- a/drivers/acpi/nfit/core.c
352     +++ b/drivers/acpi/nfit/core.c
353     @@ -2704,6 +2704,7 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
354     struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
355     struct device *dev = acpi_desc->dev;
356     struct acpi_nfit_flush_work flush;
357     + int rc;
358    
359     /* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
360     device_lock(dev);
361     @@ -2716,7 +2717,10 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
362     INIT_WORK_ONSTACK(&flush.work, flush_probe);
363     COMPLETION_INITIALIZER_ONSTACK(flush.cmp);
364     queue_work(nfit_wq, &flush.work);
365     - return wait_for_completion_interruptible(&flush.cmp);
366     +
367     + rc = wait_for_completion_interruptible(&flush.cmp);
368     + cancel_work_sync(&flush.work);
369     + return rc;
370     }
371    
372     static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
373     diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
374     index 4737520ec823..80fa656da5ab 100644
375     --- a/drivers/cpufreq/intel_pstate.c
376     +++ b/drivers/cpufreq/intel_pstate.c
377     @@ -820,6 +820,25 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
378     wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
379     }
380    
381     +#define MSR_IA32_POWER_CTL_BIT_EE 19
382     +
383     +/* Disable energy efficiency optimization */
384     +static void intel_pstate_disable_ee(int cpu)
385     +{
386     + u64 power_ctl;
387     + int ret;
388     +
389     + ret = rdmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, &power_ctl);
390     + if (ret)
391     + return;
392     +
393     + if (!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE))) {
394     + pr_info("Disabling energy efficiency optimization\n");
395     + power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
396     + wrmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, power_ctl);
397     + }
398     +}
399     +
400     static int atom_get_min_pstate(void)
401     {
402     u64 value;
403     @@ -1420,6 +1439,11 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
404     {}
405     };
406    
407     +static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
408     + ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, core_params),
409     + {}
410     +};
411     +
412     static int intel_pstate_init_cpu(unsigned int cpunum)
413     {
414     struct cpudata *cpu;
415     @@ -1435,6 +1459,12 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
416     cpu->cpu = cpunum;
417    
418     if (hwp_active) {
419     + const struct x86_cpu_id *id;
420     +
421     + id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids);
422     + if (id)
423     + intel_pstate_disable_ee(cpunum);
424     +
425     intel_pstate_hwp_enable(cpu);
426     pid_params.sample_rate_ms = 50;
427     pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC;
428     diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
429     index faf3cb3ddce2..a388bf2d67f4 100644
430     --- a/drivers/crypto/ccp/ccp-dev-v5.c
431     +++ b/drivers/crypto/ccp/ccp-dev-v5.c
432     @@ -955,7 +955,7 @@ static irqreturn_t ccp5_irq_handler(int irq, void *data)
433     static void ccp5_config(struct ccp_device *ccp)
434     {
435     /* Public side */
436     - iowrite32(0x00001249, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
437     + iowrite32(0x0, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
438     }
439    
440     static void ccp5other_config(struct ccp_device *ccp)
441     diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
442     index da5f4a678083..340aef14d616 100644
443     --- a/drivers/crypto/ccp/ccp-dev.h
444     +++ b/drivers/crypto/ccp/ccp-dev.h
445     @@ -238,6 +238,7 @@ struct ccp_dma_chan {
446     struct ccp_device *ccp;
447    
448     spinlock_t lock;
449     + struct list_head created;
450     struct list_head pending;
451     struct list_head active;
452     struct list_head complete;
453     diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
454     index 6553912804f7..e5d9278f4019 100644
455     --- a/drivers/crypto/ccp/ccp-dmaengine.c
456     +++ b/drivers/crypto/ccp/ccp-dmaengine.c
457     @@ -63,6 +63,7 @@ static void ccp_free_chan_resources(struct dma_chan *dma_chan)
458     ccp_free_desc_resources(chan->ccp, &chan->complete);
459     ccp_free_desc_resources(chan->ccp, &chan->active);
460     ccp_free_desc_resources(chan->ccp, &chan->pending);
461     + ccp_free_desc_resources(chan->ccp, &chan->created);
462    
463     spin_unlock_irqrestore(&chan->lock, flags);
464     }
465     @@ -273,6 +274,7 @@ static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
466     spin_lock_irqsave(&chan->lock, flags);
467    
468     cookie = dma_cookie_assign(tx_desc);
469     + list_del(&desc->entry);
470     list_add_tail(&desc->entry, &chan->pending);
471    
472     spin_unlock_irqrestore(&chan->lock, flags);
473     @@ -426,7 +428,7 @@ static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
474    
475     spin_lock_irqsave(&chan->lock, sflags);
476    
477     - list_add_tail(&desc->entry, &chan->pending);
478     + list_add_tail(&desc->entry, &chan->created);
479    
480     spin_unlock_irqrestore(&chan->lock, sflags);
481    
482     @@ -610,6 +612,7 @@ static int ccp_terminate_all(struct dma_chan *dma_chan)
483     /*TODO: Purge the complete list? */
484     ccp_free_desc_resources(chan->ccp, &chan->active);
485     ccp_free_desc_resources(chan->ccp, &chan->pending);
486     + ccp_free_desc_resources(chan->ccp, &chan->created);
487    
488     spin_unlock_irqrestore(&chan->lock, flags);
489    
490     @@ -679,6 +682,7 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
491     chan->ccp = ccp;
492    
493     spin_lock_init(&chan->lock);
494     + INIT_LIST_HEAD(&chan->created);
495     INIT_LIST_HEAD(&chan->pending);
496     INIT_LIST_HEAD(&chan->active);
497     INIT_LIST_HEAD(&chan->complete);
498     diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
499     index fb5f9bbfa09c..6aece3f25b08 100644
500     --- a/drivers/crypto/chelsio/chcr_core.c
501     +++ b/drivers/crypto/chelsio/chcr_core.c
502     @@ -51,6 +51,7 @@ static struct cxgb4_uld_info chcr_uld_info = {
503     int assign_chcr_device(struct chcr_dev **dev)
504     {
505     struct uld_ctx *u_ctx;
506     + int ret = -ENXIO;
507    
508     /*
509     * Which device to use if multiple devices are available TODO
510     @@ -58,15 +59,14 @@ int assign_chcr_device(struct chcr_dev **dev)
511     * must go to the same device to maintain the ordering.
512     */
513     mutex_lock(&dev_mutex); /* TODO ? */
514     - u_ctx = list_first_entry(&uld_ctx_list, struct uld_ctx, entry);
515     - if (!u_ctx) {
516     - mutex_unlock(&dev_mutex);
517     - return -ENXIO;
518     + list_for_each_entry(u_ctx, &uld_ctx_list, entry)
519     + if (u_ctx && u_ctx->dev) {
520     + *dev = u_ctx->dev;
521     + ret = 0;
522     + break;
523     }
524     -
525     - *dev = u_ctx->dev;
526     mutex_unlock(&dev_mutex);
527     - return 0;
528     + return ret;
529     }
530    
531     static int chcr_dev_add(struct uld_ctx *u_ctx)
532     @@ -203,10 +203,8 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
533    
534     static int __init chcr_crypto_init(void)
535     {
536     - if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info)) {
537     + if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info))
538     pr_err("ULD register fail: No chcr crypto support in cxgb4");
539     - return -1;
540     - }
541    
542     return 0;
543     }
544     diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c
545     index bc5cbc193aae..5b2d78a5b5aa 100644
546     --- a/drivers/crypto/qat/qat_c62x/adf_drv.c
547     +++ b/drivers/crypto/qat/qat_c62x/adf_drv.c
548     @@ -233,7 +233,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
549     &hw_data->accel_capabilities_mask);
550    
551     /* Find and map all the device's BARS */
552     - i = 0;
553     + i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
554     bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
555     for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
556     ADF_PCI_MAX_BARS * 2) {
557     diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
558     index e8822536530b..33f0a6251e38 100644
559     --- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
560     +++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
561     @@ -69,6 +69,7 @@
562     #define ADF_ERRSOU5 (0x3A000 + 0xD8)
563     #define ADF_DEVICE_FUSECTL_OFFSET 0x40
564     #define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
565     +#define ADF_DEVICE_FUSECTL_MASK 0x80000000
566     #define ADF_PCI_MAX_BARS 3
567     #define ADF_DEVICE_NAME_LENGTH 32
568     #define ADF_ETR_MAX_RINGS_PER_BANK 16
569     diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
570     index 1e480f140663..8c4fd255a601 100644
571     --- a/drivers/crypto/qat/qat_common/qat_hal.c
572     +++ b/drivers/crypto/qat/qat_common/qat_hal.c
573     @@ -456,7 +456,7 @@ static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
574     unsigned int csr_val;
575     int times = 30;
576    
577     - if (handle->pci_dev->device == ADF_C3XXX_PCI_DEVICE_ID)
578     + if (handle->pci_dev->device != ADF_DH895XCC_PCI_DEVICE_ID)
579     return 0;
580    
581     csr_val = ADF_CSR_RD(csr_addr, 0);
582     @@ -716,7 +716,7 @@ int qat_hal_init(struct adf_accel_dev *accel_dev)
583     (void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v +
584     LOCAL_TO_XFER_REG_OFFSET);
585     handle->pci_dev = pci_info->pci_dev;
586     - if (handle->pci_dev->device != ADF_C3XXX_PCI_DEVICE_ID) {
587     + if (handle->pci_dev->device == ADF_DH895XCC_PCI_DEVICE_ID) {
588     sram_bar =
589     &pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)];
590     handle->hal_sram_addr_v = sram_bar->virt_addr;
591     diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
592     index e6862a744210..4e19bde4bbff 100644
593     --- a/drivers/gpu/drm/drm_atomic.c
594     +++ b/drivers/gpu/drm/drm_atomic.c
595     @@ -1759,16 +1759,16 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
596    
597     if (ret && arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
598     /*
599     - * TEST_ONLY and PAGE_FLIP_EVENT are mutually exclusive,
600     - * if they weren't, this code should be called on success
601     - * for TEST_ONLY too.
602     + * Free the allocated event. drm_atomic_helper_setup_commit
603     + * can allocate an event too, so only free it if it's ours
604     + * to prevent a double free in drm_atomic_state_clear.
605     */
606     -
607     for_each_crtc_in_state(state, crtc, crtc_state, i) {
608     - if (!crtc_state->event)
609     - continue;
610     -
611     - drm_event_cancel_free(dev, &crtc_state->event->base);
612     + struct drm_pending_vblank_event *event = crtc_state->event;
613     + if (event && (event->base.fence || event->base.file_priv)) {
614     + drm_event_cancel_free(dev, &event->base);
615     + crtc_state->event = NULL;
616     + }
617     }
618     }
619    
620     diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
621     index a218c2e395e7..0c400f852a76 100644
622     --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
623     +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
624     @@ -1215,14 +1215,14 @@ validate_exec_list(struct drm_device *dev,
625     if (exec[i].offset !=
626     gen8_canonical_addr(exec[i].offset & PAGE_MASK))
627     return -EINVAL;
628     -
629     - /* From drm_mm perspective address space is continuous,
630     - * so from this point we're always using non-canonical
631     - * form internally.
632     - */
633     - exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
634     }
635    
636     + /* From drm_mm perspective address space is continuous,
637     + * so from this point we're always using non-canonical
638     + * form internally.
639     + */
640     + exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
641     +
642     if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
643     return -EINVAL;
644    
645     diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
646     index 8079e5b380cb..b9be8a6141d8 100644
647     --- a/drivers/gpu/drm/i915/intel_display.c
648     +++ b/drivers/gpu/drm/i915/intel_display.c
649     @@ -4280,10 +4280,10 @@ static void page_flip_completed(struct intel_crtc *intel_crtc)
650     drm_crtc_vblank_put(&intel_crtc->base);
651    
652     wake_up_all(&dev_priv->pending_flip_queue);
653     - queue_work(dev_priv->wq, &work->unpin_work);
654     -
655     trace_i915_flip_complete(intel_crtc->plane,
656     work->pending_flip_obj);
657     +
658     + queue_work(dev_priv->wq, &work->unpin_work);
659     }
660    
661     static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
662     diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
663     index 1c59ca50c430..cae27c55dd99 100644
664     --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
665     +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
666     @@ -1723,7 +1723,8 @@ bxt_get_dpll(struct intel_crtc *crtc,
667     return NULL;
668    
669     if ((encoder->type == INTEL_OUTPUT_DP ||
670     - encoder->type == INTEL_OUTPUT_EDP) &&
671     + encoder->type == INTEL_OUTPUT_EDP ||
672     + encoder->type == INTEL_OUTPUT_DP_MST) &&
673     !bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state))
674     return NULL;
675    
676     diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
677     index 16f91c8490fe..5fb4c6d9209b 100644
678     --- a/drivers/hv/channel.c
679     +++ b/drivers/hv/channel.c
680     @@ -39,7 +39,7 @@
681     * vmbus_setevent- Trigger an event notification on the specified
682     * channel.
683     */
684     -static void vmbus_setevent(struct vmbus_channel *channel)
685     +void vmbus_setevent(struct vmbus_channel *channel)
686     {
687     struct hv_monitor_page *monitorpage;
688    
689     @@ -65,6 +65,7 @@ static void vmbus_setevent(struct vmbus_channel *channel)
690     vmbus_set_event(channel);
691     }
692     }
693     +EXPORT_SYMBOL_GPL(vmbus_setevent);
694    
695     /*
696     * vmbus_open - Open the specified channel.
697     @@ -635,8 +636,6 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
698     u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
699     struct kvec bufferlist[3];
700     u64 aligned_data = 0;
701     - int ret;
702     - bool signal = false;
703     bool lock = channel->acquire_ring_lock;
704     int num_vecs = ((bufferlen != 0) ? 3 : 1);
705    
706     @@ -656,33 +655,9 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
707     bufferlist[2].iov_base = &aligned_data;
708     bufferlist[2].iov_len = (packetlen_aligned - packetlen);
709    
710     - ret = hv_ringbuffer_write(&channel->outbound, bufferlist, num_vecs,
711     - &signal, lock, channel->signal_policy);
712     -
713     - /*
714     - * Signalling the host is conditional on many factors:
715     - * 1. The ring state changed from being empty to non-empty.
716     - * This is tracked by the variable "signal".
717     - * 2. The variable kick_q tracks if more data will be placed
718     - * on the ring. We will not signal if more data is
719     - * to be placed.
720     - *
721     - * Based on the channel signal state, we will decide
722     - * which signaling policy will be applied.
723     - *
724     - * If we cannot write to the ring-buffer; signal the host
725     - * even if we may not have written anything. This is a rare
726     - * enough condition that it should not matter.
727     - * NOTE: in this case, the hvsock channel is an exception, because
728     - * it looks the host side's hvsock implementation has a throttling
729     - * mechanism which can hurt the performance otherwise.
730     - */
731     -
732     - if (((ret == 0) && kick_q && signal) ||
733     - (ret && !is_hvsock_channel(channel)))
734     - vmbus_setevent(channel);
735     + return hv_ringbuffer_write(channel, bufferlist, num_vecs,
736     + lock, kick_q);
737    
738     - return ret;
739     }
740     EXPORT_SYMBOL(vmbus_sendpacket_ctl);
741    
742     @@ -723,7 +698,6 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
743     u32 flags,
744     bool kick_q)
745     {
746     - int ret;
747     int i;
748     struct vmbus_channel_packet_page_buffer desc;
749     u32 descsize;
750     @@ -731,7 +705,6 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
751     u32 packetlen_aligned;
752     struct kvec bufferlist[3];
753     u64 aligned_data = 0;
754     - bool signal = false;
755     bool lock = channel->acquire_ring_lock;
756    
757     if (pagecount > MAX_PAGE_BUFFER_COUNT)
758     @@ -769,29 +742,8 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
759     bufferlist[2].iov_base = &aligned_data;
760     bufferlist[2].iov_len = (packetlen_aligned - packetlen);
761    
762     - ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
763     - &signal, lock, channel->signal_policy);
764     -
765     - /*
766     - * Signalling the host is conditional on many factors:
767     - * 1. The ring state changed from being empty to non-empty.
768     - * This is tracked by the variable "signal".
769     - * 2. The variable kick_q tracks if more data will be placed
770     - * on the ring. We will not signal if more data is
771     - * to be placed.
772     - *
773     - * Based on the channel signal state, we will decide
774     - * which signaling policy will be applied.
775     - *
776     - * If we cannot write to the ring-buffer; signal the host
777     - * even if we may not have written anything. This is a rare
778     - * enough condition that it should not matter.
779     - */
780     -
781     - if (((ret == 0) && kick_q && signal) || (ret))
782     - vmbus_setevent(channel);
783     -
784     - return ret;
785     + return hv_ringbuffer_write(channel, bufferlist, 3,
786     + lock, kick_q);
787     }
788     EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer_ctl);
789    
790     @@ -822,12 +774,10 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
791     u32 desc_size,
792     void *buffer, u32 bufferlen, u64 requestid)
793     {
794     - int ret;
795     u32 packetlen;
796     u32 packetlen_aligned;
797     struct kvec bufferlist[3];
798     u64 aligned_data = 0;
799     - bool signal = false;
800     bool lock = channel->acquire_ring_lock;
801    
802     packetlen = desc_size + bufferlen;
803     @@ -848,13 +798,8 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
804     bufferlist[2].iov_base = &aligned_data;
805     bufferlist[2].iov_len = (packetlen_aligned - packetlen);
806    
807     - ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
808     - &signal, lock, channel->signal_policy);
809     -
810     - if (ret == 0 && signal)
811     - vmbus_setevent(channel);
812     -
813     - return ret;
814     + return hv_ringbuffer_write(channel, bufferlist, 3,
815     + lock, true);
816     }
817     EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);
818    
819     @@ -866,14 +811,12 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
820     struct hv_multipage_buffer *multi_pagebuffer,
821     void *buffer, u32 bufferlen, u64 requestid)
822     {
823     - int ret;
824     struct vmbus_channel_packet_multipage_buffer desc;
825     u32 descsize;
826     u32 packetlen;
827     u32 packetlen_aligned;
828     struct kvec bufferlist[3];
829     u64 aligned_data = 0;
830     - bool signal = false;
831     bool lock = channel->acquire_ring_lock;
832     u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
833     multi_pagebuffer->len);
834     @@ -913,13 +856,8 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
835     bufferlist[2].iov_base = &aligned_data;
836     bufferlist[2].iov_len = (packetlen_aligned - packetlen);
837    
838     - ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
839     - &signal, lock, channel->signal_policy);
840     -
841     - if (ret == 0 && signal)
842     - vmbus_setevent(channel);
843     -
844     - return ret;
845     + return hv_ringbuffer_write(channel, bufferlist, 3,
846     + lock, true);
847     }
848     EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer);
849    
850     @@ -941,16 +879,9 @@ __vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
851     u32 bufferlen, u32 *buffer_actual_len, u64 *requestid,
852     bool raw)
853     {
854     - int ret;
855     - bool signal = false;
856     + return hv_ringbuffer_read(channel, buffer, bufferlen,
857     + buffer_actual_len, requestid, raw);
858    
859     - ret = hv_ringbuffer_read(&channel->inbound, buffer, bufferlen,
860     - buffer_actual_len, requestid, &signal, raw);
861     -
862     - if (signal)
863     - vmbus_setevent(channel);
864     -
865     - return ret;
866     }
867    
868     int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
869     diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
870     index 1bc1d4795243..caf341842464 100644
871     --- a/drivers/hv/channel_mgmt.c
872     +++ b/drivers/hv/channel_mgmt.c
873     @@ -449,8 +449,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
874     }
875    
876     dev_type = hv_get_dev_type(newchannel);
877     - if (dev_type == HV_NIC)
878     - set_channel_signal_state(newchannel, HV_SIGNAL_POLICY_EXPLICIT);
879    
880     init_vp_index(newchannel, dev_type);
881    
882     diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
883     index a5b4442433c8..2b13f2a0a71e 100644
884     --- a/drivers/hv/hyperv_vmbus.h
885     +++ b/drivers/hv/hyperv_vmbus.h
886     @@ -527,14 +527,14 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
887    
888     void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
889    
890     -int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info,
891     +int hv_ringbuffer_write(struct vmbus_channel *channel,
892     struct kvec *kv_list,
893     - u32 kv_count, bool *signal, bool lock,
894     - enum hv_signal_policy policy);
895     + u32 kv_count, bool lock,
896     + bool kick_q);
897    
898     -int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
899     +int hv_ringbuffer_read(struct vmbus_channel *channel,
900     void *buffer, u32 buflen, u32 *buffer_actual_len,
901     - u64 *requestid, bool *signal, bool raw);
902     + u64 *requestid, bool raw);
903    
904     void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
905     struct hv_ring_buffer_debug_info *debug_info);
906     diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
907     index 08043da1a61c..308dbda700eb 100644
908     --- a/drivers/hv/ring_buffer.c
909     +++ b/drivers/hv/ring_buffer.c
910     @@ -66,21 +66,25 @@ u32 hv_end_read(struct hv_ring_buffer_info *rbi)
911     * once the ring buffer is empty, it will clear the
912     * interrupt_mask and re-check to see if new data has
913     * arrived.
914     + *
915     + * KYS: Oct. 30, 2016:
916     + * It looks like Windows hosts have logic to deal with DOS attacks that
917     + * can be triggered if it receives interrupts when it is not expecting
918     + * the interrupt. The host expects interrupts only when the ring
919     + * transitions from empty to non-empty (or full to non full on the guest
920     + * to host ring).
921     + * So, base the signaling decision solely on the ring state until the
922     + * host logic is fixed.
923     */
924    
925     -static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi,
926     - enum hv_signal_policy policy)
927     +static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel,
928     + bool kick_q)
929     {
930     + struct hv_ring_buffer_info *rbi = &channel->outbound;
931     +
932     virt_mb();
933     if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
934     - return false;
935     -
936     - /*
937     - * When the client wants to control signaling,
938     - * we only honour the host interrupt mask.
939     - */
940     - if (policy == HV_SIGNAL_POLICY_EXPLICIT)
941     - return true;
942     + return;
943    
944     /* check interrupt_mask before read_index */
945     virt_rmb();
946     @@ -89,9 +93,9 @@ static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi,
947     * ring transitions from being empty to non-empty.
948     */
949     if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
950     - return true;
951     + vmbus_setevent(channel);
952    
953     - return false;
954     + return;
955     }
956    
957     /* Get the next write location for the specified ring buffer. */
958     @@ -280,9 +284,9 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
959     }
960    
961     /* Write to the ring buffer. */
962     -int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
963     - struct kvec *kv_list, u32 kv_count, bool *signal, bool lock,
964     - enum hv_signal_policy policy)
965     +int hv_ringbuffer_write(struct vmbus_channel *channel,
966     + struct kvec *kv_list, u32 kv_count, bool lock,
967     + bool kick_q)
968     {
969     int i = 0;
970     u32 bytes_avail_towrite;
971     @@ -292,6 +296,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
972     u32 old_write;
973     u64 prev_indices = 0;
974     unsigned long flags = 0;
975     + struct hv_ring_buffer_info *outring_info = &channel->outbound;
976    
977     for (i = 0; i < kv_count; i++)
978     totalbytes_towrite += kv_list[i].iov_len;
979     @@ -344,13 +349,13 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
980     if (lock)
981     spin_unlock_irqrestore(&outring_info->ring_lock, flags);
982    
983     - *signal = hv_need_to_signal(old_write, outring_info, policy);
984     + hv_signal_on_write(old_write, channel, kick_q);
985     return 0;
986     }
987    
988     -int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
989     +int hv_ringbuffer_read(struct vmbus_channel *channel,
990     void *buffer, u32 buflen, u32 *buffer_actual_len,
991     - u64 *requestid, bool *signal, bool raw)
992     + u64 *requestid, bool raw)
993     {
994     u32 bytes_avail_toread;
995     u32 next_read_location = 0;
996     @@ -359,6 +364,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
997     u32 offset;
998     u32 packetlen;
999     int ret = 0;
1000     + struct hv_ring_buffer_info *inring_info = &channel->inbound;
1001    
1002     if (buflen <= 0)
1003     return -EINVAL;
1004     @@ -377,6 +383,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
1005     return ret;
1006     }
1007    
1008     + init_cached_read_index(channel);
1009     next_read_location = hv_get_next_read_location(inring_info);
1010     next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
1011     sizeof(desc),
1012     @@ -416,7 +423,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
1013     /* Update the read index */
1014     hv_set_next_read_location(inring_info, next_read_location);
1015    
1016     - *signal = hv_need_to_signal_on_read(inring_info);
1017     + hv_signal_on_read(channel);
1018    
1019     return ret;
1020     }
1021     diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
1022     index 1869152f1d23..9b732c5f89e1 100644
1023     --- a/drivers/infiniband/sw/rxe/rxe_mr.c
1024     +++ b/drivers/infiniband/sw/rxe/rxe_mr.c
1025     @@ -59,9 +59,11 @@ int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
1026    
1027     case RXE_MEM_TYPE_MR:
1028     case RXE_MEM_TYPE_FMR:
1029     - return ((iova < mem->iova) ||
1030     - ((iova + length) > (mem->iova + mem->length))) ?
1031     - -EFAULT : 0;
1032     + if (iova < mem->iova ||
1033     + length > mem->length ||
1034     + iova > mem->iova + mem->length - length)
1035     + return -EFAULT;
1036     + return 0;
1037    
1038     default:
1039     return -EFAULT;
1040     diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
1041     index dd3d88adc003..ccf624763565 100644
1042     --- a/drivers/infiniband/sw/rxe/rxe_resp.c
1043     +++ b/drivers/infiniband/sw/rxe/rxe_resp.c
1044     @@ -472,7 +472,7 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
1045     goto err2;
1046     }
1047    
1048     - resid = mtu;
1049     + qp->resp.resid = mtu;
1050     } else {
1051     if (pktlen != resid) {
1052     state = RESPST_ERR_LENGTH;
1053     diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
1054     index 92595b98e7ed..022be0e22eba 100644
1055     --- a/drivers/input/misc/uinput.c
1056     +++ b/drivers/input/misc/uinput.c
1057     @@ -263,13 +263,21 @@ static int uinput_create_device(struct uinput_device *udev)
1058     return -EINVAL;
1059     }
1060    
1061     - if (test_bit(ABS_MT_SLOT, dev->absbit)) {
1062     - nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1;
1063     - error = input_mt_init_slots(dev, nslot, 0);
1064     - if (error)
1065     + if (test_bit(EV_ABS, dev->evbit)) {
1066     + input_alloc_absinfo(dev);
1067     + if (!dev->absinfo) {
1068     + error = -EINVAL;
1069     goto fail1;
1070     - } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
1071     - input_set_events_per_packet(dev, 60);
1072     + }
1073     +
1074     + if (test_bit(ABS_MT_SLOT, dev->absbit)) {
1075     + nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1;
1076     + error = input_mt_init_slots(dev, nslot, 0);
1077     + if (error)
1078     + goto fail1;
1079     + } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
1080     + input_set_events_per_packet(dev, 60);
1081     + }
1082     }
1083    
1084     if (test_bit(EV_FF, dev->evbit) && !udev->ff_effects_max) {
1085     diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
1086     index 31a89c8832c0..2c965424d383 100644
1087     --- a/drivers/md/dm-rq.c
1088     +++ b/drivers/md/dm-rq.c
1089     @@ -804,6 +804,10 @@ static void dm_old_request_fn(struct request_queue *q)
1090     int srcu_idx;
1091     struct dm_table *map = dm_get_live_table(md, &srcu_idx);
1092    
1093     + if (unlikely(!map)) {
1094     + dm_put_live_table(md, srcu_idx);
1095     + return;
1096     + }
1097     ti = dm_table_find_target(map, pos);
1098     dm_put_live_table(md, srcu_idx);
1099     }
1100     diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
1101     index 878950a42e6c..2cf8b1d82d6a 100644
1102     --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
1103     +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
1104     @@ -1007,9 +1007,7 @@
1105    
1106     static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
1107     {
1108     - u8 __iomem *reg_addr = ACCESS_ONCE(base);
1109     -
1110     - writel(value, reg_addr + reg);
1111     + writel(value, base + reg);
1112     }
1113    
1114     #define dsaf_write_dev(a, reg, value) \
1115     @@ -1017,9 +1015,7 @@ static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
1116    
1117     static inline u32 dsaf_read_reg(u8 __iomem *base, u32 reg)
1118     {
1119     - u8 __iomem *reg_addr = ACCESS_ONCE(base);
1120     -
1121     - return readl(reg_addr + reg);
1122     + return readl(base + reg);
1123     }
1124    
1125     static inline void dsaf_write_syscon(struct regmap *base, u32 reg, u32 value)
1126     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
1127     index 27ff401cec20..51c6a57ca873 100644
1128     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
1129     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
1130     @@ -991,6 +991,7 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
1131     {
1132     struct mlx5e_priv *priv = netdev_priv(dev);
1133     int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
1134     + bool hash_changed = false;
1135     void *in;
1136    
1137     if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
1138     @@ -1012,14 +1013,21 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
1139     mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
1140     }
1141    
1142     - if (key)
1143     + if (hfunc != ETH_RSS_HASH_NO_CHANGE &&
1144     + hfunc != priv->params.rss_hfunc) {
1145     + priv->params.rss_hfunc = hfunc;
1146     + hash_changed = true;
1147     + }
1148     +
1149     + if (key) {
1150     memcpy(priv->params.toeplitz_hash_key, key,
1151     sizeof(priv->params.toeplitz_hash_key));
1152     + hash_changed = hash_changed ||
1153     + priv->params.rss_hfunc == ETH_RSS_HASH_TOP;
1154     + }
1155    
1156     - if (hfunc != ETH_RSS_HASH_NO_CHANGE)
1157     - priv->params.rss_hfunc = hfunc;
1158     -
1159     - mlx5e_modify_tirs_hash(priv, in, inlen);
1160     + if (hash_changed)
1161     + mlx5e_modify_tirs_hash(priv, in, inlen);
1162    
1163     mutex_unlock(&priv->state_lock);
1164    
1165     diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
1166     index 720b5fa9e625..c2ac39a940f7 100644
1167     --- a/drivers/net/hyperv/netvsc.c
1168     +++ b/drivers/net/hyperv/netvsc.c
1169     @@ -1288,6 +1288,9 @@ void netvsc_channel_cb(void *context)
1170     ndev = hv_get_drvdata(device);
1171     buffer = get_per_channel_state(channel);
1172    
1173     + /* commit_rd_index() -> hv_signal_on_read() needs this. */
1174     + init_cached_read_index(channel);
1175     +
1176     do {
1177     desc = get_next_pkt_raw(channel);
1178     if (desc != NULL) {
1179     @@ -1340,6 +1343,9 @@ void netvsc_channel_cb(void *context)
1180    
1181     bufferlen = bytes_recvd;
1182     }
1183     +
1184     + init_cached_read_index(channel);
1185     +
1186     } while (1);
1187    
1188     if (bufferlen > NETVSC_PACKET_SIZE)
1189     diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
1190     index 8b6e37ce3f66..20bfb373dcd6 100644
1191     --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
1192     +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
1193     @@ -96,7 +96,7 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
1194     struct rtl_priv *rtlpriv = rtl_priv(hw);
1195     struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1196     struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1197     - char *fw_name = "rtlwifi/rtl8192cfwU.bin";
1198     + char *fw_name;
1199    
1200     rtl8192ce_bt_reg_init(hw);
1201    
1202     @@ -168,8 +168,13 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
1203     }
1204    
1205     /* request fw */
1206     - if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))
1207     + if (IS_VENDOR_UMC_A_CUT(rtlhal->version) &&
1208     + !IS_92C_SERIAL(rtlhal->version))
1209     + fw_name = "rtlwifi/rtl8192cfwU.bin";
1210     + else if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))
1211     fw_name = "rtlwifi/rtl8192cfwU_B.bin";
1212     + else
1213     + fw_name = "rtlwifi/rtl8192cfw.bin";
1214    
1215     rtlpriv->max_fw_size = 0x4000;
1216     pr_info("Using firmware %s\n", fw_name);
1217     diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
1218     index bf2744e1e3db..0cdcb2169083 100644
1219     --- a/drivers/net/xen-netfront.c
1220     +++ b/drivers/net/xen-netfront.c
1221     @@ -1397,6 +1397,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
1222     for (i = 0; i < num_queues && info->queues; ++i) {
1223     struct netfront_queue *queue = &info->queues[i];
1224    
1225     + del_timer_sync(&queue->rx_refill_timer);
1226     +
1227     if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
1228     unbind_from_irqhandler(queue->tx_irq, queue);
1229     if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
1230     @@ -1751,7 +1753,6 @@ static void xennet_destroy_queues(struct netfront_info *info)
1231    
1232     if (netif_running(info->netdev))
1233     napi_disable(&queue->napi);
1234     - del_timer_sync(&queue->rx_refill_timer);
1235     netif_napi_del(&queue->napi);
1236     }
1237    
1238     diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
1239     index 1480734c2d6e..aefca644219b 100644
1240     --- a/drivers/nvdimm/namespace_devs.c
1241     +++ b/drivers/nvdimm/namespace_devs.c
1242     @@ -962,8 +962,8 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
1243     struct nvdimm_drvdata *ndd;
1244     struct nd_label_id label_id;
1245     u32 flags = 0, remainder;
1246     + int rc, i, id = -1;
1247     u8 *uuid = NULL;
1248     - int rc, i;
1249    
1250     if (dev->driver || ndns->claim)
1251     return -EBUSY;
1252     @@ -972,11 +972,13 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
1253     struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
1254    
1255     uuid = nspm->uuid;
1256     + id = nspm->id;
1257     } else if (is_namespace_blk(dev)) {
1258     struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
1259    
1260     uuid = nsblk->uuid;
1261     flags = NSLABEL_FLAG_LOCAL;
1262     + id = nsblk->id;
1263     }
1264    
1265     /*
1266     @@ -1039,10 +1041,11 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
1267    
1268     /*
1269     * Try to delete the namespace if we deleted all of its
1270     - * allocation, this is not the seed device for the region, and
1271     - * it is not actively claimed by a btt instance.
1272     + * allocation, this is not the seed or 0th device for the
1273     + * region, and it is not actively claimed by a btt, pfn, or dax
1274     + * instance.
1275     */
1276     - if (val == 0 && nd_region->ns_seed != dev && !ndns->claim)
1277     + if (val == 0 && id != 0 && nd_region->ns_seed != dev && !ndns->claim)
1278     nd_device_unregister(dev, ND_ASYNC);
1279    
1280     return rc;
1281     diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
1282     index a2ac9e641aa9..6c033c9a2f06 100644
1283     --- a/drivers/nvdimm/pfn_devs.c
1284     +++ b/drivers/nvdimm/pfn_devs.c
1285     @@ -627,15 +627,12 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
1286     size = resource_size(&nsio->res);
1287     npfns = (size - start_pad - end_trunc - SZ_8K) / SZ_4K;
1288     if (nd_pfn->mode == PFN_MODE_PMEM) {
1289     - unsigned long memmap_size;
1290     -
1291     /*
1292     * vmemmap_populate_hugepages() allocates the memmap array in
1293     * HPAGE_SIZE chunks.
1294     */
1295     - memmap_size = ALIGN(64 * npfns, HPAGE_SIZE);
1296     - offset = ALIGN(start + SZ_8K + memmap_size + dax_label_reserve,
1297     - nd_pfn->align) - start;
1298     + offset = ALIGN(start + SZ_8K + 64 * npfns + dax_label_reserve,
1299     + max(nd_pfn->align, HPAGE_SIZE)) - start;
1300     } else if (nd_pfn->mode == PFN_MODE_RAM)
1301     offset = ALIGN(start + SZ_8K + dax_label_reserve,
1302     nd_pfn->align) - start;
1303     diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
1304     index 75f820ca17b7..27ff38f839fc 100644
1305     --- a/drivers/s390/scsi/zfcp_fsf.c
1306     +++ b/drivers/s390/scsi/zfcp_fsf.c
1307     @@ -1583,7 +1583,7 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
1308     int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
1309     {
1310     struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1311     - struct zfcp_fsf_req *req = NULL;
1312     + struct zfcp_fsf_req *req;
1313     int retval = -EIO;
1314    
1315     spin_lock_irq(&qdio->req_q_lock);
1316     @@ -1612,7 +1612,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
1317     zfcp_fsf_req_free(req);
1318     out:
1319     spin_unlock_irq(&qdio->req_q_lock);
1320     - if (req && !IS_ERR(req))
1321     + if (!retval)
1322     zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
1323     return retval;
1324     }
1325     @@ -1638,7 +1638,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
1326     int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
1327     {
1328     struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1329     - struct zfcp_fsf_req *req = NULL;
1330     + struct zfcp_fsf_req *req;
1331     int retval = -EIO;
1332    
1333     spin_lock_irq(&qdio->req_q_lock);
1334     @@ -1667,7 +1667,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
1335     zfcp_fsf_req_free(req);
1336     out:
1337     spin_unlock_irq(&qdio->req_q_lock);
1338     - if (req && !IS_ERR(req))
1339     + if (!retval)
1340     zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
1341     return retval;
1342     }
1343     diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
1344     index 341ea327ae79..792d3e7e35e2 100644
1345     --- a/drivers/scsi/aacraid/comminit.c
1346     +++ b/drivers/scsi/aacraid/comminit.c
1347     @@ -50,9 +50,13 @@ struct aac_common aac_config = {
1348    
1349     static inline int aac_is_msix_mode(struct aac_dev *dev)
1350     {
1351     - u32 status;
1352     + u32 status = 0;
1353    
1354     - status = src_readl(dev, MUnit.OMR);
1355     + if (dev->pdev->device == PMC_DEVICE_S6 ||
1356     + dev->pdev->device == PMC_DEVICE_S7 ||
1357     + dev->pdev->device == PMC_DEVICE_S8) {
1358     + status = src_readl(dev, MUnit.OMR);
1359     + }
1360     return (status & AAC_INT_MODE_MSIX);
1361     }
1362    
1363     diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
1364     index e3b911c895b4..91dfd58b175d 100644
1365     --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
1366     +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
1367     @@ -3929,6 +3929,7 @@ static struct configfs_attribute *ibmvscsis_tpg_attrs[] = {
1368     static const struct target_core_fabric_ops ibmvscsis_ops = {
1369     .module = THIS_MODULE,
1370     .name = "ibmvscsis",
1371     + .max_data_sg_nents = MAX_TXU / PAGE_SIZE,
1372     .get_fabric_name = ibmvscsis_get_fabric_name,
1373     .tpg_get_wwn = ibmvscsis_get_fabric_wwn,
1374     .tpg_get_tag = ibmvscsis_get_tag,
1375     diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
1376     index f84a6087cebd..8a7941b8189f 100644
1377     --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
1378     +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
1379     @@ -51,6 +51,7 @@
1380     #include <linux/workqueue.h>
1381     #include <linux/delay.h>
1382     #include <linux/pci.h>
1383     +#include <linux/pci-aspm.h>
1384     #include <linux/interrupt.h>
1385     #include <linux/aer.h>
1386     #include <linux/raid_class.h>
1387     @@ -8706,6 +8707,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1388    
1389     switch (hba_mpi_version) {
1390     case MPI2_VERSION:
1391     + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
1392     + PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
1393     /* Use mpt2sas driver host template for SAS 2.0 HBA's */
1394     shost = scsi_host_alloc(&mpt2sas_driver_template,
1395     sizeof(struct MPT3SAS_ADAPTER));
1396     diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
1397     index 078d797cb492..bea819e5336d 100644
1398     --- a/drivers/scsi/qla2xxx/qla_os.c
1399     +++ b/drivers/scsi/qla2xxx/qla_os.c
1400     @@ -1459,7 +1459,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1401     /* Don't abort commands in adapter during EEH
1402     * recovery as it's not accessible/responding.
1403     */
1404     - if (!ha->flags.eeh_busy) {
1405     + if (GET_CMD_SP(sp) && !ha->flags.eeh_busy) {
1406     /* Get a reference to the sp and drop the lock.
1407     * The reference ensures this sp->done() call
1408     * - and not the call in qla2xxx_eh_abort() -
1409     diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
1410     index 6b423485c5d6..ea9617c7b403 100644
1411     --- a/drivers/target/target_core_device.c
1412     +++ b/drivers/target/target_core_device.c
1413     @@ -351,7 +351,15 @@ int core_enable_device_list_for_node(
1414     kfree(new);
1415     return -EINVAL;
1416     }
1417     - BUG_ON(orig->se_lun_acl != NULL);
1418     + if (orig->se_lun_acl != NULL) {
1419     + pr_warn_ratelimited("Detected existing explicit"
1420     + " se_lun_acl->se_lun_group reference for %s"
1421     + " mapped_lun: %llu, failing\n",
1422     + nacl->initiatorname, mapped_lun);
1423     + mutex_unlock(&nacl->lun_entry_mutex);
1424     + kfree(new);
1425     + return -EINVAL;
1426     + }
1427    
1428     rcu_assign_pointer(new->se_lun, lun);
1429     rcu_assign_pointer(new->se_lun_acl, lun_acl);
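
The BUG_ON() above becomes a ratelimited warning plus -EINVAL, unwinding the mutex and the freshly allocated entry instead of taking the whole machine down on a duplicate se_lun_acl reference. A userspace sketch of that fail-gracefully shape (pthreads, illustrative names):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>

    struct entry { void *acl; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static int enable_entry(struct entry *orig)
    {
        struct entry *new = calloc(1, sizeof(*new));
        if (!new)
            return -ENOMEM;

        pthread_mutex_lock(&lock);
        if (orig->acl != NULL) {
            /* previously: an assertion here would abort the process;
             * now: warn, undo what we took, and report the conflict */
            fprintf(stderr, "existing reference detected, failing\n");
            pthread_mutex_unlock(&lock);
            free(new);
            return -EINVAL;
        }
        orig->acl = new;
        pthread_mutex_unlock(&lock);
        return 0;
    }

    int main(void)
    {
        struct entry e = { 0 };
        printf("first: %d\n", enable_entry(&e));
        printf("second: %d\n", enable_entry(&e));
        return 0;
    }
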
1430     diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
1431     index 04f616b3ba0a..aabd6602da6c 100644
1432     --- a/drivers/target/target_core_sbc.c
1433     +++ b/drivers/target/target_core_sbc.c
1434     @@ -450,6 +450,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
1435     int *post_ret)
1436     {
1437     struct se_device *dev = cmd->se_dev;
1438     + sense_reason_t ret = TCM_NO_SENSE;
1439    
1440     /*
1441     * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
1442     @@ -457,9 +458,12 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
1443     * sent to the backend driver.
1444     */
1445     spin_lock_irq(&cmd->t_state_lock);
1446     - if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) {
1447     + if (cmd->transport_state & CMD_T_SENT) {
1448     cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
1449     *post_ret = 1;
1450     +
1451     + if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
1452     + ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1453     }
1454     spin_unlock_irq(&cmd->t_state_lock);
1455    
1456     @@ -469,7 +473,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
1457     */
1458     up(&dev->caw_sem);
1459    
1460     - return TCM_NO_SENSE;
1461     + return ret;
1462     }
1463    
1464     static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
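
compare_and_write_post() previously returned TCM_NO_SENSE unconditionally and skipped the post step whenever scsi_status was already set; now the response path is always forced for sent commands, and a backend CHECK CONDITION surfaces as TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE. A small sketch of the status mapping (local enums, not the kernel's types):

    #include <stdio.h>

    enum sense { NO_SENSE, LUN_COMM_FAILURE };
    #define SAM_STAT_GOOD            0x00
    #define SAM_STAT_CHECK_CONDITION 0x02

    static enum sense post(int sent, int scsi_status, int *post_ret)
    {
        enum sense ret = NO_SENSE;

        if (sent) {
            *post_ret = 1;                  /* always force the response path */
            if (scsi_status == SAM_STAT_CHECK_CONDITION)
                ret = LUN_COMM_FAILURE;     /* surface the backend failure */
        }
        return ret;
    }

    int main(void)
    {
        int pr = 0;
        printf("good: %d\n", post(1, SAM_STAT_GOOD, &pr));
        printf("check condition: %d\n", post(1, SAM_STAT_CHECK_CONDITION, &pr));
        return 0;
    }
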
1465     diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
1466     index 7dfefd66df93..767d1eb6e035 100644
1467     --- a/drivers/target/target_core_transport.c
1468     +++ b/drivers/target/target_core_transport.c
1469     @@ -457,8 +457,20 @@ static void target_complete_nacl(struct kref *kref)
1470     {
1471     struct se_node_acl *nacl = container_of(kref,
1472     struct se_node_acl, acl_kref);
1473     + struct se_portal_group *se_tpg = nacl->se_tpg;
1474    
1475     - complete(&nacl->acl_free_comp);
1476     + if (!nacl->dynamic_stop) {
1477     + complete(&nacl->acl_free_comp);
1478     + return;
1479     + }
1480     +
1481     + mutex_lock(&se_tpg->acl_node_mutex);
1482     + list_del(&nacl->acl_list);
1483     + mutex_unlock(&se_tpg->acl_node_mutex);
1484     +
1485     + core_tpg_wait_for_nacl_pr_ref(nacl);
1486     + core_free_device_list_for_node(nacl, se_tpg);
1487     + kfree(nacl);
1488     }
1489    
1490     void target_put_nacl(struct se_node_acl *nacl)
1491     @@ -499,12 +511,39 @@ EXPORT_SYMBOL(transport_deregister_session_configfs);
1492     void transport_free_session(struct se_session *se_sess)
1493     {
1494     struct se_node_acl *se_nacl = se_sess->se_node_acl;
1495     +
1496     /*
1497     * Drop the se_node_acl->nacl_kref obtained from within
1498     * core_tpg_get_initiator_node_acl().
1499     */
1500     if (se_nacl) {
1501     + struct se_portal_group *se_tpg = se_nacl->se_tpg;
1502     + const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo;
1503     + unsigned long flags;
1504     +
1505     se_sess->se_node_acl = NULL;
1506     +
1507     + /*
1508     + * Also determine if we need to drop the extra ->cmd_kref if
1509     + * it had been previously dynamically generated, and
1510     + * the endpoint is not caching dynamic ACLs.
1511     + */
1512     + mutex_lock(&se_tpg->acl_node_mutex);
1513     + if (se_nacl->dynamic_node_acl &&
1514     + !se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
1515     + spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
1516     + if (list_empty(&se_nacl->acl_sess_list))
1517     + se_nacl->dynamic_stop = true;
1518     + spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
1519     +
1520     + if (se_nacl->dynamic_stop)
1521     + list_del(&se_nacl->acl_list);
1522     + }
1523     + mutex_unlock(&se_tpg->acl_node_mutex);
1524     +
1525     + if (se_nacl->dynamic_stop)
1526     + target_put_nacl(se_nacl);
1527     +
1528     target_put_nacl(se_nacl);
1529     }
1530     if (se_sess->sess_cmd_map) {
1531     @@ -518,16 +557,12 @@ EXPORT_SYMBOL(transport_free_session);
1532     void transport_deregister_session(struct se_session *se_sess)
1533     {
1534     struct se_portal_group *se_tpg = se_sess->se_tpg;
1535     - const struct target_core_fabric_ops *se_tfo;
1536     - struct se_node_acl *se_nacl;
1537     unsigned long flags;
1538     - bool drop_nacl = false;
1539    
1540     if (!se_tpg) {
1541     transport_free_session(se_sess);
1542     return;
1543     }
1544     - se_tfo = se_tpg->se_tpg_tfo;
1545    
1546     spin_lock_irqsave(&se_tpg->session_lock, flags);
1547     list_del(&se_sess->sess_list);
1548     @@ -535,33 +570,15 @@ void transport_deregister_session(struct se_session *se_sess)
1549     se_sess->fabric_sess_ptr = NULL;
1550     spin_unlock_irqrestore(&se_tpg->session_lock, flags);
1551    
1552     - /*
1553     - * Determine if we need to do extra work for this initiator node's
1554     - * struct se_node_acl if it had been previously dynamically generated.
1555     - */
1556     - se_nacl = se_sess->se_node_acl;
1557     -
1558     - mutex_lock(&se_tpg->acl_node_mutex);
1559     - if (se_nacl && se_nacl->dynamic_node_acl) {
1560     - if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
1561     - list_del(&se_nacl->acl_list);
1562     - drop_nacl = true;
1563     - }
1564     - }
1565     - mutex_unlock(&se_tpg->acl_node_mutex);
1566     -
1567     - if (drop_nacl) {
1568     - core_tpg_wait_for_nacl_pr_ref(se_nacl);
1569     - core_free_device_list_for_node(se_nacl, se_tpg);
1570     - se_sess->se_node_acl = NULL;
1571     - kfree(se_nacl);
1572     - }
1573     pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
1574     se_tpg->se_tpg_tfo->get_fabric_name());
1575     /*
1576     * If last kref is dropping now for an explicit NodeACL, awake sleeping
1577     * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
1578     * removal context from within transport_free_session() code.
1579     + *
1580     + * For dynamic ACL, target_put_nacl() uses target_complete_nacl()
1581     + * to release all remaining generate_node_acl=1 created ACL resources.
1582     */
1583    
1584     transport_free_session(se_sess);
1585     @@ -3086,7 +3103,6 @@ static void target_tmr_work(struct work_struct *work)
1586     spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1587     goto check_stop;
1588     }
1589     - cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
1590     spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1591    
1592     cmd->se_tfo->queue_tm_rsp(cmd);
1593     @@ -3099,11 +3115,25 @@ int transport_generic_handle_tmr(
1594     struct se_cmd *cmd)
1595     {
1596     unsigned long flags;
1597     + bool aborted = false;
1598    
1599     spin_lock_irqsave(&cmd->t_state_lock, flags);
1600     - cmd->transport_state |= CMD_T_ACTIVE;
1601     + if (cmd->transport_state & CMD_T_ABORTED) {
1602     + aborted = true;
1603     + } else {
1604     + cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
1605     + cmd->transport_state |= CMD_T_ACTIVE;
1606     + }
1607     spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1608    
1609     + if (aborted) {
1610     + pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d "
1611     + "ref_tag: %llu tag: %llu\n", cmd->se_tmr_req->function,
1612     + cmd->se_tmr_req->ref_task_tag, cmd->tag);
1613     + transport_cmd_check_stop_to_fabric(cmd);
1614     + return 0;
1615     + }
1616     +
1617     INIT_WORK(&cmd->work, target_tmr_work);
1618     queue_work(cmd->se_dev->tmr_wq, &cmd->work);
1619     return 0;
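
The transport changes above move dynamic-ACL teardown under the nacl kref: transport_free_session() flags a cache-disabled dynamic ACL with dynamic_stop and drops the extra reference, and target_complete_nacl() then performs the list removal and kfree on the final put, instead of transport_deregister_session() racing to do it. The TMR path likewise refuses to queue work for an already-aborted request. A toy refcount sketch of the final-put teardown (plain C, illustrative fields):

    #include <stdio.h>
    #include <stdlib.h>

    struct nacl {
        int refs;
        int dynamic_stop;
    };

    static void complete_nacl(struct nacl *n)
    {
        if (!n->dynamic_stop) {
            printf("waking explicit-ACL removal path\n");
            return;
        }
        printf("dynamic ACL: freeing on final put\n");
        free(n);
    }

    static void put_nacl(struct nacl *n)
    {
        if (--n->refs == 0)
            complete_nacl(n);
    }

    int main(void)
    {
        struct nacl *n = calloc(1, sizeof(*n));
        n->refs = 2;            /* session ref + dynamic/configfs ref */
        n->dynamic_stop = 1;    /* endpoint does not cache dynamic ACLs */

        put_nacl(n);            /* drop the extra dynamic ref ... */
        put_nacl(n);            /* ... and the session ref frees it */
        return 0;
    }
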
1620     diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
1621     index 094a1440eacb..18848ba8d2ba 100644
1622     --- a/drivers/target/target_core_xcopy.c
1623     +++ b/drivers/target/target_core_xcopy.c
1624     @@ -836,7 +836,7 @@ static void target_xcopy_do_work(struct work_struct *work)
1625     " CHECK_CONDITION -> sending response\n", rc);
1626     ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
1627     }
1628     - target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION);
1629     + target_complete_cmd(ec_cmd, ec_cmd->scsi_status);
1630     }
1631    
1632     sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
1633     diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
1634     index 7acbd2cf6192..1782804f6c26 100644
1635     --- a/fs/btrfs/ioctl.c
1636     +++ b/fs/btrfs/ioctl.c
1637     @@ -5648,6 +5648,10 @@ long btrfs_ioctl(struct file *file, unsigned int
1638     #ifdef CONFIG_COMPAT
1639     long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1640     {
1641     + /*
1642     + * These all access 32-bit values anyway so no further
1643     + * handling is necessary.
1644     + */
1645     switch (cmd) {
1646     case FS_IOC32_GETFLAGS:
1647     cmd = FS_IOC_GETFLAGS;
1648     @@ -5658,8 +5662,6 @@ long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1649     case FS_IOC32_GETVERSION:
1650     cmd = FS_IOC_GETVERSION;
1651     break;
1652     - default:
1653     - return -ENOIOCTLCMD;
1654     }
1655    
1656     return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
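
With the default case gone, unknown 32-bit ioctls fall through to btrfs_ioctl() unchanged; only the three FS_IOC32_* flag/version commands differ between ABIs, everything else shares numbering, as the new comment notes. A sketch of the translate-then-forward shape (stand-in command numbers):

    #include <stdio.h>

    #define IOC32_GETFLAGS 1    /* stand-ins for FS_IOC32_* / FS_IOC_* */
    #define IOC_GETFLAGS   101
    #define IOC_NATIVE     500  /* a command identical on both ABIs */

    static long native_ioctl(unsigned int cmd) { return (long)cmd; }

    static long compat_ioctl(unsigned int cmd)
    {
        switch (cmd) {
        case IOC32_GETFLAGS:
            cmd = IOC_GETFLAGS;     /* the few ABI-dependent commands */
            break;
            /* no default: everything else is forwarded unchanged */
        }
        return native_ioctl(cmd);
    }

    int main(void)
    {
        printf("%ld %ld\n", compat_ioctl(IOC32_GETFLAGS), compat_ioctl(IOC_NATIVE));
        return 0;
    }
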
1657     diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
1658     index da7fbf1cdd56..fa3b155ce7e1 100644
1659     --- a/include/linux/cpumask.h
1660     +++ b/include/linux/cpumask.h
1661     @@ -560,7 +560,7 @@ static inline void cpumask_copy(struct cpumask *dstp,
1662     static inline int cpumask_parse_user(const char __user *buf, int len,
1663     struct cpumask *dstp)
1664     {
1665     - return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpu_ids);
1666     + return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
1667     }
1668    
1669     /**
1670     @@ -575,7 +575,7 @@ static inline int cpumask_parselist_user(const char __user *buf, int len,
1671     struct cpumask *dstp)
1672     {
1673     return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
1674     - nr_cpu_ids);
1675     + nr_cpumask_bits);
1676     }
1677    
1678     /**
1679     @@ -590,7 +590,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
1680     char *nl = strchr(buf, '\n');
1681     unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf);
1682    
1683     - return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpu_ids);
1684     + return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
1685     }
1686    
1687     /**
1688     @@ -602,7 +602,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
1689     */
1690     static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
1691     {
1692     - return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpu_ids);
1693     + return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
1694     }
1695    
1696     /**
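
All four parsers above now size the destination by nr_cpumask_bits, the allocated width of the mask, rather than nr_cpu_ids; with CONFIG_CPUMASK_OFFSTACK=n the mask is NR_CPUS bits wide, and parsing only the first nr_cpu_ids bits can leave a stale tail that full-width cpumask operations then see. A toy illustration of that width mismatch (plain C, not the kernel bitmap API):

    #include <stdio.h>

    #define MASK_BITS 16            /* stand-in for nr_cpumask_bits */
    #define IDS       8             /* stand-in for nr_cpu_ids */

    static void parse_into(unsigned int *mask, unsigned int value, int nbits)
    {
        /* a parser only touches the first nbits bits ... */
        unsigned int keep = (nbits >= MASK_BITS) ? 0u : (*mask >> nbits) << nbits;
        *mask = keep | (value & ((1u << nbits) - 1));
    }

    int main(void)
    {
        unsigned int mask = 0xff00;         /* stale high bits from earlier use */

        parse_into(&mask, 0x0f, IDS);       /* old: width nr_cpu_ids */
        printf("parsed with IDS:       %#06x (stale tail survives)\n", mask);

        parse_into(&mask, 0x0f, MASK_BITS); /* new: width nr_cpumask_bits */
        printf("parsed with MASK_BITS: %#06x\n", mask);
        return 0;
    }
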
1697     diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
1698     index cd184bdca58f..c92a083bcf16 100644
1699     --- a/include/linux/hyperv.h
1700     +++ b/include/linux/hyperv.h
1701     @@ -128,6 +128,7 @@ struct hv_ring_buffer_info {
1702     u32 ring_data_startoffset;
1703     u32 priv_write_index;
1704     u32 priv_read_index;
1705     + u32 cached_read_index;
1706     };
1707    
1708     /*
1709     @@ -180,6 +181,19 @@ static inline u32 hv_get_bytes_to_write(struct hv_ring_buffer_info *rbi)
1710     return write;
1711     }
1712    
1713     +static inline u32 hv_get_cached_bytes_to_write(
1714     + const struct hv_ring_buffer_info *rbi)
1715     +{
1716     + u32 read_loc, write_loc, dsize, write;
1717     +
1718     + dsize = rbi->ring_datasize;
1719     + read_loc = rbi->cached_read_index;
1720     + write_loc = rbi->ring_buffer->write_index;
1721     +
1722     + write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
1723     + read_loc - write_loc;
1724     + return write;
1725     +}
1726     /*
1727     * VMBUS version is 32 bit entity broken up into
1728     * two 16 bit quantities: major_number. minor_number.
1729     @@ -1447,6 +1461,7 @@ void hv_event_tasklet_enable(struct vmbus_channel *channel);
1730    
1731     void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
1732    
1733     +void vmbus_setevent(struct vmbus_channel *channel);
1734     /*
1735     * Negotiated version with the Host.
1736     */
1737     @@ -1479,10 +1494,11 @@ hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
1738     * there is room for the producer to send the pending packet.
1739     */
1740    
1741     -static inline bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
1742     +static inline void hv_signal_on_read(struct vmbus_channel *channel)
1743     {
1744     - u32 cur_write_sz;
1745     + u32 cur_write_sz, cached_write_sz;
1746     u32 pending_sz;
1747     + struct hv_ring_buffer_info *rbi = &channel->inbound;
1748    
1749     /*
1750     * Issue a full memory barrier before making the signaling decision.
1751     @@ -1500,14 +1516,26 @@ static inline bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
1752     pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
1753     /* If the other end is not blocked on write don't bother. */
1754     if (pending_sz == 0)
1755     - return false;
1756     + return;
1757    
1758     cur_write_sz = hv_get_bytes_to_write(rbi);
1759    
1760     - if (cur_write_sz >= pending_sz)
1761     - return true;
1762     + if (cur_write_sz < pending_sz)
1763     + return;
1764     +
1765     + cached_write_sz = hv_get_cached_bytes_to_write(rbi);
1766     + if (cached_write_sz < pending_sz)
1767     + vmbus_setevent(channel);
1768     +
1769     + return;
1770     +}
1771     +
1772     +static inline void
1773     +init_cached_read_index(struct vmbus_channel *channel)
1774     +{
1775     + struct hv_ring_buffer_info *rbi = &channel->inbound;
1776    
1777     - return false;
1778     + rbi->cached_read_index = rbi->ring_buffer->read_index;
1779     }
1780    
1781     /*
1782     @@ -1571,6 +1599,8 @@ static inline void put_pkt_raw(struct vmbus_channel *channel,
1783     * This call commits the read index and potentially signals the host.
1784     * Here is the pattern for using the "in-place" consumption APIs:
1785     *
1786     + * init_cached_read_index();
1787     + *
1788     * while (get_next_pkt_raw() {
1789     * process the packet "in-place";
1790     * put_pkt_raw();
1791     @@ -1589,8 +1619,7 @@ static inline void commit_rd_index(struct vmbus_channel *channel)
1792     virt_rmb();
1793     ring_info->ring_buffer->read_index = ring_info->priv_read_index;
1794    
1795     - if (hv_need_to_signal_on_read(ring_info))
1796     - vmbus_set_event(channel);
1797     + hv_signal_on_read(channel);
1798     }
1799    
1800    
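
The in-place consumption rework snapshots read_index into cached_read_index at the start of a batch (init_cached_read_index()) so hv_signal_on_read() can signal the host edge-triggered: only when committing the batch moves free space from below the producer's pending_send_sz threshold to at or above it. A standalone sketch of that test (illustrative sizes):

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t bytes_to_write(uint32_t dsize, uint32_t read, uint32_t write)
    {
        return write >= read ? dsize - (write - read) : read - write;
    }

    int main(void)
    {
        uint32_t dsize = 4096, write_idx = 100;
        uint32_t cached_read = 200;     /* read_index before this read batch */
        uint32_t read_idx = 1200;       /* read_index after committing the batch */
        uint32_t pending_sz = 512;      /* producer blocks until this much room */

        uint32_t cur = bytes_to_write(dsize, read_idx, write_idx);
        uint32_t cached = bytes_to_write(dsize, cached_read, write_idx);

        if (cur >= pending_sz && cached < pending_sz)
            printf("signal host (%u -> %u crosses %u)\n", cached, cur, pending_sz);
        else
            printf("no signal (%u -> %u)\n", cached, cur);
        return 0;
    }
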
1801     diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
1802     index c2119008990a..48bc1ac1da43 100644
1803     --- a/include/target/target_core_base.h
1804     +++ b/include/target/target_core_base.h
1805     @@ -538,6 +538,7 @@ struct se_node_acl {
1806     char initiatorname[TRANSPORT_IQN_LEN];
1807     /* Used to signal demo mode created ACL, disabled by default */
1808     bool dynamic_node_acl;
1809     + bool dynamic_stop;
1810     u32 queue_depth;
1811     u32 acl_index;
1812     enum target_prot_type saved_prot_type;
1813     diff --git a/kernel/events/core.c b/kernel/events/core.c
1814     index b1cfd7416db0..4b3323151a2f 100644
1815     --- a/kernel/events/core.c
1816     +++ b/kernel/events/core.c
1817     @@ -3461,14 +3461,15 @@ struct perf_read_data {
1818     int ret;
1819     };
1820    
1821     -static int find_cpu_to_read(struct perf_event *event, int local_cpu)
1822     +static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
1823     {
1824     - int event_cpu = event->oncpu;
1825     u16 local_pkg, event_pkg;
1826    
1827     if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
1828     - event_pkg = topology_physical_package_id(event_cpu);
1829     - local_pkg = topology_physical_package_id(local_cpu);
1830     + int local_cpu = smp_processor_id();
1831     +
1832     + event_pkg = topology_physical_package_id(event_cpu);
1833     + local_pkg = topology_physical_package_id(local_cpu);
1834    
1835     if (event_pkg == local_pkg)
1836     return local_cpu;
1837     @@ -3598,7 +3599,7 @@ u64 perf_event_read_local(struct perf_event *event)
1838    
1839     static int perf_event_read(struct perf_event *event, bool group)
1840     {
1841     - int ret = 0, cpu_to_read, local_cpu;
1842     + int event_cpu, ret = 0;
1843    
1844     /*
1845     * If event is enabled and currently active on a CPU, update the
1846     @@ -3611,21 +3612,25 @@ static int perf_event_read(struct perf_event *event, bool group)
1847     .ret = 0,
1848     };
1849    
1850     - local_cpu = get_cpu();
1851     - cpu_to_read = find_cpu_to_read(event, local_cpu);
1852     - put_cpu();
1853     + event_cpu = READ_ONCE(event->oncpu);
1854     + if ((unsigned)event_cpu >= nr_cpu_ids)
1855     + return 0;
1856     +
1857     + preempt_disable();
1858     + event_cpu = __perf_event_read_cpu(event, event_cpu);
1859    
1860     /*
1861     * Purposely ignore the smp_call_function_single() return
1862     * value.
1863     *
1864     - * If event->oncpu isn't a valid CPU it means the event got
1865     + * If event_cpu isn't a valid CPU it means the event got
1866     * scheduled out and that will have updated the event count.
1867     *
1868     * Therefore, either way, we'll have an up-to-date event count
1869     * after this.
1870     */
1871     - (void)smp_call_function_single(cpu_to_read, __perf_event_read, &data, 1);
1872     + (void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1);
1873     + preempt_enable();
1874     ret = data.ret;
1875     } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
1876     struct perf_event_context *ctx = event->ctx;
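
perf_event_read() now samples event->oncpu once with READ_ONCE(), bounds-checks the snapshot, and holds preemption off across the package-locality decision and the cross-call, so a concurrently scheduled-out event (oncpu = -1) can no longer feed a bogus CPU to smp_call_function_single(). A userspace sketch of the snapshot-then-validate shape (C11 atomics standing in for READ_ONCE):

    #include <stdatomic.h>
    #include <stdio.h>

    #define NR_CPU_IDS 8

    static _Atomic int oncpu = 3;   /* may be set to -1 by another thread */

    static int event_read(void)
    {
        int cpu = atomic_load_explicit(&oncpu, memory_order_relaxed);

        /* -1 (or any out-of-range value) means "not running": bail out
         * instead of handing a bogus CPU number to the cross-call */
        if ((unsigned int)cpu >= NR_CPU_IDS) {
            printf("event not running; count already up to date\n");
            return 0;
        }

        printf("issuing cross-call to cpu %d\n", cpu);
        return 0;
    }

    int main(void)
    {
        event_read();
        atomic_store(&oncpu, -1);   /* event scheduled out */
        event_read();
        return 0;
    }
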
1877     diff --git a/kernel/stacktrace.c b/kernel/stacktrace.c
1878     index b6e4c16377c7..9c15a9124e83 100644
1879     --- a/kernel/stacktrace.c
1880     +++ b/kernel/stacktrace.c
1881     @@ -18,10 +18,8 @@ void print_stack_trace(struct stack_trace *trace, int spaces)
1882     if (WARN_ON(!trace->entries))
1883     return;
1884    
1885     - for (i = 0; i < trace->nr_entries; i++) {
1886     - printk("%*c", 1 + spaces, ' ');
1887     - print_ip_sym(trace->entries[i]);
1888     - }
1889     + for (i = 0; i < trace->nr_entries; i++)
1890     + printk("%*c%pS\n", 1 + spaces, ' ', (void *)trace->entries[i]);
1891     }
1892     EXPORT_SYMBOL_GPL(print_stack_trace);
1893    
1894     @@ -29,7 +27,6 @@ int snprint_stack_trace(char *buf, size_t size,
1895     struct stack_trace *trace, int spaces)
1896     {
1897     int i;
1898     - unsigned long ip;
1899     int generated;
1900     int total = 0;
1901    
1902     @@ -37,9 +34,8 @@ int snprint_stack_trace(char *buf, size_t size,
1903     return 0;
1904    
1905     for (i = 0; i < trace->nr_entries; i++) {
1906     - ip = trace->entries[i];
1907     - generated = snprintf(buf, size, "%*c[<%p>] %pS\n",
1908     - 1 + spaces, ' ', (void *) ip, (void *) ip);
1909     + generated = snprintf(buf, size, "%*c%pS\n", 1 + spaces, ' ',
1910     + (void *)trace->entries[i]);
1911    
1912     total += generated;
1913    
1914     diff --git a/mm/slub.c b/mm/slub.c
1915     index 2b3e740609e9..7aa0e97af928 100644
1916     --- a/mm/slub.c
1917     +++ b/mm/slub.c
1918     @@ -1419,6 +1419,10 @@ static int init_cache_random_seq(struct kmem_cache *s)
1919     int err;
1920     unsigned long i, count = oo_objects(s->oo);
1921    
1922     + /* Bailout if already initialised */
1923     + if (s->random_seq)
1924     + return 0;
1925     +
1926     err = cache_random_seq_create(s, count, GFP_KERNEL);
1927     if (err) {
1928     pr_err("SLUB: Unable to initialize free list for %s\n",
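
init_cache_random_seq() scales the precomputed sequence by the object size, so running it again for an already-initialised cache would re-scale and corrupt the freelist order; the early return makes it idempotent. A toy version of why the guard matters (plain C):

    #include <stdio.h>
    #include <stdlib.h>

    struct cache { unsigned int *seq; unsigned int size; };

    static int init_random_seq(struct cache *s, unsigned int count)
    {
        unsigned int i;

        if (s->seq)                 /* bail out if already initialised */
            return 0;

        s->seq = malloc(count * sizeof(*s->seq));
        if (!s->seq)
            return -1;
        for (i = 0; i < count; i++)
            s->seq[i] = i * s->size;    /* scale by object size exactly once */
        return 0;
    }

    int main(void)
    {
        struct cache s = { NULL, 64 };

        init_random_seq(&s, 4);
        init_random_seq(&s, 4);     /* second call is now a no-op */
        printf("seq[3] = %u (expected 192)\n", s.seq[3]);
        free(s.seq);
        return 0;
    }
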
1929     diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
1930     index 42120d965263..50e1b7f78bd4 100644
1931     --- a/net/mac80211/mesh.c
1932     +++ b/net/mac80211/mesh.c
1933     @@ -339,7 +339,7 @@ int mesh_add_vendor_ies(struct ieee80211_sub_if_data *sdata,
1934     /* fast-forward to vendor IEs */
1935     offset = ieee80211_ie_split_vendor(ifmsh->ie, ifmsh->ie_len, 0);
1936    
1937     - if (offset) {
1938     + if (offset < ifmsh->ie_len) {
1939     len = ifmsh->ie_len - offset;
1940     data = ifmsh->ie + offset;
1941     if (skb_tailroom(skb) < len)
1942     diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
1943     index 1b3c18c2c1ec..cd7a419faa21 100644
1944     --- a/net/wireless/nl80211.c
1945     +++ b/net/wireless/nl80211.c
1946     @@ -5874,6 +5874,7 @@ do { \
1947     break;
1948     }
1949     cfg->ht_opmode = ht_opmode;
1950     + mask |= (1 << (NL80211_MESHCONF_HT_OPMODE - 1));
1951     }
1952     FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout,
1953     1, 65535, mask,
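
The one-liner above sets the NL80211_MESHCONF_HT_OPMODE bit in the changed-parameters mask after parsing, without which the validated ht_opmode value was stored but never applied downstream. A sketch of the mask-flag pattern (illustrative attribute id):

    #include <stdio.h>
    #include <stdint.h>

    #define CONF_HT_OPMODE 1    /* stand-in attribute id */

    struct cfg { uint16_t ht_opmode; };

    static void parse(struct cfg *cfg, uint32_t *mask, uint16_t value)
    {
        cfg->ht_opmode = value;
        *mask |= 1u << (CONF_HT_OPMODE - 1);    /* the line the fix adds */
    }

    static void apply(const struct cfg *cfg, uint32_t mask)
    {
        if (mask & (1u << (CONF_HT_OPMODE - 1)))
            printf("applying ht_opmode=%u\n", cfg->ht_opmode);
        else
            printf("ht_opmode parsed but never applied\n");
    }

    int main(void)
    {
        struct cfg cfg = { 0 };
        uint32_t mask = 0;

        parse(&cfg, &mask, 4);
        apply(&cfg, mask);
        return 0;
    }
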
1954     diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
1955     index 09fd6108e421..c2da45ae5b2a 100644
1956     --- a/security/selinux/hooks.c
1957     +++ b/security/selinux/hooks.c
1958     @@ -5858,7 +5858,7 @@ static int selinux_setprocattr(struct task_struct *p,
1959     return error;
1960    
1961     /* Obtain a SID for the context, if one was specified. */
1962     - if (size && str[1] && str[1] != '\n') {
1963     + if (size && str[0] && str[0] != '\n') {
1964     if (str[size-1] == '\n') {
1965     str[size-1] = 0;
1966     size--;
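
The check above tested str[1], which both reads past a one-byte write and inspects the wrong byte; str[0] is what decides whether a context was actually supplied. A tiny sketch (plain C):

    #include <stdio.h>

    static int has_context(const char *str, size_t size)
    {
        /* old: str[1] read past a 1-byte buffer and tested the wrong byte */
        return size && str[0] && str[0] != '\n';
    }

    int main(void)
    {
        printf("\"x\":  %d\n", has_context("x", 1));   /* context supplied */
        printf("\"\\n\": %d\n", has_context("\n", 1)); /* just a newline: no */
        return 0;
    }
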
1967     diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
1968     index c850345c43b5..dfa5156f3585 100644
1969     --- a/sound/core/seq/seq_memory.c
1970     +++ b/sound/core/seq/seq_memory.c
1971     @@ -419,7 +419,6 @@ int snd_seq_pool_done(struct snd_seq_pool *pool)
1972     {
1973     unsigned long flags;
1974     struct snd_seq_event_cell *ptr;
1975     - int max_count = 5 * HZ;
1976    
1977     if (snd_BUG_ON(!pool))
1978     return -EINVAL;
1979     @@ -432,14 +431,8 @@ int snd_seq_pool_done(struct snd_seq_pool *pool)
1980     if (waitqueue_active(&pool->output_sleep))
1981     wake_up(&pool->output_sleep);
1982    
1983     - while (atomic_read(&pool->counter) > 0) {
1984     - if (max_count == 0) {
1985     - pr_warn("ALSA: snd_seq_pool_done timeout: %d cells remain\n", atomic_read(&pool->counter));
1986     - break;
1987     - }
1988     + while (atomic_read(&pool->counter) > 0)
1989     schedule_timeout_uninterruptible(1);
1990     - max_count--;
1991     - }
1992    
1993     /* release all resources */
1994     spin_lock_irqsave(&pool->lock, flags);
1995     diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
1996     index 0bec02e89d51..450c5187eecb 100644
1997     --- a/sound/core/seq/seq_queue.c
1998     +++ b/sound/core/seq/seq_queue.c
1999     @@ -181,6 +181,8 @@ void __exit snd_seq_queues_delete(void)
2000     }
2001     }
2002    
2003     +static void queue_use(struct snd_seq_queue *queue, int client, int use);
2004     +
2005     /* allocate a new queue -
2006     * return queue index value or negative value for error
2007     */
2008     @@ -192,11 +194,11 @@ int snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
2009     if (q == NULL)
2010     return -ENOMEM;
2011     q->info_flags = info_flags;
2012     + queue_use(q, client, 1);
2013     if (queue_list_add(q) < 0) {
2014     queue_delete(q);
2015     return -ENOMEM;
2016     }
2017     - snd_seq_queue_use(q->queue, client, 1); /* use this queue */
2018     return q->queue;
2019     }
2020    
2021     @@ -502,19 +504,9 @@ int snd_seq_queue_timer_set_tempo(int queueid, int client,
2022     return result;
2023     }
2024    
2025     -
2026     -/* use or unuse this queue -
2027     - * if it is the first client, starts the timer.
2028     - * if it is not longer used by any clients, stop the timer.
2029     - */
2030     -int snd_seq_queue_use(int queueid, int client, int use)
2031     +/* use or unuse this queue */
2032     +static void queue_use(struct snd_seq_queue *queue, int client, int use)
2033     {
2034     - struct snd_seq_queue *queue;
2035     -
2036     - queue = queueptr(queueid);
2037     - if (queue == NULL)
2038     - return -EINVAL;
2039     - mutex_lock(&queue->timer_mutex);
2040     if (use) {
2041     if (!test_and_set_bit(client, queue->clients_bitmap))
2042     queue->clients++;
2043     @@ -529,6 +521,21 @@ int snd_seq_queue_use(int queueid, int client, int use)
2044     } else {
2045     snd_seq_timer_close(queue);
2046     }
2047     +}
2048     +
2049     +/* use or unuse this queue -
2050     + * if it is the first client, start the timer.
2051     + * if it is no longer used by any clients, stop the timer.
2052     + */
2053     +int snd_seq_queue_use(int queueid, int client, int use)
2054     +{
2055     + struct snd_seq_queue *queue;
2056     +
2057     + queue = queueptr(queueid);
2058     + if (queue == NULL)
2059     + return -EINVAL;
2060     + mutex_lock(&queue->timer_mutex);
2061     + queue_use(queue, client, use);
2062     mutex_unlock(&queue->timer_mutex);
2063     queuefree(queue);
2064     return 0;
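
snd_seq_queue_alloc() used to publish the queue via queue_list_add() and only then mark it used, leaving a window in which another client could grab or delete the half-initialised queue; factoring out queue_use() lets the queue be marked in use before it becomes visible. A pthreads sketch of the initialise-then-publish ordering (illustrative types):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct queue { int clients; struct queue *next; };

    static struct queue *list;
    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

    static int queue_alloc(int client)
    {
        struct queue *q = calloc(1, sizeof(*q));
        if (!q)
            return -1;

        q->clients = 1;                 /* "use" it while still private ... */

        pthread_mutex_lock(&list_lock); /* ... then publish atomically */
        q->next = list;
        list = q;
        pthread_mutex_unlock(&list_lock);
        return 0;
    }

    int main(void)
    {
        queue_alloc(0);
        printf("head clients: %d\n", list->clients);
        free(list);
        return 0;
    }
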
2065     diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
2066     index 56e5204ac9c1..4bf48336b0fc 100644
2067     --- a/sound/pci/hda/patch_hdmi.c
2068     +++ b/sound/pci/hda/patch_hdmi.c
2069     @@ -3638,6 +3638,7 @@ HDA_CODEC_ENTRY(0x10de0070, "GPU 70 HDMI/DP", patch_nvhdmi),
2070     HDA_CODEC_ENTRY(0x10de0071, "GPU 71 HDMI/DP", patch_nvhdmi),
2071     HDA_CODEC_ENTRY(0x10de0072, "GPU 72 HDMI/DP", patch_nvhdmi),
2072     HDA_CODEC_ENTRY(0x10de007d, "GPU 7d HDMI/DP", patch_nvhdmi),
2073     +HDA_CODEC_ENTRY(0x10de0080, "GPU 80 HDMI/DP", patch_nvhdmi),
2074     HDA_CODEC_ENTRY(0x10de0082, "GPU 82 HDMI/DP", patch_nvhdmi),
2075     HDA_CODEC_ENTRY(0x10de0083, "GPU 83 HDMI/DP", patch_nvhdmi),
2076     HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI", patch_nvhdmi_2ch),
2077     diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
2078     index 90009c0b3a92..ab3c280a23d1 100644
2079     --- a/sound/usb/line6/driver.c
2080     +++ b/sound/usb/line6/driver.c
2081     @@ -754,8 +754,9 @@ int line6_probe(struct usb_interface *interface,
2082     goto error;
2083     }
2084    
2085     + line6_get_interval(line6);
2086     +
2087     if (properties->capabilities & LINE6_CAP_CONTROL) {
2088     - line6_get_interval(line6);
2089     ret = line6_init_cap_control(line6);
2090     if (ret < 0)
2091     goto error;
2092     diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
2093     index 9ff0db4e2d0c..933aeec46f4a 100644
2094     --- a/tools/perf/builtin-diff.c
2095     +++ b/tools/perf/builtin-diff.c
2096     @@ -1199,7 +1199,7 @@ static int ui_init(void)
2097     BUG_ON(1);
2098     }
2099    
2100     - perf_hpp__register_sort_field(fmt);
2101     + perf_hpp__prepend_sort_field(fmt);
2102     return 0;
2103     }
2104    
2105     diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
2106     index 37388397b5bc..18cfcdc90356 100644
2107     --- a/tools/perf/ui/hist.c
2108     +++ b/tools/perf/ui/hist.c
2109     @@ -521,6 +521,12 @@ void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
2110     list_add_tail(&format->sort_list, &list->sorts);
2111     }
2112    
2113     +void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
2114     + struct perf_hpp_fmt *format)
2115     +{
2116     + list_add(&format->sort_list, &list->sorts);
2117     +}
2118     +
2119     void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
2120     {
2121     list_del(&format->list);
2122     @@ -560,6 +566,10 @@ void perf_hpp__setup_output_field(struct perf_hpp_list *list)
2123     perf_hpp_list__for_each_sort_list(list, fmt) {
2124     struct perf_hpp_fmt *pos;
2125    
2126     + /* skip sort-only fields ("sort_compute" in perf diff) */
2127     + if (!fmt->entry && !fmt->color)
2128     + continue;
2129     +
2130     perf_hpp_list__for_each_format(list, pos) {
2131     if (fmt_equal(fmt, pos))
2132     goto next;
2133     diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
2134     index 9928fed8bc59..a440a04a29ff 100644
2135     --- a/tools/perf/util/hist.h
2136     +++ b/tools/perf/util/hist.h
2137     @@ -282,6 +282,8 @@ void perf_hpp_list__column_register(struct perf_hpp_list *list,
2138     struct perf_hpp_fmt *format);
2139     void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
2140     struct perf_hpp_fmt *format);
2141     +void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
2142     + struct perf_hpp_fmt *format);
2143    
2144     static inline void perf_hpp__column_register(struct perf_hpp_fmt *format)
2145     {
2146     @@ -293,6 +295,11 @@ static inline void perf_hpp__register_sort_field(struct perf_hpp_fmt *format)
2147     perf_hpp_list__register_sort_field(&perf_hpp_list, format);
2148     }
2149    
2150     +static inline void perf_hpp__prepend_sort_field(struct perf_hpp_fmt *format)
2151     +{
2152     + perf_hpp_list__prepend_sort_field(&perf_hpp_list, format);
2153     +}
2154     +
2155     #define perf_hpp_list__for_each_format(_list, format) \
2156     list_for_each_entry(format, &(_list)->fields, list)
2157
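
perf diff's computed column must be the primary sort key, so ui_init() now prepends it (list_add) instead of appending (list_add_tail), and perf_hpp__setup_output_field() skips sort-only entries that have no entry/color callbacks. A toy model of why prepend wins (plain C, an array standing in for the list_head chain):

    #include <stdio.h>

    #define MAX 8
    static const char *sorts[MAX];
    static int nsorts;

    static void register_sort_field(const char *name)   /* append */
    {
        sorts[nsorts++] = name;
    }

    static void prepend_sort_field(const char *name)    /* new: prepend */
    {
        for (int i = nsorts++; i > 0; i--)
            sorts[i] = sorts[i - 1];
        sorts[0] = name;
    }

    int main(void)
    {
        register_sort_field("overhead");
        register_sort_field("symbol");
        prepend_sort_field("delta");    /* the computed column sorts first */

        for (int i = 0; i < nsorts; i++)
            printf("%d: %s\n", i + 1, sorts[i]);
        return 0;
    }
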