Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.4/0120-4.4.21-all-fixes.patch

Revision 2830
Fri Sep 16 08:34:18 2016 UTC by niro
File size: 341564 bytes
-linux-4.4.21
1 niro 2830 diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
2     index 402ab99e409f..6716413c17ba 100644
3     --- a/Documentation/filesystems/proc.txt
4     +++ b/Documentation/filesystems/proc.txt
5     @@ -346,7 +346,7 @@ address perms offset dev inode pathname
6     a7cb1000-a7cb2000 ---p 00000000 00:00 0
7     a7cb2000-a7eb2000 rw-p 00000000 00:00 0
8     a7eb2000-a7eb3000 ---p 00000000 00:00 0
9     -a7eb3000-a7ed5000 rw-p 00000000 00:00 0 [stack:1001]
10     +a7eb3000-a7ed5000 rw-p 00000000 00:00 0
11     a7ed5000-a8008000 r-xp 00000000 03:00 4222 /lib/libc.so.6
12     a8008000-a800a000 r--p 00133000 03:00 4222 /lib/libc.so.6
13     a800a000-a800b000 rw-p 00135000 03:00 4222 /lib/libc.so.6
14     @@ -378,7 +378,6 @@ is not associated with a file:
15    
16     [heap] = the heap of the program
17     [stack] = the stack of the main process
18     - [stack:1001] = the stack of the thread with tid 1001
19     [vdso] = the "virtual dynamic shared object",
20     the kernel system call handler
21    
22     @@ -386,10 +385,8 @@ is not associated with a file:
23    
24     The /proc/PID/task/TID/maps is a view of the virtual memory from the viewpoint
25     of the individual tasks of a process. In this file you will see a mapping marked
26     -as [stack] if that task sees it as a stack. This is a key difference from the
27     -content of /proc/PID/maps, where you will see all mappings that are being used
28     -as stack by all of those tasks. Hence, for the example above, the task-level
29     -map, i.e. /proc/PID/task/TID/maps for thread 1001 will look like this:
30     +as [stack] if that task sees it as a stack. Hence, for the example above, the
31     +task-level map, i.e. /proc/PID/task/TID/maps for thread 1001 will look like this:
32    
33     08048000-08049000 r-xp 00000000 03:00 8312 /opt/test
34     08049000-0804a000 rw-p 00001000 03:00 8312 /opt/test
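
For illustration, the mappings above can be inspected from userspace by reading the maps files directly. A minimal sketch in plain C (not part of the patch; after this change, per-thread stack annotations appear only in the per-task file /proc/<pid>/task/<tid>/maps):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            /* The main thread's stack is still annotated in
             * /proc/self/maps; other threads' stacks are only
             * marked in /proc/<pid>/task/<tid>/maps. */
            FILE *f = fopen("/proc/self/maps", "r");
            char line[512];

            if (!f)
                    return 1;
            while (fgets(line, sizeof(line), f))
                    if (strstr(line, "[stack]"))
                            fputs(line, stdout);
            fclose(f);
            return 0;
    }
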
35     diff --git a/Makefile b/Makefile
36     index b74d60081a16..d1cc9e0b7473 100644
37     --- a/Makefile
38     +++ b/Makefile
39     @@ -1,6 +1,6 @@
40     VERSION = 4
41     PATCHLEVEL = 4
42     -SUBLEVEL = 20
43     +SUBLEVEL = 21
44     EXTRAVERSION =
45     NAME = Blurry Fish Butt
46    
47     diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
48     index 871f21783866..14cdc6dea493 100644
49     --- a/arch/arm64/Kconfig
50     +++ b/arch/arm64/Kconfig
51     @@ -391,6 +391,15 @@ config CAVIUM_ERRATUM_22375
52    
53     If unsure, say Y.
54    
55     +config CAVIUM_ERRATUM_23144
56     + bool "Cavium erratum 23144: ITS SYNC hang on dual socket system"
57     + depends on NUMA
58     + default y
59     + help
60     + The ITS SYNC command can hang for cross-node I/O and collections/CPU mappings.
61     +
62     + If unsure, say Y.
63     +
64     config CAVIUM_ERRATUM_23154
65     bool "Cavium erratum 23154: Access to ICC_IAR1_EL1 is not sync'ed"
66     default y
67     @@ -401,6 +410,17 @@ config CAVIUM_ERRATUM_23154
68    
69     If unsure, say Y.
70    
71     +config CAVIUM_ERRATUM_27456
72     + bool "Cavium erratum 27456: Broadcast TLBI instructions may cause icache corruption"
73     + default y
74     + help
75     + On ThunderX T88 pass 1.x through 2.1 parts, broadcast TLBI
76     + instructions may cause the icache to become corrupted if it
77     + contains data for a non-current ASID. The fix is to
78     + invalidate the icache when changing the mm context.
79     +
80     + If unsure, say Y.
81     +
82     endmenu
83    
84    
85     diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
86     index 2731d3b25ed2..8ec88e5b290f 100644
87     --- a/arch/arm64/include/asm/arch_gicv3.h
88     +++ b/arch/arm64/include/asm/arch_gicv3.h
89     @@ -103,6 +103,7 @@ static inline u64 gic_read_iar_common(void)
90     u64 irqstat;
91    
92     asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
93     + dsb(sy);
94     return irqstat;
95     }
96    
97     diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
98     index 8f271b83f910..8136afc9df0d 100644
99     --- a/arch/arm64/include/asm/cpufeature.h
100     +++ b/arch/arm64/include/asm/cpufeature.h
101     @@ -30,8 +30,9 @@
102     #define ARM64_HAS_LSE_ATOMICS 5
103     #define ARM64_WORKAROUND_CAVIUM_23154 6
104     #define ARM64_WORKAROUND_834220 7
105     +#define ARM64_WORKAROUND_CAVIUM_27456 8
106    
107     -#define ARM64_NCAPS 8
108     +#define ARM64_NCAPS 9
109    
110     #ifndef __ASSEMBLY__
111    
112     diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
113     index 5e6857b6bdc4..2d960f8588b0 100644
114     --- a/arch/arm64/include/asm/kvm_arm.h
115     +++ b/arch/arm64/include/asm/kvm_arm.h
116     @@ -107,8 +107,6 @@
117     #define TCR_EL2_MASK (TCR_EL2_TG0 | TCR_EL2_SH0 | \
118     TCR_EL2_ORGN0 | TCR_EL2_IRGN0 | TCR_EL2_T0SZ)
119    
120     -#define TCR_EL2_FLAGS (TCR_EL2_RES1 | TCR_EL2_PS_40B)
121     -
122     /* VTCR_EL2 Registers bits */
123     #define VTCR_EL2_RES1 (1 << 31)
124     #define VTCR_EL2_PS_MASK (7 << 16)
125     diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
126     index feb6b4efa641..a3e846a28b05 100644
127     --- a/arch/arm64/kernel/cpu_errata.c
128     +++ b/arch/arm64/kernel/cpu_errata.c
129     @@ -100,6 +100,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
130     MIDR_RANGE(MIDR_THUNDERX, 0x00, 0x01),
131     },
132     #endif
133     +#ifdef CONFIG_CAVIUM_ERRATUM_27456
134     + {
135     + /* Cavium ThunderX, T88 pass 1.x - 2.1 */
136     + .desc = "Cavium erratum 27456",
137     + .capability = ARM64_WORKAROUND_CAVIUM_27456,
138     + MIDR_RANGE(MIDR_THUNDERX, 0x00,
139     + (1 << MIDR_VARIANT_SHIFT) | 1),
140     + },
141     +#endif
142     {
143     }
144     };
145     diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
146     index 178ba2248a98..84c338f017b2 100644
147     --- a/arch/arm64/kvm/hyp-init.S
148     +++ b/arch/arm64/kvm/hyp-init.S
149     @@ -64,7 +64,7 @@ __do_hyp_init:
150     mrs x4, tcr_el1
151     ldr x5, =TCR_EL2_MASK
152     and x4, x4, x5
153     - ldr x5, =TCR_EL2_FLAGS
154     + mov x5, #TCR_EL2_RES1
155     orr x4, x4, x5
156    
157     #ifndef CONFIG_ARM64_VA_BITS_48
158     @@ -85,15 +85,18 @@ __do_hyp_init:
159     ldr_l x5, idmap_t0sz
160     bfi x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
161     #endif
162     - msr tcr_el2, x4
163     -
164     - ldr x4, =VTCR_EL2_FLAGS
165     /*
166     * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS bits in
167     - * VTCR_EL2.
168     + * TCR_EL2 and VTCR_EL2.
169     */
170     mrs x5, ID_AA64MMFR0_EL1
171     bfi x4, x5, #16, #3
172     +
173     + msr tcr_el2, x4
174     +
175     + ldr x4, =VTCR_EL2_FLAGS
176     + bfi x4, x5, #16, #3
177     +
178     msr vtcr_el2, x4
179    
180     mrs x4, mair_el1
181     diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
182     index 1f6bb29ca53b..18201e9e8cc7 100644
183     --- a/arch/arm64/mm/proc.S
184     +++ b/arch/arm64/mm/proc.S
185     @@ -25,6 +25,8 @@
186     #include <asm/hwcap.h>
187     #include <asm/pgtable-hwdef.h>
188     #include <asm/pgtable.h>
189     +#include <asm/cpufeature.h>
190     +#include <asm/alternative.h>
191    
192     #include "proc-macros.S"
193    
194     @@ -137,7 +139,17 @@ ENTRY(cpu_do_switch_mm)
195     bfi x0, x1, #48, #16 // set the ASID
196     msr ttbr0_el1, x0 // set TTBR0
197     isb
198     +alternative_if_not ARM64_WORKAROUND_CAVIUM_27456
199     ret
200     + nop
201     + nop
202     + nop
203     +alternative_else
204     + ic iallu
205     + dsb nsh
206     + isb
207     + ret
208     +alternative_endif
209     ENDPROC(cpu_do_switch_mm)
210    
211     .section ".text.init", #alloc, #execinstr
212     diff --git a/arch/metag/include/asm/atomic_lnkget.h b/arch/metag/include/asm/atomic_lnkget.h
213     index a62581815624..88fa25fae8bd 100644
214     --- a/arch/metag/include/asm/atomic_lnkget.h
215     +++ b/arch/metag/include/asm/atomic_lnkget.h
216     @@ -61,7 +61,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
217     " CMPT %0, #HI(0x02000000)\n" \
218     " BNZ 1b\n" \
219     : "=&d" (temp), "=&da" (result) \
220     - : "da" (&v->counter), "bd" (i) \
221     + : "da" (&v->counter), "br" (i) \
222     : "cc"); \
223     \
224     smp_mb(); \
225     diff --git a/arch/powerpc/include/asm/icswx.h b/arch/powerpc/include/asm/icswx.h
226     index 9f8402b35115..27e588f6c72e 100644
227     --- a/arch/powerpc/include/asm/icswx.h
228     +++ b/arch/powerpc/include/asm/icswx.h
229     @@ -164,6 +164,7 @@ struct coprocessor_request_block {
230     #define ICSWX_INITIATED (0x8)
231     #define ICSWX_BUSY (0x4)
232     #define ICSWX_REJECTED (0x2)
233     +#define ICSWX_XERS0 (0x1) /* undefined or set from XERSO. */
234    
235     static inline int icswx(__be32 ccw, struct coprocessor_request_block *crb)
236     {
237     diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
238     index bf8f34a58670..b7019b559ddb 100644
239     --- a/arch/powerpc/kernel/tm.S
240     +++ b/arch/powerpc/kernel/tm.S
241     @@ -110,17 +110,11 @@ _GLOBAL(tm_reclaim)
242     std r3, STK_PARAM(R3)(r1)
243     SAVE_NVGPRS(r1)
244    
245     - /* We need to setup MSR for VSX register save instructions. Here we
246     - * also clear the MSR RI since when we do the treclaim, we won't have a
247     - * valid kernel pointer for a while. We clear RI here as it avoids
248     - * adding another mtmsr closer to the treclaim. This makes the region
249     - * maked as non-recoverable wider than it needs to be but it saves on
250     - * inserting another mtmsrd later.
251     - */
252     + /* We need to setup MSR for VSX register save instructions. */
253     mfmsr r14
254     mr r15, r14
255     ori r15, r15, MSR_FP
256     - li r16, MSR_RI
257     + li r16, 0
258     ori r16, r16, MSR_EE /* IRQs hard off */
259     andc r15, r15, r16
260     oris r15, r15, MSR_VEC@h
261     @@ -176,7 +170,17 @@ dont_backup_fp:
262     1: tdeqi r6, 0
263     EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0
264    
265     - /* The moment we treclaim, ALL of our GPRs will switch
266     + /* Clear MSR RI since we are about to change r1, EE is already off. */
267     + li r4, 0
268     + mtmsrd r4, 1
269     +
270     + /*
271     + * BE CAREFUL HERE:
272     + * At this point we can't take an SLB miss since we have MSR_RI
273     + * off. Load only to/from the stack/paca which are in SLB bolted regions
274     + * until we turn MSR RI back on.
275     + *
276     + * The moment we treclaim, ALL of our GPRs will switch
277     * to user register state. (FPRs, CCR etc. also!)
278     * Use an sprg and a tm_scratch in the PACA to shuffle.
279     */
280     @@ -197,6 +201,11 @@ dont_backup_fp:
281    
282     /* Store the PPR in r11 and reset to decent value */
283     std r11, GPR11(r1) /* Temporary stash */
284     +
285     + /* Reset MSR RI so we can take SLB faults again */
286     + li r11, MSR_RI
287     + mtmsrd r11, 1
288     +
289     mfspr r11, SPRN_PPR
290     HMT_MEDIUM
291    
292     @@ -397,11 +406,6 @@ restore_gprs:
293     ld r5, THREAD_TM_DSCR(r3)
294     ld r6, THREAD_TM_PPR(r3)
295    
296     - /* Clear the MSR RI since we are about to change R1. EE is already off
297     - */
298     - li r4, 0
299     - mtmsrd r4, 1
300     -
301     REST_GPR(0, r7) /* GPR0 */
302     REST_2GPRS(2, r7) /* GPR2-3 */
303     REST_GPR(4, r7) /* GPR4 */
304     @@ -439,10 +443,33 @@ restore_gprs:
305     ld r6, _CCR(r7)
306     mtcr r6
307    
308     - REST_GPR(1, r7) /* GPR1 */
309     - REST_GPR(5, r7) /* GPR5-7 */
310     REST_GPR(6, r7)
311     - ld r7, GPR7(r7)
312     +
313     + /*
314     + * Store r1 and r5 on the stack so that we can access them
315     + * after we clear MSR RI.
316     + */
317     +
318     + REST_GPR(5, r7)
319     + std r5, -8(r1)
320     + ld r5, GPR1(r7)
321     + std r5, -16(r1)
322     +
323     + REST_GPR(7, r7)
324     +
325     + /* Clear MSR RI since we are about to change r1. EE is already off */
326     + li r5, 0
327     + mtmsrd r5, 1
328     +
329     + /*
330     + * BE CAREFUL HERE:
331     + * At this point we can't take an SLB miss since we have MSR_RI
332     + * off. Load only to/from the stack/paca which are in SLB bolted regions
333     + * until we turn MSR RI back on.
334     + */
335     +
336     + ld r5, -8(r1)
337     + ld r1, -16(r1)
338    
339     /* Commit register state as checkpointed state: */
340     TRECHKPT
341     diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
342     index b8045b97f4fb..d750cc0dfe30 100644
343     --- a/arch/s390/crypto/prng.c
344     +++ b/arch/s390/crypto/prng.c
345     @@ -669,11 +669,13 @@ static const struct file_operations prng_tdes_fops = {
346     static struct miscdevice prng_sha512_dev = {
347     .name = "prandom",
348     .minor = MISC_DYNAMIC_MINOR,
349     + .mode = 0644,
350     .fops = &prng_sha512_fops,
351     };
352     static struct miscdevice prng_tdes_dev = {
353     .name = "prandom",
354     .minor = MISC_DYNAMIC_MINOR,
355     + .mode = 0644,
356     .fops = &prng_tdes_fops,
357     };
358    
359     diff --git a/arch/s390/include/asm/pci_dma.h b/arch/s390/include/asm/pci_dma.h
360     index 1aac41e83ea1..92df3eb8d14e 100644
361     --- a/arch/s390/include/asm/pci_dma.h
362     +++ b/arch/s390/include/asm/pci_dma.h
363     @@ -23,6 +23,8 @@ enum zpci_ioat_dtype {
364     #define ZPCI_IOTA_FS_2G 2
365     #define ZPCI_KEY (PAGE_DEFAULT_KEY << 5)
366    
367     +#define ZPCI_TABLE_SIZE_RT (1UL << 42)
368     +
369     #define ZPCI_IOTA_STO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_ST)
370     #define ZPCI_IOTA_RTTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RT)
371     #define ZPCI_IOTA_RSTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RS)
372     diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
373     index 19442395f413..f2f6720a3331 100644
374     --- a/arch/s390/pci/pci.c
375     +++ b/arch/s390/pci/pci.c
376     @@ -701,8 +701,7 @@ static int zpci_restore(struct device *dev)
377     goto out;
378    
379     zpci_map_resources(pdev);
380     - zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET,
381     - zdev->start_dma + zdev->iommu_size - 1,
382     + zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
383     (u64) zdev->dma_table);
384    
385     out:
386     diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
387     index d348f2c09a1e..3a40f718baef 100644
388     --- a/arch/s390/pci/pci_dma.c
389     +++ b/arch/s390/pci/pci_dma.c
390     @@ -458,7 +458,19 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
391     goto out_clean;
392     }
393    
394     - zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET;
395     + /*
396     + * Restrict the iommu bitmap size to the minimum of the following:
397     + * - main memory size
398     + * - 3-level pagetable address limit minus start_dma offset
399     + * - DMA address range allowed by the hardware (clp query pci fn)
400     + *
401     + * Also set zdev->end_dma to the actual end address of the usable
402     + * range, instead of the theoretical maximum as reported by hardware.
403     + */
404     + zdev->iommu_size = min3((u64) high_memory,
405     + ZPCI_TABLE_SIZE_RT - zdev->start_dma,
406     + zdev->end_dma - zdev->start_dma + 1);
407     + zdev->end_dma = zdev->start_dma + zdev->iommu_size - 1;
408     zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
409     zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
410     if (!zdev->iommu_bitmap) {
411     @@ -466,10 +478,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
412     goto out_reg;
413     }
414    
415     - rc = zpci_register_ioat(zdev,
416     - 0,
417     - zdev->start_dma + PAGE_OFFSET,
418     - zdev->start_dma + zdev->iommu_size - 1,
419     + rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
420     (u64) zdev->dma_table);
421     if (rc)
422     goto out_reg;
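
The comment above lists three independent upper bounds on the iommu bitmap. A small standalone sketch with hypothetical example values (16 GiB of RAM, a 2 GiB start_dma offset, hardware reporting no DMA limit) shows how the clamp resolves:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t min3_u64(uint64_t a, uint64_t b, uint64_t c)
    {
            uint64_t m = a < b ? a : b;

            return m < c ? m : c;
    }

    int main(void)
    {
            /* Hypothetical example values, mirroring the clamp above. */
            uint64_t high_memory    = 16ULL << 30;  /* main memory size   */
            uint64_t table_size_rt  = 1ULL << 42;   /* 3-level table limit */
            uint64_t start_dma      = 1ULL << 31;   /* 2 GiB DMA offset   */
            uint64_t end_dma        = ~0ULL;        /* hw: "no limit"     */

            uint64_t iommu_size = min3_u64(high_memory,
                                           table_size_rt - start_dma,
                                           end_dma - start_dma + 1);

            /* Here the memory size wins: iommu_size = 16 GiB. */
            printf("iommu_size = %llu bytes\n",
                   (unsigned long long)iommu_size);
            return 0;
    }
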
423     diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
424     index 2f69e3b184f6..a3e1f8497f8c 100644
425     --- a/arch/x86/kernel/apic/apic.c
426     +++ b/arch/x86/kernel/apic/apic.c
427     @@ -1587,6 +1587,9 @@ void __init enable_IR_x2apic(void)
428     unsigned long flags;
429     int ret, ir_stat;
430    
431     + if (skip_ioapic_setup)
432     + return;
433     +
434     ir_stat = irq_remapping_prepare();
435     if (ir_stat < 0 && !x2apic_supported())
436     return;
437     diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
438     index 20e242ea1bc4..cfc4a966e2b9 100644
439     --- a/arch/x86/kernel/cpu/mshyperv.c
440     +++ b/arch/x86/kernel/cpu/mshyperv.c
441     @@ -152,6 +152,11 @@ static struct clocksource hyperv_cs = {
442     .flags = CLOCK_SOURCE_IS_CONTINUOUS,
443     };
444    
445     +static unsigned char hv_get_nmi_reason(void)
446     +{
447     + return 0;
448     +}
449     +
450     static void __init ms_hyperv_init_platform(void)
451     {
452     /*
453     @@ -191,6 +196,13 @@ static void __init ms_hyperv_init_platform(void)
454     machine_ops.crash_shutdown = hv_machine_crash_shutdown;
455     #endif
456     mark_tsc_unstable("running on Hyper-V");
457     +
458     + /*
459     + * Generation 2 instances don't support reading the NMI status from
460     + * 0x61 port.
461     + * port 0x61.
462     + if (efi_enabled(EFI_BOOT))
463     + x86_platform.get_nmi_reason = hv_get_nmi_reason;
464     }
465    
466     const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
467     diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
468     index a316ca96f1b6..fc704ed587e8 100644
469     --- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
470     +++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
471     @@ -211,6 +211,20 @@ static void __put_rmid(u32 rmid)
472     list_add_tail(&entry->list, &cqm_rmid_limbo_lru);
473     }
474    
475     +static void cqm_cleanup(void)
476     +{
477     + int i;
478     +
479     + if (!cqm_rmid_ptrs)
480     + return;
481     +
482     + for (i = 0; i < cqm_max_rmid; i++)
483     + kfree(cqm_rmid_ptrs[i]);
484     +
485     + kfree(cqm_rmid_ptrs);
486     + cqm_rmid_ptrs = NULL;
487     +}
488     +
489     static int intel_cqm_setup_rmid_cache(void)
490     {
491     struct cqm_rmid_entry *entry;
492     @@ -218,7 +232,7 @@ static int intel_cqm_setup_rmid_cache(void)
493     int r = 0;
494    
495     nr_rmids = cqm_max_rmid + 1;
496     - cqm_rmid_ptrs = kmalloc(sizeof(struct cqm_rmid_entry *) *
497     + cqm_rmid_ptrs = kzalloc(sizeof(struct cqm_rmid_entry *) *
498     nr_rmids, GFP_KERNEL);
499     if (!cqm_rmid_ptrs)
500     return -ENOMEM;
501     @@ -249,11 +263,9 @@ static int intel_cqm_setup_rmid_cache(void)
502     mutex_unlock(&cache_mutex);
503    
504     return 0;
505     -fail:
506     - while (r--)
507     - kfree(cqm_rmid_ptrs[r]);
508    
509     - kfree(cqm_rmid_ptrs);
510     +fail:
511     + cqm_cleanup();
512     return -ENOMEM;
513     }
514    
515     @@ -281,9 +293,13 @@ static bool __match_event(struct perf_event *a, struct perf_event *b)
516    
517     /*
518     * Events that target same task are placed into the same cache group.
519     + * Mark it as a multi event group, so that we update ->count
520     + * for every event rather than just the group leader later.
521     */
522     - if (a->hw.target == b->hw.target)
523     + if (a->hw.target == b->hw.target) {
524     + b->hw.is_group_event = true;
525     return true;
526     + }
527    
528     /*
529     * Are we an inherited event?
530     @@ -849,6 +865,7 @@ static void intel_cqm_setup_event(struct perf_event *event,
531     bool conflict = false;
532     u32 rmid;
533    
534     + event->hw.is_group_event = false;
535     list_for_each_entry(iter, &cache_groups, hw.cqm_groups_entry) {
536     rmid = iter->hw.cqm_rmid;
537    
538     @@ -940,7 +957,9 @@ static u64 intel_cqm_event_count(struct perf_event *event)
539     return __perf_event_count(event);
540    
541     /*
542     - * Only the group leader gets to report values. This stops us
543     + * Only the group leader gets to report values, except in the case
544     + * of multiple events in the same group, where we still need to read
545     + * the other events. This stops us
546     * reporting duplicate values to userspace, and gives us a clear
547     * rule for which task gets to report the values.
548     *
549     @@ -948,7 +967,7 @@ static u64 intel_cqm_event_count(struct perf_event *event)
550     * specific packages - we forfeit that ability when we create
551     * task events.
552     */
553     - if (!cqm_group_leader(event))
554     + if (!cqm_group_leader(event) && !event->hw.is_group_event)
555     return 0;
556    
557     /*
558     @@ -1315,7 +1334,7 @@ static const struct x86_cpu_id intel_cqm_match[] = {
559    
560     static int __init intel_cqm_init(void)
561     {
562     - char *str, scale[20];
563     + char *str = NULL, scale[20];
564     int i, cpu, ret;
565    
566     if (!x86_match_cpu(intel_cqm_match))
567     @@ -1375,16 +1394,25 @@ static int __init intel_cqm_init(void)
568     cqm_pick_event_reader(i);
569     }
570    
571     - __perf_cpu_notifier(intel_cqm_cpu_notifier);
572     -
573     ret = perf_pmu_register(&intel_cqm_pmu, "intel_cqm", -1);
574     - if (ret)
575     + if (ret) {
576     pr_err("Intel CQM perf registration failed: %d\n", ret);
577     - else
578     - pr_info("Intel CQM monitoring enabled\n");
579     + goto out;
580     + }
581     +
582     + pr_info("Intel CQM monitoring enabled\n");
583    
584     + /*
585     + * Register the hot cpu notifier only once we are sure cqm
586     + * is enabled, to avoid leaking the notifier.
587     + */
588     + __perf_cpu_notifier(intel_cqm_cpu_notifier);
589     out:
590     cpu_notifier_register_done();
591     + if (ret) {
592     + kfree(str);
593     + cqm_cleanup();
594     + }
595    
596     return ret;
597     }
598     diff --git a/block/blk-core.c b/block/blk-core.c
599     index f8e64cac981a..4fab5d610805 100644
600     --- a/block/blk-core.c
601     +++ b/block/blk-core.c
602     @@ -515,7 +515,9 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
603    
604     void blk_set_queue_dying(struct request_queue *q)
605     {
606     - queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
607     + spin_lock_irq(q->queue_lock);
608     + queue_flag_set(QUEUE_FLAG_DYING, q);
609     + spin_unlock_irq(q->queue_lock);
610    
611     if (q->mq_ops)
612     blk_mq_wake_waiters(q);
613     diff --git a/block/blk-merge.c b/block/blk-merge.c
614     index b966db8f3556..7225511cf0b4 100644
615     --- a/block/blk-merge.c
616     +++ b/block/blk-merge.c
617     @@ -92,9 +92,31 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
618     bool do_split = true;
619     struct bio *new = NULL;
620     const unsigned max_sectors = get_max_io_size(q, bio);
621     + unsigned bvecs = 0;
622    
623     bio_for_each_segment(bv, bio, iter) {
624     /*
625     + * With arbitrary bio size, the incoming bio may be very
626     + * big. We have to split the bio into small bios so that
627     + * each holds at most BIO_MAX_PAGES bvecs because
628     + * bio_clone() can fail to allocate big bvecs.
629     + *
630     + * It would have been better to apply the limit per
631     + * request queue in which bio_clone() is involved,
632     + * instead of globally. The biggest blocker is the
633     + * bio_clone() in bio bounce.
634     + *
635     + * If the bio is split for this reason, we could have
636     + * allowed the resulting bios to continue merging, but we
637     + * don't do that now, to keep the change simple.
638     + *
639     + * TODO: deal with bio bounce's bio_clone() gracefully
640     + * and convert the global limit into per-queue limit.
641     + */
642     + if (bvecs++ >= BIO_MAX_PAGES)
643     + goto split;
644     +
645     + /*
646     * If the queue doesn't support SG gaps and adding this
647     * offset would create a gap, disallow it.
648     */
649     diff --git a/block/blk-mq.c b/block/blk-mq.c
650     index 6d6f8feb48c0..839b1e17481b 100644
651     --- a/block/blk-mq.c
652     +++ b/block/blk-mq.c
653     @@ -601,8 +601,10 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
654     * If a request wasn't started before the queue was
655     * marked dying, kill it here or it'll go unnoticed.
656     */
657     - if (unlikely(blk_queue_dying(rq->q)))
658     - blk_mq_complete_request(rq, -EIO);
659     + if (unlikely(blk_queue_dying(rq->q))) {
660     + rq->errors = -EIO;
661     + blk_mq_end_request(rq, rq->errors);
662     + }
663     return;
664     }
665     if (rq->cmd_flags & REQ_NO_TIMEOUT)
666     diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
667     index 79107597a594..c306b483de60 100644
668     --- a/drivers/bluetooth/btusb.c
669     +++ b/drivers/bluetooth/btusb.c
670     @@ -2056,12 +2056,13 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
671     return -EINVAL;
672     }
673    
674     - /* At the moment only the hardware variant iBT 3.0 (LnP/SfP) is
675     - * supported by this firmware loading method. This check has been
676     - * put in place to ensure correct forward compatibility options
677     - * when newer hardware variants come along.
678     + /* At the moment the iBT 3.0 hardware variants 0x0b (LnP/SfP)
679     + * and 0x0c (WsP) are supported by this firmware loading method.
680     + *
681     + * This check has been put in place to ensure correct forward
682     + * compatibility options when newer hardware variants come along.
683     */
684     - if (ver->hw_variant != 0x0b) {
685     + if (ver->hw_variant != 0x0b && ver->hw_variant != 0x0c) {
686     BT_ERR("%s: Unsupported Intel hardware variant (%u)",
687     hdev->name, ver->hw_variant);
688     kfree_skb(skb);
689     diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c
690     index aa30af5f0f2b..7845a38b6604 100644
691     --- a/drivers/char/hw_random/exynos-rng.c
692     +++ b/drivers/char/hw_random/exynos-rng.c
693     @@ -118,6 +118,7 @@ static int exynos_rng_probe(struct platform_device *pdev)
694     {
695     struct exynos_rng *exynos_rng;
696     struct resource *res;
697     + int ret;
698    
699     exynos_rng = devm_kzalloc(&pdev->dev, sizeof(struct exynos_rng),
700     GFP_KERNEL);
701     @@ -145,7 +146,13 @@ static int exynos_rng_probe(struct platform_device *pdev)
702     pm_runtime_use_autosuspend(&pdev->dev);
703     pm_runtime_enable(&pdev->dev);
704    
705     - return devm_hwrng_register(&pdev->dev, &exynos_rng->rng);
706     + ret = devm_hwrng_register(&pdev->dev, &exynos_rng->rng);
707     + if (ret) {
708     + pm_runtime_dont_use_autosuspend(&pdev->dev);
709     + pm_runtime_disable(&pdev->dev);
710     + }
711     +
712     + return ret;
713     }
714    
715     #ifdef CONFIG_PM
716     diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
717     index 27c0da29eca3..10224b01b97c 100644
718     --- a/drivers/clk/clk-xgene.c
719     +++ b/drivers/clk/clk-xgene.c
720     @@ -351,7 +351,8 @@ static int xgene_clk_set_rate(struct clk_hw *hw, unsigned long rate,
721     /* Set new divider */
722     data = xgene_clk_read(pclk->param.divider_reg +
723     pclk->param.reg_divider_offset);
724     - data &= ~((1 << pclk->param.reg_divider_width) - 1);
725     + data &= ~((1 << pclk->param.reg_divider_width) - 1)
726     + << pclk->param.reg_divider_shift;
727     data |= divider;
728     xgene_clk_write(data, pclk->param.divider_reg +
729     pclk->param.reg_divider_offset);
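
The bug fixed here is a bit-field update that ignored the field's shift: clearing only ((1 << width) - 1) leaves stale divider bits in place whenever the field does not start at bit 0. For comparison, a generic read-modify-write of a shifted register field looks like the following sketch (set_field is a hypothetical standalone helper, not part of the driver):

    #include <stdint.h>

    /* Replace the field at bits [shift + width - 1 : shift] of reg
     * with val, leaving all other bits untouched. */
    static uint32_t set_field(uint32_t reg, unsigned int shift,
                              unsigned int width, uint32_t val)
    {
            uint32_t mask = ((1u << width) - 1) << shift;

            return (reg & ~mask) | ((val << shift) & mask);
    }
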
730     diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
731     index 4dbf1db16aca..9cc8abd3d116 100644
732     --- a/drivers/cpufreq/cpufreq_userspace.c
733     +++ b/drivers/cpufreq/cpufreq_userspace.c
734     @@ -17,6 +17,7 @@
735     #include <linux/init.h>
736     #include <linux/module.h>
737     #include <linux/mutex.h>
738     +#include <linux/slab.h>
739    
740     static DEFINE_PER_CPU(unsigned int, cpu_is_managed);
741     static DEFINE_MUTEX(userspace_mutex);
742     @@ -31,6 +32,7 @@ static DEFINE_MUTEX(userspace_mutex);
743     static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
744     {
745     int ret = -EINVAL;
746     + unsigned int *setspeed = policy->governor_data;
747    
748     pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
749    
750     @@ -38,6 +40,8 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
751     if (!per_cpu(cpu_is_managed, policy->cpu))
752     goto err;
753    
754     + *setspeed = freq;
755     +
756     ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
757     err:
758     mutex_unlock(&userspace_mutex);
759     @@ -49,19 +53,45 @@ static ssize_t show_speed(struct cpufreq_policy *policy, char *buf)
760     return sprintf(buf, "%u\n", policy->cur);
761     }
762    
763     +static int cpufreq_userspace_policy_init(struct cpufreq_policy *policy)
764     +{
765     + unsigned int *setspeed;
766     +
767     + setspeed = kzalloc(sizeof(*setspeed), GFP_KERNEL);
768     + if (!setspeed)
769     + return -ENOMEM;
770     +
771     + policy->governor_data = setspeed;
772     + return 0;
773     +}
774     +
775     static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
776     unsigned int event)
777     {
778     + unsigned int *setspeed = policy->governor_data;
779     unsigned int cpu = policy->cpu;
780     int rc = 0;
781    
782     + if (event == CPUFREQ_GOV_POLICY_INIT)
783     + return cpufreq_userspace_policy_init(policy);
784     +
785     + if (!setspeed)
786     + return -EINVAL;
787     +
788     switch (event) {
789     + case CPUFREQ_GOV_POLICY_EXIT:
790     + mutex_lock(&userspace_mutex);
791     + policy->governor_data = NULL;
792     + kfree(setspeed);
793     + mutex_unlock(&userspace_mutex);
794     + break;
795     case CPUFREQ_GOV_START:
796     BUG_ON(!policy->cur);
797     pr_debug("started managing cpu %u\n", cpu);
798    
799     mutex_lock(&userspace_mutex);
800     per_cpu(cpu_is_managed, cpu) = 1;
801     + *setspeed = policy->cur;
802     mutex_unlock(&userspace_mutex);
803     break;
804     case CPUFREQ_GOV_STOP:
805     @@ -69,20 +99,23 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
806    
807     mutex_lock(&userspace_mutex);
808     per_cpu(cpu_is_managed, cpu) = 0;
809     + *setspeed = 0;
810     mutex_unlock(&userspace_mutex);
811     break;
812     case CPUFREQ_GOV_LIMITS:
813     mutex_lock(&userspace_mutex);
814     - pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz\n",
815     - cpu, policy->min, policy->max,
816     - policy->cur);
817     + pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n",
818     + cpu, policy->min, policy->max, policy->cur, *setspeed);
819    
820     - if (policy->max < policy->cur)
821     + if (policy->max < *setspeed)
822     __cpufreq_driver_target(policy, policy->max,
823     CPUFREQ_RELATION_H);
824     - else if (policy->min > policy->cur)
825     + else if (policy->min > *setspeed)
826     __cpufreq_driver_target(policy, policy->min,
827     CPUFREQ_RELATION_L);
828     + else
829     + __cpufreq_driver_target(policy, *setspeed,
830     + CPUFREQ_RELATION_L);
831     mutex_unlock(&userspace_mutex);
832     break;
833     }
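
With this change the userspace governor remembers the speed last requested through scaling_setspeed and returns to it after a limits event, instead of staying at whatever frequency the limit clamp produced. A minimal userland sketch of driving the governor through the standard cpufreq sysfs attributes (the 1200000 kHz value is an arbitrary example):

    #include <stdio.h>

    static int write_str(const char *path, const char *val)
    {
            FILE *f = fopen(path, "w");

            if (!f)
                    return -1;
            fputs(val, f);
            fclose(f);
            return 0;
    }

    int main(void)
    {
            /* Select the userspace governor on cpu0, then request 1.2 GHz. */
            write_str("/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor",
                      "userspace");
            write_str("/sys/devices/system/cpu/cpu0/cpufreq/scaling_setspeed",
                      "1200000");
            return 0;
    }
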
834     diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
835     index 6dc597126b79..b3044219772c 100644
836     --- a/drivers/crypto/caam/caamalg.c
837     +++ b/drivers/crypto/caam/caamalg.c
838     @@ -556,7 +556,10 @@ skip_enc:
839    
840     /* Read and write assoclen bytes */
841     append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
842     - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
843     + if (alg->caam.geniv)
844     + append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
845     + else
846     + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
847    
848     /* Skip assoc data */
849     append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
850     @@ -565,6 +568,14 @@ skip_enc:
851     append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
852     KEY_VLF);
853    
854     + if (alg->caam.geniv) {
855     + append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
856     + LDST_SRCDST_BYTE_CONTEXT |
857     + (ctx1_iv_off << LDST_OFFSET_SHIFT));
858     + append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
859     + (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
860     + }
861     +
862     /* Load Counter into CONTEXT1 reg */
863     if (is_rfc3686)
864     append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
865     @@ -2150,7 +2161,7 @@ static void init_authenc_job(struct aead_request *req,
866    
867     init_aead_job(req, edesc, all_contig, encrypt);
868    
869     - if (ivsize && (is_rfc3686 || !(alg->caam.geniv && encrypt)))
870     + if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
871     append_load_as_imm(desc, req->iv, ivsize,
872     LDST_CLASS_1_CCB |
873     LDST_SRCDST_BYTE_CONTEXT |
874     @@ -2537,20 +2548,6 @@ static int aead_decrypt(struct aead_request *req)
875     return ret;
876     }
877    
878     -static int aead_givdecrypt(struct aead_request *req)
879     -{
880     - struct crypto_aead *aead = crypto_aead_reqtfm(req);
881     - unsigned int ivsize = crypto_aead_ivsize(aead);
882     -
883     - if (req->cryptlen < ivsize)
884     - return -EINVAL;
885     -
886     - req->cryptlen -= ivsize;
887     - req->assoclen += ivsize;
888     -
889     - return aead_decrypt(req);
890     -}
891     -
892     /*
893     * allocate and map the ablkcipher extended descriptor for ablkcipher
894     */
895     @@ -3210,7 +3207,7 @@ static struct caam_aead_alg driver_aeads[] = {
896     .setkey = aead_setkey,
897     .setauthsize = aead_setauthsize,
898     .encrypt = aead_encrypt,
899     - .decrypt = aead_givdecrypt,
900     + .decrypt = aead_decrypt,
901     .ivsize = AES_BLOCK_SIZE,
902     .maxauthsize = MD5_DIGEST_SIZE,
903     },
904     @@ -3256,7 +3253,7 @@ static struct caam_aead_alg driver_aeads[] = {
905     .setkey = aead_setkey,
906     .setauthsize = aead_setauthsize,
907     .encrypt = aead_encrypt,
908     - .decrypt = aead_givdecrypt,
909     + .decrypt = aead_decrypt,
910     .ivsize = AES_BLOCK_SIZE,
911     .maxauthsize = SHA1_DIGEST_SIZE,
912     },
913     @@ -3302,7 +3299,7 @@ static struct caam_aead_alg driver_aeads[] = {
914     .setkey = aead_setkey,
915     .setauthsize = aead_setauthsize,
916     .encrypt = aead_encrypt,
917     - .decrypt = aead_givdecrypt,
918     + .decrypt = aead_decrypt,
919     .ivsize = AES_BLOCK_SIZE,
920     .maxauthsize = SHA224_DIGEST_SIZE,
921     },
922     @@ -3348,7 +3345,7 @@ static struct caam_aead_alg driver_aeads[] = {
923     .setkey = aead_setkey,
924     .setauthsize = aead_setauthsize,
925     .encrypt = aead_encrypt,
926     - .decrypt = aead_givdecrypt,
927     + .decrypt = aead_decrypt,
928     .ivsize = AES_BLOCK_SIZE,
929     .maxauthsize = SHA256_DIGEST_SIZE,
930     },
931     @@ -3394,7 +3391,7 @@ static struct caam_aead_alg driver_aeads[] = {
932     .setkey = aead_setkey,
933     .setauthsize = aead_setauthsize,
934     .encrypt = aead_encrypt,
935     - .decrypt = aead_givdecrypt,
936     + .decrypt = aead_decrypt,
937     .ivsize = AES_BLOCK_SIZE,
938     .maxauthsize = SHA384_DIGEST_SIZE,
939     },
940     @@ -3440,7 +3437,7 @@ static struct caam_aead_alg driver_aeads[] = {
941     .setkey = aead_setkey,
942     .setauthsize = aead_setauthsize,
943     .encrypt = aead_encrypt,
944     - .decrypt = aead_givdecrypt,
945     + .decrypt = aead_decrypt,
946     .ivsize = AES_BLOCK_SIZE,
947     .maxauthsize = SHA512_DIGEST_SIZE,
948     },
949     @@ -3486,7 +3483,7 @@ static struct caam_aead_alg driver_aeads[] = {
950     .setkey = aead_setkey,
951     .setauthsize = aead_setauthsize,
952     .encrypt = aead_encrypt,
953     - .decrypt = aead_givdecrypt,
954     + .decrypt = aead_decrypt,
955     .ivsize = DES3_EDE_BLOCK_SIZE,
956     .maxauthsize = MD5_DIGEST_SIZE,
957     },
958     @@ -3534,7 +3531,7 @@ static struct caam_aead_alg driver_aeads[] = {
959     .setkey = aead_setkey,
960     .setauthsize = aead_setauthsize,
961     .encrypt = aead_encrypt,
962     - .decrypt = aead_givdecrypt,
963     + .decrypt = aead_decrypt,
964     .ivsize = DES3_EDE_BLOCK_SIZE,
965     .maxauthsize = SHA1_DIGEST_SIZE,
966     },
967     @@ -3582,7 +3579,7 @@ static struct caam_aead_alg driver_aeads[] = {
968     .setkey = aead_setkey,
969     .setauthsize = aead_setauthsize,
970     .encrypt = aead_encrypt,
971     - .decrypt = aead_givdecrypt,
972     + .decrypt = aead_decrypt,
973     .ivsize = DES3_EDE_BLOCK_SIZE,
974     .maxauthsize = SHA224_DIGEST_SIZE,
975     },
976     @@ -3630,7 +3627,7 @@ static struct caam_aead_alg driver_aeads[] = {
977     .setkey = aead_setkey,
978     .setauthsize = aead_setauthsize,
979     .encrypt = aead_encrypt,
980     - .decrypt = aead_givdecrypt,
981     + .decrypt = aead_decrypt,
982     .ivsize = DES3_EDE_BLOCK_SIZE,
983     .maxauthsize = SHA256_DIGEST_SIZE,
984     },
985     @@ -3678,7 +3675,7 @@ static struct caam_aead_alg driver_aeads[] = {
986     .setkey = aead_setkey,
987     .setauthsize = aead_setauthsize,
988     .encrypt = aead_encrypt,
989     - .decrypt = aead_givdecrypt,
990     + .decrypt = aead_decrypt,
991     .ivsize = DES3_EDE_BLOCK_SIZE,
992     .maxauthsize = SHA384_DIGEST_SIZE,
993     },
994     @@ -3726,7 +3723,7 @@ static struct caam_aead_alg driver_aeads[] = {
995     .setkey = aead_setkey,
996     .setauthsize = aead_setauthsize,
997     .encrypt = aead_encrypt,
998     - .decrypt = aead_givdecrypt,
999     + .decrypt = aead_decrypt,
1000     .ivsize = DES3_EDE_BLOCK_SIZE,
1001     .maxauthsize = SHA512_DIGEST_SIZE,
1002     },
1003     @@ -3772,7 +3769,7 @@ static struct caam_aead_alg driver_aeads[] = {
1004     .setkey = aead_setkey,
1005     .setauthsize = aead_setauthsize,
1006     .encrypt = aead_encrypt,
1007     - .decrypt = aead_givdecrypt,
1008     + .decrypt = aead_decrypt,
1009     .ivsize = DES_BLOCK_SIZE,
1010     .maxauthsize = MD5_DIGEST_SIZE,
1011     },
1012     @@ -3818,7 +3815,7 @@ static struct caam_aead_alg driver_aeads[] = {
1013     .setkey = aead_setkey,
1014     .setauthsize = aead_setauthsize,
1015     .encrypt = aead_encrypt,
1016     - .decrypt = aead_givdecrypt,
1017     + .decrypt = aead_decrypt,
1018     .ivsize = DES_BLOCK_SIZE,
1019     .maxauthsize = SHA1_DIGEST_SIZE,
1020     },
1021     @@ -3864,7 +3861,7 @@ static struct caam_aead_alg driver_aeads[] = {
1022     .setkey = aead_setkey,
1023     .setauthsize = aead_setauthsize,
1024     .encrypt = aead_encrypt,
1025     - .decrypt = aead_givdecrypt,
1026     + .decrypt = aead_decrypt,
1027     .ivsize = DES_BLOCK_SIZE,
1028     .maxauthsize = SHA224_DIGEST_SIZE,
1029     },
1030     @@ -3910,7 +3907,7 @@ static struct caam_aead_alg driver_aeads[] = {
1031     .setkey = aead_setkey,
1032     .setauthsize = aead_setauthsize,
1033     .encrypt = aead_encrypt,
1034     - .decrypt = aead_givdecrypt,
1035     + .decrypt = aead_decrypt,
1036     .ivsize = DES_BLOCK_SIZE,
1037     .maxauthsize = SHA256_DIGEST_SIZE,
1038     },
1039     @@ -3956,7 +3953,7 @@ static struct caam_aead_alg driver_aeads[] = {
1040     .setkey = aead_setkey,
1041     .setauthsize = aead_setauthsize,
1042     .encrypt = aead_encrypt,
1043     - .decrypt = aead_givdecrypt,
1044     + .decrypt = aead_decrypt,
1045     .ivsize = DES_BLOCK_SIZE,
1046     .maxauthsize = SHA384_DIGEST_SIZE,
1047     },
1048     @@ -4002,7 +3999,7 @@ static struct caam_aead_alg driver_aeads[] = {
1049     .setkey = aead_setkey,
1050     .setauthsize = aead_setauthsize,
1051     .encrypt = aead_encrypt,
1052     - .decrypt = aead_givdecrypt,
1053     + .decrypt = aead_decrypt,
1054     .ivsize = DES_BLOCK_SIZE,
1055     .maxauthsize = SHA512_DIGEST_SIZE,
1056     },
1057     @@ -4051,7 +4048,7 @@ static struct caam_aead_alg driver_aeads[] = {
1058     .setkey = aead_setkey,
1059     .setauthsize = aead_setauthsize,
1060     .encrypt = aead_encrypt,
1061     - .decrypt = aead_givdecrypt,
1062     + .decrypt = aead_decrypt,
1063     .ivsize = CTR_RFC3686_IV_SIZE,
1064     .maxauthsize = MD5_DIGEST_SIZE,
1065     },
1066     @@ -4102,7 +4099,7 @@ static struct caam_aead_alg driver_aeads[] = {
1067     .setkey = aead_setkey,
1068     .setauthsize = aead_setauthsize,
1069     .encrypt = aead_encrypt,
1070     - .decrypt = aead_givdecrypt,
1071     + .decrypt = aead_decrypt,
1072     .ivsize = CTR_RFC3686_IV_SIZE,
1073     .maxauthsize = SHA1_DIGEST_SIZE,
1074     },
1075     @@ -4153,7 +4150,7 @@ static struct caam_aead_alg driver_aeads[] = {
1076     .setkey = aead_setkey,
1077     .setauthsize = aead_setauthsize,
1078     .encrypt = aead_encrypt,
1079     - .decrypt = aead_givdecrypt,
1080     + .decrypt = aead_decrypt,
1081     .ivsize = CTR_RFC3686_IV_SIZE,
1082     .maxauthsize = SHA224_DIGEST_SIZE,
1083     },
1084     @@ -4204,7 +4201,7 @@ static struct caam_aead_alg driver_aeads[] = {
1085     .setkey = aead_setkey,
1086     .setauthsize = aead_setauthsize,
1087     .encrypt = aead_encrypt,
1088     - .decrypt = aead_givdecrypt,
1089     + .decrypt = aead_decrypt,
1090     .ivsize = CTR_RFC3686_IV_SIZE,
1091     .maxauthsize = SHA256_DIGEST_SIZE,
1092     },
1093     @@ -4255,7 +4252,7 @@ static struct caam_aead_alg driver_aeads[] = {
1094     .setkey = aead_setkey,
1095     .setauthsize = aead_setauthsize,
1096     .encrypt = aead_encrypt,
1097     - .decrypt = aead_givdecrypt,
1098     + .decrypt = aead_decrypt,
1099     .ivsize = CTR_RFC3686_IV_SIZE,
1100     .maxauthsize = SHA384_DIGEST_SIZE,
1101     },
1102     @@ -4306,7 +4303,7 @@ static struct caam_aead_alg driver_aeads[] = {
1103     .setkey = aead_setkey,
1104     .setauthsize = aead_setauthsize,
1105     .encrypt = aead_encrypt,
1106     - .decrypt = aead_givdecrypt,
1107     + .decrypt = aead_decrypt,
1108     .ivsize = CTR_RFC3686_IV_SIZE,
1109     .maxauthsize = SHA512_DIGEST_SIZE,
1110     },
1111     diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c
1112     index 9ef51fafdbff..6e105e87b8ff 100644
1113     --- a/drivers/crypto/nx/nx-842-powernv.c
1114     +++ b/drivers/crypto/nx/nx-842-powernv.c
1115     @@ -442,6 +442,14 @@ static int nx842_powernv_function(const unsigned char *in, unsigned int inlen,
1116     (unsigned int)ccw,
1117     (unsigned int)be32_to_cpu(crb->ccw));
1118    
1119     + /*
1120     + * The NX842 coprocessor sets the 3rd bit in the CR register from XER[S0].
1121     + * XER[S0] is the integer summary overflow bit, which has nothing to
1122     + * do with NX. Since this bit can be set alongside other return
1123     + * values, mask it off.
1124     + */
1125     + ret &= ~ICSWX_XERS0;
1126     +
1127     switch (ret) {
1128     case ICSWX_INITIATED:
1129     ret = wait_for_csb(wmem, csb);
1130     @@ -454,10 +462,6 @@ static int nx842_powernv_function(const unsigned char *in, unsigned int inlen,
1131     pr_err_ratelimited("ICSWX rejected\n");
1132     ret = -EPROTO;
1133     break;
1134     - default:
1135     - pr_err_ratelimited("Invalid ICSWX return code %x\n", ret);
1136     - ret = -EPROTO;
1137     - break;
1138     }
1139    
1140     if (!ret)
1141     diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
1142     index f3801b983f42..3f8bb9a40df1 100644
1143     --- a/drivers/crypto/vmx/aes_cbc.c
1144     +++ b/drivers/crypto/vmx/aes_cbc.c
1145     @@ -191,7 +191,7 @@ struct crypto_alg p8_aes_cbc_alg = {
1146     .cra_init = p8_aes_cbc_init,
1147     .cra_exit = p8_aes_cbc_exit,
1148     .cra_blkcipher = {
1149     - .ivsize = 0,
1150     + .ivsize = AES_BLOCK_SIZE,
1151     .min_keysize = AES_MIN_KEY_SIZE,
1152     .max_keysize = AES_MAX_KEY_SIZE,
1153     .setkey = p8_aes_cbc_setkey,
1154     diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
1155     index 404a1b69a3ab..72f138985e18 100644
1156     --- a/drivers/crypto/vmx/aes_ctr.c
1157     +++ b/drivers/crypto/vmx/aes_ctr.c
1158     @@ -175,7 +175,7 @@ struct crypto_alg p8_aes_ctr_alg = {
1159     .cra_init = p8_aes_ctr_init,
1160     .cra_exit = p8_aes_ctr_exit,
1161     .cra_blkcipher = {
1162     - .ivsize = 0,
1163     + .ivsize = AES_BLOCK_SIZE,
1164     .min_keysize = AES_MIN_KEY_SIZE,
1165     .max_keysize = AES_MAX_KEY_SIZE,
1166     .setkey = p8_aes_ctr_setkey,
1167     diff --git a/drivers/crypto/vmx/ppc-xlate.pl b/drivers/crypto/vmx/ppc-xlate.pl
1168     index b9997335f193..b18e67d0e065 100644
1169     --- a/drivers/crypto/vmx/ppc-xlate.pl
1170     +++ b/drivers/crypto/vmx/ppc-xlate.pl
1171     @@ -139,6 +139,26 @@ my $vmr = sub {
1172     " vor $vx,$vy,$vy";
1173     };
1174    
1175     +# Some ABIs specify vrsave, special-purpose register #256, as reserved
1176     +# for system use.
1177     +my $no_vrsave = ($flavour =~ /linux-ppc64le/);
1178     +my $mtspr = sub {
1179     + my ($f,$idx,$ra) = @_;
1180     + if ($idx == 256 && $no_vrsave) {
1181     + " or $ra,$ra,$ra";
1182     + } else {
1183     + " mtspr $idx,$ra";
1184     + }
1185     +};
1186     +my $mfspr = sub {
1187     + my ($f,$rd,$idx) = @_;
1188     + if ($idx == 256 && $no_vrsave) {
1189     + " li $rd,-1";
1190     + } else {
1191     + " mfspr $rd,$idx";
1192     + }
1193     +};
1194     +
1195     # PowerISA 2.06 stuff
1196     sub vsxmem_op {
1197     my ($f, $vrt, $ra, $rb, $op) = @_;
1198     diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
1199     index 92b6acadfc52..21aacc1f45c1 100644
1200     --- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
1201     +++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
1202     @@ -243,7 +243,7 @@ static void amdgpu_atombios_dp_get_adjust_train(const u8 link_status[DP_LINK_STA
1203    
1204     /* convert bits per color to bits per pixel */
1205     /* get bpc from the EDID */
1206     -static int amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc)
1207     +static unsigned amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc)
1208     {
1209     if (bpc == 0)
1210     return 24;
1211     @@ -251,64 +251,32 @@ static int amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc)
1212     return bpc * 3;
1213     }
1214    
1215     -/* get the max pix clock supported by the link rate and lane num */
1216     -static int amdgpu_atombios_dp_get_max_dp_pix_clock(int link_rate,
1217     - int lane_num,
1218     - int bpp)
1219     -{
1220     - return (link_rate * lane_num * 8) / bpp;
1221     -}
1222     -
1223     /***** amdgpu specific DP functions *****/
1224    
1225     -/* First get the min lane# when low rate is used according to pixel clock
1226     - * (prefer low rate), second check max lane# supported by DP panel,
1227     - * if the max lane# < low rate lane# then use max lane# instead.
1228     - */
1229     -static int amdgpu_atombios_dp_get_dp_lane_number(struct drm_connector *connector,
1230     +static int amdgpu_atombios_dp_get_dp_link_config(struct drm_connector *connector,
1231     const u8 dpcd[DP_DPCD_SIZE],
1232     - int pix_clock)
1233     -{
1234     - int bpp = amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
1235     - int max_link_rate = drm_dp_max_link_rate(dpcd);
1236     - int max_lane_num = drm_dp_max_lane_count(dpcd);
1237     - int lane_num;
1238     - int max_dp_pix_clock;
1239     -
1240     - for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) {
1241     - max_dp_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp);
1242     - if (pix_clock <= max_dp_pix_clock)
1243     - break;
1244     - }
1245     -
1246     - return lane_num;
1247     -}
1248     -
1249     -static int amdgpu_atombios_dp_get_dp_link_clock(struct drm_connector *connector,
1250     - const u8 dpcd[DP_DPCD_SIZE],
1251     - int pix_clock)
1252     + unsigned pix_clock,
1253     + unsigned *dp_lanes, unsigned *dp_rate)
1254     {
1255     - int bpp = amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
1256     - int lane_num, max_pix_clock;
1257     -
1258     - if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) ==
1259     - ENCODER_OBJECT_ID_NUTMEG)
1260     - return 270000;
1261     -
1262     - lane_num = amdgpu_atombios_dp_get_dp_lane_number(connector, dpcd, pix_clock);
1263     - max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(162000, lane_num, bpp);
1264     - if (pix_clock <= max_pix_clock)
1265     - return 162000;
1266     - max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(270000, lane_num, bpp);
1267     - if (pix_clock <= max_pix_clock)
1268     - return 270000;
1269     - if (amdgpu_connector_is_dp12_capable(connector)) {
1270     - max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(540000, lane_num, bpp);
1271     - if (pix_clock <= max_pix_clock)
1272     - return 540000;
1273     + unsigned bpp =
1274     + amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
1275     + static const unsigned link_rates[3] = { 162000, 270000, 540000 };
1276     + unsigned max_link_rate = drm_dp_max_link_rate(dpcd);
1277     + unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
1278     + unsigned lane_num, i, max_pix_clock;
1279     +
1280     + for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
1281     + for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
1282     + max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
1283     + if (max_pix_clock >= pix_clock) {
1284     + *dp_lanes = lane_num;
1285     + *dp_rate = link_rates[i];
1286     + return 0;
1287     + }
1288     + }
1289     }
1290    
1291     - return drm_dp_max_link_rate(dpcd);
1292     + return -EINVAL;
1293     }
1294    
1295     static u8 amdgpu_atombios_dp_encoder_service(struct amdgpu_device *adev,
1296     @@ -422,6 +390,7 @@ void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector,
1297     {
1298     struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
1299     struct amdgpu_connector_atom_dig *dig_connector;
1300     + int ret;
1301    
1302     if (!amdgpu_connector->con_priv)
1303     return;
1304     @@ -429,10 +398,14 @@ void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector,
1305    
1306     if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
1307     (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
1308     - dig_connector->dp_clock =
1309     - amdgpu_atombios_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
1310     - dig_connector->dp_lane_count =
1311     - amdgpu_atombios_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock);
1312     + ret = amdgpu_atombios_dp_get_dp_link_config(connector, dig_connector->dpcd,
1313     + mode->clock,
1314     + &dig_connector->dp_lane_count,
1315     + &dig_connector->dp_clock);
1316     + if (ret) {
1317     + dig_connector->dp_clock = 0;
1318     + dig_connector->dp_lane_count = 0;
1319     + }
1320     }
1321     }
1322    
1323     @@ -441,14 +414,17 @@ int amdgpu_atombios_dp_mode_valid_helper(struct drm_connector *connector,
1324     {
1325     struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
1326     struct amdgpu_connector_atom_dig *dig_connector;
1327     - int dp_clock;
1328     + unsigned dp_lanes, dp_clock;
1329     + int ret;
1330    
1331     if (!amdgpu_connector->con_priv)
1332     return MODE_CLOCK_HIGH;
1333     dig_connector = amdgpu_connector->con_priv;
1334    
1335     - dp_clock =
1336     - amdgpu_atombios_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
1337     + ret = amdgpu_atombios_dp_get_dp_link_config(connector, dig_connector->dpcd,
1338     + mode->clock, &dp_lanes, &dp_clock);
1339     + if (ret)
1340     + return MODE_CLOCK_HIGH;
1341    
1342     if ((dp_clock == 540000) &&
1343     (!amdgpu_connector_is_dp12_capable(connector)))
1344     diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
1345     index 8035d4d6a4f5..653917a3bcc2 100644
1346     --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
1347     +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
1348     @@ -1955,10 +1955,8 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
1349     }
1350     } else { /*pi->caps_vce_pg*/
1351     cz_update_vce_dpm(adev);
1352     - cz_enable_vce_dpm(adev, true);
1353     + cz_enable_vce_dpm(adev, !gate);
1354     }
1355     -
1356     - return;
1357     }
1358    
1359     const struct amd_ip_funcs cz_dpm_ip_funcs = {
1360     diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
1361     index e5aec45bf985..1ac29d703c12 100644
1362     --- a/drivers/gpu/drm/drm_atomic_helper.c
1363     +++ b/drivers/gpu/drm/drm_atomic_helper.c
1364     @@ -108,7 +108,6 @@ steal_encoder(struct drm_atomic_state *state,
1365     struct drm_crtc_state *crtc_state;
1366     struct drm_connector *connector;
1367     struct drm_connector_state *connector_state;
1368     - int ret;
1369    
1370     /*
1371     * We can only steal an encoder coming from a connector, which means we
1372     @@ -139,9 +138,6 @@ steal_encoder(struct drm_atomic_state *state,
1373     if (IS_ERR(connector_state))
1374     return PTR_ERR(connector_state);
1375    
1376     - ret = drm_atomic_set_crtc_for_connector(connector_state, NULL);
1377     - if (ret)
1378     - return ret;
1379     connector_state->best_encoder = NULL;
1380     }
1381    
1382     diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
1383     index dc84003f694e..5e4bb4837bae 100644
1384     --- a/drivers/gpu/drm/drm_crtc.c
1385     +++ b/drivers/gpu/drm/drm_crtc.c
1386     @@ -5231,6 +5231,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
1387     unsigned long flags;
1388     int ret = -EINVAL;
1389    
1390     + if (!drm_core_check_feature(dev, DRIVER_MODESET))
1391     + return -EINVAL;
1392     +
1393     if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
1394     page_flip->reserved != 0)
1395     return -EINVAL;
1396     diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
1397     index c7de454e8e88..b205224f1a44 100644
1398     --- a/drivers/gpu/drm/drm_gem.c
1399     +++ b/drivers/gpu/drm/drm_gem.c
1400     @@ -338,27 +338,32 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
1401     spin_unlock(&file_priv->table_lock);
1402     idr_preload_end();
1403     mutex_unlock(&dev->object_name_lock);
1404     - if (ret < 0) {
1405     - drm_gem_object_handle_unreference_unlocked(obj);
1406     - return ret;
1407     - }
1408     + if (ret < 0)
1409     + goto err_unref;
1410     +
1411     *handlep = ret;
1412    
1413     ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
1414     - if (ret) {
1415     - drm_gem_handle_delete(file_priv, *handlep);
1416     - return ret;
1417     - }
1418     + if (ret)
1419     + goto err_remove;
1420    
1421     if (dev->driver->gem_open_object) {
1422     ret = dev->driver->gem_open_object(obj, file_priv);
1423     - if (ret) {
1424     - drm_gem_handle_delete(file_priv, *handlep);
1425     - return ret;
1426     - }
1427     + if (ret)
1428     + goto err_revoke;
1429     }
1430    
1431     return 0;
1432     +
1433     +err_revoke:
1434     + drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
1435     +err_remove:
1436     + spin_lock(&file_priv->table_lock);
1437     + idr_remove(&file_priv->object_idr, *handlep);
1438     + spin_unlock(&file_priv->table_lock);
1439     +err_unref:
1440     + drm_gem_object_handle_unreference_unlocked(obj);
1441     + return ret;
1442     }
1443    
1444     /**
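
The rework above replaces repeated inline cleanup with the kernel's cascading error-label idiom: each failure point jumps to the label that undoes exactly what has been acquired so far, in reverse order of acquisition. A compilable toy sketch of the pattern (acquire_a, acquire_b and release_a are hypothetical stand-ins, not drm functions):

    #include <errno.h>
    #include <stdio.h>

    static int acquire_a(void) { puts("acquire a"); return 0; }
    static int acquire_b(void) { puts("acquire b"); return -ENOMEM; }
    static void release_a(void) { puts("release a"); }

    static int setup(void)
    {
            int ret;

            ret = acquire_a();
            if (ret)
                    goto err;
            ret = acquire_b();
            if (ret)
                    goto err_release_a;
            return 0;

    err_release_a:
            release_a();    /* unwind in reverse order of acquisition */
    err:
            return ret;
    }

    int main(void)
    {
            return setup() ? 1 : 0;
    }
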
1445     diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
1446     index d3ce4da6a6ad..d400d6773bbb 100644
1447     --- a/drivers/gpu/drm/i915/i915_drv.h
1448     +++ b/drivers/gpu/drm/i915/i915_drv.h
1449     @@ -3313,6 +3313,9 @@ static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
1450     }
1451     extern void intel_i2c_reset(struct drm_device *dev);
1452    
1453     +/* intel_bios.c */
1454     +bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
1455     +
1456     /* intel_opregion.c */
1457     #ifdef CONFIG_ACPI
1458     extern int intel_opregion_setup(struct drm_device *dev);
1459     diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
1460     index 9ed9f6dde86f..cace154bbdc0 100644
1461     --- a/drivers/gpu/drm/i915/i915_reg.h
1462     +++ b/drivers/gpu/drm/i915/i915_reg.h
1463     @@ -3240,19 +3240,20 @@ enum skl_disp_power_wells {
1464    
1465     #define PORT_HOTPLUG_STAT (dev_priv->info.display_mmio_offset + 0x61114)
1466     /*
1467     - * HDMI/DP bits are gen4+
1468     + * HDMI/DP bits are g4x+
1469     *
1470     * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused.
1471     * Please check the detailed lore in the commit message for experimental
1472     * evidence.
1473     */
1474     -#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
1475     +/* Bspec says GM45 should match G4X/VLV/CHV, but reality disagrees */
1476     +#define PORTD_HOTPLUG_LIVE_STATUS_GM45 (1 << 29)
1477     +#define PORTC_HOTPLUG_LIVE_STATUS_GM45 (1 << 28)
1478     +#define PORTB_HOTPLUG_LIVE_STATUS_GM45 (1 << 27)
1479     +/* G4X/VLV/CHV DP/HDMI bits again match Bspec */
1480     +#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
1481     #define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28)
1482     -#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
1483     -/* VLV DP/HDMI bits again match Bspec */
1484     -#define PORTD_HOTPLUG_LIVE_STATUS_VLV (1 << 27)
1485     -#define PORTC_HOTPLUG_LIVE_STATUS_VLV (1 << 28)
1486     -#define PORTB_HOTPLUG_LIVE_STATUS_VLV (1 << 29)
1487     +#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
1488     #define PORTD_HOTPLUG_INT_STATUS (3 << 21)
1489     #define PORTD_HOTPLUG_INT_LONG_PULSE (2 << 21)
1490     #define PORTD_HOTPLUG_INT_SHORT_PULSE (1 << 21)
1491     diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
1492     index ce82f9c7df24..d14bdc537587 100644
1493     --- a/drivers/gpu/drm/i915/intel_bios.c
1494     +++ b/drivers/gpu/drm/i915/intel_bios.c
1495     @@ -1351,3 +1351,42 @@ intel_parse_bios(struct drm_device *dev)
1496    
1497     return 0;
1498     }
1499     +
1500     +/**
1501     + * intel_bios_is_port_present - is the specified digital port present
1502     + * @dev_priv: i915 device instance
1503     + * @port: port to check
1504     + *
1505     + * Return true if the device in %port is present.
1506     + */
1507     +bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port)
1508     +{
1509     + static const struct {
1510     + u16 dp, hdmi;
1511     + } port_mapping[] = {
1512     + [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, },
1513     + [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, },
1514     + [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
1515     + [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
1516     + };
1517     + int i;
1518     +
1519     + /* FIXME maybe deal with port A as well? */
1520     + if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
1521     + return false;
1522     +
1523     + if (!dev_priv->vbt.child_dev_num)
1524     + return false;
1525     +
1526     + for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
1527     + const union child_device_config *p_child =
1528     + &dev_priv->vbt.child_dev[i];
1529     + if ((p_child->common.dvo_port == port_mapping[port].dp ||
1530     + p_child->common.dvo_port == port_mapping[port].hdmi) &&
1531     + (p_child->common.device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING |
1532     + DEVICE_TYPE_DISPLAYPORT_OUTPUT)))
1533     + return true;
1534     + }
1535     +
1536     + return false;
1537     +}
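
intel_bios_is_port_present() leans on a small C idiom: a designated-initializer table indexed directly by the enum, with unused slots left zeroed and a bounds check before the lookup. A self-contained sketch of that idiom (the DVO codes are made-up placeholders, not the VBT values):

#include <stdio.h>
#include <stdbool.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E };

static bool port_present(enum port port, unsigned short dvo_port)
{
	static const struct { unsigned short dp, hdmi; } map[] = {
		[PORT_B] = { 10, 1 },	/* PORT_A left zeroed on purpose */
		[PORT_C] = { 11, 2 },
		[PORT_D] = { 12, 3 },
		[PORT_E] = { 13, 4 },
	};

	if (port == PORT_A || port >= ARRAY_SIZE(map))
		return false;		/* bounds check before indexing */

	return dvo_port == map[port].dp || dvo_port == map[port].hdmi;
}

int main(void)
{
	printf("%d\n", port_present(PORT_C, 11));	/* prints 1 */
	return 0;
}
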
1538     diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
1539     index 3292495ee10f..a3254c3bcc7c 100644
1540     --- a/drivers/gpu/drm/i915/intel_display.c
1541     +++ b/drivers/gpu/drm/i915/intel_display.c
1542     @@ -14160,6 +14160,8 @@ static void intel_setup_outputs(struct drm_device *dev)
1543     if (I915_READ(PCH_DP_D) & DP_DETECTED)
1544     intel_dp_init(dev, PCH_DP_D, PORT_D);
1545     } else if (IS_VALLEYVIEW(dev)) {
1546     + bool has_edp, has_port;
1547     +
1548     /*
1549     * The DP_DETECTED bit is the latched state of the DDC
1550     * SDA pin at boot. However since eDP doesn't require DDC
1551     @@ -14168,27 +14170,37 @@ static void intel_setup_outputs(struct drm_device *dev)
1552     * Thus we can't rely on the DP_DETECTED bit alone to detect
1553     * eDP ports. Consult the VBT as well as DP_DETECTED to
1554     * detect eDP ports.
1555     + *
1556     + * Sadly the straps seem to be missing sometimes even for HDMI
1557     + * ports (e.g. on Voyo V3 - CHT x7-Z8700), so check both the strap
1558     + * and the VBT for the presence of the port. Additionally we can't
1559     + * trust the port type the VBT declares, as we've seen at least
1560     + * HDMI ports that the VBT claims are DP or eDP.
1561     */
1562     - if (I915_READ(VLV_HDMIB) & SDVO_DETECTED &&
1563     - !intel_dp_is_edp(dev, PORT_B))
1564     + has_edp = intel_dp_is_edp(dev, PORT_B);
1565     + has_port = intel_bios_is_port_present(dev_priv, PORT_B);
1566     + if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
1567     + has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
1568     + if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
1569     intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
1570     - if (I915_READ(VLV_DP_B) & DP_DETECTED ||
1571     - intel_dp_is_edp(dev, PORT_B))
1572     - intel_dp_init(dev, VLV_DP_B, PORT_B);
1573    
1574     - if (I915_READ(VLV_HDMIC) & SDVO_DETECTED &&
1575     - !intel_dp_is_edp(dev, PORT_C))
1576     + has_edp = intel_dp_is_edp(dev, PORT_C);
1577     + has_port = intel_bios_is_port_present(dev_priv, PORT_C);
1578     + if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
1579     + has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
1580     + if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
1581     intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
1582     - if (I915_READ(VLV_DP_C) & DP_DETECTED ||
1583     - intel_dp_is_edp(dev, PORT_C))
1584     - intel_dp_init(dev, VLV_DP_C, PORT_C);
1585    
1586     if (IS_CHERRYVIEW(dev)) {
1587     - /* eDP not supported on port D, so don't check VBT */
1588     - if (I915_READ(CHV_HDMID) & SDVO_DETECTED)
1589     - intel_hdmi_init(dev, CHV_HDMID, PORT_D);
1590     - if (I915_READ(CHV_DP_D) & DP_DETECTED)
1591     + /*
1592     + * eDP not supported on port D,
1593     + * so no need to worry about it
1594     + */
1595     + has_port = intel_bios_is_port_present(dev_priv, PORT_D);
1596     + if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
1597     intel_dp_init(dev, CHV_DP_D, PORT_D);
1598     + if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
1599     + intel_hdmi_init(dev, CHV_HDMID, PORT_D);
1600     }
1601    
1602     intel_dsi_init(dev);
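
The intel_display.c rework boils down to one decision per port: probe DP when either the strap or the VBT says the port exists, and register HDMI only if no eDP panel actually came up. A compressed sketch of that decision, with plain booleans standing in for the register reads and VBT queries:

#include <stdio.h>
#include <stdbool.h>

static void probe_port(bool dp_strap, bool hdmi_strap,
		       bool vbt_has_port, bool vbt_says_edp,
		       bool dp_init_ok)
{
	bool has_edp = vbt_says_edp;

	if (dp_strap || vbt_has_port)
		has_edp &= dp_init_ok;	/* eDP stands only if DP init succeeded */

	if ((hdmi_strap || vbt_has_port) && !has_edp)
		printf("register HDMI encoder\n");	/* stand-in for intel_hdmi_init() */
}

int main(void)
{
	probe_port(true, true, true, true, false);	/* VBT lied: falls back to HDMI */
	return 0;
}
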
1603     diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
1604     index 8e1d6d74c203..ebbd23407a80 100644
1605     --- a/drivers/gpu/drm/i915/intel_dp.c
1606     +++ b/drivers/gpu/drm/i915/intel_dp.c
1607     @@ -4592,20 +4592,20 @@ static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
1608     return I915_READ(PORT_HOTPLUG_STAT) & bit;
1609     }
1610    
1611     -static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
1612     - struct intel_digital_port *port)
1613     +static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
1614     + struct intel_digital_port *port)
1615     {
1616     u32 bit;
1617    
1618     switch (port->port) {
1619     case PORT_B:
1620     - bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
1621     + bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
1622     break;
1623     case PORT_C:
1624     - bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
1625     + bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
1626     break;
1627     case PORT_D:
1628     - bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
1629     + bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
1630     break;
1631     default:
1632     MISSING_CASE(port->port);
1633     @@ -4657,8 +4657,8 @@ bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
1634     return cpt_digital_port_connected(dev_priv, port);
1635     else if (IS_BROXTON(dev_priv))
1636     return bxt_digital_port_connected(dev_priv, port);
1637     - else if (IS_VALLEYVIEW(dev_priv))
1638     - return vlv_digital_port_connected(dev_priv, port);
1639     + else if (IS_GM45(dev_priv))
1640     + return gm45_digital_port_connected(dev_priv, port);
1641     else
1642     return g4x_digital_port_connected(dev_priv, port);
1643     }
1644     @@ -6113,8 +6113,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
1645     return true;
1646     }
1647    
1648     -void
1649     -intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
1650     +bool intel_dp_init(struct drm_device *dev,
1651     + int output_reg,
1652     + enum port port)
1653     {
1654     struct drm_i915_private *dev_priv = dev->dev_private;
1655     struct intel_digital_port *intel_dig_port;
1656     @@ -6124,7 +6125,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
1657    
1658     intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
1659     if (!intel_dig_port)
1660     - return;
1661     + return false;
1662    
1663     intel_connector = intel_connector_alloc();
1664     if (!intel_connector)
1665     @@ -6179,15 +6180,14 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
1666     if (!intel_dp_init_connector(intel_dig_port, intel_connector))
1667     goto err_init_connector;
1668    
1669     - return;
1670     + return true;
1671    
1672     err_init_connector:
1673     drm_encoder_cleanup(encoder);
1674     kfree(intel_connector);
1675     err_connector_alloc:
1676     kfree(intel_dig_port);
1677     -
1678     - return;
1679     + return false;
1680     }
1681    
1682     void intel_dp_mst_suspend(struct drm_device *dev)
1683     diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
1684     index c5f11e0c5d5b..67f72a7ee7cb 100644
1685     --- a/drivers/gpu/drm/i915/intel_drv.h
1686     +++ b/drivers/gpu/drm/i915/intel_drv.h
1687     @@ -1195,7 +1195,7 @@ void intel_csr_ucode_fini(struct drm_device *dev);
1688     void assert_csr_loaded(struct drm_i915_private *dev_priv);
1689    
1690     /* intel_dp.c */
1691     -void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
1692     +bool intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
1693     bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
1694     struct intel_connector *intel_connector);
1695     void intel_dp_set_link_params(struct intel_dp *intel_dp,
1696     diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
1697     index 4b8ed9f2dabc..dff69fef47e0 100644
1698     --- a/drivers/gpu/drm/i915/intel_hdmi.c
1699     +++ b/drivers/gpu/drm/i915/intel_hdmi.c
1700     @@ -2030,6 +2030,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
1701     enum port port = intel_dig_port->port;
1702     uint8_t alternate_ddc_pin;
1703    
1704     + DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
1705     + port_name(port));
1706     +
1707     drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
1708     DRM_MODE_CONNECTOR_HDMIA);
1709     drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
1710     diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
1711     index 6d7cd3fe21e7..1847f83b1e33 100644
1712     --- a/drivers/gpu/drm/msm/msm_gem_submit.c
1713     +++ b/drivers/gpu/drm/msm/msm_gem_submit.c
1714     @@ -55,6 +55,14 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
1715     return submit;
1716     }
1717    
1718     +static inline unsigned long __must_check
1719     +copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
1720     +{
1721     + if (access_ok(VERIFY_READ, from, n))
1722     + return __copy_from_user_inatomic(to, from, n);
1723     + return -EFAULT;
1724     +}
1725     +
1726     static int submit_lookup_objects(struct msm_gem_submit *submit,
1727     struct drm_msm_gem_submit *args, struct drm_file *file)
1728     {
1729     @@ -62,6 +70,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
1730     int ret = 0;
1731    
1732     spin_lock(&file->table_lock);
1733     + pagefault_disable();
1734    
1735     for (i = 0; i < args->nr_bos; i++) {
1736     struct drm_msm_gem_submit_bo submit_bo;
1737     @@ -70,10 +79,15 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
1738     void __user *userptr =
1739     to_user_ptr(args->bos + (i * sizeof(submit_bo)));
1740    
1741     - ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
1742     - if (ret) {
1743     - ret = -EFAULT;
1744     - goto out_unlock;
1745     + ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo));
1746     + if (unlikely(ret)) {
1747     + pagefault_enable();
1748     + spin_unlock(&file->table_lock);
1749     + ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
1750     + if (ret)
1751     + goto out;
1752     + spin_lock(&file->table_lock);
1753     + pagefault_disable();
1754     }
1755    
1756     if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
1757     @@ -113,9 +127,12 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
1758     }
1759    
1760     out_unlock:
1761     - submit->nr_bos = i;
1762     + pagefault_enable();
1763     spin_unlock(&file->table_lock);
1764    
1765     +out:
1766     + submit->nr_bos = i;
1767     +
1768     return ret;
1769     }
1770    
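
The msm change keeps the copy inside the spinlocked, pagefault-disabled section on the fast path and falls back to a sleeping copy_from_user() only when the page is not resident. A hedged kernel-style sketch of the pattern (the 4.4 code additionally checks access_ok() before the atomic copy; the function and parameter names below are illustrative):

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

static int copy_locked(spinlock_t *table_lock, void *dst,
		       const void __user *src, unsigned long len)
{
	spin_lock(table_lock);
	pagefault_disable();

	if (unlikely(__copy_from_user_inatomic(dst, src, len))) {
		/* the fault would sleep: retry outside the critical section */
		pagefault_enable();
		spin_unlock(table_lock);

		if (copy_from_user(dst, src, len))
			return -EFAULT;	/* genuinely bad user pointer */

		spin_lock(table_lock);
		pagefault_disable();
	}

	/* caller continues under the lock and must eventually do
	 * pagefault_enable() + spin_unlock(), as in the patch */
	return 0;
}
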
1771     diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
1772     index bd73b4069069..44ee72e04df9 100644
1773     --- a/drivers/gpu/drm/radeon/atombios_dp.c
1774     +++ b/drivers/gpu/drm/radeon/atombios_dp.c
1775     @@ -302,77 +302,31 @@ static int convert_bpc_to_bpp(int bpc)
1776     return bpc * 3;
1777     }
1778    
1779     -/* get the max pix clock supported by the link rate and lane num */
1780     -static int dp_get_max_dp_pix_clock(int link_rate,
1781     - int lane_num,
1782     - int bpp)
1783     -{
1784     - return (link_rate * lane_num * 8) / bpp;
1785     -}
1786     -
1787     /***** radeon specific DP functions *****/
1788    
1789     -int radeon_dp_get_max_link_rate(struct drm_connector *connector,
1790     - const u8 dpcd[DP_DPCD_SIZE])
1791     -{
1792     - int max_link_rate;
1793     -
1794     - if (radeon_connector_is_dp12_capable(connector))
1795     - max_link_rate = min(drm_dp_max_link_rate(dpcd), 540000);
1796     - else
1797     - max_link_rate = min(drm_dp_max_link_rate(dpcd), 270000);
1798     -
1799     - return max_link_rate;
1800     -}
1801     -
1802     -/* First get the min lane# when low rate is used according to pixel clock
1803     - * (prefer low rate), second check max lane# supported by DP panel,
1804     - * if the max lane# < low rate lane# then use max lane# instead.
1805     - */
1806     -static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
1807     - const u8 dpcd[DP_DPCD_SIZE],
1808     - int pix_clock)
1809     -{
1810     - int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
1811     - int max_link_rate = radeon_dp_get_max_link_rate(connector, dpcd);
1812     - int max_lane_num = drm_dp_max_lane_count(dpcd);
1813     - int lane_num;
1814     - int max_dp_pix_clock;
1815     -
1816     - for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) {
1817     - max_dp_pix_clock = dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp);
1818     - if (pix_clock <= max_dp_pix_clock)
1819     - break;
1820     - }
1821     -
1822     - return lane_num;
1823     -}
1824     -
1825     -static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
1826     - const u8 dpcd[DP_DPCD_SIZE],
1827     - int pix_clock)
1828     +int radeon_dp_get_dp_link_config(struct drm_connector *connector,
1829     + const u8 dpcd[DP_DPCD_SIZE],
1830     + unsigned pix_clock,
1831     + unsigned *dp_lanes, unsigned *dp_rate)
1832     {
1833     int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
1834     - int lane_num, max_pix_clock;
1835     -
1836     - if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
1837     - ENCODER_OBJECT_ID_NUTMEG)
1838     - return 270000;
1839     -
1840     - lane_num = radeon_dp_get_dp_lane_number(connector, dpcd, pix_clock);
1841     - max_pix_clock = dp_get_max_dp_pix_clock(162000, lane_num, bpp);
1842     - if (pix_clock <= max_pix_clock)
1843     - return 162000;
1844     - max_pix_clock = dp_get_max_dp_pix_clock(270000, lane_num, bpp);
1845     - if (pix_clock <= max_pix_clock)
1846     - return 270000;
1847     - if (radeon_connector_is_dp12_capable(connector)) {
1848     - max_pix_clock = dp_get_max_dp_pix_clock(540000, lane_num, bpp);
1849     - if (pix_clock <= max_pix_clock)
1850     - return 540000;
1851     + static const unsigned link_rates[3] = { 162000, 270000, 540000 };
1852     + unsigned max_link_rate = drm_dp_max_link_rate(dpcd);
1853     + unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
1854     + unsigned lane_num, i, max_pix_clock;
1855     +
1856     + for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
1857     + for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
1858     + max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
1859     + if (max_pix_clock >= pix_clock) {
1860     + *dp_lanes = lane_num;
1861     + *dp_rate = link_rates[i];
1862     + return 0;
1863     + }
1864     + }
1865     }
1866    
1867     - return radeon_dp_get_max_link_rate(connector, dpcd);
1868     + return -EINVAL;
1869     }
1870    
1871     static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
1872     @@ -491,6 +445,7 @@ void radeon_dp_set_link_config(struct drm_connector *connector,
1873     {
1874     struct radeon_connector *radeon_connector = to_radeon_connector(connector);
1875     struct radeon_connector_atom_dig *dig_connector;
1876     + int ret;
1877    
1878     if (!radeon_connector->con_priv)
1879     return;
1880     @@ -498,10 +453,14 @@ void radeon_dp_set_link_config(struct drm_connector *connector,
1881    
1882     if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
1883     (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
1884     - dig_connector->dp_clock =
1885     - radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
1886     - dig_connector->dp_lane_count =
1887     - radeon_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock);
1888     + ret = radeon_dp_get_dp_link_config(connector, dig_connector->dpcd,
1889     + mode->clock,
1890     + &dig_connector->dp_lane_count,
1891     + &dig_connector->dp_clock);
1892     + if (ret) {
1893     + dig_connector->dp_clock = 0;
1894     + dig_connector->dp_lane_count = 0;
1895     + }
1896     }
1897     }
1898    
1899     @@ -510,7 +469,8 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector,
1900     {
1901     struct radeon_connector *radeon_connector = to_radeon_connector(connector);
1902     struct radeon_connector_atom_dig *dig_connector;
1903     - int dp_clock;
1904     + unsigned dp_clock, dp_lanes;
1905     + int ret;
1906    
1907     if ((mode->clock > 340000) &&
1908     (!radeon_connector_is_dp12_capable(connector)))
1909     @@ -520,8 +480,12 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector,
1910     return MODE_CLOCK_HIGH;
1911     dig_connector = radeon_connector->con_priv;
1912    
1913     - dp_clock =
1914     - radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
1915     + ret = radeon_dp_get_dp_link_config(connector, dig_connector->dpcd,
1916     + mode->clock,
1917     + &dp_lanes,
1918     + &dp_clock);
1919     + if (ret)
1920     + return MODE_CLOCK_HIGH;
1921    
1922     if ((dp_clock == 540000) &&
1923     (!radeon_connector_is_dp12_capable(connector)))
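
radeon_dp_get_dp_link_config() now searches lane counts and the three standard DP link rates in increasing order and takes the first combination with enough bandwidth: 162000/270000/540000 are the per-lane symbol clocks in kHz, and each symbol carries 8 data bits after 8b/10b coding, hence the *8/bpp term. A runnable sketch of the same search:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static int pick_link_config(unsigned pix_clock, unsigned bpp,
			    unsigned max_lanes, unsigned max_rate,
			    unsigned *lanes, unsigned *rate)
{
	static const unsigned link_rates[] = { 162000, 270000, 540000 };
	unsigned lane_num, i;

	for (lane_num = 1; lane_num <= max_lanes; lane_num <<= 1) {
		for (i = 0; i < ARRAY_SIZE(link_rates) &&
			    link_rates[i] <= max_rate; i++) {
			/* bandwidth in kHz of pixel clock this combo can carry */
			if ((lane_num * link_rates[i] * 8) / bpp >= pix_clock) {
				*lanes = lane_num;
				*rate = link_rates[i];
				return 0;
			}
		}
	}
	return -1;	/* mode needs more bandwidth than the sink offers */
}

int main(void)
{
	unsigned lanes, rate;
	if (!pick_link_config(148500, 24, 4, 270000, &lanes, &rate))
		printf("%u lanes @ %u\n", lanes, rate);	/* 2 lanes @ 270000 */
	return 0;
}
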
1924     diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
1925     index 744f5c49c664..6dd39bdedb97 100644
1926     --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
1927     +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
1928     @@ -525,11 +525,9 @@ static bool radeon_mst_mode_fixup(struct drm_encoder *encoder,
1929     drm_mode_set_crtcinfo(adjusted_mode, 0);
1930     {
1931     struct radeon_connector_atom_dig *dig_connector;
1932     -
1933     dig_connector = mst_enc->connector->con_priv;
1934     dig_connector->dp_lane_count = drm_dp_max_lane_count(dig_connector->dpcd);
1935     - dig_connector->dp_clock = radeon_dp_get_max_link_rate(&mst_enc->connector->base,
1936     - dig_connector->dpcd);
1937     + dig_connector->dp_clock = drm_dp_max_link_rate(dig_connector->dpcd);
1938     DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector,
1939     dig_connector->dp_lane_count, dig_connector->dp_clock);
1940     }
1941     diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
1942     index bba112628b47..7a0666ac4e23 100644
1943     --- a/drivers/gpu/drm/radeon/radeon_mode.h
1944     +++ b/drivers/gpu/drm/radeon/radeon_mode.h
1945     @@ -757,8 +757,10 @@ extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
1946     extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
1947     extern int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
1948     struct drm_connector *connector);
1949     -int radeon_dp_get_max_link_rate(struct drm_connector *connector,
1950     - const u8 *dpcd);
1951     +extern int radeon_dp_get_dp_link_config(struct drm_connector *connector,
1952     + const u8 *dpcd,
1953     + unsigned pix_clock,
1954     + unsigned *dp_lanes, unsigned *dp_rate);
1955     extern void radeon_dp_set_rx_power_state(struct drm_connector *connector,
1956     u8 power_state);
1957     extern void radeon_dp_aux_init(struct radeon_connector *radeon_connector);
1958     diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
1959     index f342aad79cc6..35310336dd0a 100644
1960     --- a/drivers/gpu/drm/radeon/radeon_ttm.c
1961     +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
1962     @@ -263,8 +263,8 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
1963    
1964     rdev = radeon_get_rdev(bo->bdev);
1965     ridx = radeon_copy_ring_index(rdev);
1966     - old_start = old_mem->start << PAGE_SHIFT;
1967     - new_start = new_mem->start << PAGE_SHIFT;
1968     + old_start = (u64)old_mem->start << PAGE_SHIFT;
1969     + new_start = (u64)new_mem->start << PAGE_SHIFT;
1970    
1971     switch (old_mem->mem_type) {
1972     case TTM_PL_VRAM:
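
The radeon_ttm one-liner fixes a classic 32-bit truncation: mem->start is a page index, and shifting it by PAGE_SHIFT before widening wraps on boards with more than 4 GiB of addressable memory. A small program demonstrating why the cast must be on the operand, not the result:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint32_t page = 0x00200000;	/* page index past the 4 GiB byte mark */

	uint64_t wrong = (uint64_t)(page << PAGE_SHIFT);	/* shift in 32 bits: wraps to 0 */
	uint64_t right = (uint64_t)page << PAGE_SHIFT;		/* widen first: 0x200000000 */

	printf("%llx vs %llx\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}
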
1973     diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
1974     index ec791e169f8f..936960202cf4 100644
1975     --- a/drivers/hid/hid-core.c
1976     +++ b/drivers/hid/hid-core.c
1977     @@ -1251,6 +1251,7 @@ static void hid_input_field(struct hid_device *hid, struct hid_field *field,
1978     /* Ignore report if ErrorRollOver */
1979     if (!(field->flags & HID_MAIN_ITEM_VARIABLE) &&
1980     value[n] >= min && value[n] <= max &&
1981     + value[n] - min < field->maxusage &&
1982     field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1)
1983     goto exit;
1984     }
1985     @@ -1263,11 +1264,13 @@ static void hid_input_field(struct hid_device *hid, struct hid_field *field,
1986     }
1987    
1988     if (field->value[n] >= min && field->value[n] <= max
1989     + && field->value[n] - min < field->maxusage
1990     && field->usage[field->value[n] - min].hid
1991     && search(value, field->value[n], count))
1992     hid_process_event(hid, field, &field->usage[field->value[n] - min], 0, interrupt);
1993    
1994     if (value[n] >= min && value[n] <= max
1995     + && value[n] - min < field->maxusage
1996     && field->usage[value[n] - min].hid
1997     && search(field->value, value[n], count))
1998     hid_process_event(hid, field, &field->usage[value[n] - min], 1, interrupt);
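
The hid-core checks guard against a device whose declared logical range [min, max] is wider than its usage table, which would otherwise index past field->usage[]. A trimmed, runnable sketch of the guarded lookup (the struct is reduced to the essentials, not the kernel's hid_field):

#include <stdio.h>

struct field {
	int min, max;
	unsigned maxusage;
	unsigned usage[4];
};

static int lookup_usage(const struct field *f, int value, unsigned *out)
{
	if (value < f->min || value > f->max)
		return -1;	/* outside the declared range */
	if ((unsigned)(value - f->min) >= f->maxusage)
		return -1;	/* range lies: would index past usage[] */
	*out = f->usage[value - f->min];
	return 0;
}

int main(void)
{
	struct field f = { .min = 0, .max = 100, .maxusage = 4,
			   .usage = { 7, 8, 9, 10 } };
	unsigned hid;
	printf("%d\n", lookup_usage(&f, 2, &hid) == 0 && hid == 9);	/* 1 */
	printf("%d\n", lookup_usage(&f, 50, &hid));			/* -1, blocked */
	return 0;
}
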
1999     diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
2000     index 9098f13f2f44..1ef37c727572 100644
2001     --- a/drivers/hv/channel.c
2002     +++ b/drivers/hv/channel.c
2003     @@ -28,6 +28,7 @@
2004     #include <linux/module.h>
2005     #include <linux/hyperv.h>
2006     #include <linux/uio.h>
2007     +#include <linux/interrupt.h>
2008    
2009     #include "hyperv_vmbus.h"
2010    
2011     @@ -496,8 +497,21 @@ static void reset_channel_cb(void *arg)
2012     static int vmbus_close_internal(struct vmbus_channel *channel)
2013     {
2014     struct vmbus_channel_close_channel *msg;
2015     + struct tasklet_struct *tasklet;
2016     int ret;
2017    
2018     + /*
2019     + * process_chn_event(), running in the tasklet, can race
2020     + * with vmbus_close_internal() in the case of an SMP guest: while
2021     + * the former is accessing channel->inbound.ring_buffer, the latter
2022     + * could be freeing the ring_buffer pages.
2023     + *
2024     + * To resolve the race, serialize the two by disabling the tasklet
2025     + * while vmbus_close_internal() runs here.
2026     + */
2027     + tasklet = hv_context.event_dpc[channel->target_cpu];
2028     + tasklet_disable(tasklet);
2029     +
2030     channel->state = CHANNEL_OPEN_STATE;
2031     channel->sc_creation_callback = NULL;
2032     /* Stop callback and cancel the timer asap */
2033     @@ -525,7 +539,7 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
2034     * If we failed to post the close msg,
2035     * it is perhaps better to leak memory.
2036     */
2037     - return ret;
2038     + goto out;
2039     }
2040    
2041     /* Tear down the gpadl for the channel's ring buffer */
2042     @@ -538,7 +552,7 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
2043     * If we failed to teardown gpadl,
2044     * it is perhaps better to leak memory.
2045     */
2046     - return ret;
2047     + goto out;
2048     }
2049     }
2050    
2051     @@ -549,12 +563,9 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
2052     free_pages((unsigned long)channel->ringbuffer_pages,
2053     get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
2054    
2055     - /*
2056     - * If the channel has been rescinded; process device removal.
2057     - */
2058     - if (channel->rescind)
2059     - hv_process_channel_removal(channel,
2060     - channel->offermsg.child_relid);
2061     +out:
2062     + tasklet_enable(tasklet);
2063     +
2064     return ret;
2065     }
2066    
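
The vmbus_close_internal() fix relies on tasklet_disable() both waiting for a running tasklet to finish and keeping it from running again until tasklet_enable(). A hedged sketch of that serialization shape (the function names are illustrative, not the vmbus code):

#include <linux/interrupt.h>

static void close_channel(struct tasklet_struct *event_dpc,
			  void (*teardown)(void))
{
	tasklet_disable(event_dpc);	/* DPC cannot touch the ring from here on */
	teardown();			/* send close msg, free ring-buffer pages, ... */
	tasklet_enable(event_dpc);	/* let the DPC run again, channel now closed */
}
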
2067     diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
2068     index 652afd11a9ef..37238dffd947 100644
2069     --- a/drivers/hv/channel_mgmt.c
2070     +++ b/drivers/hv/channel_mgmt.c
2071     @@ -28,6 +28,7 @@
2072     #include <linux/list.h>
2073     #include <linux/module.h>
2074     #include <linux/completion.h>
2075     +#include <linux/delay.h>
2076     #include <linux/hyperv.h>
2077    
2078     #include "hyperv_vmbus.h"
2079     @@ -191,6 +192,8 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
2080     if (channel == NULL)
2081     return;
2082    
2083     + BUG_ON(!channel->rescind);
2084     +
2085     if (channel->target_cpu != get_cpu()) {
2086     put_cpu();
2087     smp_call_function_single(channel->target_cpu,
2088     @@ -230,9 +233,7 @@ void vmbus_free_channels(void)
2089    
2090     list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
2091     listentry) {
2092     - /* if we don't set rescind to true, vmbus_close_internal()
2093     - * won't invoke hv_process_channel_removal().
2094     - */
2095     + /* hv_process_channel_removal() needs this */
2096     channel->rescind = true;
2097    
2098     vmbus_device_unregister(channel->device_obj);
2099     @@ -459,6 +460,17 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui
2100     cpumask_of_node(primary->numa_node));
2101    
2102     cur_cpu = -1;
2103     +
2104     + /*
2105     + * Normally the Hyper-V host doesn't create more subchannels than
2106     + * there are VCPUs on the node, but it is possible when not all the
2107     + * present VCPUs on the node are initialized by the guest. Clear the
2108     + * alloced_cpus_in_node to start over.
2109     + */
2110     + if (cpumask_equal(&primary->alloced_cpus_in_node,
2111     + cpumask_of_node(primary->numa_node)))
2112     + cpumask_clear(&primary->alloced_cpus_in_node);
2113     +
2114     while (true) {
2115     cur_cpu = cpumask_next(cur_cpu, &available_mask);
2116     if (cur_cpu >= nr_cpu_ids) {
2117     @@ -488,6 +500,40 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui
2118     channel->target_vp = hv_context.vp_index[cur_cpu];
2119     }
2120    
2121     +static void vmbus_wait_for_unload(void)
2122     +{
2123     + int cpu = smp_processor_id();
2124     + void *page_addr = hv_context.synic_message_page[cpu];
2125     + struct hv_message *msg = (struct hv_message *)page_addr +
2126     + VMBUS_MESSAGE_SINT;
2127     + struct vmbus_channel_message_header *hdr;
2128     + bool unloaded = false;
2129     +
2130     + while (1) {
2131     + if (msg->header.message_type == HVMSG_NONE) {
2132     + mdelay(10);
2133     + continue;
2134     + }
2135     +
2136     + hdr = (struct vmbus_channel_message_header *)msg->u.payload;
2137     + if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE)
2138     + unloaded = true;
2139     +
2140     + msg->header.message_type = HVMSG_NONE;
2141     + /*
2142     + * header.message_type needs to be written before we do
2143     + * wrmsrl() below.
2144     + */
2145     + mb();
2146     +
2147     + if (msg->header.message_flags.msg_pending)
2148     + wrmsrl(HV_X64_MSR_EOM, 0);
2149     +
2150     + if (unloaded)
2151     + break;
2152     + }
2153     +}
2154     +
2155     /*
2156     * vmbus_unload_response - Handler for the unload response.
2157     */
2158     @@ -513,7 +559,14 @@ void vmbus_initiate_unload(void)
2159     hdr.msgtype = CHANNELMSG_UNLOAD;
2160     vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header));
2161    
2162     - wait_for_completion(&vmbus_connection.unload_event);
2163     + /*
2164     + * vmbus_initiate_unload() is also called on crash and the crash can be
2165     + * happening in an interrupt context, where scheduling is impossible.
2166     + */
2167     + if (!in_interrupt())
2168     + wait_for_completion(&vmbus_connection.unload_event);
2169     + else
2170     + vmbus_wait_for_unload();
2171     }
2172    
2173     /*
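
vmbus_initiate_unload() now distinguishes process context, where it may sleep on the completion, from the crash path, which may run in interrupt context and must poll instead. A hedged sketch of the dispatch (the poll callback is a stand-in for scanning the SynIC message page as vmbus_wait_for_unload() does):

#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/types.h>

static void wait_for_unload(struct completion *unload_event,
			    bool (*poll_for_response)(void))
{
	if (!in_interrupt()) {
		wait_for_completion(unload_event);	/* normal path: sleep */
	} else {
		while (!poll_for_response())		/* crash path: busy-wait */
			mdelay(10);
	}
}
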
2174     diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
2175     index 6341be8739ae..63194a9a7189 100644
2176     --- a/drivers/hv/hv.c
2177     +++ b/drivers/hv/hv.c
2178     @@ -293,8 +293,14 @@ void hv_cleanup(void)
2179     * Cleanup the TSC page based CS.
2180     */
2181     if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
2182     - clocksource_change_rating(&hyperv_cs_tsc, 10);
2183     - clocksource_unregister(&hyperv_cs_tsc);
2184     + /*
2185     + * Crash can happen in an interrupt context and unregistering
2186     + * a clocksource is impossible and redundant in this case.
2187     + */
2188     + if (!oops_in_progress) {
2189     + clocksource_change_rating(&hyperv_cs_tsc, 10);
2190     + clocksource_unregister(&hyperv_cs_tsc);
2191     + }
2192    
2193     hypercall_msr.as_uint64 = 0;
2194     wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
2195     diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
2196     index db4b887b889d..c37a71e13de0 100644
2197     --- a/drivers/hv/hv_fcopy.c
2198     +++ b/drivers/hv/hv_fcopy.c
2199     @@ -51,7 +51,6 @@ static struct {
2200     struct hv_fcopy_hdr *fcopy_msg; /* current message */
2201     struct vmbus_channel *recv_channel; /* chn we got the request */
2202     u64 recv_req_id; /* request ID. */
2203     - void *fcopy_context; /* for the channel callback */
2204     } fcopy_transaction;
2205    
2206     static void fcopy_respond_to_host(int error);
2207     @@ -67,6 +66,13 @@ static struct hvutil_transport *hvt;
2208     */
2209     static int dm_reg_value;
2210    
2211     +static void fcopy_poll_wrapper(void *channel)
2212     +{
2213     + /* Transaction is finished, reset the state here to avoid races. */
2214     + fcopy_transaction.state = HVUTIL_READY;
2215     + hv_fcopy_onchannelcallback(channel);
2216     +}
2217     +
2218     static void fcopy_timeout_func(struct work_struct *dummy)
2219     {
2220     /*
2221     @@ -74,13 +80,7 @@ static void fcopy_timeout_func(struct work_struct *dummy)
2222     * process the pending transaction.
2223     */
2224     fcopy_respond_to_host(HV_E_FAIL);
2225     -
2226     - /* Transaction is finished, reset the state. */
2227     - if (fcopy_transaction.state > HVUTIL_READY)
2228     - fcopy_transaction.state = HVUTIL_READY;
2229     -
2230     - hv_poll_channel(fcopy_transaction.fcopy_context,
2231     - hv_fcopy_onchannelcallback);
2232     + hv_poll_channel(fcopy_transaction.recv_channel, fcopy_poll_wrapper);
2233     }
2234    
2235     static int fcopy_handle_handshake(u32 version)
2236     @@ -108,9 +108,7 @@ static int fcopy_handle_handshake(u32 version)
2237     return -EINVAL;
2238     }
2239     pr_debug("FCP: userspace daemon ver. %d registered\n", version);
2240     - fcopy_transaction.state = HVUTIL_READY;
2241     - hv_poll_channel(fcopy_transaction.fcopy_context,
2242     - hv_fcopy_onchannelcallback);
2243     + hv_poll_channel(fcopy_transaction.recv_channel, fcopy_poll_wrapper);
2244     return 0;
2245     }
2246    
2247     @@ -227,15 +225,8 @@ void hv_fcopy_onchannelcallback(void *context)
2248     int util_fw_version;
2249     int fcopy_srv_version;
2250    
2251     - if (fcopy_transaction.state > HVUTIL_READY) {
2252     - /*
2253     - * We will defer processing this callback once
2254     - * the current transaction is complete.
2255     - */
2256     - fcopy_transaction.fcopy_context = context;
2257     + if (fcopy_transaction.state > HVUTIL_READY)
2258     return;
2259     - }
2260     - fcopy_transaction.fcopy_context = NULL;
2261    
2262     vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
2263     &requestid);
2264     @@ -275,7 +266,8 @@ void hv_fcopy_onchannelcallback(void *context)
2265     * Send the information to the user-level daemon.
2266     */
2267     schedule_work(&fcopy_send_work);
2268     - schedule_delayed_work(&fcopy_timeout_work, 5*HZ);
2269     + schedule_delayed_work(&fcopy_timeout_work,
2270     + HV_UTIL_TIMEOUT * HZ);
2271     return;
2272     }
2273     icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
2274     @@ -304,9 +296,8 @@ static int fcopy_on_msg(void *msg, int len)
2275     if (cancel_delayed_work_sync(&fcopy_timeout_work)) {
2276     fcopy_transaction.state = HVUTIL_USERSPACE_RECV;
2277     fcopy_respond_to_host(*val);
2278     - fcopy_transaction.state = HVUTIL_READY;
2279     - hv_poll_channel(fcopy_transaction.fcopy_context,
2280     - hv_fcopy_onchannelcallback);
2281     + hv_poll_channel(fcopy_transaction.recv_channel,
2282     + fcopy_poll_wrapper);
2283     }
2284    
2285     return 0;
2286     diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
2287     index 74c38a9f34a6..2a3420c4ca59 100644
2288     --- a/drivers/hv/hv_kvp.c
2289     +++ b/drivers/hv/hv_kvp.c
2290     @@ -66,7 +66,6 @@ static struct {
2291     struct hv_kvp_msg *kvp_msg; /* current message */
2292     struct vmbus_channel *recv_channel; /* chn we got the request */
2293     u64 recv_req_id; /* request ID. */
2294     - void *kvp_context; /* for the channel callback */
2295     } kvp_transaction;
2296    
2297     /*
2298     @@ -94,6 +93,13 @@ static struct hvutil_transport *hvt;
2299     */
2300     #define HV_DRV_VERSION "3.1"
2301    
2302     +static void kvp_poll_wrapper(void *channel)
2303     +{
2304     + /* Transaction is finished, reset the state here to avoid races. */
2305     + kvp_transaction.state = HVUTIL_READY;
2306     + hv_kvp_onchannelcallback(channel);
2307     +}
2308     +
2309     static void
2310     kvp_register(int reg_value)
2311     {
2312     @@ -121,12 +127,7 @@ static void kvp_timeout_func(struct work_struct *dummy)
2313     */
2314     kvp_respond_to_host(NULL, HV_E_FAIL);
2315    
2316     - /* Transaction is finished, reset the state. */
2317     - if (kvp_transaction.state > HVUTIL_READY)
2318     - kvp_transaction.state = HVUTIL_READY;
2319     -
2320     - hv_poll_channel(kvp_transaction.kvp_context,
2321     - hv_kvp_onchannelcallback);
2322     + hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
2323     }
2324    
2325     static int kvp_handle_handshake(struct hv_kvp_msg *msg)
2326     @@ -218,9 +219,7 @@ static int kvp_on_msg(void *msg, int len)
2327     */
2328     if (cancel_delayed_work_sync(&kvp_timeout_work)) {
2329     kvp_respond_to_host(message, error);
2330     - kvp_transaction.state = HVUTIL_READY;
2331     - hv_poll_channel(kvp_transaction.kvp_context,
2332     - hv_kvp_onchannelcallback);
2333     + hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
2334     }
2335    
2336     return 0;
2337     @@ -596,15 +595,8 @@ void hv_kvp_onchannelcallback(void *context)
2338     int util_fw_version;
2339     int kvp_srv_version;
2340    
2341     - if (kvp_transaction.state > HVUTIL_READY) {
2342     - /*
2343     - * We will defer processing this callback once
2344     - * the current transaction is complete.
2345     - */
2346     - kvp_transaction.kvp_context = context;
2347     + if (kvp_transaction.state > HVUTIL_READY)
2348     return;
2349     - }
2350     - kvp_transaction.kvp_context = NULL;
2351    
2352     vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 4, &recvlen,
2353     &requestid);
2354     @@ -668,7 +660,8 @@ void hv_kvp_onchannelcallback(void *context)
2355     * user-mode not responding.
2356     */
2357     schedule_work(&kvp_sendkey_work);
2358     - schedule_delayed_work(&kvp_timeout_work, 5*HZ);
2359     + schedule_delayed_work(&kvp_timeout_work,
2360     + HV_UTIL_TIMEOUT * HZ);
2361    
2362     return;
2363    
2364     diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
2365     index 815405f2e777..81882d4848bd 100644
2366     --- a/drivers/hv/hv_snapshot.c
2367     +++ b/drivers/hv/hv_snapshot.c
2368     @@ -53,7 +53,6 @@ static struct {
2369     struct vmbus_channel *recv_channel; /* chn we got the request */
2370     u64 recv_req_id; /* request ID. */
2371     struct hv_vss_msg *msg; /* current message */
2372     - void *vss_context; /* for the channel callback */
2373     } vss_transaction;
2374    
2375    
2376     @@ -74,6 +73,13 @@ static void vss_timeout_func(struct work_struct *dummy);
2377     static DECLARE_DELAYED_WORK(vss_timeout_work, vss_timeout_func);
2378     static DECLARE_WORK(vss_send_op_work, vss_send_op);
2379    
2380     +static void vss_poll_wrapper(void *channel)
2381     +{
2382     + /* Transaction is finished, reset the state here to avoid races. */
2383     + vss_transaction.state = HVUTIL_READY;
2384     + hv_vss_onchannelcallback(channel);
2385     +}
2386     +
2387     /*
2388     * Callback when data is received from user mode.
2389     */
2390     @@ -86,12 +92,7 @@ static void vss_timeout_func(struct work_struct *dummy)
2391     pr_warn("VSS: timeout waiting for daemon to reply\n");
2392     vss_respond_to_host(HV_E_FAIL);
2393    
2394     - /* Transaction is finished, reset the state. */
2395     - if (vss_transaction.state > HVUTIL_READY)
2396     - vss_transaction.state = HVUTIL_READY;
2397     -
2398     - hv_poll_channel(vss_transaction.vss_context,
2399     - hv_vss_onchannelcallback);
2400     + hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
2401     }
2402    
2403     static int vss_handle_handshake(struct hv_vss_msg *vss_msg)
2404     @@ -138,9 +139,8 @@ static int vss_on_msg(void *msg, int len)
2405     if (cancel_delayed_work_sync(&vss_timeout_work)) {
2406     vss_respond_to_host(vss_msg->error);
2407     /* Transaction is finished, reset the state. */
2408     - vss_transaction.state = HVUTIL_READY;
2409     - hv_poll_channel(vss_transaction.vss_context,
2410     - hv_vss_onchannelcallback);
2411     + hv_poll_channel(vss_transaction.recv_channel,
2412     + vss_poll_wrapper);
2413     }
2414     } else {
2415     /* This is a spurious call! */
2416     @@ -238,15 +238,8 @@ void hv_vss_onchannelcallback(void *context)
2417     struct icmsg_hdr *icmsghdrp;
2418     struct icmsg_negotiate *negop = NULL;
2419    
2420     - if (vss_transaction.state > HVUTIL_READY) {
2421     - /*
2422     - * We will defer processing this callback once
2423     - * the current transaction is complete.
2424     - */
2425     - vss_transaction.vss_context = context;
2426     + if (vss_transaction.state > HVUTIL_READY)
2427     return;
2428     - }
2429     - vss_transaction.vss_context = NULL;
2430    
2431     vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
2432     &requestid);
2433     @@ -338,6 +331,11 @@ static void vss_on_reset(void)
2434     int
2435     hv_vss_init(struct hv_util_service *srv)
2436     {
2437     + if (vmbus_proto_version < VERSION_WIN8_1) {
2438     + pr_warn("Integration service 'Backup (volume snapshot)'"
2439     + " not supported on this host version.\n");
2440     + return -ENOTSUPP;
2441     + }
2442     recv_buffer = srv->recv_buffer;
2443    
2444     /*
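
fcopy, kvp and vss all move the state reset into a poll wrapper that hv_poll_channel() executes on the channel's target CPU, so resetting the transaction and scanning for the next request can no longer interleave. A minimal userspace sketch of the wrapper pattern (names and types are illustrative stand-ins):

#include <stdio.h>

enum state { HVUTIL_READY, HVUTIL_HOSTMSG_RECEIVED };
static enum state transaction_state = HVUTIL_HOSTMSG_RECEIVED;

static void onchannelcallback(void *channel)
{
	(void)channel;
	if (transaction_state > HVUTIL_READY)
		return;			/* still busy: defer, no context stashing */
	printf("processing next request\n");
}

static void poll_wrapper(void *channel)
{
	/* Transaction is finished; reset the state here to avoid races. */
	transaction_state = HVUTIL_READY;
	onchannelcallback(channel);
}

int main(void)
{
	poll_wrapper(NULL);	/* stand-in for hv_poll_channel(chan, poll_wrapper) */
	return 0;
}
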
2445     diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c
2446     index 6a9d80a5332d..1505ee6e6605 100644
2447     --- a/drivers/hv/hv_utils_transport.c
2448     +++ b/drivers/hv/hv_utils_transport.c
2449     @@ -204,9 +204,12 @@ int hvutil_transport_send(struct hvutil_transport *hvt, void *msg, int len)
2450     goto out_unlock;
2451     }
2452     hvt->outmsg = kzalloc(len, GFP_KERNEL);
2453     - memcpy(hvt->outmsg, msg, len);
2454     - hvt->outmsg_len = len;
2455     - wake_up_interruptible(&hvt->outmsg_q);
2456     + if (hvt->outmsg) {
2457     + memcpy(hvt->outmsg, msg, len);
2458     + hvt->outmsg_len = len;
2459     + wake_up_interruptible(&hvt->outmsg_q);
2460     + } else
2461     + ret = -ENOMEM;
2462     out_unlock:
2463     mutex_unlock(&hvt->outmsg_lock);
2464     return ret;
2465     diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
2466     index 3782636562a1..12156db2e88e 100644
2467     --- a/drivers/hv/hyperv_vmbus.h
2468     +++ b/drivers/hv/hyperv_vmbus.h
2469     @@ -31,6 +31,11 @@
2470     #include <linux/hyperv.h>
2471    
2472     /*
2473     + * Timeout for services such as KVP and fcopy.
2474     + */
2475     +#define HV_UTIL_TIMEOUT 30
2476     +
2477     +/*
2478     * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
2479     * is set by CPUID(HVCPUID_VERSION_FEATURES).
2480     */
2481     @@ -759,11 +764,7 @@ static inline void hv_poll_channel(struct vmbus_channel *channel,
2482     if (!channel)
2483     return;
2484    
2485     - if (channel->target_cpu != smp_processor_id())
2486     - smp_call_function_single(channel->target_cpu,
2487     - cb, channel, true);
2488     - else
2489     - cb(channel);
2490     + smp_call_function_single(channel->target_cpu, cb, channel, true);
2491     }
2492    
2493     enum hvutil_device_state {
2494     diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
2495     index 9b5440f6b3b4..509ed9731630 100644
2496     --- a/drivers/hv/vmbus_drv.c
2497     +++ b/drivers/hv/vmbus_drv.c
2498     @@ -105,6 +105,7 @@ static struct notifier_block hyperv_panic_block = {
2499     };
2500    
2501     struct resource *hyperv_mmio;
2502     +DEFINE_SEMAPHORE(hyperv_mmio_lock);
2503    
2504     static int vmbus_exists(void)
2505     {
2506     @@ -603,23 +604,11 @@ static int vmbus_remove(struct device *child_device)
2507     {
2508     struct hv_driver *drv;
2509     struct hv_device *dev = device_to_hv_device(child_device);
2510     - u32 relid = dev->channel->offermsg.child_relid;
2511    
2512     if (child_device->driver) {
2513     drv = drv_to_hv_drv(child_device->driver);
2514     if (drv->remove)
2515     drv->remove(dev);
2516     - else {
2517     - hv_process_channel_removal(dev->channel, relid);
2518     - pr_err("remove not set for driver %s\n",
2519     - dev_name(child_device));
2520     - }
2521     - } else {
2522     - /*
2523     - * We don't have a driver for this device; deal with the
2524     - * rescind message by removing the channel.
2525     - */
2526     - hv_process_channel_removal(dev->channel, relid);
2527     }
2528    
2529     return 0;
2530     @@ -654,7 +643,10 @@ static void vmbus_shutdown(struct device *child_device)
2531     static void vmbus_device_release(struct device *device)
2532     {
2533     struct hv_device *hv_dev = device_to_hv_device(device);
2534     + struct vmbus_channel *channel = hv_dev->channel;
2535    
2536     + hv_process_channel_removal(channel,
2537     + channel->offermsg.child_relid);
2538     kfree(hv_dev);
2539    
2540     }
2541     @@ -870,7 +862,7 @@ static int vmbus_bus_init(int irq)
2542     on_each_cpu(hv_synic_init, NULL, 1);
2543     ret = vmbus_connect();
2544     if (ret)
2545     - goto err_alloc;
2546     + goto err_connect;
2547    
2548     if (vmbus_proto_version > VERSION_WIN7)
2549     cpu_hotplug_disable();
2550     @@ -888,6 +880,8 @@ static int vmbus_bus_init(int irq)
2551    
2552     return 0;
2553    
2554     +err_connect:
2555     + on_each_cpu(hv_synic_cleanup, NULL, 1);
2556     err_alloc:
2557     hv_synic_free();
2558     hv_remove_vmbus_irq();
2559     @@ -1147,7 +1141,10 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
2560     resource_size_t range_min, range_max, start, local_min, local_max;
2561     const char *dev_n = dev_name(&device_obj->device);
2562     u32 fb_end = screen_info.lfb_base + (screen_info.lfb_size << 1);
2563     - int i;
2564     + int i, retval;
2565     +
2566     + retval = -ENXIO;
2567     + down(&hyperv_mmio_lock);
2568    
2569     for (iter = hyperv_mmio; iter; iter = iter->sibling) {
2570     if ((iter->start >= max) || (iter->end <= min))
2571     @@ -1184,13 +1181,17 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
2572     for (; start + size - 1 <= local_max; start += align) {
2573     *new = request_mem_region_exclusive(start, size,
2574     dev_n);
2575     - if (*new)
2576     - return 0;
2577     + if (*new) {
2578     + retval = 0;
2579     + goto exit;
2580     + }
2581     }
2582     }
2583     }
2584    
2585     - return -ENXIO;
2586     +exit:
2587     + up(&hyperv_mmio_lock);
2588     + return retval;
2589     }
2590     EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
2591    
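
The vmbus_allocate_mmio() change wraps the whole search in a semaphore and funnels every outcome through one exit label, so the lock is released exactly once on every path. A hedged kernel-style sketch of that shape, with try_claim() standing in for request_mem_region_exclusive():

#include <linux/semaphore.h>
#include <linux/errno.h>
#include <linux/types.h>

static DEFINE_SEMAPHORE(mmio_lock);	/* plays the role of hyperv_mmio_lock */

static int allocate_region(int nslots, bool (*try_claim)(int))
{
	int i, retval = -ENXIO;

	down(&mmio_lock);
	for (i = 0; i < nslots; i++) {
		if (try_claim(i)) {
			retval = 0;
			goto exit;	/* success still runs the single unlock */
		}
	}
exit:
	up(&mmio_lock);
	return retval;
}
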
2592     diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
2593     index 146eed70bdf4..ba947df5a8c7 100644
2594     --- a/drivers/idle/intel_idle.c
2595     +++ b/drivers/idle/intel_idle.c
2596     @@ -716,6 +716,26 @@ static struct cpuidle_state avn_cstates[] = {
2597     {
2598     .enter = NULL }
2599     };
2600     +static struct cpuidle_state knl_cstates[] = {
2601     + {
2602     + .name = "C1-KNL",
2603     + .desc = "MWAIT 0x00",
2604     + .flags = MWAIT2flg(0x00),
2605     + .exit_latency = 1,
2606     + .target_residency = 2,
2607     + .enter = &intel_idle,
2608     + .enter_freeze = intel_idle_freeze },
2609     + {
2610     + .name = "C6-KNL",
2611     + .desc = "MWAIT 0x10",
2612     + .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
2613     + .exit_latency = 120,
2614     + .target_residency = 500,
2615     + .enter = &intel_idle,
2616     + .enter_freeze = intel_idle_freeze },
2617     + {
2618     + .enter = NULL }
2619     +};
2620    
2621     /**
2622     * intel_idle
2623     @@ -890,6 +910,10 @@ static const struct idle_cpu idle_cpu_avn = {
2624     .disable_promotion_to_c1e = true,
2625     };
2626    
2627     +static const struct idle_cpu idle_cpu_knl = {
2628     + .state_table = knl_cstates,
2629     +};
2630     +
2631     #define ICPU(model, cpu) \
2632     { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
2633    
2634     @@ -921,6 +945,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
2635     ICPU(0x56, idle_cpu_bdw),
2636     ICPU(0x4e, idle_cpu_skl),
2637     ICPU(0x5e, idle_cpu_skl),
2638     + ICPU(0x57, idle_cpu_knl),
2639     {}
2640     };
2641     MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
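
The intel_idle addition follows the driver's usual recipe: a C-state table terminated by an entry whose .enter is NULL, plus a model match (0x57, Knights Landing) that selects it. A runnable sketch with the kernel structures trimmed to stand-ins and the values mirroring the patch:

#include <stdio.h>

struct state {
	const char *name;
	unsigned mwait_hint;
	int exit_latency;	/* us */
	int target_residency;	/* us */
	int (*enter)(void);
};

static int do_idle(void) { return 0; }

static const struct state knl_states[] = {
	{ "C1-KNL", 0x00, 1,   2,   do_idle },
	{ "C6-KNL", 0x10, 120, 500, do_idle },
	{ .enter = NULL },	/* NULL .enter terminates the table */
};

static int count_states(const struct state *t)
{
	int n = 0;
	while (t[n].enter)
		n++;
	return n;
}

int main(void)
{
	printf("%d states\n", count_states(knl_states));	/* 2 states */
	return 0;
}
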
2642     diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
2643     index 5ea0c14070d1..fa9c42ff1fb0 100644
2644     --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
2645     +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
2646     @@ -245,8 +245,6 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
2647     skb_reset_mac_header(skb);
2648     skb_pull(skb, IPOIB_ENCAP_LEN);
2649    
2650     - skb->truesize = SKB_TRUESIZE(skb->len);
2651     -
2652     ++dev->stats.rx_packets;
2653     dev->stats.rx_bytes += skb->len;
2654    
2655     diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
2656     index 2b2f9d66c2c7..aff42d5e2296 100644
2657     --- a/drivers/input/joystick/xpad.c
2658     +++ b/drivers/input/joystick/xpad.c
2659     @@ -317,6 +317,19 @@ static struct usb_device_id xpad_table[] = {
2660    
2661     MODULE_DEVICE_TABLE(usb, xpad_table);
2662    
2663     +struct xpad_output_packet {
2664     + u8 data[XPAD_PKT_LEN];
2665     + u8 len;
2666     + bool pending;
2667     +};
2668     +
2669     +#define XPAD_OUT_CMD_IDX 0
2670     +#define XPAD_OUT_FF_IDX 1
2671     +#define XPAD_OUT_LED_IDX (1 + IS_ENABLED(CONFIG_JOYSTICK_XPAD_FF))
2672     +#define XPAD_NUM_OUT_PACKETS (1 + \
2673     + IS_ENABLED(CONFIG_JOYSTICK_XPAD_FF) + \
2674     + IS_ENABLED(CONFIG_JOYSTICK_XPAD_LEDS))
2675     +
2676     struct usb_xpad {
2677     struct input_dev *dev; /* input device interface */
2678     struct usb_device *udev; /* usb device */
2679     @@ -329,9 +342,13 @@ struct usb_xpad {
2680     dma_addr_t idata_dma;
2681    
2682     struct urb *irq_out; /* urb for interrupt out report */
2683     + bool irq_out_active; /* we must not use an active URB */
2684     unsigned char *odata; /* output data */
2685     dma_addr_t odata_dma;
2686     - struct mutex odata_mutex;
2687     + spinlock_t odata_lock;
2688     +
2689     + struct xpad_output_packet out_packets[XPAD_NUM_OUT_PACKETS];
2690     + int last_out_packet;
2691    
2692     #if defined(CONFIG_JOYSTICK_XPAD_LEDS)
2693     struct xpad_led *led;
2694     @@ -678,18 +695,71 @@ exit:
2695     __func__, retval);
2696     }
2697    
2698     +/* Callers must hold xpad->odata_lock spinlock */
2699     +static bool xpad_prepare_next_out_packet(struct usb_xpad *xpad)
2700     +{
2701     + struct xpad_output_packet *pkt, *packet = NULL;
2702     + int i;
2703     +
2704     + for (i = 0; i < XPAD_NUM_OUT_PACKETS; i++) {
2705     + if (++xpad->last_out_packet >= XPAD_NUM_OUT_PACKETS)
2706     + xpad->last_out_packet = 0;
2707     +
2708     + pkt = &xpad->out_packets[xpad->last_out_packet];
2709     + if (pkt->pending) {
2710     + dev_dbg(&xpad->intf->dev,
2711     + "%s - found pending output packet %d\n",
2712     + __func__, xpad->last_out_packet);
2713     + packet = pkt;
2714     + break;
2715     + }
2716     + }
2717     +
2718     + if (packet) {
2719     + memcpy(xpad->odata, packet->data, packet->len);
2720     + xpad->irq_out->transfer_buffer_length = packet->len;
2721     + packet->pending = false;
2722     + return true;
2723     + }
2724     +
2725     + return false;
2726     +}
2727     +
2728     +/* Callers must hold xpad->odata_lock spinlock */
2729     +static int xpad_try_sending_next_out_packet(struct usb_xpad *xpad)
2730     +{
2731     + int error;
2732     +
2733     + if (!xpad->irq_out_active && xpad_prepare_next_out_packet(xpad)) {
2734     + error = usb_submit_urb(xpad->irq_out, GFP_ATOMIC);
2735     + if (error) {
2736     + dev_err(&xpad->intf->dev,
2737     + "%s - usb_submit_urb failed with result %d\n",
2738     + __func__, error);
2739     + return -EIO;
2740     + }
2741     +
2742     + xpad->irq_out_active = true;
2743     + }
2744     +
2745     + return 0;
2746     +}
2747     +
2748     static void xpad_irq_out(struct urb *urb)
2749     {
2750     struct usb_xpad *xpad = urb->context;
2751     struct device *dev = &xpad->intf->dev;
2752     - int retval, status;
2753     + int status = urb->status;
2754     + int error;
2755     + unsigned long flags;
2756    
2757     - status = urb->status;
2758     + spin_lock_irqsave(&xpad->odata_lock, flags);
2759    
2760     switch (status) {
2761     case 0:
2762     /* success */
2763     - return;
2764     + xpad->irq_out_active = xpad_prepare_next_out_packet(xpad);
2765     + break;
2766    
2767     case -ECONNRESET:
2768     case -ENOENT:
2769     @@ -697,19 +767,26 @@ static void xpad_irq_out(struct urb *urb)
2770     /* this urb is terminated, clean up */
2771     dev_dbg(dev, "%s - urb shutting down with status: %d\n",
2772     __func__, status);
2773     - return;
2774     + xpad->irq_out_active = false;
2775     + break;
2776    
2777     default:
2778     dev_dbg(dev, "%s - nonzero urb status received: %d\n",
2779     __func__, status);
2780     - goto exit;
2781     + break;
2782     }
2783    
2784     -exit:
2785     - retval = usb_submit_urb(urb, GFP_ATOMIC);
2786     - if (retval)
2787     - dev_err(dev, "%s - usb_submit_urb failed with result %d\n",
2788     - __func__, retval);
2789     + if (xpad->irq_out_active) {
2790     + error = usb_submit_urb(urb, GFP_ATOMIC);
2791     + if (error) {
2792     + dev_err(dev,
2793     + "%s - usb_submit_urb failed with result %d\n",
2794     + __func__, error);
2795     + xpad->irq_out_active = false;
2796     + }
2797     + }
2798     +
2799     + spin_unlock_irqrestore(&xpad->odata_lock, flags);
2800     }
2801    
2802     static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
2803     @@ -728,7 +805,7 @@ static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
2804     goto fail1;
2805     }
2806    
2807     - mutex_init(&xpad->odata_mutex);
2808     + spin_lock_init(&xpad->odata_lock);
2809    
2810     xpad->irq_out = usb_alloc_urb(0, GFP_KERNEL);
2811     if (!xpad->irq_out) {
2812     @@ -770,27 +847,57 @@ static void xpad_deinit_output(struct usb_xpad *xpad)
2813    
2814     static int xpad_inquiry_pad_presence(struct usb_xpad *xpad)
2815     {
2816     + struct xpad_output_packet *packet =
2817     + &xpad->out_packets[XPAD_OUT_CMD_IDX];
2818     + unsigned long flags;
2819     int retval;
2820    
2821     - mutex_lock(&xpad->odata_mutex);
2822     + spin_lock_irqsave(&xpad->odata_lock, flags);
2823     +
2824     + packet->data[0] = 0x08;
2825     + packet->data[1] = 0x00;
2826     + packet->data[2] = 0x0F;
2827     + packet->data[3] = 0xC0;
2828     + packet->data[4] = 0x00;
2829     + packet->data[5] = 0x00;
2830     + packet->data[6] = 0x00;
2831     + packet->data[7] = 0x00;
2832     + packet->data[8] = 0x00;
2833     + packet->data[9] = 0x00;
2834     + packet->data[10] = 0x00;
2835     + packet->data[11] = 0x00;
2836     + packet->len = 12;
2837     + packet->pending = true;
2838     +
2839     + /* Reset the sequence so we send out presence first */
2840     + xpad->last_out_packet = -1;
2841     + retval = xpad_try_sending_next_out_packet(xpad);
2842     +
2843     + spin_unlock_irqrestore(&xpad->odata_lock, flags);
2844    
2845     - xpad->odata[0] = 0x08;
2846     - xpad->odata[1] = 0x00;
2847     - xpad->odata[2] = 0x0F;
2848     - xpad->odata[3] = 0xC0;
2849     - xpad->odata[4] = 0x00;
2850     - xpad->odata[5] = 0x00;
2851     - xpad->odata[6] = 0x00;
2852     - xpad->odata[7] = 0x00;
2853     - xpad->odata[8] = 0x00;
2854     - xpad->odata[9] = 0x00;
2855     - xpad->odata[10] = 0x00;
2856     - xpad->odata[11] = 0x00;
2857     - xpad->irq_out->transfer_buffer_length = 12;
2858     + return retval;
2859     +}
2860    
2861     - retval = usb_submit_urb(xpad->irq_out, GFP_KERNEL);
2862     +static int xpad_start_xbox_one(struct usb_xpad *xpad)
2863     +{
2864     + struct xpad_output_packet *packet =
2865     + &xpad->out_packets[XPAD_OUT_CMD_IDX];
2866     + unsigned long flags;
2867     + int retval;
2868     +
2869     + spin_lock_irqsave(&xpad->odata_lock, flags);
2870     +
2871     + /* Xbox one controller needs to be initialized. */
2872     + packet->data[0] = 0x05;
2873     + packet->data[1] = 0x20;
2874     + packet->len = 2;
2875     + packet->pending = true;
2876    
2877     - mutex_unlock(&xpad->odata_mutex);
2878     + /* Reset the sequence so we send out the start packet first */
2879     + xpad->last_out_packet = -1;
2880     + retval = xpad_try_sending_next_out_packet(xpad);
2881     +
2882     + spin_unlock_irqrestore(&xpad->odata_lock, flags);
2883    
2884     return retval;
2885     }
2886     @@ -799,8 +906,11 @@ static int xpad_inquiry_pad_presence(struct usb_xpad *xpad)
2887     static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect *effect)
2888     {
2889     struct usb_xpad *xpad = input_get_drvdata(dev);
2890     + struct xpad_output_packet *packet = &xpad->out_packets[XPAD_OUT_FF_IDX];
2891     __u16 strong;
2892     __u16 weak;
2893     + int retval;
2894     + unsigned long flags;
2895    
2896     if (effect->type != FF_RUMBLE)
2897     return 0;
2898     @@ -808,69 +918,80 @@ static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect
2899     strong = effect->u.rumble.strong_magnitude;
2900     weak = effect->u.rumble.weak_magnitude;
2901    
2902     + spin_lock_irqsave(&xpad->odata_lock, flags);
2903     +
2904     switch (xpad->xtype) {
2905     case XTYPE_XBOX:
2906     - xpad->odata[0] = 0x00;
2907     - xpad->odata[1] = 0x06;
2908     - xpad->odata[2] = 0x00;
2909     - xpad->odata[3] = strong / 256; /* left actuator */
2910     - xpad->odata[4] = 0x00;
2911     - xpad->odata[5] = weak / 256; /* right actuator */
2912     - xpad->irq_out->transfer_buffer_length = 6;
2913     + packet->data[0] = 0x00;
2914     + packet->data[1] = 0x06;
2915     + packet->data[2] = 0x00;
2916     + packet->data[3] = strong / 256; /* left actuator */
2917     + packet->data[4] = 0x00;
2918     + packet->data[5] = weak / 256; /* right actuator */
2919     + packet->len = 6;
2920     + packet->pending = true;
2921     break;
2922    
2923     case XTYPE_XBOX360:
2924     - xpad->odata[0] = 0x00;
2925     - xpad->odata[1] = 0x08;
2926     - xpad->odata[2] = 0x00;
2927     - xpad->odata[3] = strong / 256; /* left actuator? */
2928     - xpad->odata[4] = weak / 256; /* right actuator? */
2929     - xpad->odata[5] = 0x00;
2930     - xpad->odata[6] = 0x00;
2931     - xpad->odata[7] = 0x00;
2932     - xpad->irq_out->transfer_buffer_length = 8;
2933     + packet->data[0] = 0x00;
2934     + packet->data[1] = 0x08;
2935     + packet->data[2] = 0x00;
2936     + packet->data[3] = strong / 256; /* left actuator? */
2937     + packet->data[4] = weak / 256; /* right actuator? */
2938     + packet->data[5] = 0x00;
2939     + packet->data[6] = 0x00;
2940     + packet->data[7] = 0x00;
2941     + packet->len = 8;
2942     + packet->pending = true;
2943     break;
2944    
2945     case XTYPE_XBOX360W:
2946     - xpad->odata[0] = 0x00;
2947     - xpad->odata[1] = 0x01;
2948     - xpad->odata[2] = 0x0F;
2949     - xpad->odata[3] = 0xC0;
2950     - xpad->odata[4] = 0x00;
2951     - xpad->odata[5] = strong / 256;
2952     - xpad->odata[6] = weak / 256;
2953     - xpad->odata[7] = 0x00;
2954     - xpad->odata[8] = 0x00;
2955     - xpad->odata[9] = 0x00;
2956     - xpad->odata[10] = 0x00;
2957     - xpad->odata[11] = 0x00;
2958     - xpad->irq_out->transfer_buffer_length = 12;
2959     + packet->data[0] = 0x00;
2960     + packet->data[1] = 0x01;
2961     + packet->data[2] = 0x0F;
2962     + packet->data[3] = 0xC0;
2963     + packet->data[4] = 0x00;
2964     + packet->data[5] = strong / 256;
2965     + packet->data[6] = weak / 256;
2966     + packet->data[7] = 0x00;
2967     + packet->data[8] = 0x00;
2968     + packet->data[9] = 0x00;
2969     + packet->data[10] = 0x00;
2970     + packet->data[11] = 0x00;
2971     + packet->len = 12;
2972     + packet->pending = true;
2973     break;
2974    
2975     case XTYPE_XBOXONE:
2976     - xpad->odata[0] = 0x09; /* activate rumble */
2977     - xpad->odata[1] = 0x08;
2978     - xpad->odata[2] = 0x00;
2979     - xpad->odata[3] = 0x08; /* continuous effect */
2980     - xpad->odata[4] = 0x00; /* simple rumble mode */
2981     - xpad->odata[5] = 0x03; /* L and R actuator only */
2982     - xpad->odata[6] = 0x00; /* TODO: LT actuator */
2983     - xpad->odata[7] = 0x00; /* TODO: RT actuator */
2984     - xpad->odata[8] = strong / 256; /* left actuator */
2985     - xpad->odata[9] = weak / 256; /* right actuator */
2986     - xpad->odata[10] = 0x80; /* length of pulse */
2987     - xpad->odata[11] = 0x00; /* stop period of pulse */
2988     - xpad->irq_out->transfer_buffer_length = 12;
2989     + packet->data[0] = 0x09; /* activate rumble */
2990     + packet->data[1] = 0x08;
2991     + packet->data[2] = 0x00;
2992     + packet->data[3] = 0x08; /* continuous effect */
2993     + packet->data[4] = 0x00; /* simple rumble mode */
2994     + packet->data[5] = 0x03; /* L and R actuator only */
2995     + packet->data[6] = 0x00; /* TODO: LT actuator */
2996     + packet->data[7] = 0x00; /* TODO: RT actuator */
2997     + packet->data[8] = strong / 256; /* left actuator */
2998     + packet->data[9] = weak / 256; /* right actuator */
2999     + packet->data[10] = 0x80; /* length of pulse */
3000     + packet->data[11] = 0x00; /* stop period of pulse */
3001     + packet->len = 12;
3002     + packet->pending = true;
3003     break;
3004    
3005     default:
3006     dev_dbg(&xpad->dev->dev,
3007     "%s - rumble command sent to unsupported xpad type: %d\n",
3008     __func__, xpad->xtype);
3009     - return -EINVAL;
3010     + retval = -EINVAL;
3011     + goto out;
3012     }
3013    
3014     - return usb_submit_urb(xpad->irq_out, GFP_ATOMIC);
3015     + retval = xpad_try_sending_next_out_packet(xpad);
3016     +
3017     +out:
3018     + spin_unlock_irqrestore(&xpad->odata_lock, flags);
3019     + return retval;
3020     }
3021    
3022     static int xpad_init_ff(struct usb_xpad *xpad)
3023     @@ -921,36 +1042,44 @@ struct xpad_led {
3024     */
3025     static void xpad_send_led_command(struct usb_xpad *xpad, int command)
3026     {
3027     + struct xpad_output_packet *packet =
3028     + &xpad->out_packets[XPAD_OUT_LED_IDX];
3029     + unsigned long flags;
3030     +
3031     command %= 16;
3032    
3033     - mutex_lock(&xpad->odata_mutex);
3034     + spin_lock_irqsave(&xpad->odata_lock, flags);
3035    
3036     switch (xpad->xtype) {
3037     case XTYPE_XBOX360:
3038     - xpad->odata[0] = 0x01;
3039     - xpad->odata[1] = 0x03;
3040     - xpad->odata[2] = command;
3041     - xpad->irq_out->transfer_buffer_length = 3;
3042     + packet->data[0] = 0x01;
3043     + packet->data[1] = 0x03;
3044     + packet->data[2] = command;
3045     + packet->len = 3;
3046     + packet->pending = true;
3047     break;
3048     +
3049     case XTYPE_XBOX360W:
3050     - xpad->odata[0] = 0x00;
3051     - xpad->odata[1] = 0x00;
3052     - xpad->odata[2] = 0x08;
3053     - xpad->odata[3] = 0x40 + command;
3054     - xpad->odata[4] = 0x00;
3055     - xpad->odata[5] = 0x00;
3056     - xpad->odata[6] = 0x00;
3057     - xpad->odata[7] = 0x00;
3058     - xpad->odata[8] = 0x00;
3059     - xpad->odata[9] = 0x00;
3060     - xpad->odata[10] = 0x00;
3061     - xpad->odata[11] = 0x00;
3062     - xpad->irq_out->transfer_buffer_length = 12;
3063     + packet->data[0] = 0x00;
3064     + packet->data[1] = 0x00;
3065     + packet->data[2] = 0x08;
3066     + packet->data[3] = 0x40 + command;
3067     + packet->data[4] = 0x00;
3068     + packet->data[5] = 0x00;
3069     + packet->data[6] = 0x00;
3070     + packet->data[7] = 0x00;
3071     + packet->data[8] = 0x00;
3072     + packet->data[9] = 0x00;
3073     + packet->data[10] = 0x00;
3074     + packet->data[11] = 0x00;
3075     + packet->len = 12;
3076     + packet->pending = true;
3077     break;
3078     }
3079    
3080     - usb_submit_urb(xpad->irq_out, GFP_KERNEL);
3081     - mutex_unlock(&xpad->odata_mutex);
3082     + xpad_try_sending_next_out_packet(xpad);
3083     +
3084     + spin_unlock_irqrestore(&xpad->odata_lock, flags);
3085     }
3086    
3087     /*
3088     @@ -1048,13 +1177,8 @@ static int xpad_open(struct input_dev *dev)
3089     if (usb_submit_urb(xpad->irq_in, GFP_KERNEL))
3090     return -EIO;
3091    
3092     - if (xpad->xtype == XTYPE_XBOXONE) {
3093     - /* Xbox one controller needs to be initialized. */
3094     - xpad->odata[0] = 0x05;
3095     - xpad->odata[1] = 0x20;
3096     - xpad->irq_out->transfer_buffer_length = 2;
3097     - return usb_submit_urb(xpad->irq_out, GFP_KERNEL);
3098     - }
3099     + if (xpad->xtype == XTYPE_XBOXONE)
3100     + return xpad_start_xbox_one(xpad);
3101    
3102     return 0;
3103     }
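
The xpad hunks above replace the single mutex-guarded output buffer with per-command packet slots (command/presence, force feedback, LED) protected by a spinlock, so rumble requests arriving in atomic context can be queued instead of sleeping. A minimal userspace sketch of that scheduling idea, with invented helpers standing in for URB submission (names mirror the driver but this is only a model):

/* Model of the driver's pending-slot scheduling; "send" prints instead
 * of submitting a URB, and there is no locking in this toy version. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define PKT_LEN   64
#define CMD_IDX   0   /* presence / init commands */
#define FF_IDX    1   /* rumble */
#define LED_IDX   2
#define NUM_SLOTS 3

struct out_packet {
	char data[PKT_LEN];
	int len;
	bool pending;
};

static struct out_packet slots[NUM_SLOTS];
static int last_out_packet = -1;  /* -1 restarts the scan at slot 0 */
static bool irq_out_active;       /* models an in-flight URB */

/* Pick the next pending slot after the cursor; true if one was "sent". */
static bool try_sending_next_out_packet(void)
{
	if (irq_out_active)
		return false;             /* previous transfer still in flight */
	for (int i = 0; i < NUM_SLOTS; i++) {
		int idx = (last_out_packet + 1 + i) % NUM_SLOTS;
		if (slots[idx].pending) {
			slots[idx].pending = false;
			last_out_packet = idx;
			irq_out_active = true;
			printf("sending slot %d (%d bytes)\n", idx, slots[idx].len);
			return true;
		}
	}
	return false;
}

int main(void)
{
	memcpy(slots[FF_IDX].data, "\x00\x08", 2);
	slots[FF_IDX].len = 8;
	slots[FF_IDX].pending = true;
	slots[LED_IDX].len = 3;
	slots[LED_IDX].pending = true;

	try_sending_next_out_packet();  /* sends the FF slot */
	irq_out_active = false;         /* models the completion handler */
	try_sending_next_out_packet();  /* sends the LED slot */
	return 0;
}
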
3104     diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
3105     index a159529f9d53..c5f1757ac61d 100644
3106     --- a/drivers/irqchip/irq-gic-v3-its.c
3107     +++ b/drivers/irqchip/irq-gic-v3-its.c
3108     @@ -41,6 +41,7 @@
3109    
3110     #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0)
3111     #define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1)
3112     +#define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2)
3113    
3114     #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
3115    
3116     @@ -71,6 +72,7 @@ struct its_node {
3117     struct list_head its_device_list;
3118     u64 flags;
3119     u32 ite_size;
3120     + int numa_node;
3121     };
3122    
3123     #define ITS_ITT_ALIGN SZ_256
3124     @@ -600,11 +602,23 @@ static void its_unmask_irq(struct irq_data *d)
3125     static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
3126     bool force)
3127     {
3128     - unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
3129     + unsigned int cpu;
3130     + const struct cpumask *cpu_mask = cpu_online_mask;
3131     struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3132     struct its_collection *target_col;
3133     u32 id = its_get_event_id(d);
3134    
3135     + /* an LPI cannot be routed to a redistributor on a foreign node */
3136     + if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
3137     + if (its_dev->its->numa_node >= 0) {
3138     + cpu_mask = cpumask_of_node(its_dev->its->numa_node);
3139     + if (!cpumask_intersects(mask_val, cpu_mask))
3140     + return -EINVAL;
3141     + }
3142     + }
3143     +
3144     + cpu = cpumask_any_and(mask_val, cpu_mask);
3145     +
3146     if (cpu >= nr_cpu_ids)
3147     return -EINVAL;
3148    
3149     @@ -1081,6 +1095,16 @@ static void its_cpu_init_collection(void)
3150     list_for_each_entry(its, &its_nodes, entry) {
3151     u64 target;
3152    
3153     + /* avoid cross-node collections and ITS mappings */
3154     + if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
3155     + struct device_node *cpu_node;
3156     +
3157     + cpu_node = of_get_cpu_node(cpu, NULL);
3158     + if (its->numa_node != NUMA_NO_NODE &&
3159     + its->numa_node != of_node_to_nid(cpu_node))
3160     + continue;
3161     + }
3162     +
3163     /*
3164     * We now have to bind each collection to its target
3165     * redistributor.
3166     @@ -1308,9 +1332,14 @@ static void its_irq_domain_activate(struct irq_domain *domain,
3167     {
3168     struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3169     u32 event = its_get_event_id(d);
3170     + const struct cpumask *cpu_mask = cpu_online_mask;
3171     +
3172     + /* get the cpu_mask of the local node */
3173     + if (its_dev->its->numa_node >= 0)
3174     + cpu_mask = cpumask_of_node(its_dev->its->numa_node);
3175    
3176     /* Bind the LPI to the first possible CPU */
3177     - its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask);
3178     + its_dev->event_map.col_map[event] = cpumask_first(cpu_mask);
3179    
3180     /* Map the GIC IRQ and event to the device */
3181     its_send_mapvi(its_dev, d->hwirq, event);
3182     @@ -1400,6 +1429,13 @@ static void __maybe_unused its_enable_quirk_cavium_22375(void *data)
3183     its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
3184     }
3185    
3186     +static void __maybe_unused its_enable_quirk_cavium_23144(void *data)
3187     +{
3188     + struct its_node *its = data;
3189     +
3190     + its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
3191     +}
3192     +
3193     static const struct gic_quirk its_quirks[] = {
3194     #ifdef CONFIG_CAVIUM_ERRATUM_22375
3195     {
3196     @@ -1409,6 +1445,14 @@ static const struct gic_quirk its_quirks[] = {
3197     .init = its_enable_quirk_cavium_22375,
3198     },
3199     #endif
3200     +#ifdef CONFIG_CAVIUM_ERRATUM_23144
3201     + {
3202     + .desc = "ITS: Cavium erratum 23144",
3203     + .iidr = 0xa100034c, /* ThunderX pass 1.x */
3204     + .mask = 0xffff0fff,
3205     + .init = its_enable_quirk_cavium_23144,
3206     + },
3207     +#endif
3208     {
3209     }
3210     };
3211     @@ -1470,6 +1514,7 @@ static int its_probe(struct device_node *node, struct irq_domain *parent)
3212     its->base = its_base;
3213     its->phys_base = res.start;
3214     its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
3215     + its->numa_node = of_node_to_nid(node);
3216    
3217     its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
3218     if (!its->cmd_base) {
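
For the erratum-23144 hunks above: the workaround confines an LPI's target CPUs to the ITS's own NUMA node and rejects affinity requests that do not intersect the node mask. A compilable userspace sketch of that policy using plain bitmasks in place of struct cpumask (the two-node CPU layout is assumed for the demo):

#include <stdint.h>
#include <stdio.h>
#include <errno.h>

#define NUMA_NO_NODE (-1)

/* CPUs 0-3 on node 0, CPUs 4-7 on node 1 (assumed topology) */
static uint64_t cpumask_of_node(int node)
{
	return node == 0 ? 0x0fULL : 0xf0ULL;
}

static int pick_target_cpu(uint64_t requested, int its_numa_node,
			   uint64_t online)
{
	uint64_t allowed = online;

	if (its_numa_node != NUMA_NO_NODE)
		allowed &= cpumask_of_node(its_numa_node);
	if (!(requested & allowed))
		return -EINVAL;          /* no CPU on the local node requested */
	/* models cpumask_any_and(): lowest set bit of the intersection */
	return __builtin_ctzll(requested & allowed);
}

int main(void)
{
	printf("%d\n", pick_target_cpu(0x30, 1, 0xff)); /* -> CPU 4 */
	printf("%d\n", pick_target_cpu(0x03, 1, 0xff)); /* -> -EINVAL */
	return 0;
}
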
3219     diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
3220     index a54b339951a3..2a96ff6923f0 100644
3221     --- a/drivers/lightnvm/gennvm.c
3222     +++ b/drivers/lightnvm/gennvm.c
3223     @@ -89,6 +89,7 @@ static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks,
3224    
3225     list_move_tail(&blk->list, &lun->bb_list);
3226     lun->vlun.nr_bad_blocks++;
3227     + lun->vlun.nr_free_blocks--;
3228     }
3229    
3230     return 0;
3231     @@ -345,7 +346,7 @@ static void gennvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
3232     static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
3233     {
3234     if (!dev->ops->submit_io)
3235     - return 0;
3236     + return -ENODEV;
3237    
3238     /* Convert address space */
3239     gennvm_generic_to_addr_mode(dev, rqd);
3240     diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
3241     index 134e4faba482..a9859489acf6 100644
3242     --- a/drivers/lightnvm/rrpc.c
3243     +++ b/drivers/lightnvm/rrpc.c
3244     @@ -287,6 +287,8 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
3245     }
3246    
3247     page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
3248     + if (!page)
3249     + return -ENOMEM;
3250    
3251     while ((slot = find_first_zero_bit(rblk->invalid_pages,
3252     nr_pgs_per_blk)) < nr_pgs_per_blk) {
3253     @@ -427,7 +429,7 @@ static void rrpc_lun_gc(struct work_struct *work)
3254     if (nr_blocks_need < rrpc->nr_luns)
3255     nr_blocks_need = rrpc->nr_luns;
3256    
3257     - spin_lock(&lun->lock);
3258     + spin_lock(&rlun->lock);
3259     while (nr_blocks_need > lun->nr_free_blocks &&
3260     !list_empty(&rlun->prio_list)) {
3261     struct rrpc_block *rblock = block_prio_find_max(rlun);
3262     @@ -436,16 +438,16 @@ static void rrpc_lun_gc(struct work_struct *work)
3263     if (!rblock->nr_invalid_pages)
3264     break;
3265    
3266     + gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
3267     + if (!gcb)
3268     + break;
3269     +
3270     list_del_init(&rblock->prio);
3271    
3272     BUG_ON(!block_is_full(rrpc, rblock));
3273    
3274     pr_debug("rrpc: selected block '%lu' for GC\n", block->id);
3275    
3276     - gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
3277     - if (!gcb)
3278     - break;
3279     -
3280     gcb->rrpc = rrpc;
3281     gcb->rblk = rblock;
3282     INIT_WORK(&gcb->ws_gc, rrpc_block_gc);
3283     @@ -454,7 +456,7 @@ static void rrpc_lun_gc(struct work_struct *work)
3284    
3285     nr_blocks_need--;
3286     }
3287     - spin_unlock(&lun->lock);
3288     + spin_unlock(&rlun->lock);
3289    
3290     /* TODO: Hint that request queue can be started again */
3291     }
3292     @@ -650,11 +652,12 @@ static int rrpc_end_io(struct nvm_rq *rqd, int error)
3293     if (bio_data_dir(rqd->bio) == WRITE)
3294     rrpc_end_io_write(rrpc, rrqd, laddr, npages);
3295    
3296     + bio_put(rqd->bio);
3297     +
3298     if (rrqd->flags & NVM_IOTYPE_GC)
3299     return 0;
3300    
3301     rrpc_unlock_rq(rrpc, rqd);
3302     - bio_put(rqd->bio);
3303    
3304     if (npages > 1)
3305     nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
3306     @@ -841,6 +844,13 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
3307     err = nvm_submit_io(rrpc->dev, rqd);
3308     if (err) {
3309     pr_err("rrpc: I/O submission failed: %d\n", err);
3310     + bio_put(bio);
3311     + if (!(flags & NVM_IOTYPE_GC)) {
3312     + rrpc_unlock_rq(rrpc, rqd);
3313     + if (rqd->nr_pages > 1)
3314     + nvm_dev_dma_free(rrpc->dev,
3315     + rqd->ppa_list, rqd->dma_ppa_list);
3316     + }
3317     return NVM_IO_ERR;
3318     }
3319    
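
The rrpc_lun_gc hunk above does two things: it takes the per-rlun lock that actually guards prio_list, and it allocates the GC work item before unlinking the block, so an allocation failure no longer strands a detached block. A small sketch of that allocate-before-detach ordering (the list and allocation helpers are invented; only the control flow mirrors the patch):

#include <stdlib.h>
#include <stdio.h>

struct block { struct block *next; int id; };

static struct block *prio_head;

static struct block *peek_max(void) { return prio_head; }
static void unlink_head(void)       { prio_head = prio_head->next; }

static int queue_one_gc_block(void)
{
	struct block *blk = peek_max();
	if (!blk)
		return -1;

	/* Allocate the work item first (the patch moves this up) ... */
	struct { struct block *blk; } *gcb = malloc(sizeof(*gcb));
	if (!gcb)
		return -1;      /* block is still on the list; nothing lost */

	/* ... and only then detach the block and hand it off. */
	unlink_head();
	gcb->blk = blk;
	printf("queued block %d for GC\n", blk->id);
	free(gcb);          /* stands in for queue_work() */
	return 0;
}

int main(void)
{
	struct block b = { NULL, 7 };
	prio_head = &b;
	return queue_one_gc_block();
}
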
3320     diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
3321     index a296425a7270..3d5c0ba13181 100644
3322     --- a/drivers/md/bcache/super.c
3323     +++ b/drivers/md/bcache/super.c
3324     @@ -1818,7 +1818,7 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
3325     free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
3326    
3327     if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) ||
3328     - !init_fifo(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
3329     + !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
3330     !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
3331     !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
3332     !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
3333     diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
3334     index 292c9479bb75..310e4b8beae8 100644
3335     --- a/drivers/media/dvb-frontends/Kconfig
3336     +++ b/drivers/media/dvb-frontends/Kconfig
3337     @@ -264,7 +264,7 @@ config DVB_MB86A16
3338     config DVB_TDA10071
3339     tristate "NXP TDA10071"
3340     depends on DVB_CORE && I2C
3341     - select REGMAP
3342     + select REGMAP_I2C
3343     default m if !MEDIA_SUBDRV_AUTOSELECT
3344     help
3345     Say Y when you want to support this frontend.
3346     diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
3347     index d11fd6ac2df0..5cefca95734e 100644
3348     --- a/drivers/media/usb/uvc/uvc_driver.c
3349     +++ b/drivers/media/usb/uvc/uvc_driver.c
3350     @@ -148,6 +148,26 @@ static struct uvc_format_desc uvc_fmts[] = {
3351     .guid = UVC_GUID_FORMAT_H264,
3352     .fcc = V4L2_PIX_FMT_H264,
3353     },
3354     + {
3355     + .name = "Greyscale 8 L/R (Y8I)",
3356     + .guid = UVC_GUID_FORMAT_Y8I,
3357     + .fcc = V4L2_PIX_FMT_Y8I,
3358     + },
3359     + {
3360     + .name = "Greyscale 12 L/R (Y12I)",
3361     + .guid = UVC_GUID_FORMAT_Y12I,
3362     + .fcc = V4L2_PIX_FMT_Y12I,
3363     + },
3364     + {
3365     + .name = "Depth data 16-bit (Z16)",
3366     + .guid = UVC_GUID_FORMAT_Z16,
3367     + .fcc = V4L2_PIX_FMT_Z16,
3368     + },
3369     + {
3370     + .name = "Bayer 10-bit (SRGGB10P)",
3371     + .guid = UVC_GUID_FORMAT_RW10,
3372     + .fcc = V4L2_PIX_FMT_SRGGB10P,
3373     + },
3374     };
3375    
3376     /* ------------------------------------------------------------------------
3377     diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
3378     index f0f2391e1b43..7e4d3eea371b 100644
3379     --- a/drivers/media/usb/uvc/uvcvideo.h
3380     +++ b/drivers/media/usb/uvc/uvcvideo.h
3381     @@ -119,6 +119,18 @@
3382     #define UVC_GUID_FORMAT_H264 \
3383     { 'H', '2', '6', '4', 0x00, 0x00, 0x10, 0x00, \
3384     0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
3385     +#define UVC_GUID_FORMAT_Y8I \
3386     + { 'Y', '8', 'I', ' ', 0x00, 0x00, 0x10, 0x00, \
3387     + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
3388     +#define UVC_GUID_FORMAT_Y12I \
3389     + { 'Y', '1', '2', 'I', 0x00, 0x00, 0x10, 0x00, \
3390     + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
3391     +#define UVC_GUID_FORMAT_Z16 \
3392     + { 'Z', '1', '6', ' ', 0x00, 0x00, 0x10, 0x00, \
3393     + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
3394     +#define UVC_GUID_FORMAT_RW10 \
3395     + { 'R', 'W', '1', '0', 0x00, 0x00, 0x10, 0x00, \
3396     + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
3397    
3398     /* ------------------------------------------------------------------------
3399     * Driver specific constants.
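
The two uvc hunks register four new GUID-to-fourcc pairs; at probe time the driver matches the 16-byte GUID a camera reports against this table. A self-contained model of that lookup (table abbreviated to two entries, and the v4l2_fourcc macro re-derived locally rather than taken from the uapi headers):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define v4l2_fourcc(a, b, c, d) \
	((uint32_t)(a) | ((uint32_t)(b) << 8) | \
	 ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

/* UVC format GUIDs are four ASCII bytes plus a fixed 12-byte suffix */
#define UVC_GUID(a, b, c, d) \
	{ a, b, c, d, 0x00, 0x00, 0x10, 0x00, \
	  0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71 }

struct fmt { uint8_t guid[16]; uint32_t fcc; };

static const struct fmt fmts[] = {
	{ UVC_GUID('Y', '8', 'I', ' '), v4l2_fourcc('Y', '8', 'I', ' ') },
	{ UVC_GUID('Z', '1', '6', ' '), v4l2_fourcc('Z', '1', '6', ' ') },
};

static uint32_t guid_to_fcc(const uint8_t guid[16])
{
	for (size_t i = 0; i < sizeof(fmts) / sizeof(fmts[0]); i++)
		if (!memcmp(fmts[i].guid, guid, 16))
			return fmts[i].fcc;
	return 0;   /* unknown format */
}

int main(void)
{
	uint8_t z16[16] = UVC_GUID('Z', '1', '6', ' ');
	printf("%#x\n", (unsigned)guid_to_fcc(z16));
	return 0;
}
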
3400     diff --git a/drivers/misc/cxl/Makefile b/drivers/misc/cxl/Makefile
3401     index 6982f603fadc..ab6f392d3504 100644
3402     --- a/drivers/misc/cxl/Makefile
3403     +++ b/drivers/misc/cxl/Makefile
3404     @@ -1,4 +1,4 @@
3405     -ccflags-y := -Werror -Wno-unused-const-variable
3406     +ccflags-y := -Werror $(call cc-disable-warning, unused-const-variable)
3407    
3408     cxl-y += main.o file.o irq.o fault.o native.o
3409     cxl-y += context.o sysfs.o debugfs.o pci.o trace.o
3410     diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
3411     index 103baf0e0c5b..ea3eeb7011e1 100644
3412     --- a/drivers/misc/cxl/api.c
3413     +++ b/drivers/misc/cxl/api.c
3414     @@ -25,7 +25,6 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
3415    
3416     afu = cxl_pci_to_afu(dev);
3417    
3418     - get_device(&afu->dev);
3419     ctx = cxl_context_alloc();
3420     if (IS_ERR(ctx)) {
3421     rc = PTR_ERR(ctx);
3422     @@ -61,7 +60,6 @@ err_mapping:
3423     err_ctx:
3424     kfree(ctx);
3425     err_dev:
3426     - put_device(&afu->dev);
3427     return ERR_PTR(rc);
3428     }
3429     EXPORT_SYMBOL_GPL(cxl_dev_context_init);
3430     @@ -87,8 +85,6 @@ int cxl_release_context(struct cxl_context *ctx)
3431     if (ctx->status >= STARTED)
3432     return -EBUSY;
3433    
3434     - put_device(&ctx->afu->dev);
3435     -
3436     cxl_context_free(ctx);
3437    
3438     return 0;
3439     @@ -176,7 +172,7 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
3440    
3441     if (task) {
3442     ctx->pid = get_task_pid(task, PIDTYPE_PID);
3443     - get_pid(ctx->pid);
3444     + ctx->glpid = get_task_pid(task->group_leader, PIDTYPE_PID);
3445     kernel = false;
3446     }
3447    
3448     diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
3449     index 2faa1270d085..262b88eac414 100644
3450     --- a/drivers/misc/cxl/context.c
3451     +++ b/drivers/misc/cxl/context.c
3452     @@ -42,7 +42,7 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
3453     spin_lock_init(&ctx->sste_lock);
3454     ctx->afu = afu;
3455     ctx->master = master;
3456     - ctx->pid = NULL; /* Set in start work ioctl */
3457     + ctx->pid = ctx->glpid = NULL; /* Set in start work ioctl */
3458     mutex_init(&ctx->mapping_lock);
3459     ctx->mapping = mapping;
3460    
3461     @@ -97,6 +97,12 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
3462     ctx->pe = i;
3463     ctx->elem = &ctx->afu->spa[i];
3464     ctx->pe_inserted = false;
3465     +
3466     + /*
3467     + * take a ref on the afu so that it stays alive at-least till
3468     + * this context is reclaimed inside reclaim_ctx.
3469     + */
3470     + cxl_afu_get(afu);
3471     return 0;
3472     }
3473    
3474     @@ -211,7 +217,11 @@ int __detach_context(struct cxl_context *ctx)
3475     WARN_ON(cxl_detach_process(ctx) &&
3476     cxl_adapter_link_ok(ctx->afu->adapter));
3477     flush_work(&ctx->fault_work); /* Only needed for dedicated process */
3478     +
3479     + /* release the references to the mm-handling pid and the group leader */
3480     put_pid(ctx->pid);
3481     + put_pid(ctx->glpid);
3482     +
3483     cxl_ctx_put();
3484     return 0;
3485     }
3486     @@ -278,6 +288,9 @@ static void reclaim_ctx(struct rcu_head *rcu)
3487     if (ctx->irq_bitmap)
3488     kfree(ctx->irq_bitmap);
3489    
3490     + /* Drop ref to the afu device taken during cxl_context_init */
3491     + cxl_afu_put(ctx->afu);
3492     +
3493     kfree(ctx);
3494     }
3495    
3496     diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
3497     index 0cfb9c129f27..a521bc72cec2 100644
3498     --- a/drivers/misc/cxl/cxl.h
3499     +++ b/drivers/misc/cxl/cxl.h
3500     @@ -403,6 +403,18 @@ struct cxl_afu {
3501     bool enabled;
3502     };
3503    
3504     +/* AFU refcount management */
3505     +static inline struct cxl_afu *cxl_afu_get(struct cxl_afu *afu)
3506     +{
3507     +
3508     + return (get_device(&afu->dev) == NULL) ? NULL : afu;
3509     +}
3510     +
3511     +static inline void cxl_afu_put(struct cxl_afu *afu)
3512     +{
3513     + put_device(&afu->dev);
3514     +}
3515     +
3516    
3517     struct cxl_irq_name {
3518     struct list_head list;
3519     @@ -433,6 +445,9 @@ struct cxl_context {
3520     unsigned int sst_size, sst_lru;
3521    
3522     wait_queue_head_t wq;
3523     + /* pid of the group leader associated with the pid below */
3524     + struct pid *glpid;
3525     + /* use the mm context associated with this pid for ds faults */
3526     struct pid *pid;
3527     spinlock_t lock; /* Protects pending_irq_mask, pending_fault and fault_addr */
3528     /* Only used in PR mode */
3529     diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
3530     index 25a5418c55cb..81c3f75b7330 100644
3531     --- a/drivers/misc/cxl/fault.c
3532     +++ b/drivers/misc/cxl/fault.c
3533     @@ -166,13 +166,92 @@ static void cxl_handle_page_fault(struct cxl_context *ctx,
3534     cxl_ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
3535     }
3536    
3537     +/*
3538     + * Returns the mm_struct corresponding to the context ctx via ctx->pid
3539     + * In case the task has exited, we use the task group leader, accessible
3540     + * via ctx->glpid, to find the next task in the thread group that has a
3541     + * valid mm_struct associated with it. If a task with a valid mm_struct
3542     + * is found, ctx->pid is updated to use that task for subsequent
3543     + * translations. If no valid mm_struct is found in the task group to
3544     + * service the fault, NULL is returned.
3545     + */
3546     +static struct mm_struct *get_mem_context(struct cxl_context *ctx)
3547     +{
3548     + struct task_struct *task = NULL;
3549     + struct mm_struct *mm = NULL;
3550     + struct pid *old_pid = ctx->pid;
3551     +
3552     + if (old_pid == NULL) {
3553     + pr_warn("%s: Invalid context for pe=%d\n",
3554     + __func__, ctx->pe);
3555     + return NULL;
3556     + }
3557     +
3558     + task = get_pid_task(old_pid, PIDTYPE_PID);
3559     +
3560     + /*
3561     + * pid_alive may look racy but this saves us from costly
3562     + * get_task_mm when the task is a zombie. In the worst case
3563     + * we may think a task is alive when it is about to die,
3564     + * but get_task_mm will return NULL.
3565     + */
3566     + if (task != NULL && pid_alive(task))
3567     + mm = get_task_mm(task);
3568     +
3569     + /* release the task struct that was taken earlier */
3570     + if (task)
3571     + put_task_struct(task);
3572     + else
3573     + pr_devel("%s: Context owning pid=%i for pe=%i dead\n",
3574     + __func__, pid_nr(old_pid), ctx->pe);
3575     +
3576     + /*
3577     + * If we couldn't find the mm context then use the group
3578     + * leader to iterate over the task group and find a task
3579     + * that gives us mm_struct.
3580     + */
3581     + if (unlikely(mm == NULL && ctx->glpid != NULL)) {
3582     +
3583     + rcu_read_lock();
3584     + task = pid_task(ctx->glpid, PIDTYPE_PID);
3585     + if (task)
3586     + do {
3587     + mm = get_task_mm(task);
3588     + if (mm) {
3589     + ctx->pid = get_task_pid(task,
3590     + PIDTYPE_PID);
3591     + break;
3592     + }
3593     + task = next_thread(task);
3594     + } while (task && !thread_group_leader(task));
3595     + rcu_read_unlock();
3596     +
3597     + /* check if we switched pid */
3598     + if (ctx->pid != old_pid) {
3599     + if (mm)
3600     + pr_devel("%s:pe=%i switch pid %i->%i\n",
3601     + __func__, ctx->pe, pid_nr(old_pid),
3602     + pid_nr(ctx->pid));
3603     + else
3604     + pr_devel("%s:Cannot find mm for pid=%i\n",
3605     + __func__, pid_nr(old_pid));
3606     +
3607     + /* drop the reference to older pid */
3608     + put_pid(old_pid);
3609     + }
3610     + }
3611     +
3612     + return mm;
3613     +}
3614     +
3615     +
3616     +
3617     void cxl_handle_fault(struct work_struct *fault_work)
3618     {
3619     struct cxl_context *ctx =
3620     container_of(fault_work, struct cxl_context, fault_work);
3621     u64 dsisr = ctx->dsisr;
3622     u64 dar = ctx->dar;
3623     - struct task_struct *task = NULL;
3624     struct mm_struct *mm = NULL;
3625    
3626     if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr ||
3627     @@ -195,17 +274,17 @@ void cxl_handle_fault(struct work_struct *fault_work)
3628     "DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar);
3629    
3630     if (!ctx->kernel) {
3631     - if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
3632     - pr_devel("cxl_handle_fault unable to get task %i\n",
3633     - pid_nr(ctx->pid));
3634     +
3635     + mm = get_mem_context(ctx);
3636     + /* indicates all the threads in the task group have exited */
3637     + if (mm == NULL) {
3638     + pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
3639     + __func__, ctx->pe, pid_nr(ctx->pid));
3640     cxl_ack_ae(ctx);
3641     return;
3642     - }
3643     - if (!(mm = get_task_mm(task))) {
3644     - pr_devel("cxl_handle_fault unable to get mm %i\n",
3645     - pid_nr(ctx->pid));
3646     - cxl_ack_ae(ctx);
3647     - goto out;
3648     + } else {
3649     + pr_devel("Handling page fault for pe=%d pid=%i\n",
3650     + ctx->pe, pid_nr(ctx->pid));
3651     }
3652     }
3653    
3654     @@ -218,33 +297,22 @@ void cxl_handle_fault(struct work_struct *fault_work)
3655    
3656     if (mm)
3657     mmput(mm);
3658     -out:
3659     - if (task)
3660     - put_task_struct(task);
3661     }
3662    
3663     static void cxl_prefault_one(struct cxl_context *ctx, u64 ea)
3664     {
3665     - int rc;
3666     - struct task_struct *task;
3667     struct mm_struct *mm;
3668    
3669     - if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
3670     - pr_devel("cxl_prefault_one unable to get task %i\n",
3671     - pid_nr(ctx->pid));
3672     - return;
3673     - }
3674     - if (!(mm = get_task_mm(task))) {
3675     + mm = get_mem_context(ctx);
3676     + if (mm == NULL) {
3677     pr_devel("cxl_prefault_one unable to get mm %i\n",
3678     pid_nr(ctx->pid));
3679     - put_task_struct(task);
3680     return;
3681     }
3682    
3683     - rc = cxl_fault_segment(ctx, mm, ea);
3684     + cxl_fault_segment(ctx, mm, ea);
3685    
3686     mmput(mm);
3687     - put_task_struct(task);
3688     }
3689    
3690     static u64 next_segment(u64 ea, u64 vsid)
3691     @@ -263,18 +331,13 @@ static void cxl_prefault_vma(struct cxl_context *ctx)
3692     struct copro_slb slb;
3693     struct vm_area_struct *vma;
3694     int rc;
3695     - struct task_struct *task;
3696     struct mm_struct *mm;
3697    
3698     - if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
3699     - pr_devel("cxl_prefault_vma unable to get task %i\n",
3700     - pid_nr(ctx->pid));
3701     - return;
3702     - }
3703     - if (!(mm = get_task_mm(task))) {
3704     + mm = get_mem_context(ctx);
3705     + if (mm == NULL) {
3706     pr_devel("cxl_prefault_vm unable to get mm %i\n",
3707     pid_nr(ctx->pid));
3708     - goto out1;
3709     + return;
3710     }
3711    
3712     down_read(&mm->mmap_sem);
3713     @@ -295,8 +358,6 @@ static void cxl_prefault_vma(struct cxl_context *ctx)
3714     up_read(&mm->mmap_sem);
3715    
3716     mmput(mm);
3717     -out1:
3718     - put_task_struct(task);
3719     }
3720    
3721     void cxl_prefault(struct cxl_context *ctx, u64 wed)
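
get_mem_context() above first tries the pid recorded at attach time and, if that task's mm is gone, walks the thread group from the group leader, adopting the first thread that still owns an mm. A toy userspace model of that fallback (task structures and the thread list are invented; no kernel refcounting is modeled):

#include <stdio.h>
#include <stddef.h>

struct task { struct task *next_thread; void *mm; int pid; };
struct ctx  { struct task *pid_task, *glpid_task; };

static void *get_mem_context(struct ctx *ctx)
{
	struct task *t = ctx->pid_task;

	if (t && t->mm)
		return t->mm;                 /* fast path: task still alive */

	/* Fallback: scan the thread group via the group leader. */
	for (t = ctx->glpid_task; t; t = t->next_thread) {
		if (t->mm) {
			ctx->pid_task = t;        /* remember for later faults */
			printf("switched to pid %d\n", t->pid);
			return t->mm;
		}
	}
	return NULL;                      /* whole group has exited */
}

int main(void)
{
	int mm2;
	struct task leader  = { NULL, NULL, 100 };   /* exited, mm == NULL */
	struct task sibling = { NULL, &mm2, 101 };
	leader.next_thread = &sibling;
	struct ctx ctx = { &leader, &leader };
	return get_mem_context(&ctx) ? 0 : 1;
}
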
3722     diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
3723     index 7ccd2998be92..783337d22f36 100644
3724     --- a/drivers/misc/cxl/file.c
3725     +++ b/drivers/misc/cxl/file.c
3726     @@ -67,7 +67,13 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
3727     spin_unlock(&adapter->afu_list_lock);
3728     goto err_put_adapter;
3729     }
3730     - get_device(&afu->dev);
3731     +
3732     + /*
3733     + * taking a ref to the afu so that it doesn't go away
3734     + * for the rest of the function. This ref is released before
3735     + * we return.
3736     + */
3737     + cxl_afu_get(afu);
3738     spin_unlock(&adapter->afu_list_lock);
3739    
3740     if (!afu->current_mode)
3741     @@ -90,13 +96,12 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
3742     file->private_data = ctx;
3743     cxl_ctx_get();
3744    
3745     - /* Our ref on the AFU will now hold the adapter */
3746     - put_device(&adapter->dev);
3747     -
3748     - return 0;
3749     + /* indicate success */
3750     + rc = 0;
3751    
3752     err_put_afu:
3753     - put_device(&afu->dev);
3754     + /* release the ref taken earlier */
3755     + cxl_afu_put(afu);
3756     err_put_adapter:
3757     put_device(&adapter->dev);
3758     return rc;
3759     @@ -131,8 +136,6 @@ int afu_release(struct inode *inode, struct file *file)
3760     mutex_unlock(&ctx->mapping_lock);
3761     }
3762    
3763     - put_device(&ctx->afu->dev);
3764     -
3765     /*
3766     * At this this point all bottom halfs have finished and we should be
3767     * getting no more IRQs from the hardware for this context. Once it's
3768     @@ -198,8 +201,12 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
3769     * where a process (master, some daemon, etc) has opened the chardev on
3770     * behalf of another process, so the AFU's mm gets bound to the process
3771     * that performs this ioctl and not the process that opened the file.
3772     + * Also we grab the PID of the group leader so that if the task that
3773     + * has performed the attach operation exits, the mm context of the
3774     + * process is still accessible.
3775     */
3776     - ctx->pid = get_pid(get_task_pid(current, PIDTYPE_PID));
3777     + ctx->pid = get_task_pid(current, PIDTYPE_PID);
3778     + ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID);
3779    
3780     trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
3781    
3782     diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
3783     index be2c8e248e2e..0c6c17a1c59e 100644
3784     --- a/drivers/misc/cxl/pci.c
3785     +++ b/drivers/misc/cxl/pci.c
3786     @@ -138,6 +138,7 @@ static const struct pci_device_id cxl_pci_tbl[] = {
3787     { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0477), },
3788     { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), },
3789     { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x04cf), },
3790     + { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0601), },
3791     { PCI_DEVICE_CLASS(0x120000, ~0), },
3792    
3793     { }
3794     diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
3795     index 1a802af827ed..552a34dc4f82 100644
3796     --- a/drivers/mmc/host/sdhci.c
3797     +++ b/drivers/mmc/host/sdhci.c
3798     @@ -492,7 +492,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
3799     host->align_buffer, host->align_buffer_sz, direction);
3800     if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
3801     goto fail;
3802     - BUG_ON(host->align_addr & host->align_mask);
3803     + BUG_ON(host->align_addr & SDHCI_ADMA2_MASK);
3804    
3805     host->sg_count = sdhci_pre_dma_transfer(host, data);
3806     if (host->sg_count < 0)
3807     @@ -514,8 +514,8 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
3808     * the (up to three) bytes that screw up the
3809     * alignment.
3810     */
3811     - offset = (host->align_sz - (addr & host->align_mask)) &
3812     - host->align_mask;
3813     + offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
3814     + SDHCI_ADMA2_MASK;
3815     if (offset) {
3816     if (data->flags & MMC_DATA_WRITE) {
3817     buffer = sdhci_kmap_atomic(sg, &flags);
3818     @@ -529,8 +529,8 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
3819    
3820     BUG_ON(offset > 65536);
3821    
3822     - align += host->align_sz;
3823     - align_addr += host->align_sz;
3824     + align += SDHCI_ADMA2_ALIGN;
3825     + align_addr += SDHCI_ADMA2_ALIGN;
3826    
3827     desc += host->desc_sz;
3828    
3829     @@ -611,7 +611,7 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
3830     /* Do a quick scan of the SG list for any unaligned mappings */
3831     has_unaligned = false;
3832     for_each_sg(data->sg, sg, host->sg_count, i)
3833     - if (sg_dma_address(sg) & host->align_mask) {
3834     + if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
3835     has_unaligned = true;
3836     break;
3837     }
3838     @@ -623,15 +623,15 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
3839     align = host->align_buffer;
3840    
3841     for_each_sg(data->sg, sg, host->sg_count, i) {
3842     - if (sg_dma_address(sg) & host->align_mask) {
3843     - size = host->align_sz -
3844     - (sg_dma_address(sg) & host->align_mask);
3845     + if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
3846     + size = SDHCI_ADMA2_ALIGN -
3847     + (sg_dma_address(sg) & SDHCI_ADMA2_MASK);
3848    
3849     buffer = sdhci_kmap_atomic(sg, &flags);
3850     memcpy(buffer, align, size);
3851     sdhci_kunmap_atomic(buffer, &flags);
3852    
3853     - align += host->align_sz;
3854     + align += SDHCI_ADMA2_ALIGN;
3855     }
3856     }
3857     }
3858     @@ -1315,7 +1315,9 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
3859     pwr = SDHCI_POWER_330;
3860     break;
3861     default:
3862     - BUG();
3863     + WARN(1, "%s: Invalid vdd %#x\n",
3864     + mmc_hostname(host->mmc), vdd);
3865     + break;
3866     }
3867     }
3868    
3869     @@ -2983,24 +2985,17 @@ int sdhci_add_host(struct sdhci_host *host)
3870     if (host->flags & SDHCI_USE_64_BIT_DMA) {
3871     host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
3872     SDHCI_ADMA2_64_DESC_SZ;
3873     - host->align_buffer_sz = SDHCI_MAX_SEGS *
3874     - SDHCI_ADMA2_64_ALIGN;
3875     host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
3876     - host->align_sz = SDHCI_ADMA2_64_ALIGN;
3877     - host->align_mask = SDHCI_ADMA2_64_ALIGN - 1;
3878     } else {
3879     host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
3880     SDHCI_ADMA2_32_DESC_SZ;
3881     - host->align_buffer_sz = SDHCI_MAX_SEGS *
3882     - SDHCI_ADMA2_32_ALIGN;
3883     host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
3884     - host->align_sz = SDHCI_ADMA2_32_ALIGN;
3885     - host->align_mask = SDHCI_ADMA2_32_ALIGN - 1;
3886     }
3887     host->adma_table = dma_alloc_coherent(mmc_dev(mmc),
3888     host->adma_table_sz,
3889     &host->adma_addr,
3890     GFP_KERNEL);
3891     + host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
3892     host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL);
3893     if (!host->adma_table || !host->align_buffer) {
3894     if (host->adma_table)
3895     @@ -3014,7 +3009,7 @@ int sdhci_add_host(struct sdhci_host *host)
3896     host->flags &= ~SDHCI_USE_ADMA;
3897     host->adma_table = NULL;
3898     host->align_buffer = NULL;
3899     - } else if (host->adma_addr & host->align_mask) {
3900     + } else if (host->adma_addr & (SDHCI_ADMA2_DESC_ALIGN - 1)) {
3901     pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
3902     mmc_hostname(mmc));
3903     host->flags &= ~SDHCI_USE_ADMA;
3904     diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
3905     index 9c331ac5ad6b..0115e9907bf8 100644
3906     --- a/drivers/mmc/host/sdhci.h
3907     +++ b/drivers/mmc/host/sdhci.h
3908     @@ -272,22 +272,27 @@
3909     /* ADMA2 32-bit DMA descriptor size */
3910     #define SDHCI_ADMA2_32_DESC_SZ 8
3911    
3912     -/* ADMA2 32-bit DMA alignment */
3913     -#define SDHCI_ADMA2_32_ALIGN 4
3914     -
3915     /* ADMA2 32-bit descriptor */
3916     struct sdhci_adma2_32_desc {
3917     __le16 cmd;
3918     __le16 len;
3919     __le32 addr;
3920     -} __packed __aligned(SDHCI_ADMA2_32_ALIGN);
3921     +} __packed __aligned(4);
3922     +
3923     +/* ADMA2 data alignment */
3924     +#define SDHCI_ADMA2_ALIGN 4
3925     +#define SDHCI_ADMA2_MASK (SDHCI_ADMA2_ALIGN - 1)
3926     +
3927     +/*
3928     + * ADMA2 descriptor alignment. Some controllers (e.g. Intel) require 8-byte
3929     + * alignment for the descriptor table even in 32-bit DMA mode. Memory
3930     + * allocation is at least 8-byte aligned anyway, so just stipulate 8 always.
3931     + */
3932     +#define SDHCI_ADMA2_DESC_ALIGN 8
3933    
3934     /* ADMA2 64-bit DMA descriptor size */
3935     #define SDHCI_ADMA2_64_DESC_SZ 12
3936    
3937     -/* ADMA2 64-bit DMA alignment */
3938     -#define SDHCI_ADMA2_64_ALIGN 8
3939     -
3940     /*
3941     * ADMA2 64-bit descriptor. Note 12-byte descriptor can't always be 8-byte
3942     * aligned.
3943     @@ -483,8 +488,6 @@ struct sdhci_host {
3944     dma_addr_t align_addr; /* Mapped bounce buffer */
3945    
3946     unsigned int desc_sz; /* ADMA descriptor size */
3947     - unsigned int align_sz; /* ADMA alignment */
3948     - unsigned int align_mask; /* ADMA alignment mask */
3949    
3950     struct tasklet_struct finish_tasklet; /* Tasklet structures */
3951    
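
The sdhci hunks above drop the per-host align_sz/align_mask fields in favour of the fixed SDHCI_ADMA2_ALIGN/SDHCI_ADMA2_MASK constants. The bounce-offset arithmetic used in sdhci_adma_table_pre() is worth seeing in isolation; this standalone snippet reproduces just that computation:

#include <stdio.h>
#include <stdint.h>

#define SDHCI_ADMA2_ALIGN 4
#define SDHCI_ADMA2_MASK  (SDHCI_ADMA2_ALIGN - 1)

/* Bytes that must be bounced to reach the next 4-byte boundary. */
static unsigned int bounce_offset(uint64_t addr)
{
	return (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
		SDHCI_ADMA2_MASK;
}

int main(void)
{
	/* 0x1000 is aligned -> 0 bytes; 0x1001 needs 3; 0x1003 needs 1 */
	for (uint64_t a = 0x1000; a <= 0x1003; a++)
		printf("addr %#llx -> bounce %u bytes\n",
		       (unsigned long long)a, bounce_offset(a));
	return 0;
}
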
3952     diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
3953     index b89504405b72..7445da218bd9 100644
3954     --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
3955     +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
3956     @@ -2526,7 +2526,7 @@ static void handle_timestamp(struct octeon_device *oct,
3957    
3958     octeon_swap_8B_data(&resp->timestamp, 1);
3959    
3960     - if (unlikely((skb_shinfo(skb)->tx_flags | SKBTX_IN_PROGRESS) != 0)) {
3961     + if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
3962     struct skb_shared_hwtstamps ts;
3963     u64 ns = resp->timestamp;
3964    
3965     diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
3966     index 39ca6744a4e6..22471d283a95 100644
3967     --- a/drivers/net/ethernet/cavium/thunder/nic.h
3968     +++ b/drivers/net/ethernet/cavium/thunder/nic.h
3969     @@ -116,6 +116,15 @@
3970     #define NIC_PF_INTR_ID_MBOX0 8
3971     #define NIC_PF_INTR_ID_MBOX1 9
3972    
3973     +/* Minimum FIFO level before all packets for the CQ are dropped
3974     + *
3975     + * This value ensures that once a packet has been "accepted"
3976     + * for reception it will not get dropped due to the non-availability
3977     + * of a CQ descriptor. A HW erratum mandates this value to be
3978     + * at least 0x100.
3979     + */
3980     +#define NICPF_CQM_MIN_DROP_LEVEL 0x100
3981     +
3982     /* Global timer for CQ timer thresh interrupts
3983     * Calculated for SCLK of 700Mhz
3984     * value written should be a 1/16th of what is expected
3985     diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
3986     index 5f24d11cb16a..16baaafed26c 100644
3987     --- a/drivers/net/ethernet/cavium/thunder/nic_main.c
3988     +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
3989     @@ -309,6 +309,7 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic)
3990     static void nic_init_hw(struct nicpf *nic)
3991     {
3992     int i;
3993     + u64 cqm_cfg;
3994    
3995     /* Enable NIC HW block */
3996     nic_reg_write(nic, NIC_PF_CFG, 0x3);
3997     @@ -345,6 +346,11 @@ static void nic_init_hw(struct nicpf *nic)
3998     /* Enable VLAN ethertype matching and stripping */
3999     nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7,
4000     (2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETH_P_8021Q);
4001     +
4002     + /* Check if the HW default value is already higher (it could be on future chips) */
4003     + cqm_cfg = nic_reg_read(nic, NIC_PF_CQM_CFG);
4004     + if (cqm_cfg < NICPF_CQM_MIN_DROP_LEVEL)
4005     + nic_reg_write(nic, NIC_PF_CQM_CFG, NICPF_CQM_MIN_DROP_LEVEL);
4006     }
4007    
4008     /* Channel parse index configuration */
4009     diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h
4010     index dd536be20193..afb10e326b4f 100644
4011     --- a/drivers/net/ethernet/cavium/thunder/nic_reg.h
4012     +++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h
4013     @@ -21,7 +21,7 @@
4014     #define NIC_PF_TCP_TIMER (0x0060)
4015     #define NIC_PF_BP_CFG (0x0080)
4016     #define NIC_PF_RRM_CFG (0x0088)
4017     -#define NIC_PF_CQM_CF (0x00A0)
4018     +#define NIC_PF_CQM_CFG (0x00A0)
4019     #define NIC_PF_CNM_CF (0x00A8)
4020     #define NIC_PF_CNM_STATUS (0x00B0)
4021     #define NIC_PF_CQ_AVG_CFG (0x00C0)
4022     diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
4023     index dde8dc720cd3..b7093b9cd1e8 100644
4024     --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
4025     +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
4026     @@ -566,8 +566,7 @@ static inline void nicvf_set_rxhash(struct net_device *netdev,
4027    
4028     static void nicvf_rcv_pkt_handler(struct net_device *netdev,
4029     struct napi_struct *napi,
4030     - struct cmp_queue *cq,
4031     - struct cqe_rx_t *cqe_rx, int cqe_type)
4032     + struct cqe_rx_t *cqe_rx)
4033     {
4034     struct sk_buff *skb;
4035     struct nicvf *nic = netdev_priv(netdev);
4036     @@ -583,7 +582,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
4037     }
4038    
4039     /* Check for errors */
4040     - err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
4041     + err = nicvf_check_cqe_rx_errs(nic, cqe_rx);
4042     if (err && !cqe_rx->rb_cnt)
4043     return;
4044    
4045     @@ -674,8 +673,7 @@ loop:
4046     cq_idx, cq_desc->cqe_type);
4047     switch (cq_desc->cqe_type) {
4048     case CQE_TYPE_RX:
4049     - nicvf_rcv_pkt_handler(netdev, napi, cq,
4050     - cq_desc, CQE_TYPE_RX);
4051     + nicvf_rcv_pkt_handler(netdev, napi, cq_desc);
4052     work_done++;
4053     break;
4054     case CQE_TYPE_SEND:
4055     @@ -1117,7 +1115,6 @@ int nicvf_stop(struct net_device *netdev)
4056    
4057     /* Clear multiqset info */
4058     nic->pnicvf = nic;
4059     - nic->sqs_count = 0;
4060    
4061     return 0;
4062     }
4063     @@ -1346,6 +1343,9 @@ void nicvf_update_stats(struct nicvf *nic)
4064     drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
4065     stats->tx_bcast_frames_ok +
4066     stats->tx_mcast_frames_ok;
4067     + drv_stats->rx_frames_ok = stats->rx_ucast_frames +
4068     + stats->rx_bcast_frames +
4069     + stats->rx_mcast_frames;
4070     drv_stats->rx_drops = stats->rx_drop_red +
4071     stats->rx_drop_overrun;
4072     drv_stats->tx_drops = stats->tx_drops;
4073     diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
4074     index d1c217eaf417..912ee28ab58b 100644
4075     --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
4076     +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
4077     @@ -1414,16 +1414,12 @@ void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
4078     }
4079    
4080     /* Check for errors in the receive cmp.queue entry */
4081     -int nicvf_check_cqe_rx_errs(struct nicvf *nic,
4082     - struct cmp_queue *cq, struct cqe_rx_t *cqe_rx)
4083     +int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
4084     {
4085     struct nicvf_hw_stats *stats = &nic->hw_stats;
4086     - struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
4087    
4088     - if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
4089     - drv_stats->rx_frames_ok++;
4090     + if (!cqe_rx->err_level && !cqe_rx->err_opcode)
4091     return 0;
4092     - }
4093    
4094     if (netif_msg_rx_err(nic))
4095     netdev_err(nic->netdev,
4096     diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
4097     index 033e8306e91c..5652c612e20b 100644
4098     --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
4099     +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
4100     @@ -344,8 +344,7 @@ u64 nicvf_queue_reg_read(struct nicvf *nic,
4101     /* Stats */
4102     void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
4103     void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
4104     -int nicvf_check_cqe_rx_errs(struct nicvf *nic,
4105     - struct cmp_queue *cq, struct cqe_rx_t *cqe_rx);
4106     +int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
4107     int nicvf_check_cqe_tx_errs(struct nicvf *nic,
4108     struct cmp_queue *cq, struct cqe_send_t *cqe_tx);
4109     #endif /* NICVF_QUEUES_H */
4110     diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
4111     index 9df26c2263bc..42718cc7d4e8 100644
4112     --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
4113     +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
4114     @@ -549,7 +549,9 @@ static int bgx_xaui_check_link(struct lmac *lmac)
4115     }
4116    
4117     /* Clear rcvflt bit (latching high) and read it back */
4118     - bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
4119     + if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT)
4120     + bgx_reg_modify(bgx, lmacid,
4121     + BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
4122     if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
4123     dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
4124     if (bgx->use_training) {
4125     @@ -568,13 +570,6 @@ static int bgx_xaui_check_link(struct lmac *lmac)
4126     return -1;
4127     }
4128    
4129     - /* Wait for MAC RX to be ready */
4130     - if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
4131     - SMU_RX_CTL_STATUS, true)) {
4132     - dev_err(&bgx->pdev->dev, "SMU RX link not okay\n");
4133     - return -1;
4134     - }
4135     -
4136     /* Wait for BGX RX to be idle */
4137     if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
4138     dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
4139     @@ -587,29 +582,30 @@ static int bgx_xaui_check_link(struct lmac *lmac)
4140     return -1;
4141     }
4142    
4143     - if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
4144     - dev_err(&bgx->pdev->dev, "Receive fault\n");
4145     - return -1;
4146     - }
4147     -
4148     - /* Receive link is latching low. Force it high and verify it */
4149     - bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
4150     - if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
4151     - SPU_STATUS1_RCV_LNK, false)) {
4152     - dev_err(&bgx->pdev->dev, "SPU receive link down\n");
4153     - return -1;
4154     - }
4155     -
4156     + /* Clear receive packet disable */
4157     cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
4158     cfg &= ~SPU_MISC_CTL_RX_DIS;
4159     bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
4160     - return 0;
4161     +
4162     + /* Check for MAC RX faults */
4163     + cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL);
4164     + /* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */
4165     + cfg &= SMU_RX_CTL_STATUS;
4166     + if (!cfg)
4167     + return 0;
4168     +
4169     + /* Rx local/remote fault seen.
4170     + * Do an LMAC reinit to see if the condition recovers
4171     + */
4172     + bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type);
4173     +
4174     + return -1;
4175     }
4176    
4177     static void bgx_poll_for_link(struct work_struct *work)
4178     {
4179     struct lmac *lmac;
4180     - u64 link;
4181     + u64 spu_link, smu_link;
4182    
4183     lmac = container_of(work, struct lmac, dwork.work);
4184    
4185     @@ -619,8 +615,11 @@ static void bgx_poll_for_link(struct work_struct *work)
4186     bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
4187     SPU_STATUS1_RCV_LNK, false);
4188    
4189     - link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
4190     - if (link & SPU_STATUS1_RCV_LNK) {
4191     + spu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
4192     + smu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL);
4193     +
4194     + if ((spu_link & SPU_STATUS1_RCV_LNK) &&
4195     + !(smu_link & SMU_RX_CTL_STATUS)) {
4196     lmac->link_up = 1;
4197     if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
4198     lmac->last_speed = 40000;
4199     @@ -634,9 +633,15 @@ static void bgx_poll_for_link(struct work_struct *work)
4200     }
4201    
4202     if (lmac->last_link != lmac->link_up) {
4203     + if (lmac->link_up) {
4204     + if (bgx_xaui_check_link(lmac)) {
4205     + /* Errors, clear link_up state */
4206     + lmac->link_up = 0;
4207     + lmac->last_speed = SPEED_UNKNOWN;
4208     + lmac->last_duplex = DUPLEX_UNKNOWN;
4209     + }
4210     + }
4211     lmac->last_link = lmac->link_up;
4212     - if (lmac->link_up)
4213     - bgx_xaui_check_link(lmac);
4214     }
4215    
4216     queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
4217     @@ -708,7 +713,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
4218     static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
4219     {
4220     struct lmac *lmac;
4221     - u64 cmrx_cfg;
4222     + u64 cfg;
4223    
4224     lmac = &bgx->lmac[lmacid];
4225     if (lmac->check_link) {
4226     @@ -717,9 +722,33 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
4227     destroy_workqueue(lmac->check_link);
4228     }
4229    
4230     - cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
4231     - cmrx_cfg &= ~(1 << 15);
4232     - bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
4233     + /* Disable packet reception */
4234     + cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
4235     + cfg &= ~CMR_PKT_RX_EN;
4236     + bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
4237     +
4238     + /* Give the Rx/Tx FIFOs a chance to drain */
4239     + bgx_poll_reg(bgx, lmacid, BGX_CMRX_RX_FIFO_LEN, (u64)0x1FFF, true);
4240     + bgx_poll_reg(bgx, lmacid, BGX_CMRX_TX_FIFO_LEN, (u64)0x3FFF, true);
4241     +
4242     + /* Disable packet transmission */
4243     + cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
4244     + cfg &= ~CMR_PKT_TX_EN;
4245     + bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
4246     +
4247     + /* Disable serdes lanes */
4248     + if (!lmac->is_sgmii)
4249     + bgx_reg_modify(bgx, lmacid,
4250     + BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
4251     + else
4252     + bgx_reg_modify(bgx, lmacid,
4253     + BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_PWR_DN);
4254     +
4255     + /* Disable LMAC */
4256     + cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
4257     + cfg &= ~CMR_EN;
4258     + bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
4259     +
4260     bgx_flush_dmac_addrs(bgx, lmacid);
4261    
4262     if ((bgx->lmac_type != BGX_MODE_XFI) &&
4263     diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
4264     index 149e179363a1..42010d2e5ddf 100644
4265     --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
4266     +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
4267     @@ -41,6 +41,7 @@
4268     #define BGX_CMRX_RX_STAT10 0xC0
4269     #define BGX_CMRX_RX_BP_DROP 0xC8
4270     #define BGX_CMRX_RX_DMAC_CTL 0x0E8
4271     +#define BGX_CMRX_RX_FIFO_LEN 0x108
4272     #define BGX_CMR_RX_DMACX_CAM 0x200
4273     #define RX_DMACX_CAM_EN BIT_ULL(48)
4274     #define RX_DMACX_CAM_LMACID(x) (x << 49)
4275     @@ -50,6 +51,7 @@
4276     #define BGX_CMR_CHAN_MSK_AND 0x450
4277     #define BGX_CMR_BIST_STATUS 0x460
4278     #define BGX_CMR_RX_LMACS 0x468
4279     +#define BGX_CMRX_TX_FIFO_LEN 0x518
4280     #define BGX_CMRX_TX_STAT0 0x600
4281     #define BGX_CMRX_TX_STAT1 0x608
4282     #define BGX_CMRX_TX_STAT2 0x610
4283     diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
4284     index 69707108d23c..98fe5a2cd6e3 100644
4285     --- a/drivers/net/ethernet/intel/e1000/e1000.h
4286     +++ b/drivers/net/ethernet/intel/e1000/e1000.h
4287     @@ -213,8 +213,11 @@ struct e1000_rx_ring {
4288     };
4289    
4290     #define E1000_DESC_UNUSED(R) \
4291     - ((((R)->next_to_clean > (R)->next_to_use) \
4292     - ? 0 : (R)->count) + (R)->next_to_clean - (R)->next_to_use - 1)
4293     +({ \
4294     + unsigned int clean = smp_load_acquire(&(R)->next_to_clean); \
4295     + unsigned int use = READ_ONCE((R)->next_to_use); \
4296     + (clean > use ? 0 : (R)->count) + clean - use - 1; \
4297     +})
4298    
4299     #define E1000_RX_DESC_EXT(R, i) \
4300     (&(((union e1000_rx_desc_extended *)((R).desc))[i]))
4301     diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
4302     index fd7be860c201..068023595d84 100644
4303     --- a/drivers/net/ethernet/intel/e1000/e1000_main.c
4304     +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
4305     @@ -3876,7 +3876,10 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
4306     eop_desc = E1000_TX_DESC(*tx_ring, eop);
4307     }
4308    
4309     - tx_ring->next_to_clean = i;
4310     + /* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame,
4311     + * which will reuse the cleaned buffers.
4312     + */
4313     + smp_store_release(&tx_ring->next_to_clean, i);
4314    
4315     netdev_completed_queue(netdev, pkts_compl, bytes_compl);
4316    
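
The E1000_DESC_UNUSED rewrite pairs with the smp_store_release() added to e1000_clean_tx_irq(): the transmit path may only observe the advanced next_to_clean after the cleaned buffers are actually free, closing a reuse race on weakly ordered CPUs. A userspace C11 analogue of the pairing, as a sketch only; explicit atomic orderings stand in for the kernel's smp_load_acquire()/smp_store_release():

#include <stdatomic.h>
#include <stdio.h>

#define RING_SIZE 256

static _Atomic unsigned int next_to_clean;
static unsigned int next_to_use;

/* Cleanup side: publish the new clean index only after the buffers
 * are free, like smp_store_release(&tx_ring->next_to_clean, i). */
static void tx_clean_done(unsigned int i)
{
	atomic_store_explicit(&next_to_clean, i, memory_order_release);
}

/* Transmit side: the acquire load pairs with the release above, like
 * the reworked E1000_DESC_UNUSED macro. */
static unsigned int desc_unused(void)
{
	unsigned int clean = atomic_load_explicit(&next_to_clean,
						  memory_order_acquire);
	unsigned int use = next_to_use;

	return (clean > use ? 0 : RING_SIZE) + clean - use - 1;
}

int main(void)
{
	tx_clean_done(10);
	next_to_use = 14;
	printf("unused descriptors: %u\n", desc_unused());
	return 0;
}
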
4317     diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
4318     index 0a854a47d31a..80ec587d510e 100644
4319     --- a/drivers/net/ethernet/intel/e1000e/netdev.c
4320     +++ b/drivers/net/ethernet/intel/e1000e/netdev.c
4321     @@ -1959,8 +1959,10 @@ static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data)
4322     * previous interrupt.
4323     */
4324     if (rx_ring->set_itr) {
4325     - writel(1000000000 / (rx_ring->itr_val * 256),
4326     - rx_ring->itr_register);
4327     + u32 itr = rx_ring->itr_val ?
4328     + 1000000000 / (rx_ring->itr_val * 256) : 0;
4329     +
4330     + writel(itr, rx_ring->itr_register);
4331     rx_ring->set_itr = 0;
4332     }
4333    
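
The e1000e hunk guards the ITR-to-register conversion against rx_ring->itr_val being zero, which would otherwise divide by zero in the MSI-X interrupt handler; zero is taken to mean "no throttling". The conversion in isolation, with the register write replaced by a return value:

#include <stdint.h>
#include <stdio.h>

static uint32_t itr_to_reg(uint32_t itr_val)
{
	/* interrupts-per-second -> register units; 0 disables throttling */
	return itr_val ? 1000000000u / (itr_val * 256u) : 0;
}

int main(void)
{
	printf("itr 0 -> %u, itr 8000 -> %u\n", itr_to_reg(0), itr_to_reg(8000));
	return 0;
}
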
4334     diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
4335     index 14440200499b..48809e5d3f79 100644
4336     --- a/drivers/net/ethernet/intel/fm10k/fm10k.h
4337     +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
4338     @@ -33,7 +33,7 @@
4339     #include "fm10k_pf.h"
4340     #include "fm10k_vf.h"
4341    
4342     -#define FM10K_MAX_JUMBO_FRAME_SIZE 15358 /* Maximum supported size 15K */
4343     +#define FM10K_MAX_JUMBO_FRAME_SIZE 15342 /* Maximum supported size 15K */
4344    
4345     #define MAX_QUEUES FM10K_MAX_QUEUES_PF
4346    
4347     diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
4348     index e76a44cf330c..09281558bfbc 100644
4349     --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
4350     +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
4351     @@ -1428,6 +1428,10 @@ static int fm10k_poll(struct napi_struct *napi, int budget)
4352     fm10k_for_each_ring(ring, q_vector->tx)
4353     clean_complete &= fm10k_clean_tx_irq(q_vector, ring);
4354    
4355     + /* Handle case where we are called by netpoll with a budget of 0 */
4356     + if (budget <= 0)
4357     + return budget;
4358     +
4359     /* attempt to distribute budget to each queue fairly, but don't
4360     * allow the budget to go below 1 because we'll exit polling
4361     */
4362     @@ -1966,8 +1970,10 @@ int fm10k_init_queueing_scheme(struct fm10k_intfc *interface)
4363    
4364     /* Allocate memory for queues */
4365     err = fm10k_alloc_q_vectors(interface);
4366     - if (err)
4367     + if (err) {
4368     + fm10k_reset_msix_capability(interface);
4369     return err;
4370     + }
4371    
4372     /* Map rings to devices, and map devices to physical queues */
4373     fm10k_assign_rings(interface);
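
netpoll invokes a driver's poll routine with a budget of 0 and expects only Tx completions to be handled; fm10k_poll() therefore cleans Tx rings first and returns before doing any Rx work when budget <= 0. (The second hunk similarly unwinds MSI-X state when queue-vector allocation fails.) The resulting control flow, sketched with stand-in handlers:

#include <stdio.h>

static void clean_tx(void) { }			/* stand-in Tx reclaim */
static int clean_rx(int budget) { return budget ? 1 : 0; } /* stand-in Rx */

/* Shape of a NAPI poll callback that tolerates netpoll's budget of 0:
 * Tx work always runs, Rx work is skipped entirely. */
static int poll(int budget)
{
	clean_tx();

	if (budget <= 0)	/* netpoll: Tx-only, never consume Rx budget */
		return budget;

	return clean_rx(budget);	/* packets processed, <= budget */
}

int main(void)
{
	printf("netpoll: %d, napi: %d\n", poll(0), poll(64));
	return 0;
}
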
4374     diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
4375     index 74be792f3f1b..7f3fb51bc37b 100644
4376     --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
4377     +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
4378     @@ -159,13 +159,30 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
4379    
4380     fm10k_mbx_free_irq(interface);
4381    
4382     + /* free interrupts */
4383     + fm10k_clear_queueing_scheme(interface);
4384     +
4385     /* delay any future reset requests */
4386     interface->last_reset = jiffies + (10 * HZ);
4387    
4388     /* reset and initialize the hardware so it is in a known state */
4389     - err = hw->mac.ops.reset_hw(hw) ? : hw->mac.ops.init_hw(hw);
4390     - if (err)
4391     + err = hw->mac.ops.reset_hw(hw);
4392     + if (err) {
4393     + dev_err(&interface->pdev->dev, "reset_hw failed: %d\n", err);
4394     + goto reinit_err;
4395     + }
4396     +
4397     + err = hw->mac.ops.init_hw(hw);
4398     + if (err) {
4399     dev_err(&interface->pdev->dev, "init_hw failed: %d\n", err);
4400     + goto reinit_err;
4401     + }
4402     +
4403     + err = fm10k_init_queueing_scheme(interface);
4404     + if (err) {
4405     + dev_err(&interface->pdev->dev, "init_queueing_scheme failed: %d\n", err);
4406     + goto reinit_err;
4407     + }
4408    
4409     /* reassociate interrupts */
4410     fm10k_mbx_request_irq(interface);
4411     @@ -193,6 +210,10 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
4412    
4413     fm10k_iov_resume(interface->pdev);
4414    
4415     +reinit_err:
4416     + if (err)
4417     + netif_device_detach(netdev);
4418     +
4419     rtnl_unlock();
4420    
4421     clear_bit(__FM10K_RESETTING, &interface->state);
4422     @@ -1101,6 +1122,10 @@ void fm10k_mbx_free_irq(struct fm10k_intfc *interface)
4423     struct fm10k_hw *hw = &interface->hw;
4424     int itr_reg;
4425    
4426     + /* no mailbox IRQ to free if MSI-X is not enabled */
4427     + if (!interface->msix_entries)
4428     + return;
4429     +
4430     /* disconnect the mailbox */
4431     hw->mbx.ops.disconnect(hw, &hw->mbx);
4432    
4433     @@ -1423,10 +1448,15 @@ int fm10k_mbx_request_irq(struct fm10k_intfc *interface)
4434     err = fm10k_mbx_request_irq_pf(interface);
4435     else
4436     err = fm10k_mbx_request_irq_vf(interface);
4437     + if (err)
4438     + return err;
4439    
4440     /* connect mailbox */
4441     - if (!err)
4442     - err = hw->mbx.ops.connect(hw, &hw->mbx);
4443     + err = hw->mbx.ops.connect(hw, &hw->mbx);
4444     +
4445     + /* if the mailbox failed to connect, then free IRQ */
4446     + if (err)
4447     + fm10k_mbx_free_irq(interface);
4448    
4449     return err;
4450     }
4451     @@ -1684,7 +1714,13 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
4452     interface->last_reset = jiffies + (10 * HZ);
4453    
4454     /* reset and initialize the hardware so it is in a known state */
4455     - err = hw->mac.ops.reset_hw(hw) ? : hw->mac.ops.init_hw(hw);
4456     + err = hw->mac.ops.reset_hw(hw);
4457     + if (err) {
4458     + dev_err(&pdev->dev, "reset_hw failed: %d\n", err);
4459     + return err;
4460     + }
4461     +
4462     + err = hw->mac.ops.init_hw(hw);
4463     if (err) {
4464     dev_err(&pdev->dev, "init_hw failed: %d\n", err);
4465     return err;
4466     @@ -2071,8 +2107,10 @@ static int fm10k_resume(struct pci_dev *pdev)
4467    
4468     /* reset hardware to known state */
4469     err = hw->mac.ops.init_hw(&interface->hw);
4470     - if (err)
4471     + if (err) {
4472     + dev_err(&pdev->dev, "init_hw failed: %d\n", err);
4473     return err;
4474     + }
4475    
4476     /* reset statistics starting values */
4477     hw->mac.ops.rebind_hw_stats(hw, &interface->stats);
4478     @@ -2185,6 +2223,9 @@ static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev,
4479     if (netif_running(netdev))
4480     fm10k_close(netdev);
4481    
4482     + /* free interrupts */
4483     + fm10k_clear_queueing_scheme(interface);
4484     +
4485     fm10k_mbx_free_irq(interface);
4486    
4487     pci_disable_device(pdev);
4488     @@ -2248,11 +2289,21 @@ static void fm10k_io_resume(struct pci_dev *pdev)
4489     int err = 0;
4490    
4491     /* reset hardware to known state */
4492     - hw->mac.ops.init_hw(&interface->hw);
4493     + err = hw->mac.ops.init_hw(&interface->hw);
4494     + if (err) {
4495     + dev_err(&pdev->dev, "init_hw failed: %d\n", err);
4496     + return;
4497     + }
4498    
4499     /* reset statistics starting values */
4500     hw->mac.ops.rebind_hw_stats(hw, &interface->stats);
4501    
4502     + err = fm10k_init_queueing_scheme(interface);
4503     + if (err) {
4504     + dev_err(&interface->pdev->dev, "init_queueing_scheme failed: %d\n", err);
4505     + return;
4506     + }
4507     +
4508     /* reassociate interrupts */
4509     fm10k_mbx_request_irq(interface);
4510    
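
Several hunks above replace the condensed GNU extension "err = reset_hw() ? : init_hw();", which cannot tell the two failures apart, with explicit checks that log the failing step and funnel into one cleanup label that detaches the netdev. A sketch of the pattern; function names and messages are modeled on the hunk, with stderr standing in for dev_err():

#include <stdio.h>

static int reset_hw(void) { return 0; }		/* stand-in hw op */
static int init_hw(void)  { return -5; }	/* stand-in hw op */

static int bring_up(void)
{
	int err;

	err = reset_hw();
	if (err) {
		fprintf(stderr, "reset_hw failed: %d\n", err);
		goto out_err;
	}

	err = init_hw();
	if (err) {
		fprintf(stderr, "init_hw failed: %d\n", err);
		goto out_err;
	}

	return 0;

out_err:
	/* single cleanup point, as fm10k_reinit() does before
	 * netif_device_detach() */
	return err;
}

int main(void) { return bring_up() ? 1 : 0; }
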
4511     diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
4512     index 318a212f0a78..35afd711d144 100644
4513     --- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
4514     +++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
4515     @@ -77,6 +77,7 @@ struct fm10k_hw;
4516     #define FM10K_PCIE_SRIOV_CTRL_VFARI 0x10
4517    
4518     #define FM10K_ERR_PARAM -2
4519     +#define FM10K_ERR_NO_RESOURCES -3
4520     #define FM10K_ERR_REQUESTS_PENDING -4
4521     #define FM10K_ERR_RESET_REQUESTED -5
4522     #define FM10K_ERR_DMA_PENDING -6
4523     diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
4524     index 36c8b0aa08fd..d512575c33f3 100644
4525     --- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
4526     +++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
4527     @@ -103,7 +103,14 @@ static s32 fm10k_init_hw_vf(struct fm10k_hw *hw)
4528     s32 err;
4529     u16 i;
4530    
4531     - /* assume we always have at least 1 queue */
4532     + /* verify we have at least 1 queue */
4533     + if (!~fm10k_read_reg(hw, FM10K_TXQCTL(0)) ||
4534     + !~fm10k_read_reg(hw, FM10K_RXQCTL(0))) {
4535     + err = FM10K_ERR_NO_RESOURCES;
4536     + goto reset_max_queues;
4537     + }
4538     +
4539     + /* determine how many queues we have */
4540     for (i = 1; tqdloc0 && (i < FM10K_MAX_QUEUES_POOL); i++) {
4541     /* verify the Descriptor cache offsets are increasing */
4542     tqdloc = ~fm10k_read_reg(hw, FM10K_TQDLOC(i));
4543     @@ -119,7 +126,7 @@ static s32 fm10k_init_hw_vf(struct fm10k_hw *hw)
4544     /* shut down queues we own and reset DMA configuration */
4545     err = fm10k_disable_queues_generic(hw, i);
4546     if (err)
4547     - return err;
4548     + goto reset_max_queues;
4549    
4550     /* record maximum queue count */
4551     hw->mac.max_queues = i;
4552     @@ -129,6 +136,11 @@ static s32 fm10k_init_hw_vf(struct fm10k_hw *hw)
4553     FM10K_TXQCTL_VID_MASK) >> FM10K_TXQCTL_VID_SHIFT;
4554    
4555     return 0;
4556     +
4557     +reset_max_queues:
4558     + hw->mac.max_queues = 0;
4559     +
4560     + return err;
4561     }
4562    
4563     /* This structure defines the attributes to be parsed below */
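
On PCI, a read from a function that has fallen off the bus or was never configured returns all ones, so !~val is a compact "nothing there" test; that is how fm10k_init_hw_vf() now verifies queue 0 exists before sizing max_queues, and why every error path zeroes max_queues again. The bit trick in isolation:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* ~val is zero only when val is all ones, i.e. the read hit dead or
 * unconfigured PCI space; the same test as !~fm10k_read_reg(...). */
static bool reg_present(uint32_t val)
{
	return (uint32_t)~val != 0;
}

int main(void)
{
	printf("dead: %d, live: %d\n",
	       reg_present(0xFFFFFFFFu), reg_present(0x1234u));
	return 0;
}
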
4564     diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
4565     index 4dd3e26129b4..7e258a83ccab 100644
4566     --- a/drivers/net/ethernet/intel/i40e/i40e.h
4567     +++ b/drivers/net/ethernet/intel/i40e/i40e.h
4568     @@ -767,6 +767,8 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
4569     int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid);
4570     struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
4571     bool is_vf, bool is_netdev);
4572     +int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
4573     + bool is_vf, bool is_netdev);
4574     bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
4575     struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
4576     bool is_vf, bool is_netdev);
4577     diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
4578     index 3f385ffe420f..488a50d59dca 100644
4579     --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
4580     +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
4581     @@ -2164,8 +2164,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
4582     case TCP_V4_FLOW:
4583     switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
4584     case 0:
4585     - hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
4586     - break;
4587     + return -EINVAL;
4588     case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
4589     hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
4590     break;
4591     @@ -2176,8 +2175,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
4592     case TCP_V6_FLOW:
4593     switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
4594     case 0:
4595     - hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
4596     - break;
4597     + return -EINVAL;
4598     case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
4599     hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
4600     break;
4601     @@ -2188,9 +2186,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
4602     case UDP_V4_FLOW:
4603     switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
4604     case 0:
4605     - hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
4606     - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
4607     - break;
4608     + return -EINVAL;
4609     case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
4610     hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
4611     BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
4612     @@ -2202,9 +2198,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
4613     case UDP_V6_FLOW:
4614     switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
4615     case 0:
4616     - hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
4617     - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
4618     - break;
4619     + return -EINVAL;
4620     case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
4621     hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
4622     BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
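
The ethtool hunks stop accepting RXNFC requests that would clear L4 hashing for TCP/UDP flows: the old case 0 (disable) now returns -EINVAL, and only enabling both L4 port halves remains valid. The i40evf_ethtool.c hunks further down make the same change by collapsing each switch into an if/else. A sketch of the resulting validation; the hena bit value passed in is illustrative:

#include <errno.h>
#include <stdio.h>

#define RXH_L4_B_0_1 (1ULL << 6)	/* src-port half, as in ethtool.h */
#define RXH_L4_B_2_3 (1ULL << 7)	/* dst-port half */

/* Accept only "hash both L4 port halves"; disabling now fails. */
static int set_l4_hash(unsigned long long data, unsigned long long *hena,
		       unsigned long long pctype_bit)
{
	if ((data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) !=
	    (RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	*hena |= pctype_bit;
	return 0;
}

int main(void)
{
	unsigned long long hena = 0;

	printf("disable: %d\n", set_l4_hash(0, &hena, 1ULL << 31));
	printf("enable:  %d\n", set_l4_hash(RXH_L4_B_0_1 | RXH_L4_B_2_3,
					    &hena, 1ULL << 31));
	return 0;
}
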
4623     diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
4624     index 4a9873ec28c7..2215bebe208e 100644
4625     --- a/drivers/net/ethernet/intel/i40e/i40e_main.c
4626     +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
4627     @@ -1317,6 +1317,42 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
4628     }
4629    
4630     /**
4631     + * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS
4632     + * @vsi: the VSI to be searched
4633     + * @macaddr: the mac address to be removed
4634     + * @is_vf: true if it is a VF
4635     + * @is_netdev: true if it is a netdev
4636     + *
4637     + * Removes a given MAC address from a VSI, regardless of VLAN
4638     + *
4639     + * Returns 0 for success, or error
4640     + **/
4641     +int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
4642     + bool is_vf, bool is_netdev)
4643     +{
4644     + struct i40e_mac_filter *f = NULL;
4645     + int changed = 0;
4646     +
4647     + WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
4648     + "Missing mac_filter_list_lock\n");
4649     + list_for_each_entry(f, &vsi->mac_filter_list, list) {
4650     + if ((ether_addr_equal(macaddr, f->macaddr)) &&
4651     + (is_vf == f->is_vf) &&
4652     + (is_netdev == f->is_netdev)) {
4653     + f->counter--;
4654     + f->changed = true;
4655     + changed = 1;
4656     + }
4657     + }
4658     + if (changed) {
4659     + vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
4660     + vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
4661     + return 0;
4662     + }
4663     + return -ENOENT;
4664     +}
4665     +
4666     +/**
4667     * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
4668     * @vsi: the PF Main VSI - inappropriate for any other VSI
4669     * @macaddr: the MAC address
4670     @@ -1547,9 +1583,11 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
4671     spin_unlock_bh(&vsi->mac_filter_list_lock);
4672     }
4673    
4674     - i40e_sync_vsi_filters(vsi, false);
4675     ether_addr_copy(netdev->dev_addr, addr->sa_data);
4676     -
4677     + /* schedule our worker thread which will take care of
4678     + * applying the new filter changes
4679     + */
4680     + i40e_service_event_schedule(vsi->back);
4681     return 0;
4682     }
4683    
4684     @@ -1935,11 +1973,13 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
4685    
4686     /* Now process 'del_list' outside the lock */
4687     if (!list_empty(&tmp_del_list)) {
4688     + int del_list_size;
4689     +
4690     filter_list_len = pf->hw.aq.asq_buf_size /
4691     sizeof(struct i40e_aqc_remove_macvlan_element_data);
4692     - del_list = kcalloc(filter_list_len,
4693     - sizeof(struct i40e_aqc_remove_macvlan_element_data),
4694     - GFP_KERNEL);
4695     + del_list_size = filter_list_len *
4696     + sizeof(struct i40e_aqc_remove_macvlan_element_data);
4697     + del_list = kzalloc(del_list_size, GFP_KERNEL);
4698     if (!del_list) {
4699     i40e_cleanup_add_list(&tmp_add_list);
4700    
4701     @@ -1971,7 +2011,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
4702     NULL);
4703     aq_err = pf->hw.aq.asq_last_status;
4704     num_del = 0;
4705     - memset(del_list, 0, sizeof(*del_list));
4706     + memset(del_list, 0, del_list_size);
4707    
4708     if (ret && aq_err != I40E_AQ_RC_ENOENT)
4709     dev_err(&pf->pdev->dev,
4710     @@ -2004,13 +2044,14 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
4711     }
4712    
4713     if (!list_empty(&tmp_add_list)) {
4714     + int add_list_size;
4715    
4716     /* do all the adds now */
4717     filter_list_len = pf->hw.aq.asq_buf_size /
4718     sizeof(struct i40e_aqc_add_macvlan_element_data),
4719     - add_list = kcalloc(filter_list_len,
4720     - sizeof(struct i40e_aqc_add_macvlan_element_data),
4721     - GFP_KERNEL);
4722     + add_list_size = filter_list_len *
4723     + sizeof(struct i40e_aqc_add_macvlan_element_data);
4724     + add_list = kzalloc(add_list_size, GFP_KERNEL);
4725     if (!add_list) {
4726     /* Purge element from temporary lists */
4727     i40e_cleanup_add_list(&tmp_add_list);
4728     @@ -2048,7 +2089,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
4729    
4730     if (ret)
4731     break;
4732     - memset(add_list, 0, sizeof(*add_list));
4733     + memset(add_list, 0, add_list_size);
4734     }
4735     /* Entries from tmp_add_list were cloned from MAC
4736     * filter list, hence clean those cloned entries
4737     @@ -2112,12 +2153,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
4738     */
4739     if (pf->cur_promisc != cur_promisc) {
4740     pf->cur_promisc = cur_promisc;
4741     - if (grab_rtnl)
4742     - i40e_do_reset_safe(pf,
4743     - BIT(__I40E_PF_RESET_REQUESTED));
4744     - else
4745     - i40e_do_reset(pf,
4746     - BIT(__I40E_PF_RESET_REQUESTED));
4747     + set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
4748     }
4749     } else {
4750     ret = i40e_aq_set_vsi_unicast_promiscuous(
4751     @@ -2377,16 +2413,13 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
4752     }
4753     }
4754    
4755     - /* Make sure to release before sync_vsi_filter because that
4756     - * function will lock/unlock as necessary
4757     - */
4758     spin_unlock_bh(&vsi->mac_filter_list_lock);
4759    
4760     - if (test_bit(__I40E_DOWN, &vsi->back->state) ||
4761     - test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
4762     - return 0;
4763     -
4764     - return i40e_sync_vsi_filters(vsi, false);
4765     + /* schedule our worker thread which will take care of
4766     + * applying the new filter changes
4767     + */
4768     + i40e_service_event_schedule(vsi->back);
4769     + return 0;
4770     }
4771    
4772     /**
4773     @@ -2459,16 +2492,13 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
4774     }
4775     }
4776    
4777     - /* Make sure to release before sync_vsi_filter because that
4778     - * function with lock/unlock as necessary
4779     - */
4780     spin_unlock_bh(&vsi->mac_filter_list_lock);
4781    
4782     - if (test_bit(__I40E_DOWN, &vsi->back->state) ||
4783     - test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
4784     - return 0;
4785     -
4786     - return i40e_sync_vsi_filters(vsi, false);
4787     + /* schedule our worker thread which will take care of
4788     + * applying the new filter changes
4789     + */
4790     + i40e_service_event_schedule(vsi->back);
4791     + return 0;
4792     }
4793    
4794     /**
4795     @@ -2711,6 +2741,11 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
4796     netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
4797     free_cpumask_var(mask);
4798     }
4799     +
4800     + /* schedule our worker thread which will take care of
4801     + * applying the new filter changes
4802     + */
4803     + i40e_service_event_schedule(vsi->back);
4804     }
4805    
4806     /**
4807     @@ -6685,6 +6720,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
4808     struct i40e_hw *hw = &pf->hw;
4809     u8 set_fc_aq_fail = 0;
4810     i40e_status ret;
4811     + u32 val;
4812     u32 v;
4813    
4814     /* Now we wait for GRST to settle out.
4815     @@ -6823,6 +6859,20 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
4816     }
4817     }
4818    
4819     + /* Reconfigure hardware for allowing smaller MSS in the case
4820     + * of TSO, so that we avoid the MDD being fired and causing
4821     + * a reset in the case of small MSS+TSO.
4822     + */
4823     +#define I40E_REG_MSS 0x000E64DC
4824     +#define I40E_REG_MSS_MIN_MASK 0x3FF0000
4825     +#define I40E_64BYTE_MSS 0x400000
4826     + val = rd32(hw, I40E_REG_MSS);
4827     + if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
4828     + val &= ~I40E_REG_MSS_MIN_MASK;
4829     + val |= I40E_64BYTE_MSS;
4830     + wr32(hw, I40E_REG_MSS, val);
4831     + }
4832     +
4833     if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
4834     (pf->hw.aq.fw_maj_ver < 4)) {
4835     msleep(75);
4836     @@ -10183,6 +10233,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4837     u16 link_status;
4838     int err;
4839     u32 len;
4840     + u32 val;
4841     u32 i;
4842     u8 set_fc_aq_fail;
4843    
4844     @@ -10493,6 +10544,17 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4845     i40e_stat_str(&pf->hw, err),
4846     i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4847    
4848     + /* Reconfigure hardware for allowing smaller MSS in the case
4849     + * of TSO, so that we avoid the MDD being fired and causing
4850     + * a reset in the case of small MSS+TSO.
4851     + */
4852     + val = rd32(hw, I40E_REG_MSS);
4853     + if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
4854     + val &= ~I40E_REG_MSS_MIN_MASK;
4855     + val |= I40E_64BYTE_MSS;
4856     + wr32(hw, I40E_REG_MSS, val);
4857     + }
4858     +
4859     if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
4860     (pf->hw.aq.fw_maj_ver < 4)) {
4861     msleep(75);
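
Beyond deferring filter syncs to the service task and capping the minimum TSO MSS, the i40e_main.c hunks fix a classic sizing bug: memset(del_list, 0, sizeof(*del_list)) cleared a single element between admin-queue commands, not the whole array, so stale entries could be resent. Switching to kzalloc with a precomputed byte size makes the correct length available for the memset. The pitfall in isolation:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct elem { int id; };

int main(void)
{
	int count = 8;
	size_t list_size = count * sizeof(struct elem);
	struct elem *list = malloc(list_size);

	if (!list)
		return 1;
	memset(list, 0xff, list_size);		/* pretend: stale data */

	/* BUG: clears only sizeof(struct elem), i.e. one element */
	memset(list, 0, sizeof(*list));

	/* FIX: clear the full buffer, as the patch does with del_list_size */
	memset(list, 0, list_size);

	printf("last elem id: %d\n", list[count - 1].id);	/* 0 */
	free(list);
	return 0;
}
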
4862     diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
4863     index 635b3ac17877..26c55bba4bf3 100644
4864     --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
4865     +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
4866     @@ -235,6 +235,9 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
4867     "Filter deleted for PCTYPE %d loc = %d\n",
4868     fd_data->pctype, fd_data->fd_id);
4869     }
4870     + if (err)
4871     + kfree(raw_packet);
4872     +
4873     return err ? -EOPNOTSUPP : 0;
4874     }
4875    
4876     @@ -312,6 +315,9 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
4877     fd_data->pctype, fd_data->fd_id);
4878     }
4879    
4880     + if (err)
4881     + kfree(raw_packet);
4882     +
4883     return err ? -EOPNOTSUPP : 0;
4884     }
4885    
4886     @@ -387,6 +393,9 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
4887     }
4888     }
4889    
4890     + if (err)
4891     + kfree(raw_packet);
4892     +
4893     return err ? -EOPNOTSUPP : 0;
4894     }
4895    
4896     @@ -526,11 +535,7 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
4897     struct i40e_tx_buffer *tx_buffer)
4898     {
4899     if (tx_buffer->skb) {
4900     - if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
4901     - kfree(tx_buffer->raw_buf);
4902     - else
4903     - dev_kfree_skb_any(tx_buffer->skb);
4904     -
4905     + dev_kfree_skb_any(tx_buffer->skb);
4906     if (dma_unmap_len(tx_buffer, len))
4907     dma_unmap_single(ring->dev,
4908     dma_unmap_addr(tx_buffer, dma),
4909     @@ -542,6 +547,10 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
4910     dma_unmap_len(tx_buffer, len),
4911     DMA_TO_DEVICE);
4912     }
4913     +
4914     + if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
4915     + kfree(tx_buffer->raw_buf);
4916     +
4917     tx_buffer->next_to_watch = NULL;
4918     tx_buffer->skb = NULL;
4919     dma_unmap_len_set(tx_buffer, len, 0);
4920     @@ -1416,31 +1425,12 @@ checksum_fail:
4921     }
4922    
4923     /**
4924     - * i40e_rx_hash - returns the hash value from the Rx descriptor
4925     - * @ring: descriptor ring
4926     - * @rx_desc: specific descriptor
4927     - **/
4928     -static inline u32 i40e_rx_hash(struct i40e_ring *ring,
4929     - union i40e_rx_desc *rx_desc)
4930     -{
4931     - const __le64 rss_mask =
4932     - cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
4933     - I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
4934     -
4935     - if ((ring->netdev->features & NETIF_F_RXHASH) &&
4936     - (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
4937     - return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
4938     - else
4939     - return 0;
4940     -}
4941     -
4942     -/**
4943     - * i40e_ptype_to_hash - get a hash type
4944     + * i40e_ptype_to_htype - get a hash type
4945     * @ptype: the ptype value from the descriptor
4946     *
4947     * Returns a hash type to be used by skb_set_hash
4948     **/
4949     -static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
4950     +static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
4951     {
4952     struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
4953    
4954     @@ -1458,6 +1448,30 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
4955     }
4956    
4957     /**
4958     + * i40e_rx_hash - set the hash value in the skb
4959     + * @ring: descriptor ring
4960     + * @rx_desc: specific descriptor
4961     + **/
4962     +static inline void i40e_rx_hash(struct i40e_ring *ring,
4963     + union i40e_rx_desc *rx_desc,
4964     + struct sk_buff *skb,
4965     + u8 rx_ptype)
4966     +{
4967     + u32 hash;
4968     + const __le64 rss_mask =
4969     + cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
4970     + I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
4971     +
4972     + if (!(ring->netdev->features & NETIF_F_RXHASH))
4973     + return;
4974     +
4975     + if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
4976     + hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
4977     + skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
4978     + }
4979     +}
4980     +
4981     +/**
4982     * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
4983     * @rx_ring: rx ring to clean
4984     * @budget: how many cleans we're allowed
4985     @@ -1606,8 +1620,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
4986     continue;
4987     }
4988    
4989     - skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
4990     - i40e_ptype_to_hash(rx_ptype));
4991     + i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
4992     +
4993     if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
4994     i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
4995     I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
4996     @@ -1736,8 +1750,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
4997     continue;
4998     }
4999    
5000     - skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
5001     - i40e_ptype_to_hash(rx_ptype));
5002     + i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
5003     if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
5004     i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
5005     I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
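
The i40e_txrx.c hunks plug a leak in the flow-director setup paths: raw_packet was allocated, handed to the hardware-programming helper, and simply leaked whenever that helper failed. They also move the kfree of raw_buf to after the DMA unmap in the Tx cleanup path. The error-path ownership rule, sketched with libc calls standing in for the kernel allocators:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the admin-queue call that programs the filter. */
static int program_filter(const unsigned char *pkt) { (void)pkt; return -1; }

/* Build a raw packet and hand it to the hardware. On success the ring
 * owns the buffer; on failure it must be freed here, which is exactly
 * the "if (err) kfree(raw_packet);" the hunks add. */
static int add_fdir_filter(void)
{
	unsigned char *raw_packet = calloc(1, 512);
	int err;

	if (!raw_packet)
		return -ENOMEM;

	err = program_filter(raw_packet);
	if (err)
		free(raw_packet);	/* plug the leak on the error path */

	return err ? -EOPNOTSUPP : 0;
}

int main(void)
{
	printf("%d\n", add_fdir_filter());
	return 0;
}
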
5006     diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
5007     index 44462b40f2d7..e116d9a99b8e 100644
5008     --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
5009     +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
5010     @@ -549,12 +549,15 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
5011     i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
5012    
5013     spin_lock_bh(&vsi->mac_filter_list_lock);
5014     - f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
5015     - vf->port_vlan_id ? vf->port_vlan_id : -1,
5016     - true, false);
5017     - if (!f)
5018     - dev_info(&pf->pdev->dev,
5019     - "Could not allocate VF MAC addr\n");
5020     + if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
5021     + f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
5022     + vf->port_vlan_id ? vf->port_vlan_id : -1,
5023     + true, false);
5024     + if (!f)
5025     + dev_info(&pf->pdev->dev,
5026     + "Could not add MAC filter %pM for VF %d\n",
5027     + vf->default_lan_addr.addr, vf->vf_id);
5028     + }
5029     f = i40e_add_filter(vsi, brdcast,
5030     vf->port_vlan_id ? vf->port_vlan_id : -1,
5031     true, false);
5032     @@ -1680,8 +1683,12 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
5033     spin_lock_bh(&vsi->mac_filter_list_lock);
5034     /* delete addresses from the list */
5035     for (i = 0; i < al->num_elements; i++)
5036     - i40e_del_filter(vsi, al->list[i].addr,
5037     - I40E_VLAN_ANY, true, false);
5038     + if (i40e_del_mac_all_vlan(vsi, al->list[i].addr, true, false)) {
5039     + ret = I40E_ERR_INVALID_MAC_ADDR;
5040     + spin_unlock_bh(&vsi->mac_filter_list_lock);
5041     + goto error_param;
5042     + }
5043     +
5044     spin_unlock_bh(&vsi->mac_filter_list_lock);
5045    
5046     /* program the updated filter list */
5047     diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
5048     index 47e9a90d6b10..39db70a597ed 100644
5049     --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
5050     +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
5051     @@ -51,11 +51,7 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
5052     struct i40e_tx_buffer *tx_buffer)
5053     {
5054     if (tx_buffer->skb) {
5055     - if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
5056     - kfree(tx_buffer->raw_buf);
5057     - else
5058     - dev_kfree_skb_any(tx_buffer->skb);
5059     -
5060     + dev_kfree_skb_any(tx_buffer->skb);
5061     if (dma_unmap_len(tx_buffer, len))
5062     dma_unmap_single(ring->dev,
5063     dma_unmap_addr(tx_buffer, dma),
5064     @@ -67,6 +63,10 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
5065     dma_unmap_len(tx_buffer, len),
5066     DMA_TO_DEVICE);
5067     }
5068     +
5069     + if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
5070     + kfree(tx_buffer->raw_buf);
5071     +
5072     tx_buffer->next_to_watch = NULL;
5073     tx_buffer->skb = NULL;
5074     dma_unmap_len_set(tx_buffer, len, 0);
5075     @@ -245,16 +245,6 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
5076     tx_ring->q_vector->tx.total_bytes += total_bytes;
5077     tx_ring->q_vector->tx.total_packets += total_packets;
5078    
5079     - /* check to see if there are any non-cache aligned descriptors
5080     - * waiting to be written back, and kick the hardware to force
5081     - * them to be written back in case of napi polling
5082     - */
5083     - if (budget &&
5084     - !((i & WB_STRIDE) == WB_STRIDE) &&
5085     - !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
5086     - (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
5087     - tx_ring->arm_wb = true;
5088     -
5089     netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
5090     tx_ring->queue_index),
5091     total_packets, total_bytes);
5092     @@ -889,31 +879,12 @@ checksum_fail:
5093     }
5094    
5095     /**
5096     - * i40e_rx_hash - returns the hash value from the Rx descriptor
5097     - * @ring: descriptor ring
5098     - * @rx_desc: specific descriptor
5099     - **/
5100     -static inline u32 i40e_rx_hash(struct i40e_ring *ring,
5101     - union i40e_rx_desc *rx_desc)
5102     -{
5103     - const __le64 rss_mask =
5104     - cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
5105     - I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
5106     -
5107     - if ((ring->netdev->features & NETIF_F_RXHASH) &&
5108     - (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
5109     - return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
5110     - else
5111     - return 0;
5112     -}
5113     -
5114     -/**
5115     - * i40e_ptype_to_hash - get a hash type
5116     + * i40e_ptype_to_htype - get a hash type
5117     * @ptype: the ptype value from the descriptor
5118     *
5119     * Returns a hash type to be used by skb_set_hash
5120     **/
5121     -static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
5122     +static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
5123     {
5124     struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
5125    
5126     @@ -931,6 +902,30 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
5127     }
5128    
5129     /**
5130     + * i40e_rx_hash - set the hash value in the skb
5131     + * @ring: descriptor ring
5132     + * @rx_desc: specific descriptor
5133     + **/
5134     +static inline void i40e_rx_hash(struct i40e_ring *ring,
5135     + union i40e_rx_desc *rx_desc,
5136     + struct sk_buff *skb,
5137     + u8 rx_ptype)
5138     +{
5139     + u32 hash;
5140     + const __le64 rss_mask =
5141     + cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
5142     + I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
5143     +
5144     + if (!(ring->netdev->features & NETIF_F_RXHASH))
5145     + return;
5146     +
5147     + if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
5148     + hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
5149     + skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
5150     + }
5151     +}
5152     +
5153     +/**
5154     * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
5155     * @rx_ring: rx ring to clean
5156     * @budget: how many cleans we're allowed
5157     @@ -1071,8 +1066,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
5158     continue;
5159     }
5160    
5161     - skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
5162     - i40e_ptype_to_hash(rx_ptype));
5163     + i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
5164     +
5165     /* probably a little skewed due to removing CRC */
5166     total_rx_bytes += skb->len;
5167     total_rx_packets++;
5168     @@ -1189,8 +1184,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
5169     continue;
5170     }
5171    
5172     - skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
5173     - i40e_ptype_to_hash(rx_ptype));
5174     + i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
5175     /* probably a little skewed due to removing CRC */
5176     total_rx_bytes += skb->len;
5177     total_rx_packets++;
5178     @@ -1770,6 +1764,9 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
5179     u32 td_tag = 0;
5180     dma_addr_t dma;
5181     u16 gso_segs;
5182     + u16 desc_count = 0;
5183     + bool tail_bump = true;
5184     + bool do_rs = false;
5185    
5186     if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
5187     td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
5188     @@ -1810,6 +1807,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
5189    
5190     tx_desc++;
5191     i++;
5192     + desc_count++;
5193     +
5194     if (i == tx_ring->count) {
5195     tx_desc = I40E_TX_DESC(tx_ring, 0);
5196     i = 0;
5197     @@ -1829,6 +1828,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
5198    
5199     tx_desc++;
5200     i++;
5201     + desc_count++;
5202     +
5203     if (i == tx_ring->count) {
5204     tx_desc = I40E_TX_DESC(tx_ring, 0);
5205     i = 0;
5206     @@ -1843,35 +1844,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
5207     tx_bi = &tx_ring->tx_bi[i];
5208     }
5209    
5210     - /* Place RS bit on last descriptor of any packet that spans across the
5211     - * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
5212     - */
5213     #define WB_STRIDE 0x3
5214     - if (((i & WB_STRIDE) != WB_STRIDE) &&
5215     - (first <= &tx_ring->tx_bi[i]) &&
5216     - (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
5217     - tx_desc->cmd_type_offset_bsz =
5218     - build_ctob(td_cmd, td_offset, size, td_tag) |
5219     - cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP <<
5220     - I40E_TXD_QW1_CMD_SHIFT);
5221     - } else {
5222     - tx_desc->cmd_type_offset_bsz =
5223     - build_ctob(td_cmd, td_offset, size, td_tag) |
5224     - cpu_to_le64((u64)I40E_TXD_CMD <<
5225     - I40E_TXD_QW1_CMD_SHIFT);
5226     - }
5227     -
5228     - netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
5229     - tx_ring->queue_index),
5230     - first->bytecount);
5231     -
5232     - /* Force memory writes to complete before letting h/w
5233     - * know there are new descriptors to fetch. (Only
5234     - * applicable for weak-ordered memory model archs,
5235     - * such as IA-64).
5236     - */
5237     - wmb();
5238     -
5239     /* set next_to_watch value indicating a packet is present */
5240     first->next_to_watch = tx_desc;
5241    
5242     @@ -1881,15 +1854,78 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
5243    
5244     tx_ring->next_to_use = i;
5245    
5246     + netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
5247     + tx_ring->queue_index),
5248     + first->bytecount);
5249     i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED);
5250     +
5251     + /* Algorithm to optimize tail and RS bit setting:
5252     + * if xmit_more is supported
5253     + * if xmit_more is true
5254     + * do not update tail and do not mark RS bit.
5255     + * if xmit_more is false and last xmit_more was false
5256     + * if every packet spanned less than 4 desc
5257     + * then set RS bit on 4th packet and update tail
5258     + * on every packet
5259     + * else
5260     + * update tail and set RS bit on every packet.
5261     + * if xmit_more is false and last_xmit_more was true
5262     + * update tail and set RS bit.
5263     + * else (kernel < 3.18)
5264     + * if every packet spanned less than 4 desc
5265     + * then set RS bit on 4th packet and update tail
5266     + * on every packet
5267     + * else
5268     + * set RS bit on EOP for every packet and update tail
5269     + *
5270     + * Optimization: wmb to be issued only in case of tail update.
5271     + * Also optimize the Descriptor WB path for RS bit with the same
5272     + * algorithm.
5273     + *
5274     + * Note: If there are less than 4 packets
5275     + * pending and interrupts were disabled the service task will
5276     + * trigger a force WB.
5277     + */
5278     + if (skb->xmit_more &&
5279     + !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
5280     + tx_ring->queue_index))) {
5281     + tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
5282     + tail_bump = false;
5283     + } else if (!skb->xmit_more &&
5284     + !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
5285     + tx_ring->queue_index)) &&
5286     + (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
5287     + (tx_ring->packet_stride < WB_STRIDE) &&
5288     + (desc_count < WB_STRIDE)) {
5289     + tx_ring->packet_stride++;
5290     + } else {
5291     + tx_ring->packet_stride = 0;
5292     + tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
5293     + do_rs = true;
5294     + }
5295     + if (do_rs)
5296     + tx_ring->packet_stride = 0;
5297     +
5298     + tx_desc->cmd_type_offset_bsz =
5299     + build_ctob(td_cmd, td_offset, size, td_tag) |
5300     + cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
5301     + I40E_TX_DESC_CMD_EOP) <<
5302     + I40E_TXD_QW1_CMD_SHIFT);
5303     +
5304     /* notify HW of packet */
5305     - if (!skb->xmit_more ||
5306     - netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
5307     - tx_ring->queue_index)))
5308     - writel(i, tx_ring->tail);
5309     - else
5310     + if (!tail_bump)
5311     prefetchw(tx_desc + 1);
5312    
5313     + if (tail_bump) {
5314     + /* Force memory writes to complete before letting h/w
5315     + * know there are new descriptors to fetch. (Only
5316     + * applicable for weak-ordered memory model archs,
5317     + * such as IA-64).
5318     + */
5319     + wmb();
5320     + writel(i, tx_ring->tail);
5321     + }
5322     +
5323     return;
5324    
5325     dma_error:
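
The rewritten i40evf_tx_map() defers both the doorbell and the RS (report status) bit: with skb->xmit_more the tail is not bumped at all, and for small back-to-back frames RS is set only once every WB_STRIDE packets, so the expensive wmb()+writel() pair runs once per batch while the service task backstops stragglers with a forced write-back. A condensed model of the decision; the queue-stopped checks are omitted:

#include <stdbool.h>
#include <stdio.h>

#define WB_STRIDE 0x3

struct ring { unsigned packet_stride; bool last_xmit_more; };

/* Decide whether this packet bumps the tail doorbell and whether it
 * carries the RS (descriptor write-back) bit. */
static void tx_decide(struct ring *r, bool xmit_more, unsigned desc_count,
		      bool *tail_bump, bool *do_rs)
{
	*tail_bump = true;
	*do_rs = false;

	if (xmit_more) {
		r->last_xmit_more = true;
		*tail_bump = false;		/* more frames coming */
	} else if (!r->last_xmit_more &&
		   r->packet_stride < WB_STRIDE &&
		   desc_count < WB_STRIDE) {
		r->packet_stride++;		/* bump tail, defer RS */
	} else {
		r->last_xmit_more = false;
		r->packet_stride = 0;
		*do_rs = true;			/* bump tail and set RS */
	}
}

int main(void)
{
	struct ring r = { 0, false };
	bool tb, rs;

	for (int i = 0; i < 5; i++) {
		tx_decide(&r, false, 2, &tb, &rs);
		printf("pkt %d: tail=%d rs=%d\n", i, tb, rs);
	}
	return 0;
}

Run for five small frames, this marks RS on the fourth, matching the "set RS bit on 4th packet" case in the comment block above.
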
5326     diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
5327     index ebc1bf77f036..998976844e4e 100644
5328     --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
5329     +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
5330     @@ -267,6 +267,8 @@ struct i40e_ring {
5331    
5332     bool ring_active; /* is ring online or not */
5333     bool arm_wb; /* do something to arm write back */
5334     + u8 packet_stride;
5335     +#define I40E_TXR_FLAGS_LAST_XMIT_MORE_SET BIT(2)
5336    
5337     u16 flags;
5338     #define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
5339     diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
5340     index 4790437a50ac..2ac62efc36f7 100644
5341     --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
5342     +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
5343     @@ -477,54 +477,30 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
5344    
5345     switch (nfc->flow_type) {
5346     case TCP_V4_FLOW:
5347     - switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
5348     - case 0:
5349     - hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
5350     - break;
5351     - case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
5352     + if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3))
5353     hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
5354     - break;
5355     - default:
5356     + else
5357     return -EINVAL;
5358     - }
5359     break;
5360     case TCP_V6_FLOW:
5361     - switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
5362     - case 0:
5363     - hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
5364     - break;
5365     - case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
5366     + if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3))
5367     hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
5368     - break;
5369     - default:
5370     + else
5371     return -EINVAL;
5372     - }
5373     break;
5374     case UDP_V4_FLOW:
5375     - switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
5376     - case 0:
5377     - hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
5378     - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
5379     - break;
5380     - case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
5381     + if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
5382     hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
5383     BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
5384     - break;
5385     - default:
5386     + } else {
5387     return -EINVAL;
5388     }
5389     break;
5390     case UDP_V6_FLOW:
5391     - switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
5392     - case 0:
5393     - hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
5394     - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
5395     - break;
5396     - case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
5397     + if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
5398     hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
5399     BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
5400     - break;
5401     - default:
5402     + } else {
5403     return -EINVAL;
5404     }
5405     break;
5406     diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
5407     index 99d2cffae0cd..5f03ab3dfa19 100644
5408     --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
5409     +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
5410     @@ -1864,6 +1864,9 @@ void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter)
5411     {
5412     int i;
5413    
5414     + if (!adapter->tx_rings)
5415     + return;
5416     +
5417     for (i = 0; i < adapter->num_active_queues; i++)
5418     if (adapter->tx_rings[i]->desc)
5419     i40evf_free_tx_resources(adapter->tx_rings[i]);
5420     @@ -1932,6 +1935,9 @@ void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
5421     {
5422     int i;
5423    
5424     + if (!adapter->rx_rings)
5425     + return;
5426     +
5427     for (i = 0; i < adapter->num_active_queues; i++)
5428     if (adapter->rx_rings[i]->desc)
5429     i40evf_free_rx_resources(adapter->rx_rings[i]);
5430     diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
5431     index 32e620e1eb5c..5de3f52fd31f 100644
5432     --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
5433     +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
5434     @@ -391,6 +391,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
5435     struct i40e_virtchnl_ether_addr_list *veal;
5436     int len, i = 0, count = 0;
5437     struct i40evf_mac_filter *f;
5438     + bool more = false;
5439    
5440     if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
5441     /* bail because we already have a command pending */
5442     @@ -415,7 +416,9 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
5443     count = (I40EVF_MAX_AQ_BUF_SIZE -
5444     sizeof(struct i40e_virtchnl_ether_addr_list)) /
5445     sizeof(struct i40e_virtchnl_ether_addr);
5446     - len = I40EVF_MAX_AQ_BUF_SIZE;
5447     + len = sizeof(struct i40e_virtchnl_ether_addr_list) +
5448     + (count * sizeof(struct i40e_virtchnl_ether_addr));
5449     + more = true;
5450     }
5451    
5452     veal = kzalloc(len, GFP_ATOMIC);
5453     @@ -431,7 +434,8 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
5454     f->add = false;
5455     }
5456     }
5457     - adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
5458     + if (!more)
5459     + adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
5460     i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
5461     (u8 *)veal, len);
5462     kfree(veal);
5463     @@ -450,6 +454,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
5464     struct i40e_virtchnl_ether_addr_list *veal;
5465     struct i40evf_mac_filter *f, *ftmp;
5466     int len, i = 0, count = 0;
5467     + bool more = false;
5468    
5469     if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
5470     /* bail because we already have a command pending */
5471     @@ -474,7 +479,9 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
5472     count = (I40EVF_MAX_AQ_BUF_SIZE -
5473     sizeof(struct i40e_virtchnl_ether_addr_list)) /
5474     sizeof(struct i40e_virtchnl_ether_addr);
5475     - len = I40EVF_MAX_AQ_BUF_SIZE;
5476     + len = sizeof(struct i40e_virtchnl_ether_addr_list) +
5477     + (count * sizeof(struct i40e_virtchnl_ether_addr));
5478     + more = true;
5479     }
5480     veal = kzalloc(len, GFP_ATOMIC);
5481     if (!veal)
5482     @@ -490,7 +497,8 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
5483     kfree(f);
5484     }
5485     }
5486     - adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
5487     + if (!more)
5488     + adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
5489     i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
5490     (u8 *)veal, len);
5491     kfree(veal);
5492     @@ -509,6 +517,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
5493     struct i40e_virtchnl_vlan_filter_list *vvfl;
5494     int len, i = 0, count = 0;
5495     struct i40evf_vlan_filter *f;
5496     + bool more = false;
5497    
5498     if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
5499     /* bail because we already have a command pending */
5500     @@ -534,7 +543,9 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
5501     count = (I40EVF_MAX_AQ_BUF_SIZE -
5502     sizeof(struct i40e_virtchnl_vlan_filter_list)) /
5503     sizeof(u16);
5504     - len = I40EVF_MAX_AQ_BUF_SIZE;
5505     + len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
5506     + (count * sizeof(u16));
5507     + more = true;
5508     }
5509     vvfl = kzalloc(len, GFP_ATOMIC);
5510     if (!vvfl)
5511     @@ -549,7 +560,8 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
5512     f->add = false;
5513     }
5514     }
5515     - adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
5516     + if (!more)
5517     + adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
5518     i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
5519     kfree(vvfl);
5520     }
5521     @@ -567,6 +579,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
5522     struct i40e_virtchnl_vlan_filter_list *vvfl;
5523     struct i40evf_vlan_filter *f, *ftmp;
5524     int len, i = 0, count = 0;
5525     + bool more = false;
5526    
5527     if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
5528     /* bail because we already have a command pending */
5529     @@ -592,7 +605,9 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
5530     count = (I40EVF_MAX_AQ_BUF_SIZE -
5531     sizeof(struct i40e_virtchnl_vlan_filter_list)) /
5532     sizeof(u16);
5533     - len = I40EVF_MAX_AQ_BUF_SIZE;
5534     + len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
5535     + (count * sizeof(u16));
5536     + more = true;
5537     }
5538     vvfl = kzalloc(len, GFP_ATOMIC);
5539     if (!vvfl)
5540     @@ -608,7 +623,8 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
5541     kfree(f);
5542     }
5543     }
5544     - adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
5545     + if (!more)
5546     + adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
5547     i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
5548     kfree(vvfl);
5549     }
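
When more MAC or VLAN filters are queued than fit in one I40EVF_MAX_AQ_BUF_SIZE message, the virtchnl hunks now size each message to exactly the elements it carries and, via the new more flag, leave the aq_required bit set so the remainder goes out on a later pass instead of being dropped. The batching arithmetic, with stand-in header and element types:

#include <stdbool.h>
#include <stdio.h>

#define MAX_BUF 4096		/* stands in for I40EVF_MAX_AQ_BUF_SIZE */

struct hdr  { int vsi_id; int num; };	/* stands in for the list header */
struct addr { unsigned char a[6]; };	/* one MAC filter element */

int main(void)
{
	int pending = 700;	/* queued filters */
	int pass = 0;

	while (pending > 0) {
		int max_fit = (MAX_BUF - sizeof(struct hdr)) /
			      sizeof(struct addr);
		int count = pending;
		bool more = false;

		if (count > max_fit) {
			count = max_fit;
			more = true;	/* keep the AQ flag set for later */
		}
		/* len covers only what this message actually carries */
		size_t len = sizeof(struct hdr) + count * sizeof(struct addr);

		printf("pass %d: %d addrs, %zu bytes, more=%d\n",
		       ++pass, count, len, more);
		pending -= count;
	}
	return 0;
}
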
5550     diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
5551     index 7a73510e547c..97bf0c3d5c69 100644
5552     --- a/drivers/net/ethernet/intel/igb/e1000_82575.c
5553     +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
5554     @@ -294,6 +294,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
5555     case I210_I_PHY_ID:
5556     phy->type = e1000_phy_i210;
5557     phy->ops.check_polarity = igb_check_polarity_m88;
5558     + phy->ops.get_cfg_done = igb_get_cfg_done_i210;
5559     phy->ops.get_phy_info = igb_get_phy_info_m88;
5560     phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
5561     phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
5562     diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
5563     index 65d931669f81..29f59c76878a 100644
5564     --- a/drivers/net/ethernet/intel/igb/e1000_i210.c
5565     +++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
5566     @@ -900,3 +900,30 @@ s32 igb_pll_workaround_i210(struct e1000_hw *hw)
5567     wr32(E1000_MDICNFG, mdicnfg);
5568     return ret_val;
5569     }
5570     +
5571     +/**
5572     + * igb_get_cfg_done_i210 - Read config done bit
5573     + * @hw: pointer to the HW structure
5574     + *
5575     + * Read the management control register for the config done bit for
5576     + * completion status. NOTE: silicon which is EEPROM-less will fail trying
5577     + * to read the config done bit, so an error is *ONLY* logged and returns
5578     + * 0. If we were to return with error, EEPROM-less silicon
5579     + * would not be able to be reset or change link.
5580     + **/
5581     +s32 igb_get_cfg_done_i210(struct e1000_hw *hw)
5582     +{
5583     + s32 timeout = PHY_CFG_TIMEOUT;
5584     + u32 mask = E1000_NVM_CFG_DONE_PORT_0;
5585     +
5586     + while (timeout) {
5587     + if (rd32(E1000_EEMNGCTL_I210) & mask)
5588     + break;
5589     + usleep_range(1000, 2000);
5590     + timeout--;
5591     + }
5592     + if (!timeout)
5593     + hw_dbg("MNG configuration cycle has not completed.\n");
5594     +
5595     + return 0;
5596     +}
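
igb_get_cfg_done_i210() polls the I210-specific EEMNGCTL register for the config-done bit with a bounded retry loop, and deliberately returns 0 even on timeout so that EEPROM-less silicon can still be reset and bring up link, per the kernel-doc above. The bounded-poll idiom with a simulated register; the bit position here is illustrative:

#include <stdint.h>
#include <stdio.h>

#define CFG_DONE_BIT (1u << 18)

static int polls;
static uint32_t read_mng_reg(void)	/* simulated register read */
{
	return ++polls >= 3 ? CFG_DONE_BIT : 0;
}

/* Bounded poll; on timeout only log, never fail, so hardware without
 * an EEPROM can still be reset. */
static int get_cfg_done(void)
{
	int timeout = 10;

	while (timeout--) {
		if (read_mng_reg() & CFG_DONE_BIT)
			return 0;
		/* the kernel code sleeps 1-2 ms here via usleep_range() */
	}
	fprintf(stderr, "MNG configuration cycle has not completed.\n");
	return 0;
}

int main(void) { return get_cfg_done(); }
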
5597     diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
5598     index 3442b6357d01..eaa68a50cb3b 100644
5599     --- a/drivers/net/ethernet/intel/igb/e1000_i210.h
5600     +++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
5601     @@ -34,6 +34,7 @@ s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data);
5602     s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
5603     bool igb_get_flash_presence_i210(struct e1000_hw *hw);
5604     s32 igb_pll_workaround_i210(struct e1000_hw *hw);
5605     +s32 igb_get_cfg_done_i210(struct e1000_hw *hw);
5606    
5607     #define E1000_STM_OPCODE 0xDB00
5608     #define E1000_EEPROM_FLASH_SIZE_WORD 0x11
5609     diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
5610     index 4af2870e49f8..0fdcd4d1b982 100644
5611     --- a/drivers/net/ethernet/intel/igb/e1000_regs.h
5612     +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
5613     @@ -66,6 +66,7 @@
5614     #define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
5615     #define E1000_PBS 0x01008 /* Packet Buffer Size */
5616     #define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
5617     +#define E1000_EEMNGCTL_I210 0x12030 /* MNG EEprom Control */
5618     #define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */
5619     #define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
5620     #define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */
5621     diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
5622     index 1a2f1cc44b28..e3cb93bdb21a 100644
5623     --- a/drivers/net/ethernet/intel/igb/igb.h
5624     +++ b/drivers/net/ethernet/intel/igb/igb.h
5625     @@ -389,6 +389,8 @@ struct igb_adapter {
5626     u16 link_speed;
5627     u16 link_duplex;
5628    
5629     + u8 __iomem *io_addr; /* Mainly for iounmap use */
5630     +
5631     struct work_struct reset_task;
5632     struct work_struct watchdog_task;
5633     bool fc_autoneg;
5634     diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
5635     index ea7b09887245..fa3b4cbea23b 100644
5636     --- a/drivers/net/ethernet/intel/igb/igb_main.c
5637     +++ b/drivers/net/ethernet/intel/igb/igb_main.c
5638     @@ -2294,9 +2294,11 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
5639     adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
5640    
5641     err = -EIO;
5642     - hw->hw_addr = pci_iomap(pdev, 0, 0);
5643     - if (!hw->hw_addr)
5644     + adapter->io_addr = pci_iomap(pdev, 0, 0);
5645     + if (!adapter->io_addr)
5646     goto err_ioremap;
5647     + /* hw->hw_addr can be altered, we'll use adapter->io_addr for unmap */
5648     + hw->hw_addr = adapter->io_addr;
5649    
5650     netdev->netdev_ops = &igb_netdev_ops;
5651     igb_set_ethtool_ops(netdev);
5652     @@ -2656,7 +2658,7 @@ err_sw_init:
5653     #ifdef CONFIG_PCI_IOV
5654     igb_disable_sriov(pdev);
5655     #endif
5656     - pci_iounmap(pdev, hw->hw_addr);
5657     + pci_iounmap(pdev, adapter->io_addr);
5658     err_ioremap:
5659     free_netdev(netdev);
5660     err_alloc_etherdev:
5661     @@ -2823,7 +2825,7 @@ static void igb_remove(struct pci_dev *pdev)
5662    
5663     igb_clear_interrupt_scheme(adapter);
5664    
5665     - pci_iounmap(pdev, hw->hw_addr);
5666     + pci_iounmap(pdev, adapter->io_addr);
5667     if (hw->flash_address)
5668     iounmap(hw->flash_address);
5669     pci_release_selected_regions(pdev,
5670     @@ -2856,6 +2858,13 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
5671     if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
5672     return;
5673    
5674     + /* Of the below we really only want the effect of getting
5675     + * IGB_FLAG_HAS_MSIX set (if available), without which
5676     + * igb_enable_sriov() has no effect.
5677     + */
5678     + igb_set_interrupt_capability(adapter, true);
5679     + igb_reset_interrupt_capability(adapter);
5680     +
5681     pci_sriov_set_totalvfs(pdev, 7);
5682     igb_enable_sriov(pdev, max_vfs);
5683    
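The igb hunks above keep the pointer returned by pci_iomap() in a dedicated adapter->io_addr field, because hw->hw_addr can be rewritten at runtime (the in-patch comment notes it "can be altered") and is therefore unsafe to pass to pci_iounmap(). A minimal standalone sketch of that pattern; fake_map()/fake_unmap() and the struct fields are invented stand-ins, not igb code:

    #include <stddef.h>

    struct dev_ctx {
        void *io_addr;   /* exactly what the map call returned; used only for unmap */
        void *hw_addr;   /* working pointer; other code may redirect or clear it */
    };

    static char bar0[64];
    static void *fake_map(void) { return bar0; }     /* stands in for pci_iomap() */
    static void fake_unmap(void *p) { (void)p; }     /* stands in for pci_iounmap() */

    static int dev_setup(struct dev_ctx *c)
    {
        c->io_addr = fake_map();
        if (!c->io_addr)
            return -1;
        c->hw_addr = c->io_addr;   /* may later be altered, e.g. by error handling */
        return 0;
    }

    static void dev_teardown(struct dev_ctx *c)
    {
        fake_unmap(c->io_addr);    /* never trust hw_addr for the unmap */
    }

    int main(void)
    {
        struct dev_ctx c;
        if (dev_setup(&c) == 0)
            dev_teardown(&c);
        return 0;
    }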
5684     diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
5685     index aed8d029b23d..cd9b284bc83b 100644
5686     --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
5687     +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
5688     @@ -2786,7 +2786,8 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
5689     ixgbe_for_each_ring(ring, q_vector->tx)
5690     clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);
5691    
5692     - if (!ixgbe_qv_lock_napi(q_vector))
5693     + /* Exit if we are called by netpoll or busy polling is active */
5694     + if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector))
5695     return budget;
5696    
5697     /* attempt to distribute budget to each queue fairly, but don't allow
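The ixgbe hunk above adds a budget <= 0 guard because, per its comment, netpoll invokes the poll routine with a zero budget expecting only Tx cleanup, and busy polling may already hold the NAPI lock. A standalone sketch of that guard, with invented helper names (clean_tx/lock_napi/clean_rx are illustrative, not driver functions):

    #include <stdbool.h>
    #include <stdio.h>

    static bool clean_tx(void)  { return true; }         /* Tx work is always safe */
    static bool lock_napi(void) { return true; }         /* fails if busy-poll owns it */
    static int  clean_rx(int budget) { return budget / 2; }

    static int poll(int budget)
    {
        clean_tx();

        /* Called from netpoll (budget <= 0) or busy polling is active:
         * return without touching Rx. */
        if (budget <= 0 || !lock_napi())
            return budget;

        return clean_rx(budget);
    }

    int main(void)
    {
        printf("%d\n", poll(0));    /* netpoll-style call: Rx skipped */
        printf("%d\n", poll(64));   /* normal NAPI call */
        return 0;
    }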
5698     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
5699     index 2e022e900939..7cc9df717323 100644
5700     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
5701     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
5702     @@ -399,6 +399,9 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
5703     {
5704     struct mlx5e_priv *priv = netdev_priv(netdev);
5705    
5706     + if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
5707     + return -ENOTSUPP;
5708     +
5709     coal->rx_coalesce_usecs = priv->params.rx_cq_moderation_usec;
5710     coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation_pkts;
5711     coal->tx_coalesce_usecs = priv->params.tx_cq_moderation_usec;
5712     @@ -416,11 +419,18 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
5713     int tc;
5714     int i;
5715    
5716     + if (!MLX5_CAP_GEN(mdev, cq_moderation))
5717     + return -ENOTSUPP;
5718     +
5719     + mutex_lock(&priv->state_lock);
5720     priv->params.tx_cq_moderation_usec = coal->tx_coalesce_usecs;
5721     priv->params.tx_cq_moderation_pkts = coal->tx_max_coalesced_frames;
5722     priv->params.rx_cq_moderation_usec = coal->rx_coalesce_usecs;
5723     priv->params.rx_cq_moderation_pkts = coal->rx_max_coalesced_frames;
5724    
5725     + if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
5726     + goto out;
5727     +
5728     for (i = 0; i < priv->params.num_channels; ++i) {
5729     c = priv->channel[i];
5730    
5731     @@ -436,6 +446,8 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
5732     coal->rx_max_coalesced_frames);
5733     }
5734    
5735     +out:
5736     + mutex_unlock(&priv->state_lock);
5737     return 0;
5738     }
5739    
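The en_ethtool.c hunks above follow a three-step shape: reject the request early when the device lacks the cq_moderation capability, record the new parameters under state_lock unconditionally, and program live channels only when the interface is open. A standalone sketch of that shape; the struct, field names, and -ENOTSUP (userspace stand-in for the kernel's -ENOTSUPP) are assumptions, not mlx5 code:

    #include <stdbool.h>
    #include <pthread.h>
    #include <errno.h>

    struct priv {
        pthread_mutex_t state_lock;
        bool hw_supports_moderation;
        bool opened;                   /* channels exist only when true */
        unsigned int usecs, frames;    /* cached parameters */
    };

    static void program_channels(struct priv *p) { (void)p; /* touch HW here */ }

    static int set_coalesce(struct priv *p, unsigned int usecs, unsigned int frames)
    {
        if (!p->hw_supports_moderation)
            return -ENOTSUP;           /* capability check before anything else */

        pthread_mutex_lock(&p->state_lock);
        p->usecs = usecs;              /* always remember the request */
        p->frames = frames;
        if (p->opened)                 /* only program HW if channels exist */
            program_channels(p);
        pthread_mutex_unlock(&p->state_lock);
        return 0;
    }

    int main(void)
    {
        struct priv p = { .state_lock = PTHREAD_MUTEX_INITIALIZER,
                          .hw_supports_moderation = true, .opened = false };
        return set_coalesce(&p, 8, 32) ? 1 : 0;
    }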
5740     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
5741     index cbd17e25beeb..90e876ecc720 100644
5742     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
5743     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
5744     @@ -863,12 +863,10 @@ static int mlx5e_open_cq(struct mlx5e_channel *c,
5745     if (err)
5746     goto err_destroy_cq;
5747    
5748     - err = mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
5749     - moderation_usecs,
5750     - moderation_frames);
5751     - if (err)
5752     - goto err_destroy_cq;
5753     -
5754     + if (MLX5_CAP_GEN(mdev, cq_moderation))
5755     + mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
5756     + moderation_usecs,
5757     + moderation_frames);
5758     return 0;
5759    
5760     err_destroy_cq:
5761     @@ -1963,6 +1961,8 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
5762     }
5763     if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
5764     mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
5765     + if (!MLX5_CAP_GEN(mdev, cq_moderation))
5766     + mlx5_core_warn(mdev, "CQ moderation is not supported\n");
5767    
5768     return 0;
5769     }
5770     diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
5771     index 289a5df0d44a..c851bc53831c 100644
5772     --- a/drivers/nvme/host/pci.c
5773     +++ b/drivers/nvme/host/pci.c
5774     @@ -2725,7 +2725,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
5775     return 0;
5776    
5777     disable:
5778     - pci_release_regions(pdev);
5779     + pci_disable_device(pdev);
5780    
5781     return result;
5782     }
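The nvme hunk above fixes a mismatched error label: the disable: path released PCI regions this function never claimed, instead of undoing its own pci_enable work. The general rule is that each error label unwinds exactly what the function itself did, in reverse order. A standalone sketch of that unwind pairing, with invented helpers (enable_device/setup_queues are illustrative):

    #include <stdio.h>

    static int enable_device(void) { return 0; }
    static void disable_device(void) { puts("disable_device"); }
    static int setup_queues(void) { return -1; }   /* force the error path */

    static int probe(void)
    {
        int err;

        err = enable_device();
        if (err)
            return err;

        err = setup_queues();
        if (err)
            goto disable;          /* undo only what *this* function did */

        return 0;

    disable:
        disable_device();          /* pairs with enable_device(), nothing else */
        return err;
    }

    int main(void) { return probe() ? 1 : 0; }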
5783     diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
5784     index f9dfc8b6407a..7225ac6b3df5 100644
5785     --- a/drivers/pwm/pwm-fsl-ftm.c
5786     +++ b/drivers/pwm/pwm-fsl-ftm.c
5787     @@ -80,7 +80,6 @@ struct fsl_pwm_chip {
5788    
5789     struct mutex lock;
5790    
5791     - unsigned int use_count;
5792     unsigned int cnt_select;
5793     unsigned int clk_ps;
5794    
5795     @@ -300,9 +299,6 @@ static int fsl_counter_clock_enable(struct fsl_pwm_chip *fpc)
5796     {
5797     int ret;
5798    
5799     - if (fpc->use_count++ != 0)
5800     - return 0;
5801     -
5802     /* select counter clock source */
5803     regmap_update_bits(fpc->regmap, FTM_SC, FTM_SC_CLK_MASK,
5804     FTM_SC_CLK(fpc->cnt_select));
5805     @@ -334,25 +330,6 @@ static int fsl_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
5806     return ret;
5807     }
5808    
5809     -static void fsl_counter_clock_disable(struct fsl_pwm_chip *fpc)
5810     -{
5811     - /*
5812     - * already disabled, do nothing
5813     - */
5814     - if (fpc->use_count == 0)
5815     - return;
5816     -
5817     - /* there are still users, so can't disable yet */
5818     - if (--fpc->use_count > 0)
5819     - return;
5820     -
5821     - /* no users left, disable PWM counter clock */
5822     - regmap_update_bits(fpc->regmap, FTM_SC, FTM_SC_CLK_MASK, 0);
5823     -
5824     - clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_CNTEN]);
5825     - clk_disable_unprepare(fpc->clk[fpc->cnt_select]);
5826     -}
5827     -
5828     static void fsl_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
5829     {
5830     struct fsl_pwm_chip *fpc = to_fsl_chip(chip);
5831     @@ -362,7 +339,8 @@ static void fsl_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
5832     regmap_update_bits(fpc->regmap, FTM_OUTMASK, BIT(pwm->hwpwm),
5833     BIT(pwm->hwpwm));
5834    
5835     - fsl_counter_clock_disable(fpc);
5836     + clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_CNTEN]);
5837     + clk_disable_unprepare(fpc->clk[fpc->cnt_select]);
5838    
5839     regmap_read(fpc->regmap, FTM_OUTMASK, &val);
5840     if ((val & 0xFF) == 0xFF)
5841     @@ -492,17 +470,24 @@ static int fsl_pwm_remove(struct platform_device *pdev)
5842     static int fsl_pwm_suspend(struct device *dev)
5843     {
5844     struct fsl_pwm_chip *fpc = dev_get_drvdata(dev);
5845     - u32 val;
5846     + int i;
5847    
5848     regcache_cache_only(fpc->regmap, true);
5849     regcache_mark_dirty(fpc->regmap);
5850    
5851     - /* read from cache */
5852     - regmap_read(fpc->regmap, FTM_OUTMASK, &val);
5853     - if ((val & 0xFF) != 0xFF) {
5854     + for (i = 0; i < fpc->chip.npwm; i++) {
5855     + struct pwm_device *pwm = &fpc->chip.pwms[i];
5856     +
5857     + if (!test_bit(PWMF_REQUESTED, &pwm->flags))
5858     + continue;
5859     +
5860     + clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_SYS]);
5861     +
5862     + if (!pwm_is_enabled(pwm))
5863     + continue;
5864     +
5865     clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_CNTEN]);
5866     clk_disable_unprepare(fpc->clk[fpc->cnt_select]);
5867     - clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_SYS]);
5868     }
5869    
5870     return 0;
5871     @@ -511,12 +496,19 @@ static int fsl_pwm_suspend(struct device *dev)
5872     static int fsl_pwm_resume(struct device *dev)
5873     {
5874     struct fsl_pwm_chip *fpc = dev_get_drvdata(dev);
5875     - u32 val;
5876     + int i;
5877     +
5878     + for (i = 0; i < fpc->chip.npwm; i++) {
5879     + struct pwm_device *pwm = &fpc->chip.pwms[i];
5880     +
5881     + if (!test_bit(PWMF_REQUESTED, &pwm->flags))
5882     + continue;
5883    
5884     - /* read from cache */
5885     - regmap_read(fpc->regmap, FTM_OUTMASK, &val);
5886     - if ((val & 0xFF) != 0xFF) {
5887     clk_prepare_enable(fpc->clk[FSL_PWM_CLK_SYS]);
5888     +
5889     + if (!pwm_is_enabled(pwm))
5890     + continue;
5891     +
5892     clk_prepare_enable(fpc->clk[fpc->cnt_select]);
5893     clk_prepare_enable(fpc->clk[FSL_PWM_CLK_CNTEN]);
5894     }
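The pwm-fsl-ftm hunks above replace the global FTM_OUTMASK test with a per-channel walk: for every requested PWM the system clock is gated, and for every enabled one the counter clocks as well; resume mirrors suspend so each clk_disable_unprepare() gets a matching clk_prepare_enable(). A standalone model of keeping the two loops symmetric; the struct and clock names are illustrative, not driver code:

    #include <stdbool.h>
    #include <stdio.h>

    struct chan { bool requested, enabled; };

    static void clk_off(const char *name) { printf("off: %s\n", name); }
    static void clk_on(const char *name)  { printf("on:  %s\n", name); }

    static void pwm_suspend(struct chan *c, int n)
    {
        for (int i = 0; i < n; i++) {
            if (!c[i].requested)
                continue;
            clk_off("sys");
            if (!c[i].enabled)
                continue;
            clk_off("cnten");
            clk_off("cntsel");
        }
    }

    static void pwm_resume(struct chan *c, int n)
    {
        for (int i = 0; i < n; i++) {
            if (!c[i].requested)
                continue;
            clk_on("sys");         /* mirror of suspend, opposite order */
            if (!c[i].enabled)
                continue;
            clk_on("cntsel");
            clk_on("cnten");
        }
    }

    int main(void)
    {
        struct chan c[2] = { { true, true }, { true, false } };
        pwm_suspend(c, 2);
        pwm_resume(c, 2);
        return 0;
    }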
5895     diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c
5896     index 9fde60ce8e7b..6e203a65effb 100644
5897     --- a/drivers/pwm/pwm-lpc32xx.c
5898     +++ b/drivers/pwm/pwm-lpc32xx.c
5899     @@ -24,9 +24,7 @@ struct lpc32xx_pwm_chip {
5900     void __iomem *base;
5901     };
5902    
5903     -#define PWM_ENABLE (1 << 31)
5904     -#define PWM_RELOADV(x) (((x) & 0xFF) << 8)
5905     -#define PWM_DUTY(x) ((x) & 0xFF)
5906     +#define PWM_ENABLE BIT(31)
5907    
5908     #define to_lpc32xx_pwm_chip(_chip) \
5909     container_of(_chip, struct lpc32xx_pwm_chip, chip)
5910     @@ -38,40 +36,27 @@ static int lpc32xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
5911     unsigned long long c;
5912     int period_cycles, duty_cycles;
5913     u32 val;
5914     -
5915     - c = clk_get_rate(lpc32xx->clk) / 256;
5916     - c = c * period_ns;
5917     - do_div(c, NSEC_PER_SEC);
5918     -
5919     - /* Handle high and low extremes */
5920     - if (c == 0)
5921     - c = 1;
5922     - if (c > 255)
5923     - c = 0; /* 0 set division by 256 */
5924     - period_cycles = c;
5925     -
5926     - /* The duty-cycle value is as follows:
5927     - *
5928     - * DUTY-CYCLE HIGH LEVEL
5929     - * 1 99.9%
5930     - * 25 90.0%
5931     - * 128 50.0%
5932     - * 220 10.0%
5933     - * 255 0.1%
5934     - * 0 0.0%
5935     - *
5936     - * In other words, the register value is duty-cycle % 256 with
5937     - * duty-cycle in the range 1-256.
5938     - */
5939     - c = 256 * duty_ns;
5940     - do_div(c, period_ns);
5941     - if (c > 255)
5942     - c = 255;
5943     - duty_cycles = 256 - c;
5944     + c = clk_get_rate(lpc32xx->clk);
5945     +
5946     + /* The highest acceptable divisor is 256, which is represented by 0 */
5947     + period_cycles = div64_u64(c * period_ns,
5948     + (unsigned long long)NSEC_PER_SEC * 256);
5949     + if (!period_cycles)
5950     + period_cycles = 1;
5951     + if (period_cycles > 255)
5952     + period_cycles = 0;
5953     +
5954     + /* Compute 256 x #duty/period value and care for corner cases */
5955     + duty_cycles = div64_u64((unsigned long long)(period_ns - duty_ns) * 256,
5956     + period_ns);
5957     + if (!duty_cycles)
5958     + duty_cycles = 1;
5959     + if (duty_cycles > 255)
5960     + duty_cycles = 255;
5961    
5962     val = readl(lpc32xx->base + (pwm->hwpwm << 2));
5963     val &= ~0xFFFF;
5964     - val |= PWM_RELOADV(period_cycles) | PWM_DUTY(duty_cycles);
5965     + val |= (period_cycles << 8) | duty_cycles;
5966     writel(val, lpc32xx->base + (pwm->hwpwm << 2));
5967    
5968     return 0;
5969     @@ -134,7 +119,7 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev)
5970    
5971     lpc32xx->chip.dev = &pdev->dev;
5972     lpc32xx->chip.ops = &lpc32xx_pwm_ops;
5973     - lpc32xx->chip.npwm = 2;
5974     + lpc32xx->chip.npwm = 1;
5975     lpc32xx->chip.base = -1;
5976    
5977     ret = pwmchip_add(&lpc32xx->chip);
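The rewritten pwm-lpc32xx math above computes period_cycles = clk_rate * period_ns / (NSEC_PER_SEC * 256), clamped to 1..255 with 0 encoding the divide-by-256 case, and stores 256 * (period - duty) / period as the duty register (per the removed comment table, 128 means 50% high, smaller values mean higher duty). A standalone restatement of that integer math with one worked example; only the formulas come from the patch, the function name is invented:

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    static void lpc32xx_regs(uint64_t clk_rate, uint64_t period_ns,
                             uint64_t duty_ns, unsigned *period_reg,
                             unsigned *duty_reg)
    {
        /* Highest acceptable divisor is 256, encoded as 0 in the register. */
        uint64_t p = (clk_rate * period_ns) / (NSEC_PER_SEC * 256);
        if (p == 0)
            p = 1;
        if (p > 255)
            p = 0;

        /* Register holds 256 * (period - duty) / period, clamped to 1..255. */
        uint64_t d = ((period_ns - duty_ns) * 256) / period_ns;
        if (d == 0)
            d = 1;
        if (d > 255)
            d = 255;

        *period_reg = (unsigned)p;
        *duty_reg = (unsigned)d;
    }

    int main(void)
    {
        unsigned p, d;

        /* 13 MHz clock, 1 ms period, 250 us duty:
         * p = 13e6 * 1e6 / (1e9 * 256) = 50
         * d = 256 * 750000 / 1000000  = 192 (i.e. 25% high) */
        lpc32xx_regs(13000000, 1000000, 250000, &p, &d);
        printf("period=%u duty=%u\n", p, d);
        return 0;
    }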
5978     diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
5979     index 63cd5e68c864..3a6d0290c54c 100644
5980     --- a/drivers/regulator/anatop-regulator.c
5981     +++ b/drivers/regulator/anatop-regulator.c
5982     @@ -296,7 +296,7 @@ static int anatop_regulator_probe(struct platform_device *pdev)
5983     if (!sreg->sel && !strcmp(sreg->name, "vddpu"))
5984     sreg->sel = 22;
5985    
5986     - if (!sreg->sel) {
5987     + if (!sreg->bypass && !sreg->sel) {
5988     dev_err(&pdev->dev, "Failed to read a valid default voltage selector.\n");
5989     return -EINVAL;
5990     }
5991     diff --git a/drivers/s390/char/sclp_ctl.c b/drivers/s390/char/sclp_ctl.c
5992     index 648cb86afd42..ea607a4a1bdd 100644
5993     --- a/drivers/s390/char/sclp_ctl.c
5994     +++ b/drivers/s390/char/sclp_ctl.c
5995     @@ -56,6 +56,7 @@ static int sclp_ctl_ioctl_sccb(void __user *user_area)
5996     {
5997     struct sclp_ctl_sccb ctl_sccb;
5998     struct sccb_header *sccb;
5999     + unsigned long copied;
6000     int rc;
6001    
6002     if (copy_from_user(&ctl_sccb, user_area, sizeof(ctl_sccb)))
6003     @@ -65,14 +66,15 @@ static int sclp_ctl_ioctl_sccb(void __user *user_area)
6004     sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
6005     if (!sccb)
6006     return -ENOMEM;
6007     - if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sizeof(*sccb))) {
6008     + copied = PAGE_SIZE -
6009     + copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), PAGE_SIZE);
6010     + if (offsetof(struct sccb_header, length) +
6011     + sizeof(sccb->length) > copied || sccb->length > copied) {
6012     rc = -EFAULT;
6013     goto out_free;
6014     }
6015     - if (sccb->length > PAGE_SIZE || sccb->length < 8)
6016     - return -EINVAL;
6017     - if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sccb->length)) {
6018     - rc = -EFAULT;
6019     + if (sccb->length < 8) {
6020     + rc = -EINVAL;
6021     goto out_free;
6022     }
6023     rc = sclp_sync_request(ctl_sccb.cmdw, sccb);
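The sclp_ctl hunk above collapses two copy_from_user() calls into one: it fetches the whole page once, derives copied = PAGE_SIZE minus the bytes that failed to copy, and validates the user-supplied length field against copied. That closes the double-fetch window in which user space could change sccb->length between the header read and the body read. A standalone sketch of single-fetch validation; fake_copy_from_user() models the kernel helper's "bytes not copied" return and the header layout is invented:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>
    #include <errno.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u

    struct hdr { uint16_t length; /* ... */ };

    /* Stand-in for copy_from_user(): returns the number of bytes NOT copied. */
    static size_t fake_copy_from_user(void *dst, const void *src, size_t n)
    {
        memcpy(dst, src, n);
        return 0;
    }

    static int validate(const void *user_buf)
    {
        unsigned char page[PAGE_SIZE];
        size_t copied = PAGE_SIZE - fake_copy_from_user(page, user_buf, PAGE_SIZE);
        const struct hdr *h = (const struct hdr *)page;

        /* The length field itself, and everything it claims, must lie inside
         * the bytes actually transferred by this single fetch. */
        if (offsetof(struct hdr, length) + sizeof(h->length) > copied ||
            h->length > copied)
            return -EFAULT;
        if (h->length < 8)
            return -EINVAL;
        return 0;
    }

    int main(void)
    {
        unsigned char buf[PAGE_SIZE] = { 0 };
        ((struct hdr *)buf)->length = 16;
        printf("%d\n", validate(buf));   /* 0 */
        return 0;
    }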
6024     diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
6025     index c692dfebd0ba..50597f9522fe 100644
6026     --- a/drivers/s390/cio/chp.c
6027     +++ b/drivers/s390/cio/chp.c
6028     @@ -139,11 +139,11 @@ static ssize_t chp_measurement_chars_read(struct file *filp,
6029    
6030     device = container_of(kobj, struct device, kobj);
6031     chp = to_channelpath(device);
6032     - if (!chp->cmg_chars)
6033     + if (chp->cmg == -1)
6034     return 0;
6035    
6036     - return memory_read_from_buffer(buf, count, &off,
6037     - chp->cmg_chars, sizeof(struct cmg_chars));
6038     + return memory_read_from_buffer(buf, count, &off, &chp->cmg_chars,
6039     + sizeof(chp->cmg_chars));
6040     }
6041    
6042     static struct bin_attribute chp_measurement_chars_attr = {
6043     @@ -416,7 +416,8 @@ static void chp_release(struct device *dev)
6044     * chp_update_desc - update channel-path description
6045     * @chp - channel-path
6046     *
6047     - * Update the channel-path description of the specified channel-path.
6048     + * Update the channel-path description of the specified channel-path
6049     + * including channel measurement related information.
6050     * Return zero on success, non-zero otherwise.
6051     */
6052     int chp_update_desc(struct channel_path *chp)
6053     @@ -428,8 +429,10 @@ int chp_update_desc(struct channel_path *chp)
6054     return rc;
6055    
6056     rc = chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1);
6057     + if (rc)
6058     + return rc;
6059    
6060     - return rc;
6061     + return chsc_get_channel_measurement_chars(chp);
6062     }
6063    
6064     /**
6065     @@ -466,14 +469,6 @@ int chp_new(struct chp_id chpid)
6066     ret = -ENODEV;
6067     goto out_free;
6068     }
6069     - /* Get channel-measurement characteristics. */
6070     - if (css_chsc_characteristics.scmc && css_chsc_characteristics.secm) {
6071     - ret = chsc_get_channel_measurement_chars(chp);
6072     - if (ret)
6073     - goto out_free;
6074     - } else {
6075     - chp->cmg = -1;
6076     - }
6077     dev_set_name(&chp->dev, "chp%x.%02x", chpid.cssid, chpid.id);
6078    
6079     /* make it known to the system */
6080     diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
6081     index 4efd5b867cc3..af0232290dc4 100644
6082     --- a/drivers/s390/cio/chp.h
6083     +++ b/drivers/s390/cio/chp.h
6084     @@ -48,7 +48,7 @@ struct channel_path {
6085     /* Channel-measurement related stuff: */
6086     int cmg;
6087     int shared;
6088     - void *cmg_chars;
6089     + struct cmg_chars cmg_chars;
6090     };
6091    
6092     /* Return channel_path struct for given chpid. */
6093     diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
6094     index a831d18596a5..c424c0c7367e 100644
6095     --- a/drivers/s390/cio/chsc.c
6096     +++ b/drivers/s390/cio/chsc.c
6097     @@ -14,6 +14,7 @@
6098     #include <linux/slab.h>
6099     #include <linux/init.h>
6100     #include <linux/device.h>
6101     +#include <linux/mutex.h>
6102     #include <linux/pci.h>
6103    
6104     #include <asm/cio.h>
6105     @@ -224,8 +225,9 @@ out_unreg:
6106    
6107     void chsc_chp_offline(struct chp_id chpid)
6108     {
6109     - char dbf_txt[15];
6110     + struct channel_path *chp = chpid_to_chp(chpid);
6111     struct chp_link link;
6112     + char dbf_txt[15];
6113    
6114     sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
6115     CIO_TRACE_EVENT(2, dbf_txt);
6116     @@ -236,6 +238,11 @@ void chsc_chp_offline(struct chp_id chpid)
6117     link.chpid = chpid;
6118     /* Wait until previous actions have settled. */
6119     css_wait_for_slow_path();
6120     +
6121     + mutex_lock(&chp->lock);
6122     + chp_update_desc(chp);
6123     + mutex_unlock(&chp->lock);
6124     +
6125     for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
6126     }
6127    
6128     @@ -690,8 +697,9 @@ static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
6129    
6130     void chsc_chp_online(struct chp_id chpid)
6131     {
6132     - char dbf_txt[15];
6133     + struct channel_path *chp = chpid_to_chp(chpid);
6134     struct chp_link link;
6135     + char dbf_txt[15];
6136    
6137     sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
6138     CIO_TRACE_EVENT(2, dbf_txt);
6139     @@ -701,6 +709,11 @@ void chsc_chp_online(struct chp_id chpid)
6140     link.chpid = chpid;
6141     /* Wait until previous actions have settled. */
6142     css_wait_for_slow_path();
6143     +
6144     + mutex_lock(&chp->lock);
6145     + chp_update_desc(chp);
6146     + mutex_unlock(&chp->lock);
6147     +
6148     for_each_subchannel_staged(__s390_process_res_acc, NULL,
6149     &link);
6150     css_schedule_reprobe();
6151     @@ -967,22 +980,19 @@ static void
6152     chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
6153     struct cmg_chars *chars)
6154     {
6155     - struct cmg_chars *cmg_chars;
6156     int i, mask;
6157    
6158     - cmg_chars = chp->cmg_chars;
6159     for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
6160     mask = 0x80 >> (i + 3);
6161     if (cmcv & mask)
6162     - cmg_chars->values[i] = chars->values[i];
6163     + chp->cmg_chars.values[i] = chars->values[i];
6164     else
6165     - cmg_chars->values[i] = 0;
6166     + chp->cmg_chars.values[i] = 0;
6167     }
6168     }
6169    
6170     int chsc_get_channel_measurement_chars(struct channel_path *chp)
6171     {
6172     - struct cmg_chars *cmg_chars;
6173     int ccode, ret;
6174    
6175     struct {
6176     @@ -1006,10 +1016,11 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
6177     u32 data[NR_MEASUREMENT_CHARS];
6178     } __attribute__ ((packed)) *scmc_area;
6179    
6180     - chp->cmg_chars = NULL;
6181     - cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL);
6182     - if (!cmg_chars)
6183     - return -ENOMEM;
6184     + chp->shared = -1;
6185     + chp->cmg = -1;
6186     +
6187     + if (!css_chsc_characteristics.scmc || !css_chsc_characteristics.secm)
6188     + return 0;
6189    
6190     spin_lock_irq(&chsc_page_lock);
6191     memset(chsc_page, 0, PAGE_SIZE);
6192     @@ -1031,25 +1042,19 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
6193     scmc_area->response.code);
6194     goto out;
6195     }
6196     - if (scmc_area->not_valid) {
6197     - chp->cmg = -1;
6198     - chp->shared = -1;
6199     + if (scmc_area->not_valid)
6200     goto out;
6201     - }
6202     +
6203     chp->cmg = scmc_area->cmg;
6204     chp->shared = scmc_area->shared;
6205     if (chp->cmg != 2 && chp->cmg != 3) {
6206     /* No cmg-dependent data. */
6207     goto out;
6208     }
6209     - chp->cmg_chars = cmg_chars;
6210     chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
6211     (struct cmg_chars *) &scmc_area->data);
6212     out:
6213     spin_unlock_irq(&chsc_page_lock);
6214     - if (!chp->cmg_chars)
6215     - kfree(cmg_chars);
6216     -
6217     return ret;
6218     }
6219    
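The s390 cio hunks above embed the fixed-size cmg_chars block directly in struct channel_path instead of kmalloc'ing it, and use cmg == -1 rather than a NULL pointer as the "no measurement data" marker, removing an allocation and its failure path. A standalone model of that change; the struct and field names here are illustrative only:

    #include <stdio.h>
    #include <string.h>

    #define NR_VALUES 5

    struct chars { unsigned int values[NR_VALUES]; };

    struct path {
        int cmg;               /* -1: no measurement data available */
        struct chars chars;    /* embedded: no allocation, no free, no NULL */
    };

    static long read_chars(const struct path *p, void *buf, long len)
    {
        if (p->cmg == -1)      /* replaces the old NULL-pointer test */
            return 0;
        if (len > (long)sizeof(p->chars))
            len = (long)sizeof(p->chars);
        memcpy(buf, &p->chars, (size_t)len);
        return len;
    }

    int main(void)
    {
        struct path p = { .cmg = -1 };
        unsigned char buf[64];
        printf("%ld\n", read_chars(&p, buf, sizeof(buf)));   /* 0 */
        p.cmg = 2;
        printf("%ld\n", read_chars(&p, buf, sizeof(buf)));   /* 20 */
        return 0;
    }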
6220     diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
6221     index 12b2cb7769f9..df036b872b05 100644
6222     --- a/drivers/s390/net/qeth_l2_main.c
6223     +++ b/drivers/s390/net/qeth_l2_main.c
6224     @@ -1127,6 +1127,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
6225     qeth_l2_request_initial_mac(card);
6226     SET_NETDEV_DEV(card->dev, &card->gdev->dev);
6227     netif_napi_add(card->dev, &card->napi, qeth_l2_poll, QETH_NAPI_WEIGHT);
6228     + netif_carrier_off(card->dev);
6229     return register_netdev(card->dev);
6230     }
6231    
6232     diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
6233     index 50cec6b13d27..cc4d3c3d8cc5 100644
6234     --- a/drivers/s390/net/qeth_l3_main.c
6235     +++ b/drivers/s390/net/qeth_l3_main.c
6236     @@ -3220,6 +3220,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
6237    
6238     SET_NETDEV_DEV(card->dev, &card->gdev->dev);
6239     netif_napi_add(card->dev, &card->napi, qeth_l3_poll, QETH_NAPI_WEIGHT);
6240     + netif_carrier_off(card->dev);
6241     return register_netdev(card->dev);
6242     }
6243    
6244     diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
6245     index 333db5953607..41f9a00e4f74 100644
6246     --- a/drivers/scsi/arcmsr/arcmsr_hba.c
6247     +++ b/drivers/scsi/arcmsr/arcmsr_hba.c
6248     @@ -2664,7 +2664,7 @@ static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
6249     if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
6250     printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
6251     miscellaneous data' timeout \n", acb->host->host_no);
6252     - return false;
6253     + goto err_free_dma;
6254     }
6255     count = 8;
6256     while (count){
6257     @@ -2694,19 +2694,23 @@ static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
6258     acb->firm_model,
6259     acb->firm_version);
6260    
6261     - acb->signature = readl(&reg->message_rwbuffer[1]);
6262     + acb->signature = readl(&reg->message_rwbuffer[0]);
6263     /*firm_signature,1,00-03*/
6264     - acb->firm_request_len = readl(&reg->message_rwbuffer[2]);
6265     + acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
6266     /*firm_request_len,1,04-07*/
6267     - acb->firm_numbers_queue = readl(&reg->message_rwbuffer[3]);
6268     + acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
6269     /*firm_numbers_queue,2,08-11*/
6270     - acb->firm_sdram_size = readl(&reg->message_rwbuffer[4]);
6271     + acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
6272     /*firm_sdram_size,3,12-15*/
6273     - acb->firm_hd_channels = readl(&reg->message_rwbuffer[5]);
6274     + acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
6275     /*firm_ide_channels,4,16-19*/
6276     acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
6277     /*firm_ide_channels,4,16-19*/
6278     return true;
6279     +err_free_dma:
6280     + dma_free_coherent(&acb->pdev->dev, acb->roundup_ccbsize,
6281     + acb->dma_coherent2, acb->dma_coherent_handle2);
6282     + return false;
6283     }
6284    
6285     static bool arcmsr_hbaC_get_config(struct AdapterControlBlock *pACB)
6286     @@ -2880,15 +2884,15 @@ static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb)
6287     iop_device_map++;
6288     count--;
6289     }
6290     - acb->signature = readl(&reg->msgcode_rwbuffer[1]);
6291     + acb->signature = readl(&reg->msgcode_rwbuffer[0]);
6292     /*firm_signature,1,00-03*/
6293     - acb->firm_request_len = readl(&reg->msgcode_rwbuffer[2]);
6294     + acb->firm_request_len = readl(&reg->msgcode_rwbuffer[1]);
6295     /*firm_request_len,1,04-07*/
6296     - acb->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[3]);
6297     + acb->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[2]);
6298     /*firm_numbers_queue,2,08-11*/
6299     - acb->firm_sdram_size = readl(&reg->msgcode_rwbuffer[4]);
6300     + acb->firm_sdram_size = readl(&reg->msgcode_rwbuffer[3]);
6301     /*firm_sdram_size,3,12-15*/
6302     - acb->firm_hd_channels = readl(&reg->msgcode_rwbuffer[5]);
6303     + acb->firm_hd_channels = readl(&reg->msgcode_rwbuffer[4]);
6304     /*firm_hd_channels,4,16-19*/
6305     acb->firm_cfg_version = readl(&reg->msgcode_rwbuffer[25]);
6306     pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
6307     diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
6308     index fa09d4be2b53..2b456ca69d5c 100644
6309     --- a/drivers/scsi/constants.c
6310     +++ b/drivers/scsi/constants.c
6311     @@ -1181,8 +1181,9 @@ static const char * const snstext[] = {
6312    
6313     /* Get sense key string or NULL if not available */
6314     const char *
6315     -scsi_sense_key_string(unsigned char key) {
6316     - if (key <= 0xE)
6317     +scsi_sense_key_string(unsigned char key)
6318     +{
6319     + if (key < ARRAY_SIZE(snstext))
6320     return snstext[key];
6321     return NULL;
6322     }
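The constants.c hunk above bounds the sense-key lookup with ARRAY_SIZE(snstext) instead of a hard-coded 0xE, so the check cannot drift out of sync when the table changes. A standalone sketch of that idiom with a trimmed stand-in table (the three entries are illustrative, not the full snstext array):

    #include <stddef.h>
    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const char *const names[] = { "No Sense", "Recovered Error",
                                         "Not Ready" };

    static const char *name_string(unsigned char key)
    {
        if (key < ARRAY_SIZE(names))   /* bound derived from the table itself */
            return names[key];
        return NULL;
    }

    int main(void)
    {
        printf("%s\n", name_string(1));            /* Recovered Error */
        printf("%p\n", (void *)name_string(200));  /* NULL: out of range */
        return 0;
    }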
6323     diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
6324     index c11cd193f896..5ada9268a450 100644
6325     --- a/drivers/scsi/cxlflash/common.h
6326     +++ b/drivers/scsi/cxlflash/common.h
6327     @@ -165,6 +165,8 @@ struct afu {
6328     struct sisl_host_map __iomem *host_map; /* MC host map */
6329     struct sisl_ctrl_map __iomem *ctrl_map; /* MC control map */
6330    
6331     + struct kref mapcount;
6332     +
6333     ctx_hndl_t ctx_hndl; /* master's context handle */
6334     u64 *hrrq_start;
6335     u64 *hrrq_end;
6336     diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
6337     index 1e5bf0ca81da..c86847c68448 100644
6338     --- a/drivers/scsi/cxlflash/main.c
6339     +++ b/drivers/scsi/cxlflash/main.c
6340     @@ -289,7 +289,7 @@ static void context_reset(struct afu_cmd *cmd)
6341     atomic64_set(&afu->room, room);
6342     if (room)
6343     goto write_rrin;
6344     - udelay(nretry);
6345     + udelay(1 << nretry);
6346     } while (nretry++ < MC_ROOM_RETRY_CNT);
6347    
6348     pr_err("%s: no cmd_room to send reset\n", __func__);
6349     @@ -303,7 +303,7 @@ write_rrin:
6350     if (rrin != 0x1)
6351     break;
6352     /* Double delay each time */
6353     - udelay(2 << nretry);
6354     + udelay(1 << nretry);
6355     } while (nretry++ < MC_ROOM_RETRY_CNT);
6356     }
6357    
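The cxlflash udelay changes above make every retry wait 1 << nretry microseconds, replacing the old linear udelay(nretry) (which waits zero on the first pass) and the inconsistent 2 << nretry variant with one exponential backoff. A standalone trace of the resulting waits; RETRY_CNT 8 is an invented stand-in for MC_ROOM_RETRY_CNT and udelay is modeled with a print:

    #include <stdio.h>

    #define RETRY_CNT 8   /* illustrative; not the driver's actual constant */

    static void udelay_us(unsigned int us) { printf("wait %u us\n", us); }

    int main(void)
    {
        unsigned int nretry = 0;
        unsigned int total = 0;

        do {
            udelay_us(1u << nretry);   /* exponential backoff: 1, 2, 4, ... us */
            total += 1u << nretry;
        } while (nretry++ < RETRY_CNT);

        /* 1 + 2 + ... + 256 = 511 us worst case for RETRY_CNT = 8 */
        printf("total %u us\n", total);
        return 0;
    }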
6358     @@ -338,7 +338,7 @@ retry:
6359     atomic64_set(&afu->room, room);
6360     if (room)
6361     goto write_ioarrin;
6362     - udelay(nretry);
6363     + udelay(1 << nretry);
6364     } while (nretry++ < MC_ROOM_RETRY_CNT);
6365    
6366     dev_err(dev, "%s: no cmd_room to send 0x%X\n",
6367     @@ -352,7 +352,7 @@ retry:
6368     * afu->room.
6369     */
6370     if (nretry++ < MC_ROOM_RETRY_CNT) {
6371     - udelay(nretry);
6372     + udelay(1 << nretry);
6373     goto retry;
6374     }
6375    
6376     @@ -368,6 +368,7 @@ out:
6377    
6378     no_room:
6379     afu->read_room = true;
6380     + kref_get(&cfg->afu->mapcount);
6381     schedule_work(&cfg->work_q);
6382     rc = SCSI_MLQUEUE_HOST_BUSY;
6383     goto out;
6384     @@ -473,6 +474,16 @@ out:
6385     return rc;
6386     }
6387    
6388     +static void afu_unmap(struct kref *ref)
6389     +{
6390     + struct afu *afu = container_of(ref, struct afu, mapcount);
6391     +
6392     + if (likely(afu->afu_map)) {
6393     + cxl_psa_unmap((void __iomem *)afu->afu_map);
6394     + afu->afu_map = NULL;
6395     + }
6396     +}
6397     +
6398     /**
6399     * cxlflash_driver_info() - information handler for this host driver
6400     * @host: SCSI host associated with device.
6401     @@ -503,6 +514,7 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
6402     ulong lock_flags;
6403     short lflag = 0;
6404     int rc = 0;
6405     + int kref_got = 0;
6406    
6407     dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
6408     "cdb=(%08X-%08X-%08X-%08X)\n",
6409     @@ -547,6 +559,9 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
6410     goto out;
6411     }
6412    
6413     + kref_get(&cfg->afu->mapcount);
6414     + kref_got = 1;
6415     +
6416     cmd->rcb.ctx_id = afu->ctx_hndl;
6417     cmd->rcb.port_sel = port_sel;
6418     cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
6419     @@ -587,6 +602,8 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
6420     }
6421    
6422     out:
6423     + if (kref_got)
6424     + kref_put(&afu->mapcount, afu_unmap);
6425     pr_devel("%s: returning rc=%d\n", __func__, rc);
6426     return rc;
6427     }
6428     @@ -632,20 +649,36 @@ static void free_mem(struct cxlflash_cfg *cfg)
6429     * @cfg: Internal structure associated with the host.
6430     *
6431     * Safe to call with AFU in a partially allocated/initialized state.
6432     + *
6433     + * Cleans up all state associated with the command queue, and unmaps
6434     + * the MMIO space.
6435     + *
6436     + * - complete() will take care of commands we initiated (they'll be checked
6437     + * in as part of the cleanup that occurs after the completion)
6438     + *
6439     + * - cmd_checkin() will take care of entries that we did not initiate and that
6440     + * have not (and will not) complete because they are sitting on a [now stale]
6441     + * hardware queue
6442     */
6443     static void stop_afu(struct cxlflash_cfg *cfg)
6444     {
6445     int i;
6446     struct afu *afu = cfg->afu;
6447     + struct afu_cmd *cmd;
6448    
6449     if (likely(afu)) {
6450     - for (i = 0; i < CXLFLASH_NUM_CMDS; i++)
6451     - complete(&afu->cmd[i].cevent);
6452     + for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
6453     + cmd = &afu->cmd[i];
6454     + complete(&cmd->cevent);
6455     + if (!atomic_read(&cmd->free))
6456     + cmd_checkin(cmd);
6457     + }
6458    
6459     if (likely(afu->afu_map)) {
6460     cxl_psa_unmap((void __iomem *)afu->afu_map);
6461     afu->afu_map = NULL;
6462     }
6463     + kref_put(&afu->mapcount, afu_unmap);
6464     }
6465     }
6466    
6467     @@ -731,8 +764,8 @@ static void cxlflash_remove(struct pci_dev *pdev)
6468     scsi_remove_host(cfg->host);
6469     /* fall through */
6470     case INIT_STATE_AFU:
6471     - term_afu(cfg);
6472     cancel_work_sync(&cfg->work_q);
6473     + term_afu(cfg);
6474     case INIT_STATE_PCI:
6475     pci_release_regions(cfg->dev);
6476     pci_disable_device(pdev);
6477     @@ -1108,7 +1141,7 @@ static const struct asyc_intr_info ainfo[] = {
6478     {SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
6479     {SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
6480     {SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
6481     - {SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, 0},
6482     + {SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, LINK_RESET},
6483     {SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
6484     {SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, SCAN_HOST},
6485     {SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
6486     @@ -1316,6 +1349,7 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
6487     __func__, port);
6488     cfg->lr_state = LINK_RESET_REQUIRED;
6489     cfg->lr_port = port;
6490     + kref_get(&cfg->afu->mapcount);
6491     schedule_work(&cfg->work_q);
6492     }
6493    
6494     @@ -1336,6 +1370,7 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
6495    
6496     if (info->action & SCAN_HOST) {
6497     atomic_inc(&cfg->scan_host_needed);
6498     + kref_get(&cfg->afu->mapcount);
6499     schedule_work(&cfg->work_q);
6500     }
6501     }
6502     @@ -1731,6 +1766,7 @@ static int init_afu(struct cxlflash_cfg *cfg)
6503     rc = -ENOMEM;
6504     goto err1;
6505     }
6506     + kref_init(&afu->mapcount);
6507    
6508     /* No byte reverse on reading afu_version or string will be backwards */
6509     reg = readq(&afu->afu_map->global.regs.afu_version);
6510     @@ -1765,8 +1801,7 @@ out:
6511     return rc;
6512    
6513     err2:
6514     - cxl_psa_unmap((void __iomem *)afu->afu_map);
6515     - afu->afu_map = NULL;
6516     + kref_put(&afu->mapcount, afu_unmap);
6517     err1:
6518     term_mc(cfg, UNDO_START);
6519     goto out;
6520     @@ -2114,6 +2149,16 @@ static ssize_t lun_mode_store(struct device *dev,
6521     rc = kstrtouint(buf, 10, &lun_mode);
6522     if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
6523     afu->internal_lun = lun_mode;
6524     +
6525     + /*
6526     + * When configured for internal LUN, there is only one channel,
6527     + * channel number 0, else there will be 2 (default).
6528     + */
6529     + if (afu->internal_lun)
6530     + shost->max_channel = 0;
6531     + else
6532     + shost->max_channel = NUM_FC_PORTS - 1;
6533     +
6534     afu_reset(cfg);
6535     scsi_scan_host(cfg->host);
6536     }
6537     @@ -2274,6 +2319,7 @@ static struct scsi_host_template driver_template = {
6538     * Device dependent values
6539     */
6540     static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS };
6541     +static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS };
6542    
6543     /*
6544     * PCI device binding table
6545     @@ -2281,6 +2327,8 @@ static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS };
6546     static struct pci_device_id cxlflash_pci_table[] = {
6547     {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
6548     PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
6549     + {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
6550     + PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
6551     {}
6552     };
6553    
6554     @@ -2339,6 +2387,7 @@ static void cxlflash_worker_thread(struct work_struct *work)
6555    
6556     if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
6557     scsi_scan_host(cfg->host);
6558     + kref_put(&afu->mapcount, afu_unmap);
6559     }
6560    
6561     /**
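The cxlflash main.c changes above wrap the MMIO mapping in a kref: init_afu() takes the initial reference, every async user (queuecommand, IRQ handlers scheduling work, the worker thread) pairs a kref_get() with a kref_put(), and the afu_unmap() release callback unmaps only when the last holder is gone. A minimal standalone model of that lifetime rule using a C11 atomic counter; the names and the unmap stand-in are invented:

    #include <stdatomic.h>
    #include <stdio.h>

    struct afu_model {
        atomic_int mapcount;
        void *map;
    };

    static char mmio[16];

    static void release(struct afu_model *a)
    {
        if (a->map) {
            a->map = NULL;    /* stands in for cxl_psa_unmap() */
            puts("unmapped");
        }
    }

    static void get_ref(struct afu_model *a)
    {
        atomic_fetch_add(&a->mapcount, 1);
    }

    static void put_ref(struct afu_model *a)
    {
        if (atomic_fetch_sub(&a->mapcount, 1) == 1)
            release(a);       /* last reference gone */
    }

    int main(void)
    {
        struct afu_model a = { .map = mmio };
        atomic_init(&a.mapcount, 1);   /* like kref_init() */

        get_ref(&a);   /* e.g. before schedule_work() */
        put_ref(&a);   /* worker finished */
        put_ref(&a);   /* teardown drops the initial reference: unmap here */
        return 0;
    }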
6562     diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h
6563     index 60324566c14f..3d2d606fafb3 100644
6564     --- a/drivers/scsi/cxlflash/main.h
6565     +++ b/drivers/scsi/cxlflash/main.h
6566     @@ -24,8 +24,8 @@
6567     #define CXLFLASH_ADAPTER_NAME "IBM POWER CXL Flash Adapter"
6568     #define CXLFLASH_DRIVER_DATE "(August 13, 2015)"
6569    
6570     -#define PCI_DEVICE_ID_IBM_CORSA 0x04F0
6571     -#define CXLFLASH_SUBS_DEV_ID 0x04F0
6572     +#define PCI_DEVICE_ID_IBM_CORSA 0x04F0
6573     +#define PCI_DEVICE_ID_IBM_FLASH_GT 0x0600
6574    
6575     /* Since there is only one target, make it 0 */
6576     #define CXLFLASH_TARGET 0
6577     diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
6578     index cac2e6a50efd..babe7ccc1777 100644
6579     --- a/drivers/scsi/cxlflash/superpipe.c
6580     +++ b/drivers/scsi/cxlflash/superpipe.c
6581     @@ -1380,7 +1380,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
6582     }
6583    
6584     ctxid = cxl_process_element(ctx);
6585     - if (unlikely((ctxid > MAX_CONTEXT) || (ctxid < 0))) {
6586     + if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
6587     dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
6588     rc = -EPERM;
6589     goto err2;
6590     @@ -1508,7 +1508,7 @@ static int recover_context(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
6591     }
6592    
6593     ctxid = cxl_process_element(ctx);
6594     - if (unlikely((ctxid > MAX_CONTEXT) || (ctxid < 0))) {
6595     + if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
6596     dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
6597     rc = -EPERM;
6598     goto err1;
6599     @@ -1590,6 +1590,13 @@ err1:
6600     * place at the same time and the failure was due to CXL services being
6601     * unable to keep up.
6602     *
6603     + * As this routine is called on ioctl context, it holds the ioctl r/w
6604     + * semaphore that is used to drain ioctls in recovery scenarios. The
6605     + * implementation to achieve the pacing described above (a local mutex)
6606     + * requires that the ioctl r/w semaphore be dropped and reacquired to
6607     + * avoid a 3-way deadlock when multiple process recoveries operate in
6608     + * parallel.
6609     + *
6610     * Because a user can detect an error condition before the kernel, it is
6611     * quite possible for this routine to act as the kernel's EEH detection
6612     * source (MMIO read of mbox_r). Because of this, there is a window of
6613     @@ -1617,9 +1624,17 @@ static int cxlflash_afu_recover(struct scsi_device *sdev,
6614     int rc = 0;
6615    
6616     atomic_inc(&cfg->recovery_threads);
6617     + up_read(&cfg->ioctl_rwsem);
6618     rc = mutex_lock_interruptible(mutex);
6619     + down_read(&cfg->ioctl_rwsem);
6620     if (rc)
6621     goto out;
6622     + rc = check_state(cfg);
6623     + if (rc) {
6624     + dev_err(dev, "%s: Failed state! rc=%d\n", __func__, rc);
6625     + rc = -ENODEV;
6626     + goto out;
6627     + }
6628    
6629     dev_dbg(dev, "%s: reason 0x%016llX rctxid=%016llX\n",
6630     __func__, recover->reason, rctxid);
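The superpipe.c hunk above drops the ioctl read semaphore before blocking on the recovery mutex, reacquires it afterwards, and then re-validates adapter state, since (per the added comment) sleeping on the mutex while holding the semaphore produced a 3-way deadlock during parallel recoveries. A standalone sketch of that drop-and-revalidate shape using POSIX primitives; check_state() and the lock names are invented placeholders:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t ioctl_lock = PTHREAD_RWLOCK_INITIALIZER;
    static pthread_mutex_t recovery_mutex = PTHREAD_MUTEX_INITIALIZER;

    static int check_state(void) { return 0; }   /* illustrative placeholder */

    static int recover(void)
    {
        int rc;

        pthread_rwlock_rdlock(&ioctl_lock);    /* taken by the ioctl path */

        pthread_rwlock_unlock(&ioctl_lock);    /* drop before blocking ... */
        pthread_mutex_lock(&recovery_mutex);
        pthread_rwlock_rdlock(&ioctl_lock);    /* ... then reacquire */

        rc = check_state();                    /* world may have changed */
        if (rc)
            puts("state changed while unlocked");

        pthread_mutex_unlock(&recovery_mutex);
        pthread_rwlock_unlock(&ioctl_lock);
        return rc;
    }

    int main(void) { return recover(); }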
6631     diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
6632     index a53f583e2d7b..50f8e9300770 100644
6633     --- a/drivers/scsi/cxlflash/vlun.c
6634     +++ b/drivers/scsi/cxlflash/vlun.c
6635     @@ -1008,6 +1008,8 @@ int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg)
6636     virt->last_lba = last_lba;
6637     virt->rsrc_handle = rsrc_handle;
6638    
6639     + if (lli->port_sel == BOTH_PORTS)
6640     + virt->hdr.return_flags |= DK_CXLFLASH_ALL_PORTS_ACTIVE;
6641     out:
6642     if (likely(ctxi))
6643     put_context(ctxi);
6644     diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
6645     index b0e6fe46448d..80d3c740a8a8 100644
6646     --- a/drivers/scsi/lpfc/lpfc_crtn.h
6647     +++ b/drivers/scsi/lpfc/lpfc_crtn.h
6648     @@ -72,6 +72,7 @@ void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *);
6649     void lpfc_retry_pport_discovery(struct lpfc_hba *);
6650     void lpfc_release_rpi(struct lpfc_hba *, struct lpfc_vport *, uint16_t);
6651    
6652     +void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
6653     void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
6654     void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
6655     void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
6656     diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
6657     index b6fa257ea3e0..59ced8864b2f 100644
6658     --- a/drivers/scsi/lpfc/lpfc_els.c
6659     +++ b/drivers/scsi/lpfc/lpfc_els.c
6660     @@ -455,9 +455,9 @@ int
6661     lpfc_issue_reg_vfi(struct lpfc_vport *vport)
6662     {
6663     struct lpfc_hba *phba = vport->phba;
6664     - LPFC_MBOXQ_t *mboxq;
6665     + LPFC_MBOXQ_t *mboxq = NULL;
6666     struct lpfc_nodelist *ndlp;
6667     - struct lpfc_dmabuf *dmabuf;
6668     + struct lpfc_dmabuf *dmabuf = NULL;
6669     int rc = 0;
6670    
6671     /* move forward in case of SLI4 FC port loopback test and pt2pt mode */
6672     @@ -471,25 +471,33 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
6673     }
6674     }
6675    
6676     - dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
6677     - if (!dmabuf) {
6678     + mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6679     + if (!mboxq) {
6680     rc = -ENOMEM;
6681     goto fail;
6682     }
6683     - dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
6684     - if (!dmabuf->virt) {
6685     - rc = -ENOMEM;
6686     - goto fail_free_dmabuf;
6687     - }
6688    
6689     - mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6690     - if (!mboxq) {
6691     - rc = -ENOMEM;
6692     - goto fail_free_coherent;
6693     + /* Supply CSP's only if we are fabric connect or pt-to-pt connect */
6694     + if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) {
6695     + dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
6696     + if (!dmabuf) {
6697     + rc = -ENOMEM;
6698     + goto fail;
6699     + }
6700     + dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
6701     + if (!dmabuf->virt) {
6702     + rc = -ENOMEM;
6703     + goto fail;
6704     + }
6705     + memcpy(dmabuf->virt, &phba->fc_fabparam,
6706     + sizeof(struct serv_parm));
6707     }
6708     +
6709     vport->port_state = LPFC_FABRIC_CFG_LINK;
6710     - memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
6711     - lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
6712     + if (dmabuf)
6713     + lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
6714     + else
6715     + lpfc_reg_vfi(mboxq, vport, 0);
6716    
6717     mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
6718     mboxq->vport = vport;
6719     @@ -497,17 +505,19 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
6720     rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
6721     if (rc == MBX_NOT_FINISHED) {
6722     rc = -ENXIO;
6723     - goto fail_free_mbox;
6724     + goto fail;
6725     }
6726     return 0;
6727    
6728     -fail_free_mbox:
6729     - mempool_free(mboxq, phba->mbox_mem_pool);
6730     -fail_free_coherent:
6731     - lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
6732     -fail_free_dmabuf:
6733     - kfree(dmabuf);
6734     fail:
6735     + if (mboxq)
6736     + mempool_free(mboxq, phba->mbox_mem_pool);
6737     + if (dmabuf) {
6738     + if (dmabuf->virt)
6739     + lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
6740     + kfree(dmabuf);
6741     + }
6742     +
6743     lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6744     lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6745     "0289 Issue Register VFI failed: Err %d\n", rc);
6746     @@ -711,9 +721,10 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
6747     * For FC we need to do some special processing because of the SLI
6748     * Port's default settings of the Common Service Parameters.
6749     */
6750     - if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) {
6751     + if ((phba->sli_rev == LPFC_SLI_REV4) &&
6752     + (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) {
6753     /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
6754     - if ((phba->sli_rev == LPFC_SLI_REV4) && fabric_param_changed)
6755     + if (fabric_param_changed)
6756     lpfc_unregister_fcf_prep(phba);
6757    
6758     /* This should just update the VFI CSPs*/
6759     @@ -824,13 +835,21 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
6760    
6761     spin_lock_irq(shost->host_lock);
6762     vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
6763     + vport->fc_flag |= FC_PT2PT;
6764     spin_unlock_irq(shost->host_lock);
6765    
6766     - phba->fc_edtov = FF_DEF_EDTOV;
6767     - phba->fc_ratov = FF_DEF_RATOV;
6768     + /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
6769     + if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
6770     + lpfc_unregister_fcf_prep(phba);
6771     +
6772     + spin_lock_irq(shost->host_lock);
6773     + vport->fc_flag &= ~FC_VFI_REGISTERED;
6774     + spin_unlock_irq(shost->host_lock);
6775     + phba->fc_topology_changed = 0;
6776     + }
6777     +
6778     rc = memcmp(&vport->fc_portname, &sp->portName,
6779     sizeof(vport->fc_portname));
6780     - memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
6781    
6782     if (rc >= 0) {
6783     /* This side will initiate the PLOGI */
6784     @@ -839,38 +858,14 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
6785     spin_unlock_irq(shost->host_lock);
6786    
6787     /*
6788     - * N_Port ID cannot be 0, set our to LocalID the other
6789     - * side will be RemoteID.
6790     + * N_Port ID cannot be 0, set our Id to LocalID
6791     + * the other side will be RemoteID.
6792     */
6793    
6794     /* not equal */
6795     if (rc)
6796     vport->fc_myDID = PT2PT_LocalID;
6797    
6798     - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6799     - if (!mbox)
6800     - goto fail;
6801     -
6802     - lpfc_config_link(phba, mbox);
6803     -
6804     - mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
6805     - mbox->vport = vport;
6806     - rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6807     - if (rc == MBX_NOT_FINISHED) {
6808     - mempool_free(mbox, phba->mbox_mem_pool);
6809     - goto fail;
6810     - }
6811     -
6812     - /*
6813     - * For SLI4, the VFI/VPI are registered AFTER the
6814     - * Nport with the higher WWPN sends the PLOGI with
6815     - * an assigned NPortId.
6816     - */
6817     -
6818     - /* not equal */
6819     - if ((phba->sli_rev == LPFC_SLI_REV4) && rc)
6820     - lpfc_issue_reg_vfi(vport);
6821     -
6822     /* Decrement ndlp reference count indicating that ndlp can be
6823     * safely released when other references to it are done.
6824     */
6825     @@ -912,29 +907,20 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
6826     /* If we are pt2pt with another NPort, force NPIV off! */
6827     phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
6828    
6829     - spin_lock_irq(shost->host_lock);
6830     - vport->fc_flag |= FC_PT2PT;
6831     - spin_unlock_irq(shost->host_lock);
6832     - /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
6833     - if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
6834     - lpfc_unregister_fcf_prep(phba);
6835     + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6836     + if (!mbox)
6837     + goto fail;
6838    
6839     - /* The FC_VFI_REGISTERED flag will get clear in the cmpl
6840     - * handler for unreg_vfi, but if we don't force the
6841     - * FC_VFI_REGISTERED flag then the reg_vfi mailbox could be
6842     - * built with the update bit set instead of just the vp bit to
6843     - * change the Nport ID. We need to have the vp set and the
6844     - * Upd cleared on topology changes.
6845     - */
6846     - spin_lock_irq(shost->host_lock);
6847     - vport->fc_flag &= ~FC_VFI_REGISTERED;
6848     - spin_unlock_irq(shost->host_lock);
6849     - phba->fc_topology_changed = 0;
6850     - lpfc_issue_reg_vfi(vport);
6851     + lpfc_config_link(phba, mbox);
6852     +
6853     + mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
6854     + mbox->vport = vport;
6855     + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6856     + if (rc == MBX_NOT_FINISHED) {
6857     + mempool_free(mbox, phba->mbox_mem_pool);
6858     + goto fail;
6859     }
6860    
6861     - /* Start discovery - this should just do CLEAR_LA */
6862     - lpfc_disc_start(vport);
6863     return 0;
6864     fail:
6865     return -ENXIO;
6866     @@ -1157,6 +1143,7 @@ flogifail:
6867     spin_lock_irq(&phba->hbalock);
6868     phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
6869     spin_unlock_irq(&phba->hbalock);
6870     +
6871     lpfc_nlp_put(ndlp);
6872    
6873     if (!lpfc_error_lost_link(irsp)) {
6874     @@ -3792,14 +3779,17 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6875     lpfc_nlp_set_state(vport, ndlp,
6876     NLP_STE_REG_LOGIN_ISSUE);
6877     }
6878     +
6879     + ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
6880     if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
6881     != MBX_NOT_FINISHED)
6882     goto out;
6883     - else
6884     - /* Decrement the ndlp reference count we
6885     - * set for this failed mailbox command.
6886     - */
6887     - lpfc_nlp_put(ndlp);
6888     +
6889     + /* Decrement the ndlp reference count we
6890     + * set for this failed mailbox command.
6891     + */
6892     + lpfc_nlp_put(ndlp);
6893     + ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
6894    
6895     /* ELS rsp: Cannot issue reg_login for <NPortid> */
6896     lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6897     @@ -3856,6 +3846,7 @@ out:
6898     * the routine lpfc_els_free_iocb.
6899     */
6900     cmdiocb->context1 = NULL;
6901     +
6902     }
6903    
6904     lpfc_els_free_iocb(phba, cmdiocb);
6905     @@ -3898,6 +3889,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
6906     IOCB_t *oldcmd;
6907     struct lpfc_iocbq *elsiocb;
6908     uint8_t *pcmd;
6909     + struct serv_parm *sp;
6910     uint16_t cmdsize;
6911     int rc;
6912     ELS_PKT *els_pkt_ptr;
6913     @@ -3927,6 +3919,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
6914     "Issue ACC: did:x%x flg:x%x",
6915     ndlp->nlp_DID, ndlp->nlp_flag, 0);
6916     break;
6917     + case ELS_CMD_FLOGI:
6918     case ELS_CMD_PLOGI:
6919     cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
6920     elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
6921     @@ -3944,10 +3937,34 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
6922    
6923     *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
6924     pcmd += sizeof(uint32_t);
6925     - memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
6926     + sp = (struct serv_parm *)pcmd;
6927     +
6928     + if (flag == ELS_CMD_FLOGI) {
6929     + /* Copy the received service parameters back */
6930     + memcpy(sp, &phba->fc_fabparam,
6931     + sizeof(struct serv_parm));
6932     +
6933     + /* Clear the F_Port bit */
6934     + sp->cmn.fPort = 0;
6935     +
6936     + /* Mark all class service parameters as invalid */
6937     + sp->cls1.classValid = 0;
6938     + sp->cls2.classValid = 0;
6939     + sp->cls3.classValid = 0;
6940     + sp->cls4.classValid = 0;
6941     +
6942     + /* Copy our worldwide names */
6943     + memcpy(&sp->portName, &vport->fc_sparam.portName,
6944     + sizeof(struct lpfc_name));
6945     + memcpy(&sp->nodeName, &vport->fc_sparam.nodeName,
6946     + sizeof(struct lpfc_name));
6947     + } else {
6948     + memcpy(pcmd, &vport->fc_sparam,
6949     + sizeof(struct serv_parm));
6950     + }
6951    
6952     lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
6953     - "Issue ACC PLOGI: did:x%x flg:x%x",
6954     + "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x",
6955     ndlp->nlp_DID, ndlp->nlp_flag, 0);
6956     break;
6957     case ELS_CMD_PRLO:
6958     @@ -4681,28 +4698,25 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
6959    
6960     desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG);
6961    
6962     - switch (phba->sli4_hba.link_state.speed) {
6963     - case LPFC_FC_LA_SPEED_1G:
6964     + switch (phba->fc_linkspeed) {
6965     + case LPFC_LINK_SPEED_1GHZ:
6966     rdp_speed = RDP_PS_1GB;
6967     break;
6968     - case LPFC_FC_LA_SPEED_2G:
6969     + case LPFC_LINK_SPEED_2GHZ:
6970     rdp_speed = RDP_PS_2GB;
6971     break;
6972     - case LPFC_FC_LA_SPEED_4G:
6973     + case LPFC_LINK_SPEED_4GHZ:
6974     rdp_speed = RDP_PS_4GB;
6975     break;
6976     - case LPFC_FC_LA_SPEED_8G:
6977     + case LPFC_LINK_SPEED_8GHZ:
6978     rdp_speed = RDP_PS_8GB;
6979     break;
6980     - case LPFC_FC_LA_SPEED_10G:
6981     + case LPFC_LINK_SPEED_10GHZ:
6982     rdp_speed = RDP_PS_10GB;
6983     break;
6984     - case LPFC_FC_LA_SPEED_16G:
6985     + case LPFC_LINK_SPEED_16GHZ:
6986     rdp_speed = RDP_PS_16GB;
6987     break;
6988     - case LPFC_FC_LA_SPEED_32G:
6989     - rdp_speed = RDP_PS_32GB;
6990     - break;
6991     default:
6992     rdp_speed = RDP_PS_UNKNOWN;
6993     break;
6994     @@ -5739,7 +5753,6 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6995     IOCB_t *icmd = &cmdiocb->iocb;
6996     struct serv_parm *sp;
6997     LPFC_MBOXQ_t *mbox;
6998     - struct ls_rjt stat;
6999     uint32_t cmd, did;
7000     int rc;
7001     uint32_t fc_flag = 0;
7002     @@ -5765,135 +5778,92 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7003     return 1;
7004     }
7005    
7006     - if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1))) {
7007     - /* For a FLOGI we accept, then if our portname is greater
7008     - * then the remote portname we initiate Nport login.
7009     - */
7010     + (void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1);
7011    
7012     - rc = memcmp(&vport->fc_portname, &sp->portName,
7013     - sizeof(struct lpfc_name));
7014    
7015     - if (!rc) {
7016     - if (phba->sli_rev < LPFC_SLI_REV4) {
7017     - mbox = mempool_alloc(phba->mbox_mem_pool,
7018     - GFP_KERNEL);
7019     - if (!mbox)
7020     - return 1;
7021     - lpfc_linkdown(phba);
7022     - lpfc_init_link(phba, mbox,
7023     - phba->cfg_topology,
7024     - phba->cfg_link_speed);
7025     - mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
7026     - mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
7027     - mbox->vport = vport;
7028     - rc = lpfc_sli_issue_mbox(phba, mbox,
7029     - MBX_NOWAIT);
7030     - lpfc_set_loopback_flag(phba);
7031     - if (rc == MBX_NOT_FINISHED)
7032     - mempool_free(mbox, phba->mbox_mem_pool);
7033     - return 1;
7034     - } else {
7035     - /* abort the flogi coming back to ourselves
7036     - * due to external loopback on the port.
7037     - */
7038     - lpfc_els_abort_flogi(phba);
7039     - return 0;
7040     - }
7041     - } else if (rc > 0) { /* greater than */
7042     - spin_lock_irq(shost->host_lock);
7043     - vport->fc_flag |= FC_PT2PT_PLOGI;
7044     - spin_unlock_irq(shost->host_lock);
7045     + /*
7046     + * If our portname is greater than the remote portname,
7047     + * then we initiate Nport login.
7048     + */
7049    
7050     - /* If we have the high WWPN we can assign our own
7051     - * myDID; otherwise, we have to WAIT for a PLOGI
7052     - * from the remote NPort to find out what it
7053     - * will be.
7054     - */
7055     - vport->fc_myDID = PT2PT_LocalID;
7056     - } else
7057     - vport->fc_myDID = PT2PT_RemoteID;
7058     + rc = memcmp(&vport->fc_portname, &sp->portName,
7059     + sizeof(struct lpfc_name));
7060    
7061     - /*
7062     - * The vport state should go to LPFC_FLOGI only
7063     - * AFTER we issue a FLOGI, not receive one.
7064     + if (!rc) {
7065     + if (phba->sli_rev < LPFC_SLI_REV4) {
7066     + mbox = mempool_alloc(phba->mbox_mem_pool,
7067     + GFP_KERNEL);
7068     + if (!mbox)
7069     + return 1;
7070     + lpfc_linkdown(phba);
7071     + lpfc_init_link(phba, mbox,
7072     + phba->cfg_topology,
7073     + phba->cfg_link_speed);
7074     + mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
7075     + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
7076     + mbox->vport = vport;
7077     + rc = lpfc_sli_issue_mbox(phba, mbox,
7078     + MBX_NOWAIT);
7079     + lpfc_set_loopback_flag(phba);
7080     + if (rc == MBX_NOT_FINISHED)
7081     + mempool_free(mbox, phba->mbox_mem_pool);
7082     + return 1;
7083     + }
7084     +
7085     + /* abort the flogi coming back to ourselves
7086     + * due to external loopback on the port.
7087     */
7088     + lpfc_els_abort_flogi(phba);
7089     + return 0;
7090     +
7091     + } else if (rc > 0) { /* greater than */
7092     spin_lock_irq(shost->host_lock);
7093     - fc_flag = vport->fc_flag;
7094     - port_state = vport->port_state;
7095     - vport->fc_flag |= FC_PT2PT;
7096     - vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
7097     + vport->fc_flag |= FC_PT2PT_PLOGI;
7098     spin_unlock_irq(shost->host_lock);
7099     - lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7100     - "3311 Rcv Flogi PS x%x new PS x%x "
7101     - "fc_flag x%x new fc_flag x%x\n",
7102     - port_state, vport->port_state,
7103     - fc_flag, vport->fc_flag);
7104    
7105     - /*
7106     - * We temporarily set fc_myDID to make it look like we are
7107     - * a Fabric. This is done just so we end up with the right
7108     - * did / sid on the FLOGI ACC rsp.
7109     + /* If we have the high WWPN we can assign our own
7110     + * myDID; otherwise, we have to WAIT for a PLOGI
7111     + * from the remote NPort to find out what it
7112     + * will be.
7113     */
7114     - did = vport->fc_myDID;
7115     - vport->fc_myDID = Fabric_DID;
7116     -
7117     + vport->fc_myDID = PT2PT_LocalID;
7118     } else {
7119     - /* Reject this request because invalid parameters */
7120     - stat.un.b.lsRjtRsvd0 = 0;
7121     - stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
7122     - stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
7123     - stat.un.b.vendorUnique = 0;
7124     -
7125     - /*
7126     - * We temporarily set fc_myDID to make it look like we are
7127     - * a Fabric. This is done just so we end up with the right
7128     - * did / sid on the FLOGI LS_RJT rsp.
7129     - */
7130     - did = vport->fc_myDID;
7131     - vport->fc_myDID = Fabric_DID;
7132     -
7133     - lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
7134     - NULL);
7135     + vport->fc_myDID = PT2PT_RemoteID;
7136     + }
7137    
7138     - /* Now let's put fc_myDID back to what it's supposed to be */
7139     - vport->fc_myDID = did;
7140     + /*
7141     + * The vport state should go to LPFC_FLOGI only
7142     + * AFTER we issue a FLOGI, not receive one.
7143     + */
7144     + spin_lock_irq(shost->host_lock);
7145     + fc_flag = vport->fc_flag;
7146     + port_state = vport->port_state;
7147     + vport->fc_flag |= FC_PT2PT;
7148     + vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
7149     + spin_unlock_irq(shost->host_lock);
7150     + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7151     + "3311 Rcv Flogi PS x%x new PS x%x "
7152     + "fc_flag x%x new fc_flag x%x\n",
7153     + port_state, vport->port_state,
7154     + fc_flag, vport->fc_flag);
7155    
7156     - return 1;
7157     - }
7158     + /*
7159     + * We temporarily set fc_myDID to make it look like we are
7160     + * a Fabric. This is done just so we end up with the right
7161     + * did / sid on the FLOGI ACC rsp.
7162     + */
7163     + did = vport->fc_myDID;
7164     + vport->fc_myDID = Fabric_DID;
7165    
7166     - /* send our FLOGI first */
7167     - if (vport->port_state < LPFC_FLOGI) {
7168     - vport->fc_myDID = 0;
7169     - lpfc_initial_flogi(vport);
7170     - vport->fc_myDID = Fabric_DID;
7171     - }
7172     + memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
7173    
7174     /* Send back ACC */
7175     - lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
7176     + lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL);
7177    
7178     /* Now let's put fc_myDID back to what it's supposed to be */
7179     vport->fc_myDID = did;
7180    
7181     - if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
7182     -
7183     - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7184     - if (!mbox)
7185     - goto fail;
7186     -
7187     - lpfc_config_link(phba, mbox);
7188     -
7189     - mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
7190     - mbox->vport = vport;
7191     - rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
7192     - if (rc == MBX_NOT_FINISHED) {
7193     - mempool_free(mbox, phba->mbox_mem_pool);
7194     - goto fail;
7195     - }
7196     - }
7197     -
7198     return 0;
7199     -fail:
7200     - return 1;
7201     }
7202    
7203     /**
7204     @@ -7345,7 +7315,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7205    
7206     /* reject till our FLOGI completes */
7207     if ((vport->port_state < LPFC_FABRIC_CFG_LINK) &&
7208     - (cmd != ELS_CMD_FLOGI)) {
7209     + (cmd != ELS_CMD_FLOGI)) {
7210     rjt_err = LSRJT_UNABLE_TPC;
7211     rjt_exp = LSEXP_NOTHING_MORE;
7212     goto lsrjt;
7213     @@ -7381,6 +7351,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7214     rjt_exp = LSEXP_NOTHING_MORE;
7215     break;
7216     }
7217     +
7218     if (vport->port_state < LPFC_DISC_AUTH) {
7219     if (!(phba->pport->fc_flag & FC_PT2PT) ||
7220     (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
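
The lpfc_els.c hunks above restructure the received-FLOGI path so a single memcmp() of the local and remote WWPNs drives the whole point-to-point decision: equal names mean our own FLOGI was reflected back by an external loopback (abort it), a higher local name means this side initiates the PLOGI and claims the local pt2pt ID, and a lower name means we wait for the peer's PLOGI and take the remote ID. A minimal userspace sketch of that three-way decision follows; the names and ID values are illustrative, not taken from lpfc.

#include <stdio.h>
#include <string.h>

#define LOCAL_ID  0x01   /* illustrative stand-ins, not the lpfc values */
#define REMOTE_ID 0x02

struct wwpn { unsigned char b[8]; };

/* Returns the pt2pt address this side should adopt, or -1 when the
 * frame is our own FLOGI reflected back (external loopback). */
static int pt2pt_role(const struct wwpn *mine, const struct wwpn *theirs)
{
    int rc = memcmp(mine, theirs, sizeof(*mine));

    if (rc == 0)
        return -1;                        /* loopback: abort our FLOGI */
    return rc > 0 ? LOCAL_ID : REMOTE_ID; /* high WWPN sends the PLOGI */
}

int main(void)
{
    struct wwpn a = { { 0x20, 0, 0, 0, 0xc9, 0, 0, 2 } };
    struct wwpn b = { { 0x20, 0, 0, 0, 0xc9, 0, 0, 1 } };

    printf("role: %d\n", pt2pt_role(&a, &b)); /* 1: we initiate PLOGI */
    return 0;
}
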
7221     diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
7222     index bfc2442dd74a..d3668aa555d5 100644
7223     --- a/drivers/scsi/lpfc/lpfc_hbadisc.c
7224     +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
7225     @@ -1083,7 +1083,7 @@ out:
7226     }
7227    
7228    
7229     -static void
7230     +void
7231     lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
7232     {
7233     struct lpfc_vport *vport = pmb->vport;
7234     @@ -1113,8 +1113,10 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
7235     /* Start discovery by sending a FLOGI. port_state is identically
7236     * LPFC_FLOGI while waiting for FLOGI cmpl
7237     */
7238     - if (vport->port_state != LPFC_FLOGI || vport->fc_flag & FC_PT2PT_PLOGI)
7239     + if (vport->port_state != LPFC_FLOGI)
7240     lpfc_initial_flogi(vport);
7241     + else if (vport->fc_flag & FC_PT2PT)
7242     + lpfc_disc_start(vport);
7243     return;
7244    
7245     out:
7246     @@ -2963,8 +2965,10 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7247    
7248     out_free_mem:
7249     mempool_free(mboxq, phba->mbox_mem_pool);
7250     - lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
7251     - kfree(dmabuf);
7252     + if (dmabuf) {
7253     + lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
7254     + kfree(dmabuf);
7255     + }
7256     return;
7257     }
7258    
7259     @@ -3448,10 +3452,10 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
7260     spin_lock_irq(shost->host_lock);
7261     ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
7262     spin_unlock_irq(shost->host_lock);
7263     - } else
7264     - /* Good status, call state machine */
7265     - lpfc_disc_state_machine(vport, ndlp, pmb,
7266     - NLP_EVT_CMPL_REG_LOGIN);
7267     + }
7268     +
7269     + /* Call state machine */
7270     + lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
7271    
7272     lpfc_mbuf_free(phba, mp->virt, mp->phys);
7273     kfree(mp);
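
The out_free_mem path in lpfc_mbx_cmpl_reg_vfi() can now be reached with a NULL dmabuf after the pt2pt changes, so the hunk frees it only when it is present. The same guarded-cleanup shape, sketched with plain malloc/free and illustrative names:

#include <stdlib.h>

struct dmabuf_like { void *virt; };

static int do_work(int with_buf)
{
    struct dmabuf_like *buf = NULL;

    if (with_buf) {
        buf = calloc(1, sizeof(*buf));
        if (!buf)
            return -1;
    }
    /* ... work that may fail and fall through to cleanup ... */

    /* Single exit path tolerates buf == NULL. */
    if (buf) {
        free(buf->virt);
        free(buf);
    }
    return 0;
}

int main(void)
{
    return do_work(0) | do_work(1);
}
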
7274     diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
7275     index b0d92b84bcdc..c14ab6c3ae40 100644
7276     --- a/drivers/scsi/lpfc/lpfc_init.c
7277     +++ b/drivers/scsi/lpfc/lpfc_init.c
7278     @@ -8834,9 +8834,12 @@ found:
7279     * already mapped to this phys_id.
7280     */
7281     if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) {
7282     - chann[saved_chann] =
7283     - cpup->channel_id;
7284     - saved_chann++;
7285     + if (saved_chann <=
7286     + LPFC_FCP_IO_CHAN_MAX) {
7287     + chann[saved_chann] =
7288     + cpup->channel_id;
7289     + saved_chann++;
7290     + }
7291     goto out;
7292     }
7293    
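
The lpfc_init.c hunk stops recording channel ids once the chann[] array is full instead of writing past its end. A standalone sketch of the same capacity guard; CHAN_MAX here is an arbitrary stand-in for LPFC_FCP_IO_CHAN_MAX:

#include <stdio.h>

#define CHAN_MAX 4  /* arbitrary stand-in for LPFC_FCP_IO_CHAN_MAX */

static unsigned int chann[CHAN_MAX + 1];
static unsigned int saved_chann;

/* Record a channel id only while there is room left, the same
 * "saved_chann <= ...MAX" test the hunk adds before the store. */
static void save_channel(unsigned int id)
{
    if (saved_chann <= CHAN_MAX)
        chann[saved_chann++] = id;
}

int main(void)
{
    unsigned int i;

    for (i = 0; i < 10; i++)
        save_channel(i);
    printf("kept %u of 10 ids\n", saved_chann); /* kept 5 of 10 */
    return 0;
}
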
7294     diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
7295     index f87f90e9b7df..1e34b5408a29 100644
7296     --- a/drivers/scsi/lpfc/lpfc_mbox.c
7297     +++ b/drivers/scsi/lpfc/lpfc_mbox.c
7298     @@ -2145,10 +2145,12 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
7299     reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
7300     reg_vfi->e_d_tov = phba->fc_edtov;
7301     reg_vfi->r_a_tov = phba->fc_ratov;
7302     - reg_vfi->bde.addrHigh = putPaddrHigh(phys);
7303     - reg_vfi->bde.addrLow = putPaddrLow(phys);
7304     - reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
7305     - reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
7306     + if (phys) {
7307     + reg_vfi->bde.addrHigh = putPaddrHigh(phys);
7308     + reg_vfi->bde.addrLow = putPaddrLow(phys);
7309     + reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
7310     + reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
7311     + }
7312     bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
7313    
7314     /* Only FC supports upd bit */
7315     diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
7316     index ed9a2c80c4aa..193733e8c823 100644
7317     --- a/drivers/scsi/lpfc/lpfc_nportdisc.c
7318     +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
7319     @@ -280,38 +280,12 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
7320     uint32_t *lp;
7321     IOCB_t *icmd;
7322     struct serv_parm *sp;
7323     + uint32_t ed_tov;
7324     LPFC_MBOXQ_t *mbox;
7325     struct ls_rjt stat;
7326     int rc;
7327    
7328     memset(&stat, 0, sizeof (struct ls_rjt));
7329     - if (vport->port_state <= LPFC_FDISC) {
7330     - /* Before responding to PLOGI, check for pt2pt mode.
7331     - * If we are pt2pt, with an outstanding FLOGI, abort
7332     - * the FLOGI and resend it first.
7333     - */
7334     - if (vport->fc_flag & FC_PT2PT) {
7335     - lpfc_els_abort_flogi(phba);
7336     - if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
7337     - /* If the other side is supposed to initiate
7338     - * the PLOGI anyway, just ACC it now and
7339     - * move on with discovery.
7340     - */
7341     - phba->fc_edtov = FF_DEF_EDTOV;
7342     - phba->fc_ratov = FF_DEF_RATOV;
7343     - /* Start discovery - this should just do
7344     - CLEAR_LA */
7345     - lpfc_disc_start(vport);
7346     - } else
7347     - lpfc_initial_flogi(vport);
7348     - } else {
7349     - stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
7350     - stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
7351     - lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
7352     - ndlp, NULL);
7353     - return 0;
7354     - }
7355     - }
7356     pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
7357     lp = (uint32_t *) pcmd->virt;
7358     sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
7359     @@ -404,30 +378,46 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
7360     /* Check for Nport to NPort pt2pt protocol */
7361     if ((vport->fc_flag & FC_PT2PT) &&
7362     !(vport->fc_flag & FC_PT2PT_PLOGI)) {
7363     -
7364     /* rcv'ed PLOGI decides what our NPortId will be */
7365     vport->fc_myDID = icmd->un.rcvels.parmRo;
7366     - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7367     - if (mbox == NULL)
7368     - goto out;
7369     - lpfc_config_link(phba, mbox);
7370     - mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
7371     - mbox->vport = vport;
7372     - rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
7373     - if (rc == MBX_NOT_FINISHED) {
7374     - mempool_free(mbox, phba->mbox_mem_pool);
7375     - goto out;
7376     +
7377     + ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
7378     + if (sp->cmn.edtovResolution) {
7379     + /* E_D_TOV ticks are in nanoseconds */
7380     + ed_tov = (phba->fc_edtov + 999999) / 1000000;
7381     }
7382     +
7383     /*
7384     - * For SLI4, the VFI/VPI are registered AFTER the
7385     - * Nport with the higher WWPN sends us a PLOGI with
7386     - * our assigned NPortId.
7387     + * For pt-to-pt, use the larger EDTOV
7388     + * RATOV = 2 * EDTOV
7389     */
7390     + if (ed_tov > phba->fc_edtov)
7391     + phba->fc_edtov = ed_tov;
7392     + phba->fc_ratov = (2 * phba->fc_edtov) / 1000;
7393     +
7394     + memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
7395     +
7396     + /* Issue config_link / reg_vfi to account for updated TOV's */
7397     +
7398     if (phba->sli_rev == LPFC_SLI_REV4)
7399     lpfc_issue_reg_vfi(vport);
7400     + else {
7401     + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7402     + if (mbox == NULL)
7403     + goto out;
7404     + lpfc_config_link(phba, mbox);
7405     + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
7406     + mbox->vport = vport;
7407     + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
7408     + if (rc == MBX_NOT_FINISHED) {
7409     + mempool_free(mbox, phba->mbox_mem_pool);
7410     + goto out;
7411     + }
7412     + }
7413    
7414     lpfc_can_disctmo(vport);
7415     }
7416     +
7417     mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7418     if (!mbox)
7419     goto out;
7420     @@ -1038,7 +1028,9 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
7421     uint32_t *lp;
7422     IOCB_t *irsp;
7423     struct serv_parm *sp;
7424     + uint32_t ed_tov;
7425     LPFC_MBOXQ_t *mbox;
7426     + int rc;
7427    
7428     cmdiocb = (struct lpfc_iocbq *) arg;
7429     rspiocb = cmdiocb->context_un.rsp_iocb;
7430     @@ -1094,18 +1086,63 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
7431     ndlp->nlp_maxframe =
7432     ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
7433    
7434     + if ((vport->fc_flag & FC_PT2PT) &&
7435     + (vport->fc_flag & FC_PT2PT_PLOGI)) {
7436     + ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
7437     + if (sp->cmn.edtovResolution) {
7438     + /* E_D_TOV ticks are in nanoseconds */
7439     + ed_tov = (phba->fc_edtov + 999999) / 1000000;
7440     + }
7441     +
7442     + /*
7443     + * Use the larger EDTOV
7444     + * RATOV = 2 * EDTOV for pt-to-pt
7445     + */
7446     + if (ed_tov > phba->fc_edtov)
7447     + phba->fc_edtov = ed_tov;
7448     + phba->fc_ratov = (2 * phba->fc_edtov) / 1000;
7449     +
7450     + memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
7451     +
7452     + /* Issue config_link / reg_vfi to account for updated TOV's */
7453     + if (phba->sli_rev == LPFC_SLI_REV4) {
7454     + lpfc_issue_reg_vfi(vport);
7455     + } else {
7456     + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7457     + if (!mbox) {
7458     + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7459     + "0133 PLOGI: no memory "
7460     + "for config_link "
7461     + "Data: x%x x%x x%x x%x\n",
7462     + ndlp->nlp_DID, ndlp->nlp_state,
7463     + ndlp->nlp_flag, ndlp->nlp_rpi);
7464     + goto out;
7465     + }
7466     +
7467     + lpfc_config_link(phba, mbox);
7468     +
7469     + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
7470     + mbox->vport = vport;
7471     + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
7472     + if (rc == MBX_NOT_FINISHED) {
7473     + mempool_free(mbox, phba->mbox_mem_pool);
7474     + goto out;
7475     + }
7476     + }
7477     + }
7478     +
7479     + lpfc_unreg_rpi(vport, ndlp);
7480     +
7481     mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7482     if (!mbox) {
7483     lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7484     - "0133 PLOGI: no memory for reg_login "
7485     - "Data: x%x x%x x%x x%x\n",
7486     - ndlp->nlp_DID, ndlp->nlp_state,
7487     - ndlp->nlp_flag, ndlp->nlp_rpi);
7488     + "0018 PLOGI: no memory for reg_login "
7489     + "Data: x%x x%x x%x x%x\n",
7490     + ndlp->nlp_DID, ndlp->nlp_state,
7491     + ndlp->nlp_flag, ndlp->nlp_rpi);
7492     goto out;
7493     }
7494    
7495     - lpfc_unreg_rpi(vport, ndlp);
7496     -
7497     if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
7498     (uint8_t *) sp, mbox, ndlp->nlp_rpi) == 0) {
7499     switch (ndlp->nlp_DID) {
7500     @@ -2299,6 +2336,9 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
7501     if (vport->phba->sli_rev < LPFC_SLI_REV4)
7502     ndlp->nlp_rpi = mb->un.varWords[0];
7503     ndlp->nlp_flag |= NLP_RPI_REGISTERED;
7504     + if (ndlp->nlp_flag & NLP_LOGO_ACC) {
7505     + lpfc_unreg_rpi(vport, ndlp);
7506     + }
7507     } else {
7508     if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
7509     lpfc_drop_node(vport, ndlp);
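
Both lpfc_nportdisc.c hunks convert the peer's E_D_TOV with a round-up division when the service parameters advertise nanosecond resolution, then derive R_A_TOV as twice E_D_TOV. The arithmetic in isolation:

#include <stdio.h>

/* Round-up conversion from nanoseconds to milliseconds, the same
 * (x + 999999) / 1000000 idiom used in the hunks above. */
static unsigned int ns_to_ms_ceil(unsigned int ns)
{
    return (ns + 999999u) / 1000000u;
}

int main(void)
{
    unsigned int ed_tov_ms = ns_to_ms_ceil(2000000000u); /* 2000 ms */
    unsigned int ra_tov_s;

    /* pt2pt convention in the patch: RATOV = 2 * EDTOV (ms -> s). */
    ra_tov_s = (2 * ed_tov_ms) / 1000;
    printf("edtov=%ums ratov=%us\n", ed_tov_ms, ra_tov_s);
    return 0;
}
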
7510     diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
7511     index 9e165bc05ee1..bae36cc3740b 100644
7512     --- a/drivers/scsi/lpfc/lpfc_scsi.c
7513     +++ b/drivers/scsi/lpfc/lpfc_scsi.c
7514     @@ -3908,9 +3908,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
7515     uint32_t logit = LOG_FCP;
7516    
7517     /* Sanity check on return of outstanding command */
7518     - if (!(lpfc_cmd->pCmd))
7519     - return;
7520     cmd = lpfc_cmd->pCmd;
7521     + if (!cmd)
7522     + return;
7523     shost = cmd->device->host;
7524    
7525     lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
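
The lpfc_scsi.c fix reads lpfc_cmd->pCmd into a local once and tests the local, so the check and the later dereference can never observe two different values. A sketch of that shape; real concurrent code would also want READ_ONCE() or an atomic load, this only shows the single-read pattern:

#include <stdio.h>

struct cmd { const char *name; };
struct owner { struct cmd *pCmd; /* may be cleared by another path */ };

/* Snapshot the shared pointer, then only ever use the snapshot. */
static void complete_cmd(struct owner *o)
{
    struct cmd *cmd = o->pCmd;

    if (!cmd)
        return;
    printf("completing %s\n", cmd->name);
}

int main(void)
{
    struct cmd c = { "read(10)" };
    struct owner o = { &c };

    complete_cmd(&o);
    o.pCmd = NULL;
    complete_cmd(&o);  /* quietly ignored */
    return 0;
}
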
7526     diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
7527     index f9585cdd8933..92dfd6a5178c 100644
7528     --- a/drivers/scsi/lpfc/lpfc_sli.c
7529     +++ b/drivers/scsi/lpfc/lpfc_sli.c
7530     @@ -14842,10 +14842,12 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
7531     struct lpfc_dmabuf *h_buf;
7532     struct hbq_dmabuf *seq_dmabuf = NULL;
7533     struct hbq_dmabuf *temp_dmabuf = NULL;
7534     + uint8_t found = 0;
7535    
7536     INIT_LIST_HEAD(&dmabuf->dbuf.list);
7537     dmabuf->time_stamp = jiffies;
7538     new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
7539     +
7540     /* Use the hdr_buf to find the sequence that this frame belongs to */
7541     list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
7542     temp_hdr = (struct fc_frame_header *)h_buf->virt;
7543     @@ -14885,7 +14887,8 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
7544     return seq_dmabuf;
7545     }
7546     /* find the correct place in the sequence to insert this frame */
7547     - list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
7548     + d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
7549     + while (!found) {
7550     temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
7551     temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
7552     /*
7553     @@ -14895,9 +14898,17 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
7554     if (be16_to_cpu(new_hdr->fh_seq_cnt) >
7555     be16_to_cpu(temp_hdr->fh_seq_cnt)) {
7556     list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
7557     - return seq_dmabuf;
7558     + found = 1;
7559     + break;
7560     }
7561     +
7562     + if (&d_buf->list == &seq_dmabuf->dbuf.list)
7563     + break;
7564     + d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
7565     }
7566     +
7567     + if (found)
7568     + return seq_dmabuf;
7569     return NULL;
7570     }
7571    
7572     @@ -16173,7 +16184,7 @@ fail_fcf_read:
7573     }
7574    
7575     /**
7576     - * lpfc_check_next_fcf_pri
7577     + * lpfc_check_next_fcf_pri_level
7578     * phba pointer to the lpfc_hba struct for this port.
7579     * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
7580     * routine when the rr_bmask is empty. The FCF indecies are put into the
7581     * routine when the rr_bmask is empty. The FCF indices are put into the
7582    
7583     if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
7584     phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
7585     - LPFC_FCF_FLOGI_FAILED)
7586     + LPFC_FCF_FLOGI_FAILED) {
7587     + if (list_is_singular(&phba->fcf.fcf_pri_list))
7588     + return LPFC_FCOE_FCF_NEXT_NONE;
7589     +
7590     goto next_priority;
7591     + }
7592    
7593     lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
7594     "2845 Get next roundrobin failover FCF (x%x)\n",
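
The lpfc_fc_frame_add() rewrite replaces list_for_each_entry_reverse() with an explicit tail-to-head walk and a found flag, so the loop can test for the list head itself and fall through to the out-of-order NULL return. A self-contained sketch with a toy circular list standing in for the kernel list API:

#include <stdio.h>

struct node {
    unsigned short seq_cnt;
    struct node *prev, *next;
};

static void list_init(struct node *head)
{
    head->prev = head->next = head;
}

static void insert_after(struct node *pos, struct node *n)
{
    n->prev = pos;
    n->next = pos->next;
    pos->next->prev = n;
    pos->next = n;
}

/* Walk backwards from the tail; insert after the first entry whose
 * seq_cnt is smaller, stopping once the walk reaches the head itself,
 * mirroring the found/break structure of the rewritten loop. */
static int add_frame(struct node *head, struct node *n)
{
    struct node *d = head->prev;
    int found = 0;

    while (!found) {
        if (d == head)
            break;
        if (n->seq_cnt > d->seq_cnt) {
            insert_after(d, n);
            found = 1;
            break;
        }
        d = d->prev;
    }
    return found;
}

int main(void)
{
    struct node head, a = { 1 }, b = { 3 }, c = { 2 };
    struct node *p;

    list_init(&head);
    insert_after(&head, &a);
    insert_after(&a, &b);
    printf("inserted=%d, order:", add_frame(&head, &c));
    for (p = head.next; p != &head; p = p->next)
        printf(" %u", p->seq_cnt);
    printf("\n");
    return 0;
}
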
7595     diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
7596     index c0f7c8ce54aa..ef4ff03242ea 100644
7597     --- a/drivers/scsi/megaraid/megaraid_sas.h
7598     +++ b/drivers/scsi/megaraid/megaraid_sas.h
7599     @@ -1083,6 +1083,8 @@ struct megasas_ctrl_info {
7600    
7601     #define VD_EXT_DEBUG 0
7602    
7603     +#define SCAN_PD_CHANNEL 0x1
7604     +#define SCAN_VD_CHANNEL 0x2
7605    
7606     enum MR_SCSI_CMD_TYPE {
7607     READ_WRITE_LDIO = 0,
7608     diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
7609     index e994ff944091..3f8d357b1bac 100644
7610     --- a/drivers/scsi/megaraid/megaraid_sas_base.c
7611     +++ b/drivers/scsi/megaraid/megaraid_sas_base.c
7612     @@ -735,6 +735,7 @@ megasas_fire_cmd_skinny(struct megasas_instance *instance,
7613     &(regs)->inbound_high_queue_port);
7614     writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
7615     &(regs)->inbound_low_queue_port);
7616     + mmiowb();
7617     spin_unlock_irqrestore(&instance->hba_lock, flags);
7618     }
7619    
7620     @@ -5476,7 +5477,6 @@ static int megasas_probe_one(struct pci_dev *pdev,
7621     spin_lock_init(&instance->hba_lock);
7622     spin_lock_init(&instance->completion_lock);
7623    
7624     - mutex_init(&instance->aen_mutex);
7625     mutex_init(&instance->reset_mutex);
7626    
7627     /*
7628     @@ -6443,10 +6443,10 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
7629     }
7630     spin_unlock_irqrestore(&instance->hba_lock, flags);
7631    
7632     - mutex_lock(&instance->aen_mutex);
7633     + mutex_lock(&instance->reset_mutex);
7634     error = megasas_register_aen(instance, aen.seq_num,
7635     aen.class_locale_word);
7636     - mutex_unlock(&instance->aen_mutex);
7637     + mutex_unlock(&instance->reset_mutex);
7638     return error;
7639     }
7640    
7641     @@ -6477,9 +6477,9 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
7642     int i;
7643     int error = 0;
7644     compat_uptr_t ptr;
7645     - unsigned long local_raw_ptr;
7646     u32 local_sense_off;
7647     u32 local_sense_len;
7648     + u32 user_sense_off;
7649    
7650     if (clear_user(ioc, sizeof(*ioc)))
7651     return -EFAULT;
7652     @@ -6497,17 +6497,16 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
7653     * sense_len is not null, so prepare the 64bit value under
7654     * the same condition.
7655     */
7656     - if (get_user(local_raw_ptr, ioc->frame.raw) ||
7657     - get_user(local_sense_off, &ioc->sense_off) ||
7658     - get_user(local_sense_len, &ioc->sense_len))
7659     + if (get_user(local_sense_off, &ioc->sense_off) ||
7660     + get_user(local_sense_len, &ioc->sense_len) ||
7661     + get_user(user_sense_off, &cioc->sense_off))
7662     return -EFAULT;
7663    
7664     -
7665     if (local_sense_len) {
7666     void __user **sense_ioc_ptr =
7667     - (void __user **)((u8*)local_raw_ptr + local_sense_off);
7668     + (void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
7669     compat_uptr_t *sense_cioc_ptr =
7670     - (compat_uptr_t *)(cioc->frame.raw + cioc->sense_off);
7671     + (compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off);
7672     if (get_user(ptr, sense_cioc_ptr) ||
7673     put_user(compat_ptr(ptr), sense_ioc_ptr))
7674     return -EFAULT;
7675     @@ -6648,6 +6647,7 @@ megasas_aen_polling(struct work_struct *work)
7676     int i, j, doscan = 0;
7677     u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME;
7678     int error;
7679     + u8 dcmd_ret = 0;
7680    
7681     if (!instance) {
7682     printk(KERN_ERR "invalid instance!\n");
7683     @@ -6660,16 +6660,7 @@ megasas_aen_polling(struct work_struct *work)
7684     wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
7685    
7686     /* Don't run the event workqueue thread if OCR is running */
7687     - for (i = 0; i < wait_time; i++) {
7688     - if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL)
7689     - break;
7690     - if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
7691     - dev_notice(&instance->pdev->dev, "%s waiting for "
7692     - "controller reset to finish for scsi%d\n",
7693     - __func__, instance->host->host_no);
7694     - }
7695     - msleep(1000);
7696     - }
7697     + mutex_lock(&instance->reset_mutex);
7698    
7699     instance->ev = NULL;
7700     host = instance->host;
7701     @@ -6677,212 +6668,127 @@ megasas_aen_polling(struct work_struct *work)
7702     megasas_decode_evt(instance);
7703    
7704     switch (le32_to_cpu(instance->evt_detail->code)) {
7705     - case MR_EVT_PD_INSERTED:
7706     - if (megasas_get_pd_list(instance) == 0) {
7707     - for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
7708     - for (j = 0;
7709     - j < MEGASAS_MAX_DEV_PER_CHANNEL;
7710     - j++) {
7711     -
7712     - pd_index =
7713     - (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
7714     -
7715     - sdev1 = scsi_device_lookup(host, i, j, 0);
7716     -
7717     - if (instance->pd_list[pd_index].driveState
7718     - == MR_PD_STATE_SYSTEM) {
7719     - if (!sdev1)
7720     - scsi_add_device(host, i, j, 0);
7721     -
7722     - if (sdev1)
7723     - scsi_device_put(sdev1);
7724     - }
7725     - }
7726     - }
7727     - }
7728     - doscan = 0;
7729     - break;
7730    
7731     + case MR_EVT_PD_INSERTED:
7732     case MR_EVT_PD_REMOVED:
7733     - if (megasas_get_pd_list(instance) == 0) {
7734     - for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
7735     - for (j = 0;
7736     - j < MEGASAS_MAX_DEV_PER_CHANNEL;
7737     - j++) {
7738     -
7739     - pd_index =
7740     - (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
7741     -
7742     - sdev1 = scsi_device_lookup(host, i, j, 0);
7743     -
7744     - if (instance->pd_list[pd_index].driveState
7745     - == MR_PD_STATE_SYSTEM) {
7746     - if (sdev1)
7747     - scsi_device_put(sdev1);
7748     - } else {
7749     - if (sdev1) {
7750     - scsi_remove_device(sdev1);
7751     - scsi_device_put(sdev1);
7752     - }
7753     - }
7754     - }
7755     - }
7756     - }
7757     - doscan = 0;
7758     + dcmd_ret = megasas_get_pd_list(instance);
7759     + if (dcmd_ret == 0)
7760     + doscan = SCAN_PD_CHANNEL;
7761     break;
7762    
7763     case MR_EVT_LD_OFFLINE:
7764     case MR_EVT_CFG_CLEARED:
7765     case MR_EVT_LD_DELETED:
7766     - if (!instance->requestorId ||
7767     - megasas_get_ld_vf_affiliation(instance, 0)) {
7768     - if (megasas_ld_list_query(instance,
7769     - MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
7770     - megasas_get_ld_list(instance);
7771     - for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
7772     - for (j = 0;
7773     - j < MEGASAS_MAX_DEV_PER_CHANNEL;
7774     - j++) {
7775     -
7776     - ld_index =
7777     - (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
7778     -
7779     - sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
7780     -
7781     - if (instance->ld_ids[ld_index]
7782     - != 0xff) {
7783     - if (sdev1)
7784     - scsi_device_put(sdev1);
7785     - } else {
7786     - if (sdev1) {
7787     - scsi_remove_device(sdev1);
7788     - scsi_device_put(sdev1);
7789     - }
7790     - }
7791     - }
7792     - }
7793     - doscan = 0;
7794     - }
7795     - break;
7796     case MR_EVT_LD_CREATED:
7797     if (!instance->requestorId ||
7798     - megasas_get_ld_vf_affiliation(instance, 0)) {
7799     - if (megasas_ld_list_query(instance,
7800     - MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
7801     - megasas_get_ld_list(instance);
7802     - for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
7803     - for (j = 0;
7804     - j < MEGASAS_MAX_DEV_PER_CHANNEL;
7805     - j++) {
7806     - ld_index =
7807     - (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
7808     -
7809     - sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
7810     -
7811     - if (instance->ld_ids[ld_index]
7812     - != 0xff) {
7813     - if (!sdev1)
7814     - scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
7815     - }
7816     - if (sdev1)
7817     - scsi_device_put(sdev1);
7818     - }
7819     - }
7820     - doscan = 0;
7821     - }
7822     + (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
7823     + dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
7824     +
7825     + if (dcmd_ret == 0)
7826     + doscan = SCAN_VD_CHANNEL;
7827     +
7828     break;
7829     +
7830     case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
7831     case MR_EVT_FOREIGN_CFG_IMPORTED:
7832     case MR_EVT_LD_STATE_CHANGE:
7833     - doscan = 1;
7834     + dcmd_ret = megasas_get_pd_list(instance);
7835     +
7836     + if (dcmd_ret != 0)
7837     + break;
7838     +
7839     + if (!instance->requestorId ||
7840     + (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
7841     + dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
7842     +
7843     + if (dcmd_ret != 0)
7844     + break;
7845     +
7846     + doscan = SCAN_VD_CHANNEL | SCAN_PD_CHANNEL;
7847     + dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
7848     + instance->host->host_no);
7849     break;
7850     +
7851     case MR_EVT_CTRL_PROP_CHANGED:
7852     - megasas_get_ctrl_info(instance);
7853     - break;
7854     + dcmd_ret = megasas_get_ctrl_info(instance);
7855     + break;
7856     default:
7857     doscan = 0;
7858     break;
7859     }
7860     } else {
7861     dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
7862     + mutex_unlock(&instance->reset_mutex);
7863     kfree(ev);
7864     return;
7865     }
7866    
7867     - if (doscan) {
7868     - dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
7869     - instance->host->host_no);
7870     - if (megasas_get_pd_list(instance) == 0) {
7871     - for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
7872     - for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
7873     - pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
7874     - sdev1 = scsi_device_lookup(host, i, j, 0);
7875     - if (instance->pd_list[pd_index].driveState ==
7876     - MR_PD_STATE_SYSTEM) {
7877     - if (!sdev1) {
7878     - scsi_add_device(host, i, j, 0);
7879     - }
7880     - if (sdev1)
7881     - scsi_device_put(sdev1);
7882     - } else {
7883     - if (sdev1) {
7884     - scsi_remove_device(sdev1);
7885     - scsi_device_put(sdev1);
7886     - }
7887     + mutex_unlock(&instance->reset_mutex);
7888     +
7889     + if (doscan & SCAN_PD_CHANNEL) {
7890     + for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
7891     + for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
7892     + pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
7893     + sdev1 = scsi_device_lookup(host, i, j, 0);
7894     + if (instance->pd_list[pd_index].driveState ==
7895     + MR_PD_STATE_SYSTEM) {
7896     + if (!sdev1)
7897     + scsi_add_device(host, i, j, 0);
7898     + else
7899     + scsi_device_put(sdev1);
7900     + } else {
7901     + if (sdev1) {
7902     + scsi_remove_device(sdev1);
7903     + scsi_device_put(sdev1);
7904     }
7905     }
7906     }
7907     }
7908     + }
7909    
7910     - if (!instance->requestorId ||
7911     - megasas_get_ld_vf_affiliation(instance, 0)) {
7912     - if (megasas_ld_list_query(instance,
7913     - MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
7914     - megasas_get_ld_list(instance);
7915     - for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
7916     - for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL;
7917     - j++) {
7918     - ld_index =
7919     - (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
7920     -
7921     - sdev1 = scsi_device_lookup(host,
7922     - MEGASAS_MAX_PD_CHANNELS + i, j, 0);
7923     - if (instance->ld_ids[ld_index]
7924     - != 0xff) {
7925     - if (!sdev1)
7926     - scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
7927     - else
7928     - scsi_device_put(sdev1);
7929     - } else {
7930     - if (sdev1) {
7931     - scsi_remove_device(sdev1);
7932     - scsi_device_put(sdev1);
7933     - }
7934     + if (doscan & SCAN_VD_CHANNEL) {
7935     + for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
7936     + for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
7937     + ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
7938     + sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
7939     + if (instance->ld_ids[ld_index] != 0xff) {
7940     + if (!sdev1)
7941     + scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
7942     + else
7943     + scsi_device_put(sdev1);
7944     + } else {
7945     + if (sdev1) {
7946     + scsi_remove_device(sdev1);
7947     + scsi_device_put(sdev1);
7948     }
7949     }
7950     }
7951     }
7952     }
7953    
7954     - if (instance->aen_cmd != NULL) {
7955     - kfree(ev);
7956     - return ;
7957     - }
7958     -
7959     - seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
7960     + if (dcmd_ret == 0)
7961     + seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
7962     + else
7963     + seq_num = instance->last_seq_num;
7964    
7965     /* Register AEN with FW for latest sequence number plus 1 */
7966     class_locale.members.reserved = 0;
7967     class_locale.members.locale = MR_EVT_LOCALE_ALL;
7968     class_locale.members.class = MR_EVT_CLASS_DEBUG;
7969     - mutex_lock(&instance->aen_mutex);
7970     +
7971     + if (instance->aen_cmd != NULL) {
7972     + kfree(ev);
7973     + return;
7974     + }
7975     +
7976     + mutex_lock(&instance->reset_mutex);
7977     error = megasas_register_aen(instance, seq_num,
7978     class_locale.word);
7979     - mutex_unlock(&instance->aen_mutex);
7980     -
7981     if (error)
7982     - dev_err(&instance->pdev->dev, "register aen failed error %x\n", error);
7983     + dev_err(&instance->pdev->dev,
7984     + "register aen failed error %x\n", error);
7985    
7986     + mutex_unlock(&instance->reset_mutex);
7987     kfree(ev);
7988     }
7989    
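
The megaraid AEN rework replaces per-event copies of the device-scan loops with a doscan bitmask built from the new SCAN_PD_CHANNEL/SCAN_VD_CHANNEL defines, gates each scan on the DCMD result, and serializes against controller reset with reset_mutex instead of a sleep-and-poll loop. The bitmask dispatch in miniature; the event codes here are made up:

#include <stdio.h>

#define SCAN_PD_CHANNEL 0x1  /* mirrors the new defines in megaraid_sas.h */
#define SCAN_VD_CHANNEL 0x2

static void scan_pd(void) { puts("scan physical-device channels"); }
static void scan_vd(void) { puts("scan virtual-device channels"); }

/* Event handlers set bits instead of duplicating scan loops per event. */
static unsigned int classify_event(int code)
{
    switch (code) {
    case 1: return SCAN_PD_CHANNEL;                   /* PD insert/remove */
    case 2: return SCAN_VD_CHANNEL;                   /* LD create/delete */
    case 3: return SCAN_PD_CHANNEL | SCAN_VD_CHANNEL; /* full bus scan    */
    default: return 0;
    }
}

int main(void)
{
    unsigned int doscan = classify_event(3);

    if (doscan & SCAN_PD_CHANNEL)
        scan_pd();
    if (doscan & SCAN_VD_CHANNEL)
        scan_vd();
    return 0;
}
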
7990     diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
7991     index 4f391e747be2..021b994fdae8 100644
7992     --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
7993     +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
7994     @@ -201,6 +201,7 @@ megasas_fire_cmd_fusion(struct megasas_instance *instance,
7995     &instance->reg_set->inbound_low_queue_port);
7996     writel(le32_to_cpu(req_desc->u.high),
7997     &instance->reg_set->inbound_high_queue_port);
7998     + mmiowb();
7999     spin_unlock_irqrestore(&instance->hba_lock, flags);
8000     #endif
8001     }
8002     diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
8003     index 356233f86064..5b2c37f1e908 100644
8004     --- a/drivers/scsi/mpt3sas/mpt3sas_base.c
8005     +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
8006     @@ -2020,8 +2020,10 @@ mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
8007     _base_free_irq(ioc);
8008     _base_disable_msix(ioc);
8009    
8010     - if (ioc->msix96_vector)
8011     + if (ioc->msix96_vector) {
8012     kfree(ioc->replyPostRegisterIndex);
8013     + ioc->replyPostRegisterIndex = NULL;
8014     + }
8015    
8016     if (ioc->chip_phys) {
8017     iounmap(ioc->chip);
8018     @@ -2240,6 +2242,12 @@ mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
8019     return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
8020     }
8021    
8022     +static inline u8
8023     +_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
8024     +{
8025     + return ioc->cpu_msix_table[raw_smp_processor_id()];
8026     +}
8027     +
8028     /**
8029     * mpt3sas_base_get_smid - obtain a free smid from internal queue
8030     * @ioc: per adapter object
8031     @@ -2300,6 +2308,7 @@ mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
8032     request->scmd = scmd;
8033     request->cb_idx = cb_idx;
8034     smid = request->smid;
8035     + request->msix_io = _base_get_msix_index(ioc);
8036     list_del(&request->tracker_list);
8037     spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
8038     return smid;
8039     @@ -2422,12 +2431,6 @@ _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
8040     }
8041     #endif
8042    
8043     -static inline u8
8044     -_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
8045     -{
8046     - return ioc->cpu_msix_table[raw_smp_processor_id()];
8047     -}
8048     -
8049     /**
8050     * mpt3sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
8051     * @ioc: per adapter object
8052     @@ -2481,18 +2484,19 @@ mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
8053     * mpt3sas_base_put_smid_hi_priority - send Task Management request to firmware
8054     * @ioc: per adapter object
8055     * @smid: system request message index
8056     - *
8057     + * @msix_task: same as the msix of the I/O in case of task abort, else 0.
8058     * Return nothing.
8059     */
8060     void
8061     -mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid)
8062     +mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
8063     + u16 msix_task)
8064     {
8065     Mpi2RequestDescriptorUnion_t descriptor;
8066     u64 *request = (u64 *)&descriptor;
8067    
8068     descriptor.HighPriority.RequestFlags =
8069     MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
8070     - descriptor.HighPriority.MSIxIndex = 0;
8071     + descriptor.HighPriority.MSIxIndex = msix_task;
8072     descriptor.HighPriority.SMID = cpu_to_le16(smid);
8073     descriptor.HighPriority.LMID = 0;
8074     descriptor.HighPriority.Reserved1 = 0;
8075     diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
8076     index 5ad271efbd45..92648a5ea2d2 100644
8077     --- a/drivers/scsi/mpt3sas/mpt3sas_base.h
8078     +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
8079     @@ -643,6 +643,7 @@ struct chain_tracker {
8080     * @cb_idx: callback index
8081     * @direct_io: To indicate whether I/O is direct (WARPDRIVE)
8082     * @tracker_list: list of free request (ioc->free_list)
8083     + * @msix_io: IO's msix
8084     */
8085     struct scsiio_tracker {
8086     u16 smid;
8087     @@ -651,6 +652,7 @@ struct scsiio_tracker {
8088     u8 direct_io;
8089     struct list_head chain_list;
8090     struct list_head tracker_list;
8091     + u16 msix_io;
8092     };
8093    
8094     /**
8095     @@ -1213,7 +1215,8 @@ void mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid,
8096     u16 handle);
8097     void mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
8098     u16 handle);
8099     -void mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid);
8100     +void mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc,
8101     + u16 smid, u16 msix_task);
8102     void mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid);
8103     void mpt3sas_base_initialize_callback_handler(void);
8104     u8 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func);
8105     diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
8106     index d8366b056b70..4ccde5a05b70 100644
8107     --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
8108     +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
8109     @@ -817,7 +817,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
8110     tm_request->DevHandle));
8111     ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
8112     data_in_dma, data_in_sz);
8113     - mpt3sas_base_put_smid_hi_priority(ioc, smid);
8114     + mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
8115     break;
8116     }
8117     case MPI2_FUNCTION_SMP_PASSTHROUGH:
8118     diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
8119     index 9ab77b06434d..6180f7970bbf 100644
8120     --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
8121     +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
8122     @@ -2193,6 +2193,7 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
8123     unsigned long timeleft;
8124     struct scsiio_tracker *scsi_lookup = NULL;
8125     int rc;
8126     + u16 msix_task = 0;
8127    
8128     if (m_type == TM_MUTEX_ON)
8129     mutex_lock(&ioc->tm_cmds.mutex);
8130     @@ -2256,7 +2257,12 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
8131     int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
8132     mpt3sas_scsih_set_tm_flag(ioc, handle);
8133     init_completion(&ioc->tm_cmds.done);
8134     - mpt3sas_base_put_smid_hi_priority(ioc, smid);
8135     + if ((type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) &&
8136     + (scsi_lookup->msix_io < ioc->reply_queue_count))
8137     + msix_task = scsi_lookup->msix_io;
8138     + else
8139     + msix_task = 0;
8140     + mpt3sas_base_put_smid_hi_priority(ioc, smid, msix_task);
8141     timeleft = wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
8142     if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
8143     pr_err(MPT3SAS_FMT "%s: timeout\n",
8144     @@ -3151,7 +3157,7 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
8145     mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
8146     mpi_request->DevHandle = cpu_to_le16(handle);
8147     mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
8148     - mpt3sas_base_put_smid_hi_priority(ioc, smid);
8149     + mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
8150     mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
8151    
8152     out:
8153     @@ -3332,7 +3338,7 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
8154     mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
8155     mpi_request->DevHandle = cpu_to_le16(handle);
8156     mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
8157     - mpt3sas_base_put_smid_hi_priority(ioc, smid);
8158     + mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
8159     }
8160    
8161     /**
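
Taken together, the mpt3sas changes record the submitting queue index on the scsiio_tracker at I/O submission and feed it back as msix_task when an ABORT_TASK is issued, so the task-management reply is processed on the same reply queue as the I/O it aborts; any other TM type, or an out-of-range saved index, falls back to queue 0. A sketch of the record-and-reuse shape; REPLY_QUEUE_COUNT and the cpu-to-queue mapping are illustrative:

#include <stdio.h>

#define REPLY_QUEUE_COUNT 4  /* illustrative; the driver reads this from ioc */

struct tracker { unsigned short smid, msix_io; };

/* At submit time, remember which reply queue the I/O was steered to
 * (the driver derives this from the submitting CPU). */
static void submit_io(struct tracker *t, unsigned int cpu)
{
    t->msix_io = cpu % REPLY_QUEUE_COUNT;
}

/* For ABORT_TASK, reuse the I/O's queue so the TM completion is ordered
 * against the I/O's own completion; otherwise use queue 0. */
static unsigned short tm_msix_task(const struct tracker *t, int is_abort)
{
    if (is_abort && t->msix_io < REPLY_QUEUE_COUNT)
        return t->msix_io;
    return 0;
}

int main(void)
{
    struct tracker t = { .smid = 7 };

    submit_io(&t, 6);
    printf("abort on queue %u\n", (unsigned)tm_msix_task(&t, 1)); /* 2 */
    return 0;
}
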
8162     diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
8163     index 75514a15bea0..f57d96984ae4 100644
8164     --- a/drivers/scsi/qla2xxx/qla_target.c
8165     +++ b/drivers/scsi/qla2xxx/qla_target.c
8166     @@ -1578,7 +1578,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
8167     qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
8168     0, 0, 0, 0, 0, 0);
8169     else {
8170     - if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
8171     + if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX)
8172     qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
8173     mcmd->fc_tm_rsp, false);
8174     else
8175     diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
8176     index 9096d311e45d..c2d9b793759d 100644
8177     --- a/drivers/staging/lustre/lustre/llite/llite_internal.h
8178     +++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
8179     @@ -631,8 +631,6 @@ struct ll_file_data {
8180    
8181     struct lov_stripe_md;
8182    
8183     -extern spinlock_t inode_lock;
8184     -
8185     extern struct dentry *llite_root;
8186     extern struct kset *llite_kset;
8187    
8188     diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
8189     index 29cfc57d496e..e4110d6de0b5 100644
8190     --- a/drivers/vhost/scsi.c
8191     +++ b/drivers/vhost/scsi.c
8192     @@ -88,7 +88,7 @@ struct vhost_scsi_cmd {
8193     struct scatterlist *tvc_prot_sgl;
8194     struct page **tvc_upages;
8195     /* Pointer to response header iovec */
8196     - struct iovec *tvc_resp_iov;
8197     + struct iovec tvc_resp_iov;
8198     /* Pointer to vhost_scsi for our device */
8199     struct vhost_scsi *tvc_vhost;
8200     /* Pointer to vhost_virtqueue for the cmd */
8201     @@ -557,7 +557,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
8202     memcpy(v_rsp.sense, cmd->tvc_sense_buf,
8203     se_cmd->scsi_sense_length);
8204    
8205     - iov_iter_init(&iov_iter, READ, cmd->tvc_resp_iov,
8206     + iov_iter_init(&iov_iter, READ, &cmd->tvc_resp_iov,
8207     cmd->tvc_in_iovs, sizeof(v_rsp));
8208     ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
8209     if (likely(ret == sizeof(v_rsp))) {
8210     @@ -1054,7 +1054,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
8211     }
8212     cmd->tvc_vhost = vs;
8213     cmd->tvc_vq = vq;
8214     - cmd->tvc_resp_iov = &vq->iov[out];
8215     + cmd->tvc_resp_iov = vq->iov[out];
8216     cmd->tvc_in_iovs = in;
8217    
8218     pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
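
The vhost-scsi fix stores the response iovec in the command by value rather than keeping a pointer into vq->iov, which the virtqueue can hand out again before the completion work runs. The pointer-versus-copy lifetime issue in miniature:

#include <stdio.h>

struct iovec_like { void *base; size_t len; };

struct cmd {
    struct iovec_like resp_iov;  /* held by value, as in the fix */
};

int main(void)
{
    char bufa[8], bufb[8];
    struct iovec_like ring[1] = { { bufa, sizeof(bufa) } };
    struct cmd c;

    c.resp_iov = ring[0];  /* snapshot taken at setup time */
    /* The ring slot is recycled before the completion runs... */
    ring[0].base = bufb;
    ring[0].len = sizeof(bufb);
    /* ...but the command still references the original buffer. */
    printf("resp base is bufa: %d\n", c.resp_iov.base == (void *)bufa);
    return 0;
}
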
8219     diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
8220     index 5e5db3687e34..353f4bae658c 100644
8221     --- a/fs/btrfs/file.c
8222     +++ b/fs/btrfs/file.c
8223     @@ -1526,27 +1526,24 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
8224    
8225     reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
8226    
8227     - if (BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
8228     - BTRFS_INODE_PREALLOC)) {
8229     - ret = check_can_nocow(inode, pos, &write_bytes);
8230     - if (ret < 0)
8231     - break;
8232     - if (ret > 0) {
8233     - /*
8234     - * For nodata cow case, no need to reserve
8235     - * data space.
8236     - */
8237     - only_release_metadata = true;
8238     - /*
8239     - * our prealloc extent may be smaller than
8240     - * write_bytes, so scale down.
8241     - */
8242     - num_pages = DIV_ROUND_UP(write_bytes + offset,
8243     - PAGE_CACHE_SIZE);
8244     - reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
8245     - goto reserve_metadata;
8246     - }
8247     + if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
8248     + BTRFS_INODE_PREALLOC)) &&
8249     + check_can_nocow(inode, pos, &write_bytes) > 0) {
8250     + /*
8251     + * For nodata cow case, no need to reserve
8252     + * data space.
8253     + */
8254     + only_release_metadata = true;
8255     + /*
8256     + * our prealloc extent may be smaller than
8257     + * write_bytes, so scale down.
8258     + */
8259     + num_pages = DIV_ROUND_UP(write_bytes + offset,
8260     + PAGE_CACHE_SIZE);
8261     + reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
8262     + goto reserve_metadata;
8263     }
8264     +
8265     ret = btrfs_check_data_free_space(inode, pos, write_bytes);
8266     if (ret < 0)
8267     break;
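
When the nocow path limits write_bytes to the preallocated extent, the btrfs hunk rescales the page count with DIV_ROUND_UP over the byte span including the in-page offset. That page math on its own:

#include <stdio.h>

#define PAGE_SIZE 4096u
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Pages spanned by a write of `bytes` starting `offset` bytes into the
 * first page, the same scale-down the hunk applies when a prealloc
 * extent shrinks write_bytes. */
static unsigned int pages_for_write(unsigned int offset, unsigned int bytes)
{
    return DIV_ROUND_UP(bytes + offset, PAGE_SIZE);
}

int main(void)
{
    printf("%u\n", pages_for_write(100, 4096));  /* 2: crosses a boundary */
    printf("%u\n", pages_for_write(0, 4096));    /* 1 */
    return 0;
}
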
8268     diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
8269     index 11309683d65f..27794b137b24 100644
8270     --- a/fs/ecryptfs/file.c
8271     +++ b/fs/ecryptfs/file.c
8272     @@ -112,7 +112,6 @@ static int ecryptfs_readdir(struct file *file, struct dir_context *ctx)
8273     .sb = inode->i_sb,
8274     };
8275     lower_file = ecryptfs_file_to_lower(file);
8276     - lower_file->f_pos = ctx->pos;
8277     rc = iterate_dir(lower_file, &buf.ctx);
8278     ctx->pos = buf.ctx.pos;
8279     if (rc < 0)
8280     @@ -236,14 +235,6 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
8281     }
8282     ecryptfs_set_file_lower(
8283     file, ecryptfs_inode_to_private(inode)->lower_file);
8284     - if (d_is_dir(ecryptfs_dentry)) {
8285     - ecryptfs_printk(KERN_DEBUG, "This is a directory\n");
8286     - mutex_lock(&crypt_stat->cs_mutex);
8287     - crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
8288     - mutex_unlock(&crypt_stat->cs_mutex);
8289     - rc = 0;
8290     - goto out;
8291     - }
8292     rc = read_or_initialize_metadata(ecryptfs_dentry);
8293     if (rc)
8294     goto out_put;
8295     @@ -260,6 +251,45 @@ out:
8296     return rc;
8297     }
8298    
8299     +/**
8300     + * ecryptfs_dir_open
8301     + * @inode: inode specifying the file to open
8302     + * @file: Structure to return filled in
8303     + *
8304     + * Opens the file specified by inode.
8305     + *
8306     + * Returns zero on success; non-zero otherwise
8307     + */
8308     +static int ecryptfs_dir_open(struct inode *inode, struct file *file)
8309     +{
8310     + struct dentry *ecryptfs_dentry = file->f_path.dentry;
8311     + /* Private value of ecryptfs_dentry allocated in
8312     + * ecryptfs_lookup() */
8313     + struct ecryptfs_file_info *file_info;
8314     + struct file *lower_file;
8315     +
8316     + /* Released in ecryptfs_release or end of function if failure */
8317     + file_info = kmem_cache_zalloc(ecryptfs_file_info_cache, GFP_KERNEL);
8318     + ecryptfs_set_file_private(file, file_info);
8319     + if (unlikely(!file_info)) {
8320     + ecryptfs_printk(KERN_ERR,
8321     + "Error attempting to allocate memory\n");
8322     + return -ENOMEM;
8323     + }
8324     + lower_file = dentry_open(ecryptfs_dentry_to_lower_path(ecryptfs_dentry),
8325     + file->f_flags, current_cred());
8326     + if (IS_ERR(lower_file)) {
8327     + printk(KERN_ERR "%s: Error attempting to initialize "
8328     + "the lower file for the dentry with name "
8329     + "[%pd]; rc = [%ld]\n", __func__,
8330     + ecryptfs_dentry, PTR_ERR(lower_file));
8331     + kmem_cache_free(ecryptfs_file_info_cache, file_info);
8332     + return PTR_ERR(lower_file);
8333     + }
8334     + ecryptfs_set_file_lower(file, lower_file);
8335     + return 0;
8336     +}
8337     +
8338     static int ecryptfs_flush(struct file *file, fl_owner_t td)
8339     {
8340     struct file *lower_file = ecryptfs_file_to_lower(file);
8341     @@ -280,6 +310,19 @@ static int ecryptfs_release(struct inode *inode, struct file *file)
8342     return 0;
8343     }
8344    
8345     +static int ecryptfs_dir_release(struct inode *inode, struct file *file)
8346     +{
8347     + fput(ecryptfs_file_to_lower(file));
8348     + kmem_cache_free(ecryptfs_file_info_cache,
8349     + ecryptfs_file_to_private(file));
8350     + return 0;
8351     +}
8352     +
8353     +static loff_t ecryptfs_dir_llseek(struct file *file, loff_t offset, int whence)
8354     +{
8355     + return vfs_llseek(ecryptfs_file_to_lower(file), offset, whence);
8356     +}
8357     +
8358     static int
8359     ecryptfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
8360     {
8361     @@ -359,20 +402,16 @@ const struct file_operations ecryptfs_dir_fops = {
8362     #ifdef CONFIG_COMPAT
8363     .compat_ioctl = ecryptfs_compat_ioctl,
8364     #endif
8365     - .open = ecryptfs_open,
8366     - .flush = ecryptfs_flush,
8367     - .release = ecryptfs_release,
8368     + .open = ecryptfs_dir_open,
8369     + .release = ecryptfs_dir_release,
8370     .fsync = ecryptfs_fsync,
8371     - .fasync = ecryptfs_fasync,
8372     - .splice_read = generic_file_splice_read,
8373     - .llseek = default_llseek,
8374     + .llseek = ecryptfs_dir_llseek,
8375     };
8376    
8377     const struct file_operations ecryptfs_main_fops = {
8378     .llseek = generic_file_llseek,
8379     .read_iter = ecryptfs_read_update_atime,
8380     .write_iter = generic_file_write_iter,
8381     - .iterate = ecryptfs_readdir,
8382     .unlocked_ioctl = ecryptfs_unlocked_ioctl,
8383     #ifdef CONFIG_COMPAT
8384     .compat_ioctl = ecryptfs_compat_ioctl,
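
The eCryptfs hunks give directories their own open/release/llseek that simply wrap the lower filesystem's directory, instead of sending them through the crypto-oriented regular-file path. A loose userspace analogy of that delegation, using stdio in place of the VFS (tmpfile() stands in for the lower directory):

#include <stdio.h>

/* A wrapper that owns a lower file and forwards operations to it, the
 * way ecryptfs_dir_llseek() forwards to vfs_llseek() on the lower dir. */
struct upper { FILE *lower; };

static long upper_llseek(struct upper *u, long off, int whence)
{
    if (fseek(u->lower, off, whence))
        return -1;
    return ftell(u->lower);
}

static void upper_release(struct upper *u)
{
    fclose(u->lower);  /* mirrors the fput() in ecryptfs_dir_release() */
}

int main(void)
{
    struct upper u = { tmpfile() };

    if (!u.lower)
        return 1;
    fputs("entry", u.lower);
    printf("pos=%ld\n", upper_llseek(&u, 0, SEEK_END)); /* pos=5 */
    upper_release(&u);
    return 0;
}
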
8385     diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
8386     index 9a5ad0f0d3ed..28702932a908 100644
8387     --- a/fs/ext4/inode.c
8388     +++ b/fs/ext4/inode.c
8389     @@ -51,25 +51,31 @@ static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
8390     struct ext4_inode_info *ei)
8391     {
8392     struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
8393     - __u16 csum_lo;
8394     - __u16 csum_hi = 0;
8395     __u32 csum;
8396     + __u16 dummy_csum = 0;
8397     + int offset = offsetof(struct ext4_inode, i_checksum_lo);
8398     + unsigned int csum_size = sizeof(dummy_csum);
8399    
8400     - csum_lo = le16_to_cpu(raw->i_checksum_lo);
8401     - raw->i_checksum_lo = 0;
8402     - if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
8403     - EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
8404     - csum_hi = le16_to_cpu(raw->i_checksum_hi);
8405     - raw->i_checksum_hi = 0;
8406     - }
8407     + csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, offset);
8408     + csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size);
8409     + offset += csum_size;
8410     + csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
8411     + EXT4_GOOD_OLD_INODE_SIZE - offset);
8412    
8413     - csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw,
8414     - EXT4_INODE_SIZE(inode->i_sb));
8415     -
8416     - raw->i_checksum_lo = cpu_to_le16(csum_lo);
8417     - if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
8418     - EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
8419     - raw->i_checksum_hi = cpu_to_le16(csum_hi);
8420     + if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
8421     + offset = offsetof(struct ext4_inode, i_checksum_hi);
8422     + csum = ext4_chksum(sbi, csum, (__u8 *)raw +
8423     + EXT4_GOOD_OLD_INODE_SIZE,
8424     + offset - EXT4_GOOD_OLD_INODE_SIZE);
8425     + if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
8426     + csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
8427     + csum_size);
8428     + offset += csum_size;
8429     + csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
8430     + EXT4_INODE_SIZE(inode->i_sb) -
8431     + offset);
8432     + }
8433     + }
8434    
8435     return csum;
8436     }
8437     @@ -5186,8 +5192,6 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
8438     sbi->s_want_extra_isize,
8439     iloc, handle);
8440     if (ret) {
8441     - ext4_set_inode_state(inode,
8442     - EXT4_STATE_NO_EXPAND);
8443     if (mnt_count !=
8444     le16_to_cpu(sbi->s_es->s_mnt_count)) {
8445     ext4_warning(inode->i_sb,
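
This ext4_inode_csum() rewrite, and the matching hunks in namei.c, super.c, and xattr.c below, all switch to the same pattern: checksum the bytes before the checksum field, feed a zeroed dummy in its place, then checksum the rest, so the on-disk buffer is never temporarily modified by the old save/zero/restore dance. A standalone sketch, with a toy FNV-style hash standing in for ext4_chksum():

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Toy running hash; only the feeding pattern matters here. */
static uint32_t chk(uint32_t seed, const void *p, size_t len)
{
    const uint8_t *b = p;

    while (len--)
        seed = (seed ^ *b++) * 16777619u;
    return seed;
}

struct rec {
    uint32_t a;
    uint16_t checksum;  /* covered as zero, like i_checksum_lo */
    uint16_t b;
};

/* Checksum the record with its checksum field treated as zero,
 * without ever writing to the record itself. */
static uint32_t rec_csum(const struct rec *r)
{
    uint16_t dummy = 0;
    size_t off = offsetof(struct rec, checksum);
    uint32_t c;

    c = chk(2166136261u, r, off);
    c = chk(c, &dummy, sizeof(dummy));
    off += sizeof(dummy);
    c = chk(c, (const uint8_t *)r + off, sizeof(*r) - off);
    return c;
}

int main(void)
{
    struct rec r = { .a = 42, .checksum = 0xffff, .b = 7 };

    printf("%08x\n", (unsigned)rec_csum(&r)); /* independent of r.checksum */
    return 0;
}
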
8446     diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
8447     index 91bf36f22dbf..38eb0c8e43b9 100644
8448     --- a/fs/ext4/namei.c
8449     +++ b/fs/ext4/namei.c
8450     @@ -420,15 +420,14 @@ static __le32 ext4_dx_csum(struct inode *inode, struct ext4_dir_entry *dirent,
8451     struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
8452     struct ext4_inode_info *ei = EXT4_I(inode);
8453     __u32 csum;
8454     - __le32 save_csum;
8455     int size;
8456     + __u32 dummy_csum = 0;
8457     + int offset = offsetof(struct dx_tail, dt_checksum);
8458    
8459     size = count_offset + (count * sizeof(struct dx_entry));
8460     - save_csum = t->dt_checksum;
8461     - t->dt_checksum = 0;
8462     csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size);
8463     - csum = ext4_chksum(sbi, csum, (__u8 *)t, sizeof(struct dx_tail));
8464     - t->dt_checksum = save_csum;
8465     + csum = ext4_chksum(sbi, csum, (__u8 *)t, offset);
8466     + csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, sizeof(dummy_csum));
8467    
8468     return cpu_to_le32(csum);
8469     }
8470     diff --git a/fs/ext4/super.c b/fs/ext4/super.c
8471     index c542ebcf7a92..5bab28caa9d4 100644
8472     --- a/fs/ext4/super.c
8473     +++ b/fs/ext4/super.c
8474     @@ -2030,23 +2030,25 @@ failed:
8475     static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
8476     struct ext4_group_desc *gdp)
8477     {
8478     - int offset;
8479     + int offset = offsetof(struct ext4_group_desc, bg_checksum);
8480     __u16 crc = 0;
8481     __le32 le_group = cpu_to_le32(block_group);
8482     struct ext4_sb_info *sbi = EXT4_SB(sb);
8483    
8484     if (ext4_has_metadata_csum(sbi->s_sb)) {
8485     /* Use new metadata_csum algorithm */
8486     - __le16 save_csum;
8487     __u32 csum32;
8488     + __u16 dummy_csum = 0;
8489    
8490     - save_csum = gdp->bg_checksum;
8491     - gdp->bg_checksum = 0;
8492     csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group,
8493     sizeof(le_group));
8494     - csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp,
8495     - sbi->s_desc_size);
8496     - gdp->bg_checksum = save_csum;
8497     + csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, offset);
8498     + csum32 = ext4_chksum(sbi, csum32, (__u8 *)&dummy_csum,
8499     + sizeof(dummy_csum));
8500     + offset += sizeof(dummy_csum);
8501     + if (offset < sbi->s_desc_size)
8502     + csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp + offset,
8503     + sbi->s_desc_size - offset);
8504    
8505     crc = csum32 & 0xFFFF;
8506     goto out;
8507     @@ -2056,8 +2058,6 @@ static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
8508     if (!ext4_has_feature_gdt_csum(sb))
8509     return 0;
8510    
8511     - offset = offsetof(struct ext4_group_desc, bg_checksum);
8512     -
8513     crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
8514     crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
8515     crc = crc16(crc, (__u8 *)gdp, offset);
8516     @@ -2093,6 +2093,7 @@ void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
8517    
8518     /* Called at mount-time, super-block is locked */
8519     static int ext4_check_descriptors(struct super_block *sb,
8520     + ext4_fsblk_t sb_block,
8521     ext4_group_t *first_not_zeroed)
8522     {
8523     struct ext4_sb_info *sbi = EXT4_SB(sb);
8524     @@ -2123,6 +2124,11 @@ static int ext4_check_descriptors(struct super_block *sb,
8525     grp = i;
8526    
8527     block_bitmap = ext4_block_bitmap(sb, gdp);
8528     + if (block_bitmap == sb_block) {
8529     + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
8530     + "Block bitmap for group %u overlaps "
8531     + "superblock", i);
8532     + }
8533     if (block_bitmap < first_block || block_bitmap > last_block) {
8534     ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
8535     "Block bitmap for group %u not in group "
8536     @@ -2130,6 +2136,11 @@ static int ext4_check_descriptors(struct super_block *sb,
8537     return 0;
8538     }
8539     inode_bitmap = ext4_inode_bitmap(sb, gdp);
8540     + if (inode_bitmap == sb_block) {
8541     + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
8542     + "Inode bitmap for group %u overlaps "
8543     + "superblock", i);
8544     + }
8545     if (inode_bitmap < first_block || inode_bitmap > last_block) {
8546     ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
8547     "Inode bitmap for group %u not in group "
8548     @@ -2137,6 +2148,11 @@ static int ext4_check_descriptors(struct super_block *sb,
8549     return 0;
8550     }
8551     inode_table = ext4_inode_table(sb, gdp);
8552     + if (inode_table == sb_block) {
8553     + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
8554     + "Inode table for group %u overlaps "
8555     + "superblock", i);
8556     + }
8557     if (inode_table < first_block ||
8558     inode_table + sbi->s_itb_per_group - 1 > last_block) {
8559     ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
8560     @@ -3640,7 +3656,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
8561     goto failed_mount2;
8562     }
8563     }
8564     - if (!ext4_check_descriptors(sb, &first_not_zeroed)) {
8565     + if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
8566     ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
8567     ret = -EFSCORRUPTED;
8568     goto failed_mount2;
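
Both ext4 checksum hunks above replace the save/zero/restore dance on the on-disk checksum field with folding a zeroed dummy over that field's byte range, so the shared buffer is never written while other CPUs may be reading it. A minimal user-space sketch of the same pattern, with FNV-1a standing in for ext4_chksum() and a purely illustrative struct layout:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct desc {
	uint32_t a;
	uint16_t checksum;	/* excluded from its own checksum */
	uint16_t b;
};

static uint32_t fnv1a(uint32_t h, const uint8_t *p, size_t n)
{
	while (n--)
		h = (h ^ *p++) * 16777619u;
	return h;
}

static uint32_t desc_csum(const struct desc *d)
{
	size_t off = offsetof(struct desc, checksum);
	uint16_t dummy = 0;
	uint32_t h = 2166136261u;

	/* bytes before the field, a zeroed placeholder, bytes after it */
	h = fnv1a(h, (const uint8_t *)d, off);
	h = fnv1a(h, (const uint8_t *)&dummy, sizeof(dummy));
	off += sizeof(dummy);
	h = fnv1a(h, (const uint8_t *)d + off, sizeof(*d) - off);
	return h;
}

int main(void)
{
	struct desc d = { .a = 1, .checksum = 0xdead, .b = 2 };

	/* The stored checksum value must not affect the result. */
	printf("%08x\n", desc_csum(&d));
	d.checksum = 0;
	printf("%08x\n", desc_csum(&d));	/* prints the same value */
	return 0;
}
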
8569     diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
8570     index 6b6b3e751f8c..263002f0389d 100644
8571     --- a/fs/ext4/xattr.c
8572     +++ b/fs/ext4/xattr.c
8573     @@ -123,17 +123,18 @@ static __le32 ext4_xattr_block_csum(struct inode *inode,
8574     {
8575     struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
8576     __u32 csum;
8577     - __le32 save_csum;
8578     __le64 dsk_block_nr = cpu_to_le64(block_nr);
8579     + __u32 dummy_csum = 0;
8580     + int offset = offsetof(struct ext4_xattr_header, h_checksum);
8581    
8582     - save_csum = hdr->h_checksum;
8583     - hdr->h_checksum = 0;
8584     csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&dsk_block_nr,
8585     sizeof(dsk_block_nr));
8586     - csum = ext4_chksum(sbi, csum, (__u8 *)hdr,
8587     - EXT4_BLOCK_SIZE(inode->i_sb));
8588     + csum = ext4_chksum(sbi, csum, (__u8 *)hdr, offset);
8589     + csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, sizeof(dummy_csum));
8590     + offset += sizeof(dummy_csum);
8591     + csum = ext4_chksum(sbi, csum, (__u8 *)hdr + offset,
8592     + EXT4_BLOCK_SIZE(inode->i_sb) - offset);
8593    
8594     - hdr->h_checksum = save_csum;
8595     return cpu_to_le32(csum);
8596     }
8597    
8598     @@ -1264,15 +1265,19 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
8599     size_t min_offs, free;
8600     int total_ino;
8601     void *base, *start, *end;
8602     - int extra_isize = 0, error = 0, tried_min_extra_isize = 0;
8603     + int error = 0, tried_min_extra_isize = 0;
8604     int s_min_extra_isize = le16_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_min_extra_isize);
8605     + int isize_diff; /* How much do we need to grow i_extra_isize */
8606    
8607     down_write(&EXT4_I(inode)->xattr_sem);
8608     + /*
8609     + * Set EXT4_STATE_NO_EXPAND to avoid recursion when marking inode dirty
8610     + */
8611     + ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
8612     retry:
8613     - if (EXT4_I(inode)->i_extra_isize >= new_extra_isize) {
8614     - up_write(&EXT4_I(inode)->xattr_sem);
8615     - return 0;
8616     - }
8617     + isize_diff = new_extra_isize - EXT4_I(inode)->i_extra_isize;
8618     + if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
8619     + goto out;
8620    
8621     header = IHDR(inode, raw_inode);
8622     entry = IFIRST(header);
8623     @@ -1289,7 +1294,7 @@ retry:
8624     total_ino = sizeof(struct ext4_xattr_ibody_header);
8625    
8626     free = ext4_xattr_free_space(last, &min_offs, base, &total_ino);
8627     - if (free >= new_extra_isize) {
8628     + if (free >= isize_diff) {
8629     entry = IFIRST(header);
8630     ext4_xattr_shift_entries(entry, EXT4_I(inode)->i_extra_isize
8631     - new_extra_isize, (void *)raw_inode +
8632     @@ -1297,8 +1302,7 @@ retry:
8633     (void *)header, total_ino,
8634     inode->i_sb->s_blocksize);
8635     EXT4_I(inode)->i_extra_isize = new_extra_isize;
8636     - error = 0;
8637     - goto cleanup;
8638     + goto out;
8639     }
8640    
8641     /*
8642     @@ -1321,7 +1325,7 @@ retry:
8643     end = bh->b_data + bh->b_size;
8644     min_offs = end - base;
8645     free = ext4_xattr_free_space(first, &min_offs, base, NULL);
8646     - if (free < new_extra_isize) {
8647     + if (free < isize_diff) {
8648     if (!tried_min_extra_isize && s_min_extra_isize) {
8649     tried_min_extra_isize++;
8650     new_extra_isize = s_min_extra_isize;
8651     @@ -1335,7 +1339,7 @@ retry:
8652     free = inode->i_sb->s_blocksize;
8653     }
8654    
8655     - while (new_extra_isize > 0) {
8656     + while (isize_diff > 0) {
8657     size_t offs, size, entry_size;
8658     struct ext4_xattr_entry *small_entry = NULL;
8659     struct ext4_xattr_info i = {
8660     @@ -1366,7 +1370,7 @@ retry:
8661     EXT4_XATTR_SIZE(le32_to_cpu(last->e_value_size)) +
8662     EXT4_XATTR_LEN(last->e_name_len);
8663     if (total_size <= free && total_size < min_total_size) {
8664     - if (total_size < new_extra_isize) {
8665     + if (total_size < isize_diff) {
8666     small_entry = last;
8667     } else {
8668     entry = last;
8669     @@ -1421,22 +1425,22 @@ retry:
8670     error = ext4_xattr_ibody_set(handle, inode, &i, is);
8671     if (error)
8672     goto cleanup;
8673     + total_ino -= entry_size;
8674    
8675     entry = IFIRST(header);
8676     - if (entry_size + EXT4_XATTR_SIZE(size) >= new_extra_isize)
8677     - shift_bytes = new_extra_isize;
8678     + if (entry_size + EXT4_XATTR_SIZE(size) >= isize_diff)
8679     + shift_bytes = isize_diff;
8680     else
8681     - shift_bytes = entry_size + size;
8682     + shift_bytes = entry_size + EXT4_XATTR_SIZE(size);
8683     /* Adjust the offsets and shift the remaining entries ahead */
8684     - ext4_xattr_shift_entries(entry, EXT4_I(inode)->i_extra_isize -
8685     - shift_bytes, (void *)raw_inode +
8686     - EXT4_GOOD_OLD_INODE_SIZE + extra_isize + shift_bytes,
8687     - (void *)header, total_ino - entry_size,
8688     - inode->i_sb->s_blocksize);
8689     + ext4_xattr_shift_entries(entry, -shift_bytes,
8690     + (void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
8691     + EXT4_I(inode)->i_extra_isize + shift_bytes,
8692     + (void *)header, total_ino, inode->i_sb->s_blocksize);
8693    
8694     - extra_isize += shift_bytes;
8695     - new_extra_isize -= shift_bytes;
8696     - EXT4_I(inode)->i_extra_isize = extra_isize;
8697     + isize_diff -= shift_bytes;
8698     + EXT4_I(inode)->i_extra_isize += shift_bytes;
8699     + header = IHDR(inode, raw_inode);
8700    
8701     i.name = b_entry_name;
8702     i.value = buffer;
8703     @@ -1458,6 +1462,8 @@ retry:
8704     kfree(bs);
8705     }
8706     brelse(bh);
8707     +out:
8708     + ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
8709     up_write(&EXT4_I(inode)->xattr_sem);
8710     return 0;
8711    
8712     @@ -1469,6 +1475,10 @@ cleanup:
8713     kfree(is);
8714     kfree(bs);
8715     brelse(bh);
8716     + /*
8717     + * We deliberately leave EXT4_STATE_NO_EXPAND set here since inode
8718     + * size expansion failed.
8719     + */
8720     up_write(&EXT4_I(inode)->xattr_sem);
8721     return error;
8722     }
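
ext4_expand_extra_isize_ea() now sets EXT4_STATE_NO_EXPAND before doing any work, because marking the inode dirty from inside the expansion could re-enter the same path. A toy sketch of that re-entrancy guard; the names and the dirty callback are made up for illustration:

#include <stdbool.h>
#include <stdio.h>

struct node { bool no_expand; int extra; };

static void mark_dirty(struct node *n);

/* Grows n->extra; mark_dirty() may call back into this function. */
static void expand(struct node *n, int want)
{
	if (n->no_expand)	/* already expanding: break the recursion */
		return;
	n->no_expand = true;
	while (n->extra < want) {
		n->extra++;
		mark_dirty(n);	/* would recurse forever without the guard */
	}
	n->no_expand = false;
}

static void mark_dirty(struct node *n)
{
	expand(n, 4);	/* re-entry is a no-op while the flag is set */
}

int main(void)
{
	struct node n = { false, 0 };

	expand(&n, 8);
	printf("extra=%d\n", n.extra);	/* 8 */
	return 0;
}
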
8723     diff --git a/fs/namei.c b/fs/namei.c
8724     index 209ca7737cb2..0b0acba72a71 100644
8725     --- a/fs/namei.c
8726     +++ b/fs/namei.c
8727     @@ -887,6 +887,7 @@ static inline int may_follow_link(struct nameidata *nd)
8728     {
8729     const struct inode *inode;
8730     const struct inode *parent;
8731     + kuid_t puid;
8732    
8733     if (!sysctl_protected_symlinks)
8734     return 0;
8735     @@ -902,7 +903,8 @@ static inline int may_follow_link(struct nameidata *nd)
8736     return 0;
8737    
8738     /* Allowed if parent directory and link owner match. */
8739     - if (uid_eq(parent->i_uid, inode->i_uid))
8740     + puid = parent->i_uid;
8741     + if (uid_valid(puid) && uid_eq(puid, inode->i_uid))
8742     return 0;
8743    
8744     if (nd->flags & LOOKUP_RCU)
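
The may_follow_link() change matters because kuid comparison is plain value equality: two invalid uids would otherwise "match" and defeat the protected-symlink restriction. A toy demonstration with a simplified kuid_t wrapper (the kernel's type is opaque, this mirrors only its comparison semantics):

#include <stdbool.h>
#include <stdio.h>

typedef struct { unsigned val; } kuid_t;	/* simplified stand-in */
#define INVALID_UID ((kuid_t){ (unsigned)-1 })

static bool uid_valid(kuid_t u) { return u.val != (unsigned)-1; }
static bool uid_eq(kuid_t a, kuid_t b) { return a.val == b.val; }

int main(void)
{
	kuid_t parent = INVALID_UID, link = INVALID_UID;

	/* Without the validity check, two INVALID_UIDs compare equal. */
	printf("raw eq: %d\n", uid_eq(parent, link));			/* 1 */
	printf("guarded: %d\n", uid_valid(parent) && uid_eq(parent, link)); /* 0 */
	return 0;
}
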
8745     diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
8746     index eff6319d5037..9e52609cd683 100644
8747     --- a/fs/overlayfs/copy_up.c
8748     +++ b/fs/overlayfs/copy_up.c
8749     @@ -48,6 +48,8 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new)
8750     }
8751    
8752     for (name = buf; name < (buf + list_size); name += strlen(name) + 1) {
8753     + if (ovl_is_private_xattr(name))
8754     + continue;
8755     retry:
8756     size = vfs_getxattr(old, name, value, value_size);
8757     if (size == -ERANGE)
8758     diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
8759     index 4f729ffff75d..220b04f04523 100644
8760     --- a/fs/overlayfs/inode.c
8761     +++ b/fs/overlayfs/inode.c
8762     @@ -219,7 +219,7 @@ static int ovl_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
8763     }
8764    
8765    
8766     -static bool ovl_is_private_xattr(const char *name)
8767     +bool ovl_is_private_xattr(const char *name)
8768     {
8769     return strncmp(name, OVL_XATTR_PRE_NAME, OVL_XATTR_PRE_LEN) == 0;
8770     }
8771     @@ -277,7 +277,8 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
8772     struct path realpath;
8773     enum ovl_path_type type = ovl_path_real(dentry, &realpath);
8774     ssize_t res;
8775     - int off;
8776     + size_t len;
8777     + char *s;
8778    
8779     res = vfs_listxattr(realpath.dentry, list, size);
8780     if (res <= 0 || size == 0)
8781     @@ -287,17 +288,19 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
8782     return res;
8783    
8784     /* filter out private xattrs */
8785     - for (off = 0; off < res;) {
8786     - char *s = list + off;
8787     - size_t slen = strlen(s) + 1;
8788     + for (s = list, len = res; len;) {
8789     + size_t slen = strnlen(s, len) + 1;
8790    
8791     - BUG_ON(off + slen > res);
8792     + /* underlying fs providing us with a broken xattr list? */
8793     + if (WARN_ON(slen > len))
8794     + return -EIO;
8795    
8796     + len -= slen;
8797     if (ovl_is_private_xattr(s)) {
8798     res -= slen;
8799     - memmove(s, s + slen, res - off);
8800     + memmove(s, s + slen, len);
8801     } else {
8802     - off += slen;
8803     + s += slen;
8804     }
8805     }
8806    
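
The rewritten ovl_listxattr() loop walks a NUL-separated name list with an explicit remaining-length bound (strnlen) and compacts it in place with memmove(). A self-contained sketch of the same filtering scheme; the prefix and buffer contents are illustrative:

#include <stdio.h>
#include <string.h>

/* Drop names with the given prefix from a NUL-separated list, in place.
 * Returns the new total length, or -1 if an entry overruns the buffer. */
static int filter_list(char *list, int res, const char *pre)
{
	size_t len = res, plen = strlen(pre);
	char *s = list;

	while (len) {
		size_t slen = strnlen(s, len) + 1;

		if (slen > len)		/* unterminated entry: corrupt list */
			return -1;
		len -= slen;
		if (!strncmp(s, pre, plen)) {
			res -= slen;
			memmove(s, s + slen, len);	/* close the gap */
		} else {
			s += slen;
		}
	}
	return res;
}

int main(void)
{
	char buf[] = "user.a\0trusted.overlay.x\0user.b";
	int n = filter_list(buf, sizeof(buf), "trusted.overlay.");

	for (char *s = buf; s < buf + n; s += strlen(s) + 1)
		printf("%s\n", s);	/* user.a, user.b */
	return 0;
}
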
8807     diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
8808     index 735e1d49b301..c319d5eaabcf 100644
8809     --- a/fs/overlayfs/overlayfs.h
8810     +++ b/fs/overlayfs/overlayfs.h
8811     @@ -174,6 +174,7 @@ ssize_t ovl_getxattr(struct dentry *dentry, const char *name,
8812     ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size);
8813     int ovl_removexattr(struct dentry *dentry, const char *name);
8814     struct inode *ovl_d_select_inode(struct dentry *dentry, unsigned file_flags);
8815     +bool ovl_is_private_xattr(const char *name);
8816    
8817     struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
8818     struct ovl_entry *oe);
8819     diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
8820     index 70a7bbe199d0..d70208c0de84 100644
8821     --- a/fs/overlayfs/super.c
8822     +++ b/fs/overlayfs/super.c
8823     @@ -763,6 +763,10 @@ retry:
8824     struct kstat stat = {
8825     .mode = S_IFDIR | 0,
8826     };
8827     + struct iattr attr = {
8828     + .ia_valid = ATTR_MODE,
8829     + .ia_mode = stat.mode,
8830     + };
8831    
8832     if (work->d_inode) {
8833     err = -EEXIST;
8834     @@ -778,6 +782,21 @@ retry:
8835     err = ovl_create_real(dir, work, &stat, NULL, NULL, true);
8836     if (err)
8837     goto out_dput;
8838     +
8839     + err = vfs_removexattr(work, XATTR_NAME_POSIX_ACL_DEFAULT);
8840     + if (err && err != -ENODATA && err != -EOPNOTSUPP)
8841     + goto out_dput;
8842     +
8843     + err = vfs_removexattr(work, XATTR_NAME_POSIX_ACL_ACCESS);
8844     + if (err && err != -ENODATA && err != -EOPNOTSUPP)
8845     + goto out_dput;
8846     +
8847     + /* Clear any inherited mode bits */
8848     + inode_lock(work->d_inode);
8849     + err = notify_change(work, &attr, NULL);
8850     + inode_unlock(work->d_inode);
8851     + if (err)
8852     + goto out_dput;
8853     }
8854     out_unlock:
8855     mutex_unlock(&dir->i_mutex);
8856     diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
8857     index f6478301db00..d598b9c809c1 100644
8858     --- a/fs/proc/task_mmu.c
8859     +++ b/fs/proc/task_mmu.c
8860     @@ -248,23 +248,29 @@ static int do_maps_open(struct inode *inode, struct file *file,
8861     sizeof(struct proc_maps_private));
8862     }
8863    
8864     -static pid_t pid_of_stack(struct proc_maps_private *priv,
8865     - struct vm_area_struct *vma, bool is_pid)
8866     +/*
8867     + * Indicate if the VMA is a stack for the given task; for
8868     + * /proc/PID/maps that is the stack of the main task.
8869     + */
8870     +static int is_stack(struct proc_maps_private *priv,
8871     + struct vm_area_struct *vma, int is_pid)
8872     {
8873     - struct inode *inode = priv->inode;
8874     - struct task_struct *task;
8875     - pid_t ret = 0;
8876     + int stack = 0;
8877     +
8878     + if (is_pid) {
8879     + stack = vma->vm_start <= vma->vm_mm->start_stack &&
8880     + vma->vm_end >= vma->vm_mm->start_stack;
8881     + } else {
8882     + struct inode *inode = priv->inode;
8883     + struct task_struct *task;
8884    
8885     - rcu_read_lock();
8886     - task = pid_task(proc_pid(inode), PIDTYPE_PID);
8887     - if (task) {
8888     - task = task_of_stack(task, vma, is_pid);
8889     + rcu_read_lock();
8890     + task = pid_task(proc_pid(inode), PIDTYPE_PID);
8891     if (task)
8892     - ret = task_pid_nr_ns(task, inode->i_sb->s_fs_info);
8893     + stack = vma_is_stack_for_task(vma, task);
8894     + rcu_read_unlock();
8895     }
8896     - rcu_read_unlock();
8897     -
8898     - return ret;
8899     + return stack;
8900     }
8901    
8902     static void
8903     @@ -324,8 +330,6 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
8904    
8905     name = arch_vma_name(vma);
8906     if (!name) {
8907     - pid_t tid;
8908     -
8909     if (!mm) {
8910     name = "[vdso]";
8911     goto done;
8912     @@ -337,21 +341,8 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
8913     goto done;
8914     }
8915    
8916     - tid = pid_of_stack(priv, vma, is_pid);
8917     - if (tid != 0) {
8918     - /*
8919     - * Thread stack in /proc/PID/task/TID/maps or
8920     - * the main process stack.
8921     - */
8922     - if (!is_pid || (vma->vm_start <= mm->start_stack &&
8923     - vma->vm_end >= mm->start_stack)) {
8924     - name = "[stack]";
8925     - } else {
8926     - /* Thread stack in /proc/PID/maps */
8927     - seq_pad(m, ' ');
8928     - seq_printf(m, "[stack:%d]", tid);
8929     - }
8930     - }
8931     + if (is_stack(priv, vma, is_pid))
8932     + name = "[stack]";
8933     }
8934    
8935     done:
8936     @@ -1566,19 +1557,8 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
8937     seq_file_path(m, file, "\n\t= ");
8938     } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
8939     seq_puts(m, " heap");
8940     - } else {
8941     - pid_t tid = pid_of_stack(proc_priv, vma, is_pid);
8942     - if (tid != 0) {
8943     - /*
8944     - * Thread stack in /proc/PID/task/TID/maps or
8945     - * the main process stack.
8946     - */
8947     - if (!is_pid || (vma->vm_start <= mm->start_stack &&
8948     - vma->vm_end >= mm->start_stack))
8949     - seq_puts(m, " stack");
8950     - else
8951     - seq_printf(m, " stack:%d", tid);
8952     - }
8953     + } else if (is_stack(proc_priv, vma, is_pid)) {
8954     + seq_puts(m, " stack");
8955     }
8956    
8957     if (is_vm_hugetlb_page(vma))
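
With the [stack:TID] annotation gone, a mapping is reported as [stack] in /proc/PID/maps only when it contains the mm's start_stack, so the test reduces to a range check. A trivial sketch with an illustrative VMA and start_stack passed as a parameter:

#include <stdbool.h>
#include <stdio.h>

struct vma { unsigned long start, end; };

/* the VMA is the main thread's stack iff it contains start_stack */
static bool is_main_stack(const struct vma *v, unsigned long start_stack)
{
	return v->start <= start_stack && v->end >= start_stack;
}

int main(void)
{
	struct vma v = { 0x7ffd0000UL, 0x7fff0000UL };

	printf("%d\n", is_main_stack(&v, 0x7ffe0000UL));	/* 1 */
	printf("%d\n", is_main_stack(&v, 0x70000000UL));	/* 0 */
	return 0;
}
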
8958     diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
8959     index e0d64c92e4f6..faacb0c0d857 100644
8960     --- a/fs/proc/task_nommu.c
8961     +++ b/fs/proc/task_nommu.c
8962     @@ -123,23 +123,26 @@ unsigned long task_statm(struct mm_struct *mm,
8963     return size;
8964     }
8965    
8966     -static pid_t pid_of_stack(struct proc_maps_private *priv,
8967     - struct vm_area_struct *vma, bool is_pid)
8968     +static int is_stack(struct proc_maps_private *priv,
8969     + struct vm_area_struct *vma, int is_pid)
8970     {
8971     - struct inode *inode = priv->inode;
8972     - struct task_struct *task;
8973     - pid_t ret = 0;
8974     -
8975     - rcu_read_lock();
8976     - task = pid_task(proc_pid(inode), PIDTYPE_PID);
8977     - if (task) {
8978     - task = task_of_stack(task, vma, is_pid);
8979     + struct mm_struct *mm = vma->vm_mm;
8980     + int stack = 0;
8981     +
8982     + if (is_pid) {
8983     + stack = vma->vm_start <= mm->start_stack &&
8984     + vma->vm_end >= mm->start_stack;
8985     + } else {
8986     + struct inode *inode = priv->inode;
8987     + struct task_struct *task;
8988     +
8989     + rcu_read_lock();
8990     + task = pid_task(proc_pid(inode), PIDTYPE_PID);
8991     if (task)
8992     - ret = task_pid_nr_ns(task, inode->i_sb->s_fs_info);
8993     + stack = vma_is_stack_for_task(vma, task);
8994     + rcu_read_unlock();
8995     }
8996     - rcu_read_unlock();
8997     -
8998     - return ret;
8999     + return stack;
9000     }
9001    
9002     /*
9003     @@ -181,21 +184,9 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
9004     if (file) {
9005     seq_pad(m, ' ');
9006     seq_file_path(m, file, "");
9007     - } else if (mm) {
9008     - pid_t tid = pid_of_stack(priv, vma, is_pid);
9009     -
9010     - if (tid != 0) {
9011     - seq_pad(m, ' ');
9012     - /*
9013     - * Thread stack in /proc/PID/task/TID/maps or
9014     - * the main process stack.
9015     - */
9016     - if (!is_pid || (vma->vm_start <= mm->start_stack &&
9017     - vma->vm_end >= mm->start_stack))
9018     - seq_printf(m, "[stack]");
9019     - else
9020     - seq_printf(m, "[stack:%d]", tid);
9021     - }
9022     + } else if (mm && is_stack(priv, vma, is_pid)) {
9023     + seq_pad(m, ' ');
9024     + seq_printf(m, "[stack]");
9025     }
9026    
9027     seq_putc(m, '\n');
9028     diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c
9029     index b45345d701e7..51157da3f76e 100644
9030     --- a/fs/ubifs/tnc_commit.c
9031     +++ b/fs/ubifs/tnc_commit.c
9032     @@ -370,7 +370,7 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt)
9033    
9034     p = c->gap_lebs;
9035     do {
9036     - ubifs_assert(p < c->gap_lebs + sizeof(int) * c->lst.idx_lebs);
9037     + ubifs_assert(p < c->gap_lebs + c->lst.idx_lebs);
9038     written = layout_leb_in_gaps(c, p);
9039     if (written < 0) {
9040     err = written;
9041     diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
9042     index 8a53eaa349f4..7088be6afb3c 100644
9043     --- a/fs/xfs/libxfs/xfs_sb.c
9044     +++ b/fs/xfs/libxfs/xfs_sb.c
9045     @@ -581,7 +581,8 @@ xfs_sb_verify(
9046     * Only check the in progress field for the primary superblock as
9047     * mkfs.xfs doesn't clear it from secondary superblocks.
9048     */
9049     - return xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR,
9050     + return xfs_mount_validate_sb(mp, &sb,
9051     + bp->b_maps[0].bm_bn == XFS_SB_DADDR,
9052     check_version);
9053     }
9054    
9055     diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
9056     index 17c445612e01..2cdc723d750f 100644
9057     --- a/include/drm/i915_pciids.h
9058     +++ b/include/drm/i915_pciids.h
9059     @@ -277,7 +277,9 @@
9060     INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */
9061    
9062     #define INTEL_SKL_GT3_IDS(info) \
9063     + INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \
9064     INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
9065     + INTEL_VGA_DEVICE(0x1927, info), /* ULT GT3 */ \
9066     INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
9067     INTEL_VGA_DEVICE(0x192A, info) /* SRV GT3 */ \
9068    
9069     @@ -289,6 +291,8 @@
9070     #define INTEL_BXT_IDS(info) \
9071     INTEL_VGA_DEVICE(0x0A84, info), \
9072     INTEL_VGA_DEVICE(0x1A84, info), \
9073     - INTEL_VGA_DEVICE(0x5A84, info)
9074     + INTEL_VGA_DEVICE(0x1A85, info), \
9075     + INTEL_VGA_DEVICE(0x5A84, info), /* APL HD Graphics 505 */ \
9076     + INTEL_VGA_DEVICE(0x5A85, info) /* APL HD Graphics 500 */
9077    
9078     #endif /* _I915_PCIIDS_H */
9079     diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
9080     index 168755791ec8..fe14382f9664 100644
9081     --- a/include/linux/blkdev.h
9082     +++ b/include/linux/blkdev.h
9083     @@ -890,7 +890,7 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
9084     {
9085     struct request_queue *q = rq->q;
9086    
9087     - if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
9088     + if (unlikely(rq->cmd_type != REQ_TYPE_FS))
9089     return q->limits.max_hw_sectors;
9090    
9091     if (!q->limits.chunk_sectors || (rq->cmd_flags & REQ_DISCARD))
9092     diff --git a/include/linux/capability.h b/include/linux/capability.h
9093     index af9f0b9e80e6..5f8249d378a2 100644
9094     --- a/include/linux/capability.h
9095     +++ b/include/linux/capability.h
9096     @@ -214,6 +214,7 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
9097     struct user_namespace *ns, int cap);
9098     extern bool capable(int cap);
9099     extern bool ns_capable(struct user_namespace *ns, int cap);
9100     +extern bool ns_capable_noaudit(struct user_namespace *ns, int cap);
9101     #else
9102     static inline bool has_capability(struct task_struct *t, int cap)
9103     {
9104     @@ -241,6 +242,10 @@ static inline bool ns_capable(struct user_namespace *ns, int cap)
9105     {
9106     return true;
9107     }
9108     +static inline bool ns_capable_noaudit(struct user_namespace *ns, int cap)
9109     +{
9110     + return true;
9111     +}
9112     #endif /* CONFIG_MULTIUSER */
9113     extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
9114     extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
9115     diff --git a/include/linux/fs.h b/include/linux/fs.h
9116     index ab3d8d9bb3ef..0166582c4d78 100644
9117     --- a/include/linux/fs.h
9118     +++ b/include/linux/fs.h
9119     @@ -710,6 +710,31 @@ enum inode_i_mutex_lock_class
9120     I_MUTEX_PARENT2,
9121     };
9122    
9123     +static inline void inode_lock(struct inode *inode)
9124     +{
9125     + mutex_lock(&inode->i_mutex);
9126     +}
9127     +
9128     +static inline void inode_unlock(struct inode *inode)
9129     +{
9130     + mutex_unlock(&inode->i_mutex);
9131     +}
9132     +
9133     +static inline int inode_trylock(struct inode *inode)
9134     +{
9135     + return mutex_trylock(&inode->i_mutex);
9136     +}
9137     +
9138     +static inline int inode_is_locked(struct inode *inode)
9139     +{
9140     + return mutex_is_locked(&inode->i_mutex);
9141     +}
9142     +
9143     +static inline void inode_lock_nested(struct inode *inode, unsigned subclass)
9144     +{
9145     + mutex_lock_nested(&inode->i_mutex, subclass);
9146     +}
9147     +
9148     void lock_two_nondirectories(struct inode *, struct inode*);
9149     void unlock_two_nondirectories(struct inode *, struct inode*);
9150    
9151     @@ -3029,8 +3054,8 @@ static inline bool dir_emit_dots(struct file *file, struct dir_context *ctx)
9152     }
9153     static inline bool dir_relax(struct inode *inode)
9154     {
9155     - mutex_unlock(&inode->i_mutex);
9156     - mutex_lock(&inode->i_mutex);
9157     + inode_unlock(inode);
9158     + inode_lock(inode);
9159     return !IS_DEADDIR(inode);
9160     }
9161    
9162     diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
9163     index 034117b3be5f..f09648d14694 100644
9164     --- a/include/linux/lightnvm.h
9165     +++ b/include/linux/lightnvm.h
9166     @@ -58,8 +58,9 @@ enum {
9167     /* Block Types */
9168     NVM_BLK_T_FREE = 0x0,
9169     NVM_BLK_T_BAD = 0x1,
9170     - NVM_BLK_T_DEV = 0x2,
9171     - NVM_BLK_T_HOST = 0x4,
9172     + NVM_BLK_T_GRWN_BAD = 0x2,
9173     + NVM_BLK_T_DEV = 0x4,
9174     + NVM_BLK_T_HOST = 0x8,
9175     };
9176    
9177     struct nvm_id_group {
9178     diff --git a/include/linux/mm.h b/include/linux/mm.h
9179     index f24df9c0b9df..8a761248d01e 100644
9180     --- a/include/linux/mm.h
9181     +++ b/include/linux/mm.h
9182     @@ -1311,8 +1311,7 @@ static inline int stack_guard_page_end(struct vm_area_struct *vma,
9183     !vma_growsup(vma->vm_next, addr);
9184     }
9185    
9186     -extern struct task_struct *task_of_stack(struct task_struct *task,
9187     - struct vm_area_struct *vma, bool in_group);
9188     +int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t);
9189    
9190     extern unsigned long move_page_tables(struct vm_area_struct *vma,
9191     unsigned long old_addr, struct vm_area_struct *new_vma,
9192     diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
9193     index f9828a48f16a..6cdd50f7f52d 100644
9194     --- a/include/linux/perf_event.h
9195     +++ b/include/linux/perf_event.h
9196     @@ -121,6 +121,7 @@ struct hw_perf_event {
9197     struct { /* intel_cqm */
9198     int cqm_state;
9199     u32 cqm_rmid;
9200     + int is_group_event;
9201     struct list_head cqm_events_entry;
9202     struct list_head cqm_groups_entry;
9203     struct list_head cqm_group_entry;
9204     diff --git a/include/linux/time.h b/include/linux/time.h
9205     index beebe3a02d43..297f09f23896 100644
9206     --- a/include/linux/time.h
9207     +++ b/include/linux/time.h
9208     @@ -125,6 +125,32 @@ static inline bool timeval_valid(const struct timeval *tv)
9209    
9210     extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
9211    
9212     +/*
9213     + * Validates if a timespec/timeval used to inject a time offset is valid.
9214     + * Offsets can be positive or negative. The value of the timeval/timespec
9215     + * is the sum of its fields, but *NOTE*: the field tv_usec/tv_nsec must
9216     + * always be non-negative.
9217     + */
9218     +static inline bool timeval_inject_offset_valid(const struct timeval *tv)
9219     +{
9220     + /* We don't check the tv_sec as it can be positive or negative */
9221     +
9222     + /* Can't have more microseconds than a second */
9223     + if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
9224     + return false;
9225     + return true;
9226     +}
9227     +
9228     +static inline bool timespec_inject_offset_valid(const struct timespec *ts)
9229     +{
9230     + /* We don't check the tv_sec as it can be positive or negative */
9231     +
9232     + /* Can't have more nanoseconds than a second */
9233     + if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC)
9234     + return false;
9235     + return true;
9236     +}
9237     +
9238     #define CURRENT_TIME (current_kernel_time())
9239     #define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 })
9240    
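
The new helpers rely on the convention that an injected offset is the sum of both fields with the sub-second part always non-negative, so -1.5s must be encoded as {-2s, +0.5s}. A short user-space check of that rule (NSEC_PER_SEC defined locally for the sketch):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000L

/* Mirrors timespec_inject_offset_valid(): tv_nsec must be in [0, 1s). */
static bool inject_offset_valid(const struct timespec *ts)
{
	return ts->tv_nsec >= 0 && ts->tv_nsec < NSEC_PER_SEC;
}

int main(void)
{
	/* -1.5s is encoded as -2s + 0.5s, keeping tv_nsec non-negative. */
	struct timespec ok  = { .tv_sec = -2, .tv_nsec = 500000000L };
	struct timespec bad = { .tv_sec = -1, .tv_nsec = -500000000L };

	printf("%d %d\n", inject_offset_valid(&ok), inject_offset_valid(&bad));
	return 0;
}
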
9241     diff --git a/include/uapi/linux/hyperv.h b/include/uapi/linux/hyperv.h
9242     index e4c0a35d6417..e347b24ef9fb 100644
9243     --- a/include/uapi/linux/hyperv.h
9244     +++ b/include/uapi/linux/hyperv.h
9245     @@ -313,6 +313,7 @@ enum hv_kvp_exchg_pool {
9246     #define HV_INVALIDARG 0x80070057
9247     #define HV_GUID_NOTFOUND 0x80041002
9248     #define HV_ERROR_ALREADY_EXISTS 0x80070050
9249     +#define HV_ERROR_DISK_FULL 0x80070070
9250    
9251     #define ADDR_FAMILY_NONE 0x00
9252     #define ADDR_FAMILY_IPV4 0x01
9253     diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
9254     index a0e87d16b726..421d27413731 100644
9255     --- a/include/uapi/linux/videodev2.h
9256     +++ b/include/uapi/linux/videodev2.h
9257     @@ -621,6 +621,9 @@ struct v4l2_pix_format {
9258     #define V4L2_PIX_FMT_JPGL v4l2_fourcc('J', 'P', 'G', 'L') /* JPEG-Lite */
9259     #define V4L2_PIX_FMT_SE401 v4l2_fourcc('S', '4', '0', '1') /* se401 janggu compressed rgb */
9260     #define V4L2_PIX_FMT_S5C_UYVY_JPG v4l2_fourcc('S', '5', 'C', 'I') /* S5C73M3 interleaved UYVY/JPEG */
9261     +#define V4L2_PIX_FMT_Y8I v4l2_fourcc('Y', '8', 'I', ' ') /* Greyscale 8-bit L/R interleaved */
9262     +#define V4L2_PIX_FMT_Y12I v4l2_fourcc('Y', '1', '2', 'I') /* Greyscale 12-bit L/R interleaved */
9263     +#define V4L2_PIX_FMT_Z16 v4l2_fourcc('Z', '1', '6', ' ') /* Depth data 16-bit */
9264    
9265     /* SDR formats - used only for Software Defined Radio devices */
9266     #define V4L2_SDR_FMT_CU8 v4l2_fourcc('C', 'U', '0', '8') /* IQ u8 */
9267     diff --git a/include/uapi/scsi/cxlflash_ioctl.h b/include/uapi/scsi/cxlflash_ioctl.h
9268     index 831351b2e660..2302f3ce5f86 100644
9269     --- a/include/uapi/scsi/cxlflash_ioctl.h
9270     +++ b/include/uapi/scsi/cxlflash_ioctl.h
9271     @@ -31,6 +31,16 @@ struct dk_cxlflash_hdr {
9272     };
9273    
9274     /*
9275     + * Return flag definitions available to all ioctls
9276     + *
9277     + * Similar to the input flags, these are grown from the bottom-up with the
9278     + * intention that ioctl-specific return flag definitions would grow from the
9279     + * top-down, allowing the two sets to co-exist. While not required/enforced
9280     + * at this time, this provides future flexibility.
9281     + */
9282     +#define DK_CXLFLASH_ALL_PORTS_ACTIVE 0x0000000000000001ULL
9283     +
9284     +/*
9285     * Notes:
9286     * -----
9287     * The 'context_id' field of all ioctl structures contains the context
9288     diff --git a/kernel/capability.c b/kernel/capability.c
9289     index 45432b54d5c6..00411c82dac5 100644
9290     --- a/kernel/capability.c
9291     +++ b/kernel/capability.c
9292     @@ -361,6 +361,24 @@ bool has_capability_noaudit(struct task_struct *t, int cap)
9293     return has_ns_capability_noaudit(t, &init_user_ns, cap);
9294     }
9295    
9296     +static bool ns_capable_common(struct user_namespace *ns, int cap, bool audit)
9297     +{
9298     + int capable;
9299     +
9300     + if (unlikely(!cap_valid(cap))) {
9301     + pr_crit("capable() called with invalid cap=%u\n", cap);
9302     + BUG();
9303     + }
9304     +
9305     + capable = audit ? security_capable(current_cred(), ns, cap) :
9306     + security_capable_noaudit(current_cred(), ns, cap);
9307     + if (capable == 0) {
9308     + current->flags |= PF_SUPERPRIV;
9309     + return true;
9310     + }
9311     + return false;
9312     +}
9313     +
9314     /**
9315     * ns_capable - Determine if the current task has a superior capability in effect
9316     * @ns: The usernamespace we want the capability in
9317     @@ -374,19 +392,27 @@ bool has_capability_noaudit(struct task_struct *t, int cap)
9318     */
9319     bool ns_capable(struct user_namespace *ns, int cap)
9320     {
9321     - if (unlikely(!cap_valid(cap))) {
9322     - pr_crit("capable() called with invalid cap=%u\n", cap);
9323     - BUG();
9324     - }
9325     -
9326     - if (security_capable(current_cred(), ns, cap) == 0) {
9327     - current->flags |= PF_SUPERPRIV;
9328     - return true;
9329     - }
9330     - return false;
9331     + return ns_capable_common(ns, cap, true);
9332     }
9333     EXPORT_SYMBOL(ns_capable);
9334    
9335     +/**
9336     + * ns_capable_noaudit - Determine if the current task has a superior capability
9337     + * (unaudited) in effect
9338     + * @ns: The usernamespace we want the capability in
9339     + * @cap: The capability to be tested for
9340     + *
9341     + * Return true if the current task has the given superior capability currently
9342     + * available for use, false if not.
9343     + *
9344     + * This sets PF_SUPERPRIV on the task if the capability is available on the
9345     + * assumption that it's about to be used.
9346     + */
9347     +bool ns_capable_noaudit(struct user_namespace *ns, int cap)
9348     +{
9349     + return ns_capable_common(ns, cap, false);
9350     +}
9351     +EXPORT_SYMBOL(ns_capable_noaudit);
9352    
9353     /**
9354     * capable - Determine if the current task has a superior capability in effect
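
ns_capable() and the new ns_capable_noaudit() now share one helper that differs only in whether a denial generates an audit record; callers that merely probe permissions, like net_ctl_permissions() further down, use the silent variant. A toy sketch of that split with the security hooks stubbed out:

#include <stdbool.h>
#include <stdio.h>

/* stand-in for security_capable{,_noaudit}(): denies everything here */
static int check_cap(int cap, bool audit)
{
	if (audit)
		fprintf(stderr, "audit: denied cap=%d\n", cap);
	return -1;
}

static bool capable_common(int cap, bool audit)
{
	return check_cap(cap, audit) == 0;
}

static bool my_capable(int cap)         { return capable_common(cap, true); }
static bool my_capable_noaudit(int cap) { return capable_common(cap, false); }

int main(void)
{
	/* A mode probe should stay silent; 12 ~ CAP_NET_ADMIN, illustrative. */
	if (!my_capable_noaudit(12))
		puts("fall back to file-mode bits, no audit record");
	if (!my_capable(12))		/* a real denial gets logged */
		puts("denied");
	return 0;
}
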
9355     diff --git a/kernel/cred.c b/kernel/cred.c
9356     index 71179a09c1d6..ff8606f77d90 100644
9357     --- a/kernel/cred.c
9358     +++ b/kernel/cred.c
9359     @@ -689,6 +689,8 @@ EXPORT_SYMBOL(set_security_override_from_ctx);
9360     */
9361     int set_create_files_as(struct cred *new, struct inode *inode)
9362     {
9363     + if (!uid_valid(inode->i_uid) || !gid_valid(inode->i_gid))
9364     + return -EINVAL;
9365     new->fsuid = inode->i_uid;
9366     new->fsgid = inode->i_gid;
9367     return security_kernel_create_files_as(new, inode);
9368     diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
9369     index 7dad84913abf..da0c09ff6112 100644
9370     --- a/kernel/events/uprobes.c
9371     +++ b/kernel/events/uprobes.c
9372     @@ -171,8 +171,10 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
9373     mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
9374     err = -EAGAIN;
9375     ptep = page_check_address(page, mm, addr, &ptl, 0);
9376     - if (!ptep)
9377     + if (!ptep) {
9378     + mem_cgroup_cancel_charge(kpage, memcg);
9379     goto unlock;
9380     + }
9381    
9382     get_page(kpage);
9383     page_add_new_anon_rmap(kpage, vma, addr);
9384     @@ -199,7 +201,6 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
9385    
9386     err = 0;
9387     unlock:
9388     - mem_cgroup_cancel_charge(kpage, memcg);
9389     mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
9390     unlock_page(page);
9391     return err;
9392     diff --git a/kernel/fork.c b/kernel/fork.c
9393     index 1155eac61687..c485cb156772 100644
9394     --- a/kernel/fork.c
9395     +++ b/kernel/fork.c
9396     @@ -1369,7 +1369,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
9397     p->real_start_time = ktime_get_boot_ns();
9398     p->io_context = NULL;
9399     p->audit_context = NULL;
9400     - threadgroup_change_begin(current);
9401     cgroup_fork(p);
9402     #ifdef CONFIG_NUMA
9403     p->mempolicy = mpol_dup(p->mempolicy);
9404     @@ -1521,6 +1520,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
9405     INIT_LIST_HEAD(&p->thread_group);
9406     p->task_works = NULL;
9407    
9408     + threadgroup_change_begin(current);
9409     /*
9410     * Ensure that the cgroup subsystem policies allow the new process to be
9411     * forked. It should be noted the the new process's css_set can be changed
9412     @@ -1621,6 +1621,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
9413     bad_fork_cancel_cgroup:
9414     cgroup_cancel_fork(p, cgrp_ss_priv);
9415     bad_fork_free_pid:
9416     + threadgroup_change_end(current);
9417     if (pid != &init_struct_pid)
9418     free_pid(pid);
9419     bad_fork_cleanup_io:
9420     @@ -1651,7 +1652,6 @@ bad_fork_cleanup_policy:
9421     mpol_put(p->mempolicy);
9422     bad_fork_cleanup_threadgroup_lock:
9423     #endif
9424     - threadgroup_change_end(current);
9425     delayacct_tsk_free(p);
9426     bad_fork_cleanup_count:
9427     atomic_dec(&p->cred->user->processes);
9428     diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
9429     index b8b516c37bf1..8f258f437ac2 100644
9430     --- a/kernel/sched/fair.c
9431     +++ b/kernel/sched/fair.c
9432     @@ -1191,8 +1191,6 @@ static void task_numa_assign(struct task_numa_env *env,
9433     {
9434     if (env->best_task)
9435     put_task_struct(env->best_task);
9436     - if (p)
9437     - get_task_struct(p);
9438    
9439     env->best_task = p;
9440     env->best_imp = imp;
9441     @@ -1260,20 +1258,30 @@ static void task_numa_compare(struct task_numa_env *env,
9442     long imp = env->p->numa_group ? groupimp : taskimp;
9443     long moveimp = imp;
9444     int dist = env->dist;
9445     + bool assigned = false;
9446    
9447     rcu_read_lock();
9448    
9449     raw_spin_lock_irq(&dst_rq->lock);
9450     cur = dst_rq->curr;
9451     /*
9452     - * No need to move the exiting task, and this ensures that ->curr
9453     - * wasn't reaped and thus get_task_struct() in task_numa_assign()
9454     - * is safe under RCU read lock.
9455     - * Note that rcu_read_lock() itself can't protect from the final
9456     - * put_task_struct() after the last schedule().
9457     + * No need to move the exiting task or idle task.
9458     */
9459     if ((cur->flags & PF_EXITING) || is_idle_task(cur))
9460     cur = NULL;
9461     + else {
9462     + /*
9463     + * The task_struct must be protected here to protect the
9464     + * p->numa_faults access in the task_weight since the
9465     + * numa_faults could already be freed in the following path:
9466     + * finish_task_switch()
9467     + * --> put_task_struct()
9468     + * --> __put_task_struct()
9469     + * --> task_numa_free()
9470     + */
9471     + get_task_struct(cur);
9472     + }
9473     +
9474     raw_spin_unlock_irq(&dst_rq->lock);
9475    
9476     /*
9477     @@ -1357,6 +1365,7 @@ balance:
9478     */
9479     if (!load_too_imbalanced(src_load, dst_load, env)) {
9480     imp = moveimp - 1;
9481     + put_task_struct(cur);
9482     cur = NULL;
9483     goto assign;
9484     }
9485     @@ -1382,9 +1391,16 @@ balance:
9486     env->dst_cpu = select_idle_sibling(env->p, env->dst_cpu);
9487    
9488     assign:
9489     + assigned = true;
9490     task_numa_assign(env, cur, imp);
9491     unlock:
9492     rcu_read_unlock();
9493     + /*
9494     + * If cur was never handed to task_numa_assign(), the reference
9495     + * taken above is no longer needed; drop it here.
9496     + */
9497     + if (cur && !assigned)
9498     + put_task_struct(cur);
9499     }
9500    
9501     static void task_numa_find_cpu(struct task_numa_env *env,
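
The scheduler fix pins dst_rq->curr with get_task_struct() while the runqueue lock is still held, then drops that reference on every path where the task is not handed on to task_numa_assign(). A stripped-down, single-threaded sketch of the get/put pairing with a toy refcount:

#include <stdio.h>

struct task { int refs; };

static void get_task(struct task *t) { t->refs++; }
static void put_task(struct task *t)
{
	if (--t->refs == 0)
		printf("freed\n");
}

int main(void)
{
	struct task curr = { .refs = 1 };
	struct task *cur = &curr;
	int assigned = 0;	/* set when ownership passes to best_task */

	/* lock held: pin cur before the lock is released */
	get_task(cur);
	/* ...lock dropped; cur's fields stay valid while we hold the ref... */

	if (!assigned)
		put_task(cur);	/* balance the get taken under the lock */

	printf("refs=%d\n", cur->refs);	/* back to 1 */
	return 0;
}
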
9502     diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
9503     index 1347882d131e..b98810d2f3b4 100644
9504     --- a/kernel/time/clocksource.c
9505     +++ b/kernel/time/clocksource.c
9506     @@ -323,13 +323,42 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
9507     /* cs is a watchdog. */
9508     if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
9509     cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
9510     + }
9511     + spin_unlock_irqrestore(&watchdog_lock, flags);
9512     +}
9513     +
9514     +static void clocksource_select_watchdog(bool fallback)
9515     +{
9516     + struct clocksource *cs, *old_wd;
9517     + unsigned long flags;
9518     +
9519     + spin_lock_irqsave(&watchdog_lock, flags);
9520     + /* save current watchdog */
9521     + old_wd = watchdog;
9522     + if (fallback)
9523     + watchdog = NULL;
9524     +
9525     + list_for_each_entry(cs, &clocksource_list, list) {
9526     + /* cs is a clocksource to be watched. */
9527     + if (cs->flags & CLOCK_SOURCE_MUST_VERIFY)
9528     + continue;
9529     +
9530     + /* Skip current if we were requested for a fallback. */
9531     + if (fallback && cs == old_wd)
9532     + continue;
9533     +
9534     /* Pick the best watchdog. */
9535     - if (!watchdog || cs->rating > watchdog->rating) {
9536     + if (!watchdog || cs->rating > watchdog->rating)
9537     watchdog = cs;
9538     - /* Reset watchdog cycles */
9539     - clocksource_reset_watchdog();
9540     - }
9541     }
9542     + /* If we failed to find a fallback restore the old one. */
9543     + if (!watchdog)
9544     + watchdog = old_wd;
9545     +
9546     + /* If we changed the watchdog we need to reset cycles. */
9547     + if (watchdog != old_wd)
9548     + clocksource_reset_watchdog();
9549     +
9550     /* Check if the watchdog timer needs to be started. */
9551     clocksource_start_watchdog();
9552     spin_unlock_irqrestore(&watchdog_lock, flags);
9553     @@ -404,6 +433,7 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
9554     cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
9555     }
9556    
9557     +static void clocksource_select_watchdog(bool fallback) { }
9558     static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
9559     static inline void clocksource_resume_watchdog(void) { }
9560     static inline int __clocksource_watchdog_kthread(void) { return 0; }
9561     @@ -736,6 +766,7 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
9562     clocksource_enqueue(cs);
9563     clocksource_enqueue_watchdog(cs);
9564     clocksource_select();
9565     + clocksource_select_watchdog(false);
9566     mutex_unlock(&clocksource_mutex);
9567     return 0;
9568     }
9569     @@ -758,6 +789,7 @@ void clocksource_change_rating(struct clocksource *cs, int rating)
9570     mutex_lock(&clocksource_mutex);
9571     __clocksource_change_rating(cs, rating);
9572     clocksource_select();
9573     + clocksource_select_watchdog(false);
9574     mutex_unlock(&clocksource_mutex);
9575     }
9576     EXPORT_SYMBOL(clocksource_change_rating);
9577     @@ -767,12 +799,12 @@ EXPORT_SYMBOL(clocksource_change_rating);
9578     */
9579     static int clocksource_unbind(struct clocksource *cs)
9580     {
9581     - /*
9582     - * I really can't convince myself to support this on hardware
9583     - * designed by lobotomized monkeys.
9584     - */
9585     - if (clocksource_is_watchdog(cs))
9586     - return -EBUSY;
9587     + if (clocksource_is_watchdog(cs)) {
9588     + /* Select and try to install a replacement watchdog. */
9589     + clocksource_select_watchdog(true);
9590     + if (clocksource_is_watchdog(cs))
9591     + return -EBUSY;
9592     + }
9593    
9594     if (cs == curr_clocksource) {
9595     /* Select and try to install a replacement clock source */
9596     diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
9597     index fa909f9fd559..17f7bcff1e02 100644
9598     --- a/kernel/time/hrtimer.c
9599     +++ b/kernel/time/hrtimer.c
9600     @@ -94,6 +94,9 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
9601     };
9602    
9603     static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
9604     + /* Make sure we catch unsupported clockids */
9605     + [0 ... MAX_CLOCKS - 1] = HRTIMER_MAX_CLOCK_BASES,
9606     +
9607     [CLOCK_REALTIME] = HRTIMER_BASE_REALTIME,
9608     [CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC,
9609     [CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME,
9610     @@ -102,7 +105,9 @@ static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
9611    
9612     static inline int hrtimer_clockid_to_base(clockid_t clock_id)
9613     {
9614     - return hrtimer_clock_to_base_table[clock_id];
9615     + int base = hrtimer_clock_to_base_table[clock_id];
9616     + BUG_ON(base == HRTIMER_MAX_CLOCK_BASES);
9617     + return base;
9618     }
9619    
9620     /*
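
The hrtimer table uses a GNU C range designator to pre-fill every slot with a sentinel, so any clockid without an explicit entry maps to an invalid base instead of silently aliasing entry 0. A compilable sketch of that idiom (a GCC/Clang extension, as in the kernel table above; the clockid values are illustrative):

#include <stdio.h>

#define MAX_CLOCKS 16
#define INVALID_BASE -1

static const int clock_to_base[MAX_CLOCKS] = {
	[0 ... MAX_CLOCKS - 1] = INVALID_BASE,	/* default for the gaps */
	[0] = 0,	/* CLOCK_REALTIME  -> base 0 (illustrative) */
	[1] = 1,	/* CLOCK_MONOTONIC -> base 1 */
};

int main(void)
{
	printf("%d %d\n", clock_to_base[1], clock_to_base[5]);	/* 1 -1 */
	return 0;
}
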
9621     diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
9622     index 149cc8086aea..ab861771e37f 100644
9623     --- a/kernel/time/ntp.c
9624     +++ b/kernel/time/ntp.c
9625     @@ -674,8 +674,24 @@ int ntp_validate_timex(struct timex *txc)
9626     return -EINVAL;
9627     }
9628    
9629     - if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME)))
9630     - return -EPERM;
9631     + if (txc->modes & ADJ_SETOFFSET) {
9632     + /* In order to inject time, you gotta be super-user! */
9633     + if (!capable(CAP_SYS_TIME))
9634     + return -EPERM;
9635     +
9636     + if (txc->modes & ADJ_NANO) {
9637     + struct timespec ts;
9638     +
9639     + ts.tv_sec = txc->time.tv_sec;
9640     + ts.tv_nsec = txc->time.tv_usec;
9641     + if (!timespec_inject_offset_valid(&ts))
9642     + return -EINVAL;
9643     +
9644     + } else {
9645     + if (!timeval_inject_offset_valid(&txc->time))
9646     + return -EINVAL;
9647     + }
9648     + }
9649    
9650     /*
9651     * Check for potential multiplication overflows that can
9652     diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
9653     index 99188ee5d9d0..4ff237dbc006 100644
9654     --- a/kernel/time/timekeeping.c
9655     +++ b/kernel/time/timekeeping.c
9656     @@ -383,7 +383,10 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
9657     do {
9658     seq = raw_read_seqcount_latch(&tkf->seq);
9659     tkr = tkf->base + (seq & 0x01);
9660     - now = ktime_to_ns(tkr->base) + timekeeping_get_ns(tkr);
9661     + now = ktime_to_ns(tkr->base);
9662     +
9663     + now += clocksource_delta(tkr->read(tkr->clock),
9664     + tkr->cycle_last, tkr->mask);
9665     } while (read_seqcount_retry(&tkf->seq, seq));
9666    
9667     return now;
9668     @@ -958,7 +961,7 @@ int timekeeping_inject_offset(struct timespec *ts)
9669     struct timespec64 ts64, tmp;
9670     int ret = 0;
9671    
9672     - if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
9673     + if (!timespec_inject_offset_valid(ts))
9674     return -EINVAL;
9675    
9676     ts64 = timespec_to_timespec64(*ts);
9677     diff --git a/kernel/time/timekeeping_debug.c b/kernel/time/timekeeping_debug.c
9678     index f6bd65236712..107310a6f36f 100644
9679     --- a/kernel/time/timekeeping_debug.c
9680     +++ b/kernel/time/timekeeping_debug.c
9681     @@ -23,7 +23,9 @@
9682    
9683     #include "timekeeping_internal.h"
9684    
9685     -static unsigned int sleep_time_bin[32] = {0};
9686     +#define NUM_BINS 32
9687     +
9688     +static unsigned int sleep_time_bin[NUM_BINS] = {0};
9689    
9690     static int tk_debug_show_sleep_time(struct seq_file *s, void *data)
9691     {
9692     @@ -69,6 +71,9 @@ late_initcall(tk_debug_sleep_time_init);
9693    
9694     void tk_debug_account_sleep_time(struct timespec64 *t)
9695     {
9696     - sleep_time_bin[fls(t->tv_sec)]++;
9697     + /* Cap bin index so we don't overflow the array */
9698     + int bin = min(fls(t->tv_sec), NUM_BINS-1);
9699     +
9700     + sleep_time_bin[bin]++;
9701     }
9702    
9703     diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
9704     index 2b3f46c049d4..554522934c44 100644
9705     --- a/lib/asn1_decoder.c
9706     +++ b/lib/asn1_decoder.c
9707     @@ -74,7 +74,7 @@ next_tag:
9708    
9709     /* Extract a tag from the data */
9710     tag = data[dp++];
9711     - if (tag == 0) {
9712     + if (tag == ASN1_EOC) {
9713     /* It appears to be an EOC. */
9714     if (data[dp++] != 0)
9715     goto invalid_eoc;
9716     @@ -96,10 +96,8 @@ next_tag:
9717    
9718     /* Extract the length */
9719     len = data[dp++];
9720     - if (len <= 0x7f) {
9721     - dp += len;
9722     - goto next_tag;
9723     - }
9724     + if (len <= 0x7f)
9725     + goto check_length;
9726    
9727     if (unlikely(len == ASN1_INDEFINITE_LENGTH)) {
9728     /* Indefinite length */
9729     @@ -110,14 +108,18 @@ next_tag:
9730     }
9731    
9732     n = len - 0x80;
9733     - if (unlikely(n > sizeof(size_t) - 1))
9734     + if (unlikely(n > sizeof(len) - 1))
9735     goto length_too_long;
9736     if (unlikely(n > datalen - dp))
9737     goto data_overrun_error;
9738     - for (len = 0; n > 0; n--) {
9739     + len = 0;
9740     + for (; n > 0; n--) {
9741     len <<= 8;
9742     len |= data[dp++];
9743     }
9744     +check_length:
9745     + if (len > datalen - dp)
9746     + goto data_overrun_error;
9747     dp += len;
9748     goto next_tag;
9749    
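
The asn1_decoder fix routes even short-form lengths through a bound check before skipping, so a tag can no longer claim more payload than the buffer actually holds. A self-contained DER length decoder in the same spirit; indefinite lengths (0x80) are simply rejected here rather than handled:

#include <stdint.h>
#include <stdio.h>

/* Decode a DER length at data[*dp]; return -1 on malformed/overrun input. */
static long der_len(const uint8_t *data, size_t datalen, size_t *dp)
{
	size_t len, n;

	if (*dp >= datalen)
		return -1;
	len = data[(*dp)++];
	if (len > 0x7f) {		/* long form: 0x80 | n length bytes */
		n = len - 0x80;
		if (n == 0 || n > sizeof(len) - 1 || n > datalen - *dp)
			return -1;
		for (len = 0; n > 0; n--) {
			len <<= 8;
			len |= data[(*dp)++];
		}
	}
	if (len > datalen - *dp)	/* the check_length fix: must fit */
		return -1;
	return (long)len;
}

int main(void)
{
	const uint8_t ok[]  = { 0x03, 'a', 'b', 'c' };
	const uint8_t bad[] = { 0x7f };	/* claims 127 bytes, has none */
	size_t dp = 0;

	printf("%ld\n", der_len(ok, sizeof(ok), &dp));		/* 3 */
	dp = 0;
	printf("%ld\n", der_len(bad, sizeof(bad), &dp));	/* -1 */
	return 0;
}
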
9750     diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
9751     index e00ff00e861c..e37dbf53e226 100644
9752     --- a/lib/mpi/mpicoder.c
9753     +++ b/lib/mpi/mpicoder.c
9754     @@ -367,7 +367,9 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
9755     buf_len = sgl->length;
9756     p2 = sg_virt(sgl);
9757    
9758     - for (i = a->nlimbs - 1; i >= 0; i--) {
9759     + for (i = a->nlimbs - 1 - lzeros / BYTES_PER_MPI_LIMB,
9760     + lzeros %= BYTES_PER_MPI_LIMB;
9761     + i >= 0; i--) {
9762     alimb = a->d[i];
9763     p = (u8 *)&alimb2;
9764     #if BYTES_PER_MPI_LIMB == 4
9765     @@ -388,17 +390,12 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
9766     #error please implement for this limb size.
9767     #endif
9768     if (lzeros > 0) {
9769     - if (lzeros >= sizeof(alimb)) {
9770     - p -= sizeof(alimb);
9771     - continue;
9772     - } else {
9773     - mpi_limb_t *limb1 = (void *)p - sizeof(alimb);
9774     - mpi_limb_t *limb2 = (void *)p - sizeof(alimb)
9775     - + lzeros;
9776     - *limb1 = *limb2;
9777     - p -= lzeros;
9778     - y = lzeros;
9779     - }
9780     + mpi_limb_t *limb1 = (void *)p - sizeof(alimb);
9781     + mpi_limb_t *limb2 = (void *)p - sizeof(alimb)
9782     + + lzeros;
9783     + *limb1 = *limb2;
9784     + p -= lzeros;
9785     + y = lzeros;
9786     lzeros -= sizeof(alimb);
9787     }
9788    
9789     diff --git a/mm/util.c b/mm/util.c
9790     index 9af1c12b310c..d5259b62f8d7 100644
9791     --- a/mm/util.c
9792     +++ b/mm/util.c
9793     @@ -199,36 +199,11 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
9794     }
9795    
9796     /* Check if the vma is being used as a stack by this task */
9797     -static int vm_is_stack_for_task(struct task_struct *t,
9798     - struct vm_area_struct *vma)
9799     +int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t)
9800     {
9801     return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
9802     }
9803    
9804     -/*
9805     - * Check if the vma is being used as a stack.
9806     - * If is_group is non-zero, check in the entire thread group or else
9807     - * just check in the current task. Returns the task_struct of the task
9808     - * that the vma is stack for. Must be called under rcu_read_lock().
9809     - */
9810     -struct task_struct *task_of_stack(struct task_struct *task,
9811     - struct vm_area_struct *vma, bool in_group)
9812     -{
9813     - if (vm_is_stack_for_task(task, vma))
9814     - return task;
9815     -
9816     - if (in_group) {
9817     - struct task_struct *t;
9818     -
9819     - for_each_thread(task, t) {
9820     - if (vm_is_stack_for_task(t, vma))
9821     - return t;
9822     - }
9823     - }
9824     -
9825     - return NULL;
9826     -}
9827     -
9828     #if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
9829     void arch_pick_mmap_layout(struct mm_struct *mm)
9830     {
9831     diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
9832     index 44e1632370dd..0b1ea5abcc04 100644
9833     --- a/net/ipv4/udp.c
9834     +++ b/net/ipv4/udp.c
9835     @@ -1275,6 +1275,7 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
9836     int peeked, off = 0;
9837     int err;
9838     int is_udplite = IS_UDPLITE(sk);
9839     + bool checksum_valid = false;
9840     bool slow;
9841    
9842     if (flags & MSG_ERRQUEUE)
9843     @@ -1300,11 +1301,12 @@ try_again:
9844     */
9845    
9846     if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
9847     - if (udp_lib_checksum_complete(skb))
9848     + checksum_valid = !udp_lib_checksum_complete(skb);
9849     + if (!checksum_valid)
9850     goto csum_copy_err;
9851     }
9852    
9853     - if (skb_csum_unnecessary(skb))
9854     + if (checksum_valid || skb_csum_unnecessary(skb))
9855     err = skb_copy_datagram_msg(skb, sizeof(struct udphdr),
9856     msg, copied);
9857     else {
9858     diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
9859     index 275af43306f9..e6092bd72ee2 100644
9860     --- a/net/ipv6/udp.c
9861     +++ b/net/ipv6/udp.c
9862     @@ -402,6 +402,7 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
9863     int peeked, off = 0;
9864     int err;
9865     int is_udplite = IS_UDPLITE(sk);
9866     + bool checksum_valid = false;
9867     int is_udp4;
9868     bool slow;
9869    
9870     @@ -433,11 +434,12 @@ try_again:
9871     */
9872    
9873     if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
9874     - if (udp_lib_checksum_complete(skb))
9875     + checksum_valid = !udp_lib_checksum_complete(skb);
9876     + if (!checksum_valid)
9877     goto csum_copy_err;
9878     }
9879    
9880     - if (skb_csum_unnecessary(skb))
9881     + if (checksum_valid || skb_csum_unnecessary(skb))
9882     err = skb_copy_datagram_msg(skb, sizeof(struct udphdr),
9883     msg, copied);
9884     else {
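
Both UDP hunks cache the result of the early checksum verification so the later copy step can take the plain-copy path instead of verifying a second time. A toy sketch of that caching, with the skb machinery reduced to booleans and a counter:

#include <stdbool.h>
#include <stdio.h>

static int verifications;

/* stands in for udp_lib_checksum_complete(); counts how often we verify */
static bool checksum_ok(void)
{
	verifications++;
	return true;
}

int main(void)
{
	bool checksum_valid = false;
	bool partial = true;		/* copied < ulen in udp_recvmsg() */
	bool hw_verified = false;	/* skb_csum_unnecessary() */

	if (partial) {
		checksum_valid = checksum_ok();
		if (!checksum_valid)
			return 1;	/* csum_copy_err path */
	}

	if (checksum_valid || hw_verified)
		puts("plain copy");		/* cached result reused */
	else
		puts("copy and verify");	/* would verify again */

	printf("verifications=%d\n", verifications);	/* 1, not 2 */
	return 0;
}
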
9885     diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
9886     index 25391fb25516..2fc6ca9d1286 100644
9887     --- a/net/netfilter/x_tables.c
9888     +++ b/net/netfilter/x_tables.c
9889     @@ -897,6 +897,12 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
9890     struct xt_table_info *info = NULL;
9891     size_t sz = sizeof(*info) + size;
9892    
9893     + if (sz < sizeof(*info))
9894     + return NULL;
9895     +
9896     + if (sz < sizeof(*info))
9897     + return NULL;
9898     +
9899     /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
9900     if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
9901     return NULL;
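
The x_tables guard relies on unsigned wrap-around: if sizeof(*info) + size overflows, the sum comes out smaller than the header alone. A small demonstration with an illustrative struct size:

#include <stdint.h>
#include <stdio.h>

struct info { char hdr[64]; };

static int alloc_ok(size_t size)
{
	size_t sz = sizeof(struct info) + size;

	if (sz < sizeof(struct info))	/* wrapped around: reject */
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", alloc_ok(1024));			/* 1 */
	printf("%d\n", alloc_ok(SIZE_MAX - 8));		/* 0: sum wraps */
	return 0;
}
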
9902     diff --git a/net/rds/recv.c b/net/rds/recv.c
9903     index a00462b0d01d..0514af3ab378 100644
9904     --- a/net/rds/recv.c
9905     +++ b/net/rds/recv.c
9906     @@ -545,5 +545,7 @@ void rds_inc_info_copy(struct rds_incoming *inc,
9907     minfo.fport = inc->i_hdr.h_dport;
9908     }
9909    
9910     + minfo.flags = 0;
9911     +
9912     rds_info_copy(iter, &minfo, sizeof(minfo));
9913     }
9914     diff --git a/net/sysctl_net.c b/net/sysctl_net.c
9915     index ed98c1fc3de1..46a71c701e7c 100644
9916     --- a/net/sysctl_net.c
9917     +++ b/net/sysctl_net.c
9918     @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ctl_table_header *head,
9919     kgid_t root_gid = make_kgid(net->user_ns, 0);
9920    
9921     /* Allow network administrator to have same access as root. */
9922     - if (ns_capable(net->user_ns, CAP_NET_ADMIN) ||
9923     + if (ns_capable_noaudit(net->user_ns, CAP_NET_ADMIN) ||
9924     uid_eq(root_uid, current_euid())) {
9925     int mode = (table->mode >> 6) & 7;
9926     return (mode << 6) | (mode << 3) | mode;
9927     diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
9928     index 2ed732bfe94b..a0c90572d0e5 100644
9929     --- a/net/tipc/netlink_compat.c
9930     +++ b/net/tipc/netlink_compat.c
9931     @@ -574,7 +574,8 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
9932    
9933     link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
9934     link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
9935     - strcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME]));
9936     + nla_strlcpy(link_info.str, link[TIPC_NLA_LINK_NAME],
9937     + TIPC_MAX_LINK_NAME);
9938    
9939     return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO,
9940     &link_info, sizeof(link_info));
9941     diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
9942     index 69ee2eeef968..f9ff73a8d815 100644
9943     --- a/net/tipc/subscr.c
9944     +++ b/net/tipc/subscr.c
9945     @@ -296,7 +296,8 @@ static void tipc_subscrb_rcv_cb(struct net *net, int conid,
9946     if (tipc_subscrp_create(net, (struct tipc_subscr *)buf, subscrb, &sub))
9947     return tipc_conn_terminate(tn->topsrv, subscrb->conid);
9948    
9949     - tipc_nametbl_subscribe(sub);
9950     + if (sub)
9951     + tipc_nametbl_subscribe(sub);
9952     }
9953    
9954     /* Handle one request to establish a new subscriber */
9955     diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
9956     index 795437b10082..b450a27588c8 100644
9957     --- a/sound/core/rawmidi.c
9958     +++ b/sound/core/rawmidi.c
9959     @@ -1633,11 +1633,13 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
9960     return -EBUSY;
9961     }
9962     list_add_tail(&rmidi->list, &snd_rawmidi_devices);
9963     + mutex_unlock(&register_mutex);
9964     err = snd_register_device(SNDRV_DEVICE_TYPE_RAWMIDI,
9965     rmidi->card, rmidi->device,
9966     &snd_rawmidi_f_ops, rmidi, &rmidi->dev);
9967     if (err < 0) {
9968     rmidi_err(rmidi, "unable to register\n");
9969     + mutex_lock(&register_mutex);
9970     list_del(&rmidi->list);
9971     mutex_unlock(&register_mutex);
9972     return err;
9973     @@ -1645,6 +1647,7 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
9974     if (rmidi->ops && rmidi->ops->dev_register &&
9975     (err = rmidi->ops->dev_register(rmidi)) < 0) {
9976     snd_unregister_device(&rmidi->dev);
9977     + mutex_lock(&register_mutex);
9978     list_del(&rmidi->list);
9979     mutex_unlock(&register_mutex);
9980     return err;
9981     @@ -1677,7 +1680,6 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
9982     }
9983     }
9984     #endif /* CONFIG_SND_OSSEMUL */
9985     - mutex_unlock(&register_mutex);
9986     sprintf(name, "midi%d", rmidi->device);
9987     entry = snd_info_create_card_entry(rmidi->card, name, rmidi->card->proc_root);
9988     if (entry) {
9989     diff --git a/sound/core/timer.c b/sound/core/timer.c
9990     index 637d034bb084..ae4ea2e2e7fe 100644
9991     --- a/sound/core/timer.c
9992     +++ b/sound/core/timer.c
9993     @@ -296,8 +296,21 @@ int snd_timer_open(struct snd_timer_instance **ti,
9994     get_device(&timer->card->card_dev);
9995     timeri->slave_class = tid->dev_sclass;
9996     timeri->slave_id = slave_id;
9997     - if (list_empty(&timer->open_list_head) && timer->hw.open)
9998     - timer->hw.open(timer);
9999     +
10000     + if (list_empty(&timer->open_list_head) && timer->hw.open) {
10001     + int err = timer->hw.open(timer);
10002     + if (err) {
10003     + kfree(timeri->owner);
10004     + kfree(timeri);
10005     +
10006     + if (timer->card)
10007     + put_device(&timer->card->card_dev);
10008     + module_put(timer->module);
10009     + mutex_unlock(&register_mutex);
10010     + return err;
10011     + }
10012     + }
10013     +
10014     list_add_tail(&timeri->open_list, &timer->open_list_head);
10015     snd_timer_check_master(timeri);
10016     mutex_unlock(&register_mutex);
10017     @@ -837,6 +850,7 @@ int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid,
10018     timer->tmr_subdevice = tid->subdevice;
10019     if (id)
10020     strlcpy(timer->id, id, sizeof(timer->id));
10021     + timer->sticks = 1;
10022     INIT_LIST_HEAD(&timer->device_list);
10023     INIT_LIST_HEAD(&timer->open_list_head);
10024     INIT_LIST_HEAD(&timer->active_list_head);
10025     @@ -1967,6 +1981,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
10026     tu->qused--;
10027     spin_unlock_irq(&tu->qlock);
10028    
10029     + mutex_lock(&tu->ioctl_lock);
10030     if (tu->tread) {
10031     if (copy_to_user(buffer, &tu->tqueue[qhead],
10032     sizeof(struct snd_timer_tread)))
10033     @@ -1976,6 +1991,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
10034     sizeof(struct snd_timer_read)))
10035     err = -EFAULT;
10036     }
10037     + mutex_unlock(&tu->ioctl_lock);
10038    
10039     spin_lock_irq(&tu->qlock);
10040     if (err < 0)
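
Two distinct fixes land in timer.c above: snd_timer_open now unwinds everything it acquired when the hardware open callback fails, and the user-space copy in snd_timer_user_read is serialized against the ioctls that can reallocate the queue. A small sketch of the unwind idiom, rolling back in reverse order of acquisition (resource names and error values are hypothetical stand-ins):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct instance { char *owner; };

static int hw_open(int fail) { return fail ? -5 : 0; }	/* -EIO stand-in */

static int timer_open(int fail, struct instance **out)
{
	struct instance *ti = calloc(1, sizeof(*ti));
	if (!ti)
		return -12;		/* -ENOMEM stand-in */
	ti->owner = strdup("owner");
	if (!ti->owner) {
		free(ti);
		return -12;
	}

	if (hw_open(fail)) {
		/* Unwind in reverse order of acquisition, as the patch
		 * frees timeri->owner and timeri before dropping the
		 * device and module references. */
		free(ti->owner);
		free(ti);
		return -5;
	}
	*out = ti;
	return 0;
}

int main(void)
{
	struct instance *ti = NULL;

	printf("fail path: %d\n", timer_open(1, &ti));
	if (timer_open(0, &ti) == 0) {
		printf("open ok\n");
		free(ti->owner);
		free(ti);
	}
	return 0;
}
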
10041     diff --git a/sound/firewire/fireworks/fireworks.h b/sound/firewire/fireworks/fireworks.h
10042     index c7cb7deafe48..2c316a9bc7f6 100644
10043     --- a/sound/firewire/fireworks/fireworks.h
10044     +++ b/sound/firewire/fireworks/fireworks.h
10045     @@ -106,7 +106,6 @@ struct snd_efw {
10046     u8 *resp_buf;
10047     u8 *pull_ptr;
10048     u8 *push_ptr;
10049     - unsigned int resp_queues;
10050     };
10051    
10052     int snd_efw_transaction_cmd(struct fw_unit *unit,
10053     diff --git a/sound/firewire/fireworks/fireworks_hwdep.c b/sound/firewire/fireworks/fireworks_hwdep.c
10054     index 33df8655fe81..2e1d9a23920c 100644
10055     --- a/sound/firewire/fireworks/fireworks_hwdep.c
10056     +++ b/sound/firewire/fireworks/fireworks_hwdep.c
10057     @@ -25,6 +25,7 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
10058     {
10059     unsigned int length, till_end, type;
10060     struct snd_efw_transaction *t;
10061     + u8 *pull_ptr;
10062     long count = 0;
10063    
10064     if (remained < sizeof(type) + sizeof(struct snd_efw_transaction))
10065     @@ -38,8 +39,17 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
10066     buf += sizeof(type);
10067    
10068     /* write into buffer as many responses as possible */
10069     - while (efw->resp_queues > 0) {
10070     - t = (struct snd_efw_transaction *)(efw->pull_ptr);
10071     + spin_lock_irq(&efw->lock);
10072     +
10073     + /*
10074     + * If another task reaches here while this task is accessing user
10075     + * space, it picks up the current position in the buffer and can
10076     + * read the same series of responses.
10077     + */
10078     + pull_ptr = efw->pull_ptr;
10079     +
10080     + while (efw->push_ptr != pull_ptr) {
10081     + t = (struct snd_efw_transaction *)(pull_ptr);
10082     length = be32_to_cpu(t->length) * sizeof(__be32);
10083    
10084     /* confirm enough space for this response */
10085     @@ -49,26 +59,39 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
10086     /* copy from ring buffer to user buffer */
10087     while (length > 0) {
10088     till_end = snd_efw_resp_buf_size -
10089     - (unsigned int)(efw->pull_ptr - efw->resp_buf);
10090     + (unsigned int)(pull_ptr - efw->resp_buf);
10091     till_end = min_t(unsigned int, length, till_end);
10092    
10093     - if (copy_to_user(buf, efw->pull_ptr, till_end))
10094     + spin_unlock_irq(&efw->lock);
10095     +
10096     + if (copy_to_user(buf, pull_ptr, till_end))
10097     return -EFAULT;
10098    
10099     - efw->pull_ptr += till_end;
10100     - if (efw->pull_ptr >= efw->resp_buf +
10101     - snd_efw_resp_buf_size)
10102     - efw->pull_ptr -= snd_efw_resp_buf_size;
10103     + spin_lock_irq(&efw->lock);
10104     +
10105     + pull_ptr += till_end;
10106     + if (pull_ptr >= efw->resp_buf + snd_efw_resp_buf_size)
10107     + pull_ptr -= snd_efw_resp_buf_size;
10108    
10109     length -= till_end;
10110     buf += till_end;
10111     count += till_end;
10112     remained -= till_end;
10113     }
10114     -
10115     - efw->resp_queues--;
10116     }
10117    
10118     + /*
10119     + * All tasks can read from the buffer nearly simultaneously, but the
10120     + * final position differs per task, depending on the length of the
10121     + * given buffer. For simplicity, the buffer position is set by the
10122     + * latest task. A listening application should allow only one thread
10123     + * to read from the buffer; otherwise each task may read a different
10124     + * sequence of responses, depending on its buffer length.
10125     + */
10126     + efw->pull_ptr = pull_ptr;
10127     +
10128     + spin_unlock_irq(&efw->lock);
10129     +
10130     return count;
10131     }
10132    
10133     @@ -76,14 +99,17 @@ static long
10134     hwdep_read_locked(struct snd_efw *efw, char __user *buf, long count,
10135     loff_t *offset)
10136     {
10137     - union snd_firewire_event event;
10138     + union snd_firewire_event event = {
10139     + .lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS,
10140     + };
10141    
10142     - memset(&event, 0, sizeof(event));
10143     + spin_lock_irq(&efw->lock);
10144    
10145     - event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
10146     event.lock_status.status = (efw->dev_lock_count > 0);
10147     efw->dev_lock_changed = false;
10148    
10149     + spin_unlock_irq(&efw->lock);
10150     +
10151     count = min_t(long, count, sizeof(event.lock_status));
10152    
10153     if (copy_to_user(buf, &event, count))
10154     @@ -98,10 +124,15 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
10155     {
10156     struct snd_efw *efw = hwdep->private_data;
10157     DEFINE_WAIT(wait);
10158     + bool dev_lock_changed;
10159     + bool queued;
10160    
10161     spin_lock_irq(&efw->lock);
10162    
10163     - while ((!efw->dev_lock_changed) && (efw->resp_queues == 0)) {
10164     + dev_lock_changed = efw->dev_lock_changed;
10165     + queued = efw->push_ptr != efw->pull_ptr;
10166     +
10167     + while (!dev_lock_changed && !queued) {
10168     prepare_to_wait(&efw->hwdep_wait, &wait, TASK_INTERRUPTIBLE);
10169     spin_unlock_irq(&efw->lock);
10170     schedule();
10171     @@ -109,15 +140,17 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
10172     if (signal_pending(current))
10173     return -ERESTARTSYS;
10174     spin_lock_irq(&efw->lock);
10175     + dev_lock_changed = efw->dev_lock_changed;
10176     + queued = efw->push_ptr != efw->pull_ptr;
10177     }
10178    
10179     - if (efw->dev_lock_changed)
10180     + spin_unlock_irq(&efw->lock);
10181     +
10182     + if (dev_lock_changed)
10183     count = hwdep_read_locked(efw, buf, count, offset);
10184     - else if (efw->resp_queues > 0)
10185     + else if (queued)
10186     count = hwdep_read_resp_buf(efw, buf, count, offset);
10187    
10188     - spin_unlock_irq(&efw->lock);
10189     -
10190     return count;
10191     }
10192    
10193     @@ -160,7 +193,7 @@ hwdep_poll(struct snd_hwdep *hwdep, struct file *file, poll_table *wait)
10194     poll_wait(file, &efw->hwdep_wait, wait);
10195    
10196     spin_lock_irq(&efw->lock);
10197     - if (efw->dev_lock_changed || (efw->resp_queues > 0))
10198     + if (efw->dev_lock_changed || efw->pull_ptr != efw->push_ptr)
10199     events = POLLIN | POLLRDNORM;
10200     else
10201     events = 0;
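
The rework above is the heart of this run of fireworks fixes: copy_to_user may sleep, so it must never run under efw->lock. The reader therefore walks the ring buffer through a local cursor, drops the spinlock around each copy, and commits the cursor back to efw->pull_ptr only once at the end. A userspace model of that pattern (a pthread mutex standing in for the spinlock, memcpy-style assignment for copy_to_user; sizes arbitrary):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define RING_SIZE 16

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static char ring[RING_SIZE];
static char *push_ptr = ring, *pull_ptr = ring;

/* Read everything queued, copying with the lock dropped and
 * committing the consumer position only once at the end. */
static size_t ring_read(char *dst, size_t room)
{
	size_t count = 0;
	char *cur;

	pthread_mutex_lock(&lock);
	cur = pull_ptr;				/* local cursor */
	while (cur != push_ptr && count < room) {
		char c = *cur;

		pthread_mutex_unlock(&lock);	/* never copy under the lock */
		dst[count++] = c;		/* copy_to_user() stand-in */
		pthread_mutex_lock(&lock);

		if (++cur == ring + RING_SIZE)
			cur = ring;		/* wrap around */
	}
	pull_ptr = cur;				/* commit once */
	pthread_mutex_unlock(&lock);
	return count;
}

int main(void)
{
	char out[RING_SIZE];

	memcpy(ring, "abc", 3);
	push_ptr = ring + 3;
	printf("read %zu bytes\n", ring_read(out, sizeof(out)));
	return 0;
}
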
10202     diff --git a/sound/firewire/fireworks/fireworks_proc.c b/sound/firewire/fireworks/fireworks_proc.c
10203     index 0639dcb13f7d..beb0a0ffee57 100644
10204     --- a/sound/firewire/fireworks/fireworks_proc.c
10205     +++ b/sound/firewire/fireworks/fireworks_proc.c
10206     @@ -188,8 +188,8 @@ proc_read_queues_state(struct snd_info_entry *entry,
10207     else
10208     consumed = (unsigned int)(efw->push_ptr - efw->pull_ptr);
10209    
10210     - snd_iprintf(buffer, "%d %d/%d\n",
10211     - efw->resp_queues, consumed, snd_efw_resp_buf_size);
10212     + snd_iprintf(buffer, "%d/%d\n",
10213     + consumed, snd_efw_resp_buf_size);
10214     }
10215    
10216     static void
10217     diff --git a/sound/firewire/fireworks/fireworks_transaction.c b/sound/firewire/fireworks/fireworks_transaction.c
10218     index f550808d1784..36a08ba51ec7 100644
10219     --- a/sound/firewire/fireworks/fireworks_transaction.c
10220     +++ b/sound/firewire/fireworks/fireworks_transaction.c
10221     @@ -121,11 +121,11 @@ copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode)
10222     size_t capacity, till_end;
10223     struct snd_efw_transaction *t;
10224    
10225     - spin_lock_irq(&efw->lock);
10226     -
10227     t = (struct snd_efw_transaction *)data;
10228     length = min_t(size_t, be32_to_cpu(t->length) * sizeof(u32), length);
10229    
10230     + spin_lock_irq(&efw->lock);
10231     +
10232     if (efw->push_ptr < efw->pull_ptr)
10233     capacity = (unsigned int)(efw->pull_ptr - efw->push_ptr);
10234     else
10235     @@ -155,7 +155,6 @@ copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode)
10236     }
10237    
10238     /* for hwdep */
10239     - efw->resp_queues++;
10240     wake_up(&efw->hwdep_wait);
10241    
10242     *rcode = RCODE_COMPLETE;
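
The transaction-side hunk above simply narrows the critical section: the response length is derived purely from the incoming data, which nothing else mutates, so it can be computed before the spinlock is taken and only the ring-buffer state is touched under the lock. The shape of the transformation, in miniature (names hypothetical):

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static size_t capacity = 64;

static size_t min_size(size_t a, size_t b) { return a < b ? a : b; }

static size_t enqueue(const unsigned char *data, size_t length)
{
	/* Derived only from the caller's data: safe to do unlocked. */
	size_t want = min_size((size_t)data[0], length);

	pthread_mutex_lock(&lock);	/* lock only around shared state */
	want = min_size(want, capacity);
	capacity -= want;
	pthread_mutex_unlock(&lock);
	return want;
}

int main(void)
{
	unsigned char msg[8] = { 6 };

	printf("queued %zu bytes\n", enqueue(msg, sizeof(msg)));
	return 0;
}
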
10243     diff --git a/sound/firewire/tascam/tascam-hwdep.c b/sound/firewire/tascam/tascam-hwdep.c
10244     index 131267c3a042..106406cbfaa3 100644
10245     --- a/sound/firewire/tascam/tascam-hwdep.c
10246     +++ b/sound/firewire/tascam/tascam-hwdep.c
10247     @@ -16,31 +16,14 @@
10248    
10249     #include "tascam.h"
10250    
10251     -static long hwdep_read_locked(struct snd_tscm *tscm, char __user *buf,
10252     - long count)
10253     -{
10254     - union snd_firewire_event event;
10255     -
10256     - memset(&event, 0, sizeof(event));
10257     -
10258     - event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
10259     - event.lock_status.status = (tscm->dev_lock_count > 0);
10260     - tscm->dev_lock_changed = false;
10261     -
10262     - count = min_t(long, count, sizeof(event.lock_status));
10263     -
10264     - if (copy_to_user(buf, &event, count))
10265     - return -EFAULT;
10266     -
10267     - return count;
10268     -}
10269     -
10270     static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
10271     loff_t *offset)
10272     {
10273     struct snd_tscm *tscm = hwdep->private_data;
10274     DEFINE_WAIT(wait);
10275     - union snd_firewire_event event;
10276     + union snd_firewire_event event = {
10277     + .lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS,
10278     + };
10279    
10280     spin_lock_irq(&tscm->lock);
10281    
10282     @@ -54,10 +37,16 @@ static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
10283     spin_lock_irq(&tscm->lock);
10284     }
10285    
10286     - memset(&event, 0, sizeof(event));
10287     - count = hwdep_read_locked(tscm, buf, count);
10288     + event.lock_status.status = (tscm->dev_lock_count > 0);
10289     + tscm->dev_lock_changed = false;
10290     +
10291     spin_unlock_irq(&tscm->lock);
10292    
10293     + count = min_t(long, count, sizeof(event.lock_status));
10294     +
10295     + if (copy_to_user(buf, &event, count))
10296     + return -EFAULT;
10297     +
10298     return count;
10299     }
10300    
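
The tascam change follows the same theme as the fireworks fixes: the lock-status snapshot is taken under tscm->lock, the lock is dropped, and only then does copy_to_user run. A userspace sketch of snapshotting shared state before copying it out (illustrative names; memcpy stands in for copy_to_user):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int dev_lock_count;
static bool dev_lock_changed = true;

struct lock_event { bool locked; };

static size_t read_event(char *ubuf, size_t count)
{
	struct lock_event ev;

	pthread_mutex_lock(&lock);
	ev.locked = dev_lock_count > 0;	/* snapshot under the lock */
	dev_lock_changed = false;
	pthread_mutex_unlock(&lock);

	/* The (possibly sleeping) copy happens after unlock. */
	if (count > sizeof(ev))
		count = sizeof(ev);
	memcpy(ubuf, &ev, count);	/* copy_to_user() stand-in */
	return count;
}

int main(void)
{
	char buf[16];

	printf("copied %zu bytes\n", read_event(buf, sizeof(buf)));
	return 0;
}
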
10301     diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
10302     index 12f7f6fdae4d..d4671973d889 100644
10303     --- a/sound/pci/hda/hda_intel.c
10304     +++ b/sound/pci/hda/hda_intel.c
10305     @@ -2366,6 +2366,10 @@ static const struct pci_device_id azx_ids[] = {
10306     .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
10307     { PCI_DEVICE(0x1002, 0xaae8),
10308     .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
10309     + { PCI_DEVICE(0x1002, 0xaae0),
10310     + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
10311     + { PCI_DEVICE(0x1002, 0xaaf0),
10312     + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
10313     /* VIA VT8251/VT8237A */
10314     { PCI_DEVICE(0x1106, 0x3288),
10315     .driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA },
10316     diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
10317     index f7bcd8dbac14..a8045b8a2a18 100644
10318     --- a/sound/pci/hda/patch_hdmi.c
10319     +++ b/sound/pci/hda/patch_hdmi.c
10320     @@ -51,8 +51,10 @@ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
10321     #define is_broadwell(codec) ((codec)->core.vendor_id == 0x80862808)
10322     #define is_skylake(codec) ((codec)->core.vendor_id == 0x80862809)
10323     #define is_broxton(codec) ((codec)->core.vendor_id == 0x8086280a)
10324     +#define is_kabylake(codec) ((codec)->core.vendor_id == 0x8086280b)
10325     #define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec) \
10326     - || is_skylake(codec) || is_broxton(codec))
10327     + || is_skylake(codec) || is_broxton(codec) \
10328     + || is_kabylake(codec))
10329    
10330     #define is_valleyview(codec) ((codec)->core.vendor_id == 0x80862882)
10331     #define is_cherryview(codec) ((codec)->core.vendor_id == 0x80862883)
10332     @@ -3584,6 +3586,7 @@ HDA_CODEC_ENTRY(0x80862807, "Haswell HDMI", patch_generic_hdmi),
10333     HDA_CODEC_ENTRY(0x80862808, "Broadwell HDMI", patch_generic_hdmi),
10334     HDA_CODEC_ENTRY(0x80862809, "Skylake HDMI", patch_generic_hdmi),
10335     HDA_CODEC_ENTRY(0x8086280a, "Broxton HDMI", patch_generic_hdmi),
10336     +HDA_CODEC_ENTRY(0x8086280b, "Kabylake HDMI", patch_generic_hdmi),
10337     HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi),
10338     HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI", patch_generic_hdmi),
10339     HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI", patch_generic_hdmi),
10340     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
10341     index f25479ba3981..eaee626ab185 100644
10342     --- a/sound/pci/hda/patch_realtek.c
10343     +++ b/sound/pci/hda/patch_realtek.c
10344     @@ -4840,6 +4840,7 @@ enum {
10345     ALC221_FIXUP_HP_FRONT_MIC,
10346     ALC292_FIXUP_TPT460,
10347     ALC298_FIXUP_SPK_VOLUME,
10348     + ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER,
10349     };
10350    
10351     static const struct hda_fixup alc269_fixups[] = {
10352     @@ -5501,6 +5502,15 @@ static const struct hda_fixup alc269_fixups[] = {
10353     .chained = true,
10354     .chain_id = ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
10355     },
10356     + [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
10357     + .type = HDA_FIXUP_PINS,
10358     + .v.pins = (const struct hda_pintbl[]) {
10359     + { 0x1b, 0x90170151 },
10360     + { }
10361     + },
10362     + .chained = true,
10363     + .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
10364     + },
10365     };
10366    
10367     static const struct snd_pci_quirk alc269_fixup_tbl[] = {
10368     @@ -5545,6 +5555,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
10369     SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
10370     SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
10371     SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
10372     + SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
10373     SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
10374     SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
10375     SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
10376     @@ -5879,6 +5890,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
10377     {0x12, 0x90a60170},
10378     {0x14, 0x90170120},
10379     {0x21, 0x02211030}),
10380     + SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell Inspiron 5468", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
10381     + {0x12, 0x90a60180},
10382     + {0x14, 0x90170120},
10383     + {0x21, 0x02211030}),
10384     SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
10385     ALC256_STANDARD_PINS),
10386     SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
10387     diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
10388     index ba8def5665c4..6726143c7fc5 100644
10389     --- a/sound/soc/atmel/atmel_ssc_dai.c
10390     +++ b/sound/soc/atmel/atmel_ssc_dai.c
10391     @@ -298,8 +298,9 @@ static int atmel_ssc_startup(struct snd_pcm_substream *substream,
10392     clk_enable(ssc_p->ssc->clk);
10393     ssc_p->mck_rate = clk_get_rate(ssc_p->ssc->clk);
10394    
10395     - /* Reset the SSC to keep it at a clean status */
10396     - ssc_writel(ssc_p->ssc->regs, CR, SSC_BIT(CR_SWRST));
10397     + /* Reset the SSC, unless already initialized, to keep it in a clean state */
10398     + if (!ssc_p->initialized)
10399     + ssc_writel(ssc_p->ssc->regs, CR, SSC_BIT(CR_SWRST));
10400    
10401     if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
10402     dir = 0;
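
The guard above makes startup idempotent with respect to a live stream: the soft reset is issued only the first time through, so opening a second substream no longer clobbers one that is already running. In miniature (names hypothetical; the real driver tracks ssc_p->initialized elsewhere):

#include <stdbool.h>
#include <stdio.h>

static bool initialized;

static void controller_reset(void) { puts("soft reset"); }

static void stream_startup(void)
{
	/* Reset only before the first stream; a second open must not
	 * disturb hardware that is already configured and running. */
	if (!initialized)
		controller_reset();
	initialized = true;
}

int main(void)
{
	stream_startup();	/* resets */
	stream_startup();	/* leaves the running stream alone */
	return 0;
}
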
10403     diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
10404     index a3e1252ce242..3039e907f1f8 100644
10405     --- a/sound/usb/quirks.c
10406     +++ b/sound/usb/quirks.c
10407     @@ -1142,6 +1142,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
10408     case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
10409     case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */
10410     case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
10411     + case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
10412     case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */
10413     case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
10414     case USB_ID(0x1de7, 0x0114): /* Phoenix Audio MT202pcs */
10415     diff --git a/tools/hv/hv_fcopy_daemon.c b/tools/hv/hv_fcopy_daemon.c
10416     index 5480e4e424eb..f1d742682317 100644
10417     --- a/tools/hv/hv_fcopy_daemon.c
10418     +++ b/tools/hv/hv_fcopy_daemon.c
10419     @@ -37,12 +37,14 @@
10420    
10421     static int target_fd;
10422     static char target_fname[W_MAX_PATH];
10423     +static unsigned long long filesize;
10424    
10425     static int hv_start_fcopy(struct hv_start_fcopy *smsg)
10426     {
10427     int error = HV_E_FAIL;
10428     char *q, *p;
10429    
10430     + filesize = 0;
10431     p = (char *)smsg->path_name;
10432     snprintf(target_fname, sizeof(target_fname), "%s/%s",
10433     (char *)smsg->path_name, (char *)smsg->file_name);
10434     @@ -98,14 +100,26 @@ done:
10435     static int hv_copy_data(struct hv_do_fcopy *cpmsg)
10436     {
10437     ssize_t bytes_written;
10438     + int ret = 0;
10439    
10440     bytes_written = pwrite(target_fd, cpmsg->data, cpmsg->size,
10441     cpmsg->offset);
10442    
10443     - if (bytes_written != cpmsg->size)
10444     - return HV_E_FAIL;
10445     + filesize += cpmsg->size;
10446     + if (bytes_written != cpmsg->size) {
10447     + switch (errno) {
10448     + case ENOSPC:
10449     + ret = HV_ERROR_DISK_FULL;
10450     + break;
10451     + default:
10452     + ret = HV_E_FAIL;
10453     + break;
10454     + }
10455     + syslog(LOG_ERR, "pwrite failed to write %llu bytes: %ld (%s)",
10456     + filesize, (long)bytes_written, strerror(errno));
10457     + }
10458    
10459     - return 0;
10460     + return ret;
10461     }
10462    
10463     static int hv_copy_finished(void)
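
The rewritten error path in the fcopy daemon distinguishes a full disk from other write failures so the host can surface a specific error instead of a generic one. A userspace sketch of the pwrite/errno mapping (the error constants are stand-ins for the HV_* protocol codes):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

enum { OK = 0, ERR_DISK_FULL = 1, ERR_FAIL = 2 };	/* HV_* stand-ins */

static int write_chunk(int fd, const void *data, size_t size, off_t offset)
{
	ssize_t written = pwrite(fd, data, size, offset);

	if (written == (ssize_t)size)
		return OK;

	/* Map errno to a protocol error, as the daemon now does. */
	switch (errno) {
	case ENOSPC:
		return ERR_DISK_FULL;
	default:
		fprintf(stderr, "pwrite: %s\n", strerror(errno));
		return ERR_FAIL;
	}
}

int main(void)
{
	/* fd -1 forces the failure path (EBADF) for demonstration. */
	printf("status=%d\n", write_chunk(-1, "x", 1, 0));
	return 0;
}
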