Magellan Linux

Annotation of /trunk/kernel-magellan/patches-4.4/0104-4.4.5-all-fixes.patch

Revision 2772
Thu Mar 10 14:12:47 2016 UTC by niro
File size: 118843 byte(s)
-linux-4.4.5
1 niro 2772 diff --git a/Makefile b/Makefile
2     index 344bc6f27ea1..d13322ade3a0 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 4
7     PATCHLEVEL = 4
8     -SUBLEVEL = 4
9     +SUBLEVEL = 5
10     EXTRAVERSION =
11     NAME = Blurry Fish Butt
12    
13     diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
14     index 96e935bbc38c..3705fc2921c2 100644
15     --- a/arch/arm/kvm/guest.c
16     +++ b/arch/arm/kvm/guest.c
17     @@ -155,7 +155,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
18     u64 val;
19    
20     val = kvm_arm_timer_get_reg(vcpu, reg->id);
21     - return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
22     + return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
23     }
24    
25     static unsigned long num_core_regs(void)
26     diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
27     index 63f52b55defe..fc9f7ef2f4ab 100644
28     --- a/arch/arm64/include/asm/pgtable.h
29     +++ b/arch/arm64/include/asm/pgtable.h
30     @@ -34,13 +34,13 @@
31     /*
32     * VMALLOC and SPARSEMEM_VMEMMAP ranges.
33     *
34     - * VMEMAP_SIZE: allows the whole VA space to be covered by a struct page array
35     + * VMEMAP_SIZE: allows the whole linear region to be covered by a struct page array
36     * (rounded up to PUD_SIZE).
37     * VMALLOC_START: beginning of the kernel VA space
38     * VMALLOC_END: extends to the available space below vmmemmap, PCI I/O space,
39     * fixed mappings and modules
40     */
41     -#define VMEMMAP_SIZE ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)
42     +#define VMEMMAP_SIZE ALIGN((1UL << (VA_BITS - PAGE_SHIFT - 1)) * sizeof(struct page), PUD_SIZE)
43    
44     #ifndef CONFIG_KASAN
45     #define VMALLOC_START (VA_START)
46     @@ -51,7 +51,8 @@
47    
48     #define VMALLOC_END (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
49    
50     -#define vmemmap ((struct page *)(VMALLOC_END + SZ_64K))
51     +#define VMEMMAP_START (VMALLOC_END + SZ_64K)
52     +#define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
53    
54     #define FIRST_USER_ADDRESS 0UL
55    
56     diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
57     index d250160d32bc..3039f080e2d5 100644
58     --- a/arch/arm64/kvm/guest.c
59     +++ b/arch/arm64/kvm/guest.c
60     @@ -186,7 +186,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
61     u64 val;
62    
63     val = kvm_arm_timer_get_reg(vcpu, reg->id);
64     - return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
65     + return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
66     }
67    
68     /**
69     diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
70     index 17bf39ac83ba..4cb98aa8c27b 100644
71     --- a/arch/arm64/mm/init.c
72     +++ b/arch/arm64/mm/init.c
73     @@ -319,8 +319,8 @@ void __init mem_init(void)
74     #endif
75     MLG(VMALLOC_START, VMALLOC_END),
76     #ifdef CONFIG_SPARSEMEM_VMEMMAP
77     - MLG((unsigned long)vmemmap,
78     - (unsigned long)vmemmap + VMEMMAP_SIZE),
79     + MLG(VMEMMAP_START,
80     + VMEMMAP_START + VMEMMAP_SIZE),
81     MLM((unsigned long)virt_to_page(PAGE_OFFSET),
82     (unsigned long)virt_to_page(high_memory)),
83     #endif
84     diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
85     index 886cb1976e90..ca9a81007489 100644
86     --- a/arch/mips/kernel/traps.c
87     +++ b/arch/mips/kernel/traps.c
88     @@ -690,15 +690,15 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
89     asmlinkage void do_ov(struct pt_regs *regs)
90     {
91     enum ctx_state prev_state;
92     - siginfo_t info;
93     + siginfo_t info = {
94     + .si_signo = SIGFPE,
95     + .si_code = FPE_INTOVF,
96     + .si_addr = (void __user *)regs->cp0_epc,
97     + };
98    
99     prev_state = exception_enter();
100     die_if_kernel("Integer overflow", regs);
101    
102     - info.si_code = FPE_INTOVF;
103     - info.si_signo = SIGFPE;
104     - info.si_errno = 0;
105     - info.si_addr = (void __user *) regs->cp0_epc;
106     force_sig_info(SIGFPE, &info, current);
107     exception_exit(prev_state);
108     }
109     @@ -874,7 +874,7 @@ out:
110     void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
111     const char *str)
112     {
113     - siginfo_t info;
114     + siginfo_t info = { 0 };
115     char b[40];
116    
117     #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
118     @@ -903,7 +903,6 @@ void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
119     else
120     info.si_code = FPE_INTOVF;
121     info.si_signo = SIGFPE;
122     - info.si_errno = 0;
123     info.si_addr = (void __user *) regs->cp0_epc;
124     force_sig_info(SIGFPE, &info, current);
125     break;
126     diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
127     index b9b803facdbf..2683d04fdda5 100644
128     --- a/arch/mips/kvm/mips.c
129     +++ b/arch/mips/kvm/mips.c
130     @@ -702,7 +702,7 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
131     } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
132     void __user *uaddr = (void __user *)(long)reg->addr;
133    
134     - return copy_to_user(uaddr, vs, 16);
135     + return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
136     } else {
137     return -EINVAL;
138     }
139     @@ -732,7 +732,7 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
140     } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
141     void __user *uaddr = (void __user *)(long)reg->addr;
142    
143     - return copy_from_user(vs, uaddr, 16);
144     + return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0;
145     } else {
146     return -EINVAL;
147     }
148     diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
149     index 3bd0597d9c3d..ddb8154610cc 100644
150     --- a/arch/mips/mm/sc-mips.c
151     +++ b/arch/mips/mm/sc-mips.c
152     @@ -164,11 +164,13 @@ static int __init mips_sc_probe_cm3(void)
153    
154     sets = cfg & CM_GCR_L2_CONFIG_SET_SIZE_MSK;
155     sets >>= CM_GCR_L2_CONFIG_SET_SIZE_SHF;
156     - c->scache.sets = 64 << sets;
157     + if (sets)
158     + c->scache.sets = 64 << sets;
159    
160     line_sz = cfg & CM_GCR_L2_CONFIG_LINE_SIZE_MSK;
161     line_sz >>= CM_GCR_L2_CONFIG_LINE_SIZE_SHF;
162     - c->scache.linesz = 2 << line_sz;
163     + if (line_sz)
164     + c->scache.linesz = 2 << line_sz;
165    
166     assoc = cfg & CM_GCR_L2_CONFIG_ASSOC_MSK;
167     assoc >>= CM_GCR_L2_CONFIG_ASSOC_SHF;
168     @@ -176,9 +178,12 @@ static int __init mips_sc_probe_cm3(void)
169     c->scache.waysize = c->scache.sets * c->scache.linesz;
170     c->scache.waybit = __ffs(c->scache.waysize);
171    
172     - c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
173     + if (c->scache.linesz) {
174     + c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
175     + return 1;
176     + }
177    
178     - return 1;
179     + return 0;
180     }
181    
182     void __weak platform_early_l2_init(void)
183     diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
184     index 9585c81f755f..ce0b2b4075c7 100644
185     --- a/arch/parisc/kernel/ptrace.c
186     +++ b/arch/parisc/kernel/ptrace.c
187     @@ -269,14 +269,19 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
188    
189     long do_syscall_trace_enter(struct pt_regs *regs)
190     {
191     - long ret = 0;
192     -
193     /* Do the secure computing check first. */
194     secure_computing_strict(regs->gr[20]);
195    
196     if (test_thread_flag(TIF_SYSCALL_TRACE) &&
197     - tracehook_report_syscall_entry(regs))
198     - ret = -1L;
199     + tracehook_report_syscall_entry(regs)) {
200     + /*
201     + * Tracing decided this syscall should not happen or the
202     + * debugger stored an invalid system call number. Skip
203     + * the system call and the system call restart handling.
204     + */
205     + regs->gr[20] = -1UL;
206     + goto out;
207     + }
208    
209     #ifdef CONFIG_64BIT
210     if (!is_compat_task())
211     @@ -290,7 +295,8 @@ long do_syscall_trace_enter(struct pt_regs *regs)
212     regs->gr[24] & 0xffffffff,
213     regs->gr[23] & 0xffffffff);
214    
215     - return ret ? : regs->gr[20];
216     +out:
217     + return regs->gr[20];
218     }
219    
220     void do_syscall_trace_exit(struct pt_regs *regs)
221     diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
222     index 3fbd7252a4b2..fbafa0d0e2bf 100644
223     --- a/arch/parisc/kernel/syscall.S
224     +++ b/arch/parisc/kernel/syscall.S
225     @@ -343,7 +343,7 @@ tracesys_next:
226     #endif
227    
228     comiclr,>>= __NR_Linux_syscalls, %r20, %r0
229     - b,n .Lsyscall_nosys
230     + b,n .Ltracesys_nosys
231    
232     LDREGX %r20(%r19), %r19
233    
234     @@ -359,6 +359,9 @@ tracesys_next:
235     be 0(%sr7,%r19)
236     ldo R%tracesys_exit(%r2),%r2
237    
238     +.Ltracesys_nosys:
239     + ldo -ENOSYS(%r0),%r28 /* set errno */
240     +
241     /* Do *not* call this function on the gateway page, because it
242     makes a direct call to syscall_trace. */
243    
244     diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
245     index d1daead5fcdd..adb3eaf8fe2a 100644
246     --- a/arch/x86/kernel/acpi/sleep.c
247     +++ b/arch/x86/kernel/acpi/sleep.c
248     @@ -16,6 +16,7 @@
249     #include <asm/cacheflush.h>
250     #include <asm/realmode.h>
251    
252     +#include <linux/ftrace.h>
253     #include "../../realmode/rm/wakeup.h"
254     #include "sleep.h"
255    
256     @@ -107,7 +108,13 @@ int x86_acpi_suspend_lowlevel(void)
257     saved_magic = 0x123456789abcdef0L;
258     #endif /* CONFIG_64BIT */
259    
260     + /*
261     + * Pause/unpause graph tracing around do_suspend_lowlevel as it has
262     + * inconsistent call/return info after it jumps to the wakeup vector.
263     + */
264     + pause_graph_tracing();
265     do_suspend_lowlevel();
266     + unpause_graph_tracing();
267     return 0;
268     }
269    
270     diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
271     index 10e7693b3540..5fd846cd6e0e 100644
272     --- a/arch/x86/kvm/vmx.c
273     +++ b/arch/x86/kvm/vmx.c
274     @@ -595,6 +595,8 @@ struct vcpu_vmx {
275     /* Support for PML */
276     #define PML_ENTITY_NUM 512
277     struct page *pml_pg;
278     +
279     + u64 current_tsc_ratio;
280     };
281    
282     enum segment_cache_field {
283     @@ -2062,14 +2064,16 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
284     rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
285     vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
286    
287     - /* Setup TSC multiplier */
288     - if (cpu_has_vmx_tsc_scaling())
289     - vmcs_write64(TSC_MULTIPLIER,
290     - vcpu->arch.tsc_scaling_ratio);
291     -
292     vmx->loaded_vmcs->cpu = cpu;
293     }
294    
295     + /* Setup TSC multiplier */
296     + if (kvm_has_tsc_control &&
297     + vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio) {
298     + vmx->current_tsc_ratio = vcpu->arch.tsc_scaling_ratio;
299     + vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
300     + }
301     +
302     vmx_vcpu_pi_load(vcpu, cpu);
303     }
304    
305     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
306     index 6ef3856aab4b..d2945024ed33 100644
307     --- a/arch/x86/kvm/x86.c
308     +++ b/arch/x86/kvm/x86.c
309     @@ -2736,7 +2736,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
310     }
311    
312     kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
313     - vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
314     }
315    
316     void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
317     @@ -6545,12 +6544,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
318     * KVM_DEBUGREG_WONT_EXIT again.
319     */
320     if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
321     - int i;
322     -
323     WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
324     kvm_x86_ops->sync_dirty_debug_regs(vcpu);
325     - for (i = 0; i < KVM_NR_DB_REGS; i++)
326     - vcpu->arch.eff_db[i] = vcpu->arch.db[i];
327     + kvm_update_dr0123(vcpu);
328     + kvm_update_dr6(vcpu);
329     + kvm_update_dr7(vcpu);
330     + vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
331     }
332    
333     /*
334     diff --git a/block/blk-settings.c b/block/blk-settings.c
335     index dd4973583978..c7bb666aafd1 100644
336     --- a/block/blk-settings.c
337     +++ b/block/blk-settings.c
338     @@ -91,8 +91,8 @@ void blk_set_default_limits(struct queue_limits *lim)
339     lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
340     lim->virt_boundary_mask = 0;
341     lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
342     - lim->max_sectors = lim->max_dev_sectors = lim->max_hw_sectors =
343     - BLK_SAFE_MAX_SECTORS;
344     + lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
345     + lim->max_dev_sectors = 0;
346     lim->chunk_sectors = 0;
347     lim->max_write_same_sectors = 0;
348     lim->max_discard_sectors = 0;
349     diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
350     index 99921aa0daca..60a15831c009 100644
351     --- a/drivers/ata/ahci.c
352     +++ b/drivers/ata/ahci.c
353     @@ -367,15 +367,21 @@ static const struct pci_device_id ahci_pci_tbl[] = {
354     { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
355     { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
356     { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
357     + { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/
358     { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
359     + { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Lewisburg RAID*/
360     { PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
361     { PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
362     { PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
363     { PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
364     + { PCI_VDEVICE(INTEL, 0xa1d2), board_ahci }, /* Lewisburg RAID*/
365     + { PCI_VDEVICE(INTEL, 0xa1d6), board_ahci }, /* Lewisburg RAID*/
366     { PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
367     { PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
368     { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
369     { PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
370     + { PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/
371     + { PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/
372    
373     /* JMicron 360/1/3/5/6, match class to avoid IDE function */
374     { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
375     diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
376     index 1f225cc1827f..998c6a85ad89 100644
377     --- a/drivers/ata/libahci.c
378     +++ b/drivers/ata/libahci.c
379     @@ -1142,8 +1142,7 @@ static void ahci_port_init(struct device *dev, struct ata_port *ap,
380    
381     /* mark esata ports */
382     tmp = readl(port_mmio + PORT_CMD);
383     - if ((tmp & PORT_CMD_HPCP) ||
384     - ((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS)))
385     + if ((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS))
386     ap->pflags |= ATA_PFLAG_EXTERNAL;
387     }
388    
389     diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
390     index 7e959f90c020..e417e1a1d02c 100644
391     --- a/drivers/ata/libata-scsi.c
392     +++ b/drivers/ata/libata-scsi.c
393     @@ -675,19 +675,18 @@ static int ata_ioc32(struct ata_port *ap)
394     int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
395     int cmd, void __user *arg)
396     {
397     - int val = -EINVAL, rc = -EINVAL;
398     + unsigned long val;
399     + int rc = -EINVAL;
400     unsigned long flags;
401    
402     switch (cmd) {
403     - case ATA_IOC_GET_IO32:
404     + case HDIO_GET_32BIT:
405     spin_lock_irqsave(ap->lock, flags);
406     val = ata_ioc32(ap);
407     spin_unlock_irqrestore(ap->lock, flags);
408     - if (copy_to_user(arg, &val, 1))
409     - return -EFAULT;
410     - return 0;
411     + return put_user(val, (unsigned long __user *)arg);
412    
413     - case ATA_IOC_SET_IO32:
414     + case HDIO_SET_32BIT:
415     val = (unsigned long) arg;
416     rc = 0;
417     spin_lock_irqsave(ap->lock, flags);
418     diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
419     index 12fe0f3bb7e9..c8b6a780a290 100644
420     --- a/drivers/ata/pata_rb532_cf.c
421     +++ b/drivers/ata/pata_rb532_cf.c
422     @@ -32,6 +32,8 @@
423     #include <linux/libata.h>
424     #include <scsi/scsi_host.h>
425    
426     +#include <asm/mach-rc32434/rb.h>
427     +
428     #define DRV_NAME "pata-rb532-cf"
429     #define DRV_VERSION "0.1.0"
430     #define DRV_DESC "PATA driver for RouterBOARD 532 Compact Flash"
431     @@ -107,6 +109,7 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
432     int gpio;
433     struct resource *res;
434     struct ata_host *ah;
435     + struct cf_device *pdata;
436     struct rb532_cf_info *info;
437     int ret;
438    
439     @@ -122,7 +125,13 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
440     return -ENOENT;
441     }
442    
443     - gpio = irq_to_gpio(irq);
444     + pdata = dev_get_platdata(&pdev->dev);
445     + if (!pdata) {
446     + dev_err(&pdev->dev, "no platform data specified\n");
447     + return -EINVAL;
448     + }
449     +
450     + gpio = pdata->gpio_pin;
451     if (gpio < 0) {
452     dev_err(&pdev->dev, "no GPIO found for irq%d\n", irq);
453     return -ENOENT;
454     diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
455     index fc4156afa070..a59061e4221a 100644
456     --- a/drivers/dma/pxa_dma.c
457     +++ b/drivers/dma/pxa_dma.c
458     @@ -583,6 +583,8 @@ static void set_updater_desc(struct pxad_desc_sw *sw_desc,
459     (PXA_DCMD_LENGTH & sizeof(u32));
460     if (flags & DMA_PREP_INTERRUPT)
461     updater->dcmd |= PXA_DCMD_ENDIRQEN;
462     + if (sw_desc->cyclic)
463     + sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr = sw_desc->first;
464     }
465    
466     static bool is_desc_completed(struct virt_dma_desc *vd)
467     @@ -673,6 +675,10 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
468     dev_dbg(&chan->vc.chan.dev->device,
469     "%s(): checking txd %p[%x]: completed=%d\n",
470     __func__, vd, vd->tx.cookie, is_desc_completed(vd));
471     + if (to_pxad_sw_desc(vd)->cyclic) {
472     + vchan_cyclic_callback(vd);
473     + break;
474     + }
475     if (is_desc_completed(vd)) {
476     list_del(&vd->node);
477     vchan_cookie_complete(vd);
478     @@ -1080,7 +1086,7 @@ pxad_prep_dma_cyclic(struct dma_chan *dchan,
479     return NULL;
480    
481     pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
482     - dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH | period_len);
483     + dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH & period_len);
484     dev_dbg(&chan->vc.chan.dev->device,
485     "%s(): buf_addr=0x%lx len=%zu period=%zu dir=%d flags=%lx\n",
486     __func__, (unsigned long)buf_addr, len, period_len, dir, flags);
487     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
488     index 89c3dd62ba21..119cdc2c43e7 100644
489     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
490     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
491     @@ -77,7 +77,7 @@ void amdgpu_connector_hotplug(struct drm_connector *connector)
492     } else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
493     /* Don't try to start link training before we
494     * have the dpcd */
495     - if (!amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
496     + if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
497     return;
498    
499     /* set it to OFF so that drm_helper_connector_dpms()
500     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
501     index a2a16acee34d..b8fbbd7699e4 100644
502     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
503     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
504     @@ -33,6 +33,7 @@
505     #include <linux/slab.h>
506     #include <drm/drmP.h>
507     #include <drm/amdgpu_drm.h>
508     +#include <drm/drm_cache.h>
509     #include "amdgpu.h"
510     #include "amdgpu_trace.h"
511    
512     @@ -261,6 +262,13 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
513     AMDGPU_GEM_DOMAIN_OA);
514    
515     bo->flags = flags;
516     +
517     + /* For architectures that don't support WC memory,
518     + * mask out the WC flag from the BO
519     + */
520     + if (!drm_arch_can_wc_memory())
521     + bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
522     +
523     amdgpu_fill_placement_to_bo(bo, placement);
524     /* Kernel allocation are uninterruptible */
525     r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
526     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
527     index 03fe25142b78..7ae15fad16ed 100644
528     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
529     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
530     @@ -596,9 +596,6 @@ force:
531     /* update display watermarks based on new power state */
532     amdgpu_display_bandwidth_update(adev);
533    
534     - adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
535     - adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
536     -
537     /* wait for the rings to drain */
538     for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
539     struct amdgpu_ring *ring = adev->rings[i];
540     @@ -617,6 +614,9 @@ force:
541     /* update displays */
542     amdgpu_dpm_display_configuration_changed(adev);
543    
544     + adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
545     + adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
546     +
547     if (adev->pm.funcs->force_performance_level) {
548     if (adev->pm.dpm.thermal_active) {
549     enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
550     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
551     index 39adbb6470d1..8c5ec151ddac 100644
552     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
553     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
554     @@ -1248,7 +1248,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
555     {
556     const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
557     AMDGPU_VM_PTE_COUNT * 8);
558     - unsigned pd_size, pd_entries, pts_size;
559     + unsigned pd_size, pd_entries;
560     int i, r;
561    
562     for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
563     @@ -1266,8 +1266,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
564     pd_entries = amdgpu_vm_num_pdes(adev);
565    
566     /* allocate page table array */
567     - pts_size = pd_entries * sizeof(struct amdgpu_vm_pt);
568     - vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
569     + vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt));
570     if (vm->page_tables == NULL) {
571     DRM_ERROR("Cannot allocate memory for page table array\n");
572     return -ENOMEM;
573     @@ -1327,7 +1326,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
574    
575     for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
576     amdgpu_bo_unref(&vm->page_tables[i].bo);
577     - kfree(vm->page_tables);
578     + drm_free_large(vm->page_tables);
579    
580     amdgpu_bo_unref(&vm->page_directory);
581     fence_put(vm->page_directory_fence);
582     diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
583     index 72793f93e2fc..aa491540ba85 100644
584     --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
585     +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
586     @@ -3628,6 +3628,19 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
587     unsigned vm_id, uint64_t pd_addr)
588     {
589     int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
590     + uint32_t seq = ring->fence_drv.sync_seq;
591     + uint64_t addr = ring->fence_drv.gpu_addr;
592     +
593     + amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
594     + amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
595     + WAIT_REG_MEM_FUNCTION(3) | /* equal */
596     + WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
597     + amdgpu_ring_write(ring, addr & 0xfffffffc);
598     + amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
599     + amdgpu_ring_write(ring, seq);
600     + amdgpu_ring_write(ring, 0xffffffff);
601     + amdgpu_ring_write(ring, 4); /* poll interval */
602     +
603     if (usepfp) {
604     /* synce CE with ME to prevent CE fetch CEIB before context switch done */
605     amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
606     diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
607     index 4cb45f4602aa..d1054034d14b 100644
608     --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
609     +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
610     @@ -4681,7 +4681,8 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
611    
612     amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
613     amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
614     - WAIT_REG_MEM_FUNCTION(3))); /* equal */
615     + WAIT_REG_MEM_FUNCTION(3) | /* equal */
616     + WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
617     amdgpu_ring_write(ring, addr & 0xfffffffc);
618     amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
619     amdgpu_ring_write(ring, seq);
620     diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
621     index 7628eb44cce2..3e9cbe398151 100644
622     --- a/drivers/gpu/drm/amd/amdgpu/vi.c
623     +++ b/drivers/gpu/drm/amd/amdgpu/vi.c
624     @@ -1082,10 +1082,10 @@ static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
625     },
626     {
627     .type = AMD_IP_BLOCK_TYPE_GMC,
628     - .major = 8,
629     - .minor = 0,
630     + .major = 7,
631     + .minor = 4,
632     .rev = 0,
633     - .funcs = &gmc_v8_0_ip_funcs,
634     + .funcs = &gmc_v7_0_ip_funcs,
635     },
636     {
637     .type = AMD_IP_BLOCK_TYPE_IH,
638     @@ -1129,10 +1129,10 @@ static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
639     },
640     {
641     .type = AMD_IP_BLOCK_TYPE_GMC,
642     - .major = 7,
643     - .minor = 4,
644     + .major = 8,
645     + .minor = 0,
646     .rev = 0,
647     - .funcs = &gmc_v7_0_ip_funcs,
648     + .funcs = &gmc_v8_0_ip_funcs,
649     },
650     {
651     .type = AMD_IP_BLOCK_TYPE_IH,
652     diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
653     index 541a610667ad..e0b4586a26fd 100644
654     --- a/drivers/gpu/drm/ast/ast_main.c
655     +++ b/drivers/gpu/drm/ast/ast_main.c
656     @@ -227,7 +227,7 @@ static int ast_get_dram_info(struct drm_device *dev)
657     } while (ast_read32(ast, 0x10000) != 0x01);
658     data = ast_read32(ast, 0x10004);
659    
660     - if (data & 0x400)
661     + if (data & 0x40)
662     ast->dram_bus_width = 16;
663     else
664     ast->dram_bus_width = 32;
665     diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
666     index 760e0ce4aa26..a6ad938f44a6 100644
667     --- a/drivers/gpu/drm/i915/i915_drv.c
668     +++ b/drivers/gpu/drm/i915/i915_drv.c
669     @@ -531,7 +531,10 @@ void intel_detect_pch(struct drm_device *dev)
670     dev_priv->pch_type = PCH_SPT;
671     DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
672     WARN_ON(!IS_SKYLAKE(dev));
673     - } else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE) {
674     + } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
675     + ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
676     + pch->subsystem_vendor == 0x1af4 &&
677     + pch->subsystem_device == 0x1100)) {
678     dev_priv->pch_type = intel_virt_detect_pch(dev);
679     } else
680     continue;
681     diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
682     index f4af19a0d569..d3ce4da6a6ad 100644
683     --- a/drivers/gpu/drm/i915/i915_drv.h
684     +++ b/drivers/gpu/drm/i915/i915_drv.h
685     @@ -2614,6 +2614,7 @@ struct drm_i915_cmd_table {
686     #define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
687     #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
688     #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
689     +#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
690    
691     #define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
692     #define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
693     diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
694     index 2081a60d08fb..1fa81215cea1 100644
695     --- a/drivers/gpu/drm/radeon/radeon_pm.c
696     +++ b/drivers/gpu/drm/radeon/radeon_pm.c
697     @@ -1076,10 +1076,6 @@ force:
698     /* update display watermarks based on new power state */
699     radeon_bandwidth_update(rdev);
700    
701     - rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
702     - rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
703     - rdev->pm.dpm.single_display = single_display;
704     -
705     /* wait for the rings to drain */
706     for (i = 0; i < RADEON_NUM_RINGS; i++) {
707     struct radeon_ring *ring = &rdev->ring[i];
708     @@ -1098,6 +1094,10 @@ force:
709     /* update displays */
710     radeon_dpm_display_configuration_changed(rdev);
711    
712     + rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
713     + rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
714     + rdev->pm.dpm.single_display = single_display;
715     +
716     if (rdev->asic->dpm.force_performance_level) {
717     if (rdev->pm.dpm.thermal_active) {
718     enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
719     diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
720     index 8e9637eea512..81115abf3c1f 100644
721     --- a/drivers/i2c/busses/i2c-brcmstb.c
722     +++ b/drivers/i2c/busses/i2c-brcmstb.c
723     @@ -562,8 +562,7 @@ static int brcmstb_i2c_probe(struct platform_device *pdev)
724     if (!dev)
725     return -ENOMEM;
726    
727     - dev->bsc_regmap = devm_kzalloc(&pdev->dev, sizeof(struct bsc_regs *),
728     - GFP_KERNEL);
729     + dev->bsc_regmap = devm_kzalloc(&pdev->dev, sizeof(*dev->bsc_regmap), GFP_KERNEL);
730     if (!dev->bsc_regmap)
731     return -ENOMEM;
732    
733     diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
734     index 013bdfff2d4d..bf4959f4225b 100644
735     --- a/drivers/iommu/amd_iommu_init.c
736     +++ b/drivers/iommu/amd_iommu_init.c
737     @@ -228,6 +228,10 @@ static int amd_iommu_enable_interrupts(void);
738     static int __init iommu_go_to_state(enum iommu_init_state state);
739     static void init_device_table_dma(void);
740    
741     +static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
742     + u8 bank, u8 cntr, u8 fxn,
743     + u64 *value, bool is_write);
744     +
745     static inline void update_last_devid(u16 devid)
746     {
747     if (devid > amd_iommu_last_bdf)
748     @@ -1016,6 +1020,34 @@ static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
749     }
750    
751     /*
752     + * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
753     + * Workaround:
754     + * BIOS should enable ATS write permission check by setting
755     + * L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
756     + */
757     +static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
758     +{
759     + u32 value;
760     +
761     + if ((boot_cpu_data.x86 != 0x15) ||
762     + (boot_cpu_data.x86_model < 0x30) ||
763     + (boot_cpu_data.x86_model > 0x3f))
764     + return;
765     +
766     + /* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
767     + value = iommu_read_l2(iommu, 0x47);
768     +
769     + if (value & BIT(0))
770     + return;
771     +
772     + /* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
773     + iommu_write_l2(iommu, 0x47, value | BIT(0));
774     +
775     + pr_info("AMD-Vi: Applying ATS write check workaround for IOMMU at %s\n",
776     + dev_name(&iommu->dev->dev));
777     +}
778     +
779     +/*
780     * This function clues the initialization function for one IOMMU
781     * together and also allocates the command buffer and programs the
782     * hardware. It does NOT enable the IOMMU. This is done afterwards.
783     @@ -1142,8 +1174,8 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu)
784     amd_iommu_pc_present = true;
785    
786     /* Check if the performance counters can be written to */
787     - if ((0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val, true)) ||
788     - (0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val2, false)) ||
789     + if ((0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val, true)) ||
790     + (0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val2, false)) ||
791     (val != val2)) {
792     pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n");
793     amd_iommu_pc_present = false;
794     @@ -1284,6 +1316,7 @@ static int iommu_init_pci(struct amd_iommu *iommu)
795     }
796    
797     amd_iommu_erratum_746_workaround(iommu);
798     + amd_iommu_ats_write_check_workaround(iommu);
799    
800     iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu,
801     amd_iommu_groups, "ivhd%d",
802     @@ -2283,22 +2316,15 @@ u8 amd_iommu_pc_get_max_counters(u16 devid)
803     }
804     EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
805    
806     -int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
807     +static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
808     + u8 bank, u8 cntr, u8 fxn,
809     u64 *value, bool is_write)
810     {
811     - struct amd_iommu *iommu;
812     u32 offset;
813     u32 max_offset_lim;
814    
815     - /* Make sure the IOMMU PC resource is available */
816     - if (!amd_iommu_pc_present)
817     - return -ENODEV;
818     -
819     - /* Locate the iommu associated with the device ID */
820     - iommu = amd_iommu_rlookup_table[devid];
821     -
822     /* Check for valid iommu and pc register indexing */
823     - if (WARN_ON((iommu == NULL) || (fxn > 0x28) || (fxn & 7)))
824     + if (WARN_ON((fxn > 0x28) || (fxn & 7)))
825     return -ENODEV;
826    
827     offset = (u32)(((0x40|bank) << 12) | (cntr << 8) | fxn);
828     @@ -2322,3 +2348,16 @@ int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
829     return 0;
830     }
831     EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val);
832     +
833     +int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
834     + u64 *value, bool is_write)
835     +{
836     + struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
837     +
838     + /* Make sure the IOMMU PC resource is available */
839     + if (!amd_iommu_pc_present || iommu == NULL)
840     + return -ENODEV;
841     +
842     + return iommu_pc_get_set_reg_val(iommu, bank, cntr, fxn,
843     + value, is_write);
844     +}
845     diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
846     index 55a19e49205b..3821c4786662 100644
847     --- a/drivers/iommu/dmar.c
848     +++ b/drivers/iommu/dmar.c
849     @@ -329,7 +329,8 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb,
850     /* Only care about add/remove events for physical functions */
851     if (pdev->is_virtfn)
852     return NOTIFY_DONE;
853     - if (action != BUS_NOTIFY_ADD_DEVICE && action != BUS_NOTIFY_DEL_DEVICE)
854     + if (action != BUS_NOTIFY_ADD_DEVICE &&
855     + action != BUS_NOTIFY_REMOVED_DEVICE)
856     return NOTIFY_DONE;
857    
858     info = dmar_alloc_pci_notify_info(pdev, action);
859     @@ -339,7 +340,7 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb,
860     down_write(&dmar_global_lock);
861     if (action == BUS_NOTIFY_ADD_DEVICE)
862     dmar_pci_bus_add_dev(info);
863     - else if (action == BUS_NOTIFY_DEL_DEVICE)
864     + else if (action == BUS_NOTIFY_REMOVED_DEVICE)
865     dmar_pci_bus_del_dev(info);
866     up_write(&dmar_global_lock);
867    
868     diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
869     index 986a53e3eb96..a2e1b7f14df2 100644
870     --- a/drivers/iommu/intel-iommu.c
871     +++ b/drivers/iommu/intel-iommu.c
872     @@ -4367,7 +4367,7 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
873     rmrru->devices_cnt);
874     if(ret < 0)
875     return ret;
876     - } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
877     + } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
878     dmar_remove_dev_scope(info, rmrr->segment,
879     rmrru->devices, rmrru->devices_cnt);
880     }
881     @@ -4387,7 +4387,7 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
882     break;
883     else if(ret < 0)
884     return ret;
885     - } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
886     + } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
887     if (dmar_remove_dev_scope(info, atsr->segment,
888     atsru->devices, atsru->devices_cnt))
889     break;
890     diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
891     index 5631ec004eed..01adcdc52346 100644
892     --- a/drivers/media/i2c/adv7604.c
893     +++ b/drivers/media/i2c/adv7604.c
894     @@ -1960,10 +1960,9 @@ static int adv76xx_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
895     }
896    
897     /* tx 5v detect */
898     - tx_5v = io_read(sd, 0x70) & info->cable_det_mask;
899     + tx_5v = irq_reg_0x70 & info->cable_det_mask;
900     if (tx_5v) {
901     v4l2_dbg(1, debug, sd, "%s: tx_5v: 0x%x\n", __func__, tx_5v);
902     - io_write(sd, 0x71, tx_5v);
903     adv76xx_s_detect_tx_5v_ctrl(sd);
904     if (handled)
905     *handled = true;
906     diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
907     index 85761d7eb333..be2c8e248e2e 100644
908     --- a/drivers/misc/cxl/pci.c
909     +++ b/drivers/misc/cxl/pci.c
910     @@ -414,7 +414,7 @@ static int cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
911     delta = mftb() - psl_tb;
912     if (delta < 0)
913     delta = -delta;
914     - } while (cputime_to_usecs(delta) > 16);
915     + } while (tb_to_ns(delta) > 16000);
916    
917     return 0;
918     }
919     diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
920     index 2a1b6e037e1a..0134ba32a057 100644
921     --- a/drivers/mtd/ubi/upd.c
922     +++ b/drivers/mtd/ubi/upd.c
923     @@ -193,7 +193,7 @@ int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
924     vol->changing_leb = 1;
925     vol->ch_lnum = req->lnum;
926    
927     - vol->upd_buf = vmalloc(req->bytes);
928     + vol->upd_buf = vmalloc(ALIGN((int)req->bytes, ubi->min_io_size));
929     if (!vol->upd_buf)
930     return -ENOMEM;
931    
932     diff --git a/drivers/pci/host/pci-keystone-dw.c b/drivers/pci/host/pci-keystone-dw.c
933     index ed34c9520a02..6153853ca9c3 100644
934     --- a/drivers/pci/host/pci-keystone-dw.c
935     +++ b/drivers/pci/host/pci-keystone-dw.c
936     @@ -58,11 +58,6 @@
937    
938     #define to_keystone_pcie(x) container_of(x, struct keystone_pcie, pp)
939    
940     -static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
941     -{
942     - return sys->private_data;
943     -}
944     -
945     static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
946     u32 *bit_pos)
947     {
948     @@ -108,7 +103,7 @@ static void ks_dw_pcie_msi_irq_ack(struct irq_data *d)
949     struct pcie_port *pp;
950    
951     msi = irq_data_get_msi_desc(d);
952     - pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
953     + pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
954     ks_pcie = to_keystone_pcie(pp);
955     offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
956     update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);
957     @@ -146,7 +141,7 @@ static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
958     u32 offset;
959    
960     msi = irq_data_get_msi_desc(d);
961     - pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
962     + pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
963     ks_pcie = to_keystone_pcie(pp);
964     offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
965    
966     @@ -167,7 +162,7 @@ static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d)
967     u32 offset;
968    
969     msi = irq_data_get_msi_desc(d);
970     - pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
971     + pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
972     ks_pcie = to_keystone_pcie(pp);
973     offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
974    
975     diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
976     index 91a003011acf..a9bac3bf20de 100644
977     --- a/drivers/sh/pm_runtime.c
978     +++ b/drivers/sh/pm_runtime.c
979     @@ -34,7 +34,7 @@ static struct pm_clk_notifier_block platform_bus_notifier = {
980    
981     static int __init sh_pm_runtime_init(void)
982     {
983     - if (IS_ENABLED(CONFIG_ARCH_SHMOBILE)) {
984     + if (IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_ARCH_SHMOBILE)) {
985     if (!of_find_compatible_node(NULL, NULL,
986     "renesas,cpg-mstp-clocks"))
987     return 0;
988     diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
989     index 88ea4e4f124b..3436a83568ea 100644
990     --- a/drivers/target/target_core_device.c
991     +++ b/drivers/target/target_core_device.c
992     @@ -826,6 +826,49 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
993     return dev;
994     }
995    
996     +/*
997     + * Check if the underlying struct block_device request_queue supports
998     + * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
999     + * in ATA and we need to set TPE=1
1000     + */
1001     +bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
1002     + struct request_queue *q, int block_size)
1003     +{
1004     + if (!blk_queue_discard(q))
1005     + return false;
1006     +
1007     + attrib->max_unmap_lba_count = (q->limits.max_discard_sectors << 9) /
1008     + block_size;
1009     + /*
1010     + * Currently hardcoded to 1 in Linux/SCSI code..
1011     + */
1012     + attrib->max_unmap_block_desc_count = 1;
1013     + attrib->unmap_granularity = q->limits.discard_granularity / block_size;
1014     + attrib->unmap_granularity_alignment = q->limits.discard_alignment /
1015     + block_size;
1016     + return true;
1017     +}
1018     +EXPORT_SYMBOL(target_configure_unmap_from_queue);
1019     +
1020     +/*
1021     + * Convert from blocksize advertised to the initiator to the 512 byte
1022     + * units unconditionally used by the Linux block layer.
1023     + */
1024     +sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
1025     +{
1026     + switch (dev->dev_attrib.block_size) {
1027     + case 4096:
1028     + return lb << 3;
1029     + case 2048:
1030     + return lb << 2;
1031     + case 1024:
1032     + return lb << 1;
1033     + default:
1034     + return lb;
1035     + }
1036     +}
1037     +EXPORT_SYMBOL(target_to_linux_sector);
1038     +
1039     int target_configure_device(struct se_device *dev)
1040     {
1041     struct se_hba *hba = dev->se_hba;
1042     diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
1043     index e3195700211a..75f0f08b2a34 100644
1044     --- a/drivers/target/target_core_file.c
1045     +++ b/drivers/target/target_core_file.c
1046     @@ -160,25 +160,11 @@ static int fd_configure_device(struct se_device *dev)
1047     " block_device blocks: %llu logical_block_size: %d\n",
1048     dev_size, div_u64(dev_size, fd_dev->fd_block_size),
1049     fd_dev->fd_block_size);
1050     - /*
1051     - * Check if the underlying struct block_device request_queue supports
1052     - * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
1053     - * in ATA and we need to set TPE=1
1054     - */
1055     - if (blk_queue_discard(q)) {
1056     - dev->dev_attrib.max_unmap_lba_count =
1057     - q->limits.max_discard_sectors;
1058     - /*
1059     - * Currently hardcoded to 1 in Linux/SCSI code..
1060     - */
1061     - dev->dev_attrib.max_unmap_block_desc_count = 1;
1062     - dev->dev_attrib.unmap_granularity =
1063     - q->limits.discard_granularity >> 9;
1064     - dev->dev_attrib.unmap_granularity_alignment =
1065     - q->limits.discard_alignment;
1066     +
1067     + if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
1068     + fd_dev->fd_block_size))
1069     pr_debug("IFILE: BLOCK Discard support available,"
1070     - " disabled by default\n");
1071     - }
1072     + " disabled by default\n");
1073     /*
1074     * Enable write same emulation for IBLOCK and use 0xFFFF as
1075     * the smaller WRITE_SAME(10) only has a two-byte block count.
1076     @@ -490,9 +476,12 @@ fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
1077     if (S_ISBLK(inode->i_mode)) {
1078     /* The backend is block device, use discard */
1079     struct block_device *bdev = inode->i_bdev;
1080     + struct se_device *dev = cmd->se_dev;
1081    
1082     - ret = blkdev_issue_discard(bdev, lba,
1083     - nolb, GFP_KERNEL, 0);
1084     + ret = blkdev_issue_discard(bdev,
1085     + target_to_linux_sector(dev, lba),
1086     + target_to_linux_sector(dev, nolb),
1087     + GFP_KERNEL, 0);
1088     if (ret < 0) {
1089     pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n",
1090     ret);
1091     diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
1092     index f29c69120054..2c53dcefff3e 100644
1093     --- a/drivers/target/target_core_iblock.c
1094     +++ b/drivers/target/target_core_iblock.c
1095     @@ -121,27 +121,11 @@ static int iblock_configure_device(struct se_device *dev)
1096     dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
1097     dev->dev_attrib.hw_queue_depth = q->nr_requests;
1098    
1099     - /*
1100     - * Check if the underlying struct block_device request_queue supports
1101     - * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
1102     - * in ATA and we need to set TPE=1
1103     - */
1104     - if (blk_queue_discard(q)) {
1105     - dev->dev_attrib.max_unmap_lba_count =
1106     - q->limits.max_discard_sectors;
1107     -
1108     - /*
1109     - * Currently hardcoded to 1 in Linux/SCSI code..
1110     - */
1111     - dev->dev_attrib.max_unmap_block_desc_count = 1;
1112     - dev->dev_attrib.unmap_granularity =
1113     - q->limits.discard_granularity >> 9;
1114     - dev->dev_attrib.unmap_granularity_alignment =
1115     - q->limits.discard_alignment;
1116     -
1117     + if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
1118     + dev->dev_attrib.hw_block_size))
1119     pr_debug("IBLOCK: BLOCK Discard support available,"
1120     - " disabled by default\n");
1121     - }
1122     + " disabled by default\n");
1123     +
1124     /*
1125     * Enable write same emulation for IBLOCK and use 0xFFFF as
1126     * the smaller WRITE_SAME(10) only has a two-byte block count.
1127     @@ -413,9 +397,13 @@ static sense_reason_t
1128     iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
1129     {
1130     struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
1131     + struct se_device *dev = cmd->se_dev;
1132     int ret;
1133    
1134     - ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0);
1135     + ret = blkdev_issue_discard(bdev,
1136     + target_to_linux_sector(dev, lba),
1137     + target_to_linux_sector(dev, nolb),
1138     + GFP_KERNEL, 0);
1139     if (ret < 0) {
1140     pr_err("blkdev_issue_discard() failed: %d\n", ret);
1141     return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1142     @@ -431,8 +419,10 @@ iblock_execute_write_same(struct se_cmd *cmd)
1143     struct scatterlist *sg;
1144     struct bio *bio;
1145     struct bio_list list;
1146     - sector_t block_lba = cmd->t_task_lba;
1147     - sector_t sectors = sbc_get_write_same_sectors(cmd);
1148     + struct se_device *dev = cmd->se_dev;
1149     + sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
1150     + sector_t sectors = target_to_linux_sector(dev,
1151     + sbc_get_write_same_sectors(cmd));
1152    
1153     if (cmd->prot_op) {
1154     pr_err("WRITE_SAME: Protection information with IBLOCK"
1155     @@ -646,12 +636,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
1156     enum dma_data_direction data_direction)
1157     {
1158     struct se_device *dev = cmd->se_dev;
1159     + sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
1160     struct iblock_req *ibr;
1161     struct bio *bio, *bio_start;
1162     struct bio_list list;
1163     struct scatterlist *sg;
1164     u32 sg_num = sgl_nents;
1165     - sector_t block_lba;
1166     unsigned bio_cnt;
1167     int rw = 0;
1168     int i;
1169     @@ -677,24 +667,6 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
1170     rw = READ;
1171     }
1172    
1173     - /*
1174     - * Convert the blocksize advertised to the initiator to the 512 byte
1175     - * units unconditionally used by the Linux block layer.
1176     - */
1177     - if (dev->dev_attrib.block_size == 4096)
1178     - block_lba = (cmd->t_task_lba << 3);
1179     - else if (dev->dev_attrib.block_size == 2048)
1180     - block_lba = (cmd->t_task_lba << 2);
1181     - else if (dev->dev_attrib.block_size == 1024)
1182     - block_lba = (cmd->t_task_lba << 1);
1183     - else if (dev->dev_attrib.block_size == 512)
1184     - block_lba = cmd->t_task_lba;
1185     - else {
1186     - pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
1187     - " %u\n", dev->dev_attrib.block_size);
1188     - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1189     - }
1190     -
1191     ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
1192     if (!ibr)
1193     goto fail;
1194     diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
1195     index e3fbc5a5d88f..6ceac4f2d4b2 100644
1196     --- a/drivers/thermal/cpu_cooling.c
1197     +++ b/drivers/thermal/cpu_cooling.c
1198     @@ -377,26 +377,28 @@ static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_device,
1199     * get_load() - get load for a cpu since last updated
1200     * @cpufreq_device: &struct cpufreq_cooling_device for this cpu
1201     * @cpu: cpu number
1202     + * @cpu_idx: index of the cpu in cpufreq_device->allowed_cpus
1203     *
1204     * Return: The average load of cpu @cpu in percentage since this
1205     * function was last called.
1206     */
1207     -static u32 get_load(struct cpufreq_cooling_device *cpufreq_device, int cpu)
1208     +static u32 get_load(struct cpufreq_cooling_device *cpufreq_device, int cpu,
1209     + int cpu_idx)
1210     {
1211     u32 load;
1212     u64 now, now_idle, delta_time, delta_idle;
1213    
1214     now_idle = get_cpu_idle_time(cpu, &now, 0);
1215     - delta_idle = now_idle - cpufreq_device->time_in_idle[cpu];
1216     - delta_time = now - cpufreq_device->time_in_idle_timestamp[cpu];
1217     + delta_idle = now_idle - cpufreq_device->time_in_idle[cpu_idx];
1218     + delta_time = now - cpufreq_device->time_in_idle_timestamp[cpu_idx];
1219    
1220     if (delta_time <= delta_idle)
1221     load = 0;
1222     else
1223     load = div64_u64(100 * (delta_time - delta_idle), delta_time);
1224    
1225     - cpufreq_device->time_in_idle[cpu] = now_idle;
1226     - cpufreq_device->time_in_idle_timestamp[cpu] = now;
1227     + cpufreq_device->time_in_idle[cpu_idx] = now_idle;
1228     + cpufreq_device->time_in_idle_timestamp[cpu_idx] = now;
1229    
1230     return load;
1231     }
1232     @@ -598,7 +600,7 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
1233     u32 load;
1234    
1235     if (cpu_online(cpu))
1236     - load = get_load(cpufreq_device, cpu);
1237     + load = get_load(cpufreq_device, cpu, i);
1238     else
1239     load = 0;
1240    
1241     diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
1242     index 45f86da1d6d3..03b6743461d1 100644
1243     --- a/drivers/usb/chipidea/otg.c
1244     +++ b/drivers/usb/chipidea/otg.c
1245     @@ -158,7 +158,7 @@ static void ci_otg_work(struct work_struct *work)
1246     int ci_hdrc_otg_init(struct ci_hdrc *ci)
1247     {
1248     INIT_WORK(&ci->work, ci_otg_work);
1249     - ci->wq = create_singlethread_workqueue("ci_otg");
1250     + ci->wq = create_freezable_workqueue("ci_otg");
1251     if (!ci->wq) {
1252     dev_err(ci->dev, "can't create workqueue\n");
1253     return -ENODEV;
1254     diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
1255     index a7caf53d8b5e..7a76fe4c2f9e 100644
1256     --- a/drivers/usb/serial/cp210x.c
1257     +++ b/drivers/usb/serial/cp210x.c
1258     @@ -164,6 +164,7 @@ static const struct usb_device_id id_table[] = {
1259     { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
1260     { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
1261     { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
1262     + { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
1263     { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
1264     { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
1265     { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
1266     diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1267     index 8849439a8f18..348e19834b83 100644
1268     --- a/drivers/usb/serial/option.c
1269     +++ b/drivers/usb/serial/option.c
1270     @@ -270,6 +270,7 @@ static void option_instat_callback(struct urb *urb);
1271     #define TELIT_PRODUCT_UE910_V2 0x1012
1272     #define TELIT_PRODUCT_LE922_USBCFG0 0x1042
1273     #define TELIT_PRODUCT_LE922_USBCFG3 0x1043
1274     +#define TELIT_PRODUCT_LE922_USBCFG5 0x1045
1275     #define TELIT_PRODUCT_LE920 0x1200
1276     #define TELIT_PRODUCT_LE910 0x1201
1277    
1278     @@ -1132,6 +1133,8 @@ static const struct usb_device_id option_ids[] = {
1279     { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
1280     { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
1281     { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
1282     + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9003), /* Quectel UC20 */
1283     + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1284     { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
1285     { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
1286     { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
1287     @@ -1183,6 +1186,8 @@ static const struct usb_device_id option_ids[] = {
1288     .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
1289     { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
1290     .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
1291     + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
1292     + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
1293     { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
1294     .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
1295     { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
1296     diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
1297     index 9919d2a9faf2..1bc6089b9008 100644
1298     --- a/drivers/usb/serial/qcserial.c
1299     +++ b/drivers/usb/serial/qcserial.c
1300     @@ -157,14 +157,17 @@ static const struct usb_device_id id_table[] = {
1301     {DEVICE_SWI(0x1199, 0x9056)}, /* Sierra Wireless Modem */
1302     {DEVICE_SWI(0x1199, 0x9060)}, /* Sierra Wireless Modem */
1303     {DEVICE_SWI(0x1199, 0x9061)}, /* Sierra Wireless Modem */
1304     - {DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx/EM74xx */
1305     - {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx/EM74xx */
1306     + {DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx */
1307     + {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */
1308     + {DEVICE_SWI(0x1199, 0x9078)}, /* Sierra Wireless EM74xx */
1309     + {DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */
1310     {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
1311     {DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
1312     {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
1313     {DEVICE_SWI(0x413c, 0x81a8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
1314     {DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
1315     {DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
1316     + {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
1317    
1318     /* Huawei devices */
1319     {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
1320     diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
1321     index 56bf6dbb93db..9982cb176ce8 100644
1322     --- a/drivers/vfio/pci/vfio_pci.c
1323     +++ b/drivers/vfio/pci/vfio_pci.c
1324     @@ -446,7 +446,8 @@ static long vfio_pci_ioctl(void *device_data,
1325     info.num_regions = VFIO_PCI_NUM_REGIONS;
1326     info.num_irqs = VFIO_PCI_NUM_IRQS;
1327    
1328     - return copy_to_user((void __user *)arg, &info, minsz);
1329     + return copy_to_user((void __user *)arg, &info, minsz) ?
1330     + -EFAULT : 0;
1331    
1332     } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
1333     struct pci_dev *pdev = vdev->pdev;
1334     @@ -520,7 +521,8 @@ static long vfio_pci_ioctl(void *device_data,
1335     return -EINVAL;
1336     }
1337    
1338     - return copy_to_user((void __user *)arg, &info, minsz);
1339     + return copy_to_user((void __user *)arg, &info, minsz) ?
1340     + -EFAULT : 0;
1341    
1342     } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
1343     struct vfio_irq_info info;
1344     @@ -555,7 +557,8 @@ static long vfio_pci_ioctl(void *device_data,
1345     else
1346     info.flags |= VFIO_IRQ_INFO_NORESIZE;
1347    
1348     - return copy_to_user((void __user *)arg, &info, minsz);
1349     + return copy_to_user((void __user *)arg, &info, minsz) ?
1350     + -EFAULT : 0;
1351    
1352     } else if (cmd == VFIO_DEVICE_SET_IRQS) {
1353     struct vfio_irq_set hdr;
1354     diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
1355     index 418cdd9ba3f4..e65b142d3422 100644
1356     --- a/drivers/vfio/platform/vfio_platform_common.c
1357     +++ b/drivers/vfio/platform/vfio_platform_common.c
1358     @@ -219,7 +219,8 @@ static long vfio_platform_ioctl(void *device_data,
1359     info.num_regions = vdev->num_regions;
1360     info.num_irqs = vdev->num_irqs;
1361    
1362     - return copy_to_user((void __user *)arg, &info, minsz);
1363     + return copy_to_user((void __user *)arg, &info, minsz) ?
1364     + -EFAULT : 0;
1365    
1366     } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
1367     struct vfio_region_info info;
1368     @@ -240,7 +241,8 @@ static long vfio_platform_ioctl(void *device_data,
1369     info.size = vdev->regions[info.index].size;
1370     info.flags = vdev->regions[info.index].flags;
1371    
1372     - return copy_to_user((void __user *)arg, &info, minsz);
1373     + return copy_to_user((void __user *)arg, &info, minsz) ?
1374     + -EFAULT : 0;
1375    
1376     } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
1377     struct vfio_irq_info info;
1378     @@ -259,7 +261,8 @@ static long vfio_platform_ioctl(void *device_data,
1379     info.flags = vdev->irqs[info.index].flags;
1380     info.count = vdev->irqs[info.index].count;
1381    
1382     - return copy_to_user((void __user *)arg, &info, minsz);
1383     + return copy_to_user((void __user *)arg, &info, minsz) ?
1384     + -EFAULT : 0;
1385    
1386     } else if (cmd == VFIO_DEVICE_SET_IRQS) {
1387     struct vfio_irq_set hdr;
1388     diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
1389     index 59d47cb638d5..ecb826eefe02 100644
1390     --- a/drivers/vfio/vfio_iommu_type1.c
1391     +++ b/drivers/vfio/vfio_iommu_type1.c
1392     @@ -999,7 +999,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
1393    
1394     info.iova_pgsizes = vfio_pgsize_bitmap(iommu);
1395    
1396     - return copy_to_user((void __user *)arg, &info, minsz);
1397     + return copy_to_user((void __user *)arg, &info, minsz) ?
1398     + -EFAULT : 0;
1399    
1400     } else if (cmd == VFIO_IOMMU_MAP_DMA) {
1401     struct vfio_iommu_type1_dma_map map;
1402     @@ -1032,7 +1033,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
1403     if (ret)
1404     return ret;
1405    
1406     - return copy_to_user((void __user *)arg, &unmap, minsz);
1407     + return copy_to_user((void __user *)arg, &unmap, minsz) ?
1408     + -EFAULT : 0;
1409     }
1410    
1411     return -ENOTTY;
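[Illustrative sketch, not part of the patch.] The KVM and VFIO hunks above all apply the same conversion: copy_to_user() returns the number of bytes it could not copy, not an errno, so returning its result straight from an ioctl handler leaks a positive byte count to userspace instead of an error. A minimal sketch of the corrected convention (the handler name is hypothetical):

#include <linux/uaccess.h>

/* copy_to_user() returns the number of bytes NOT copied.  Returning that
 * value directly hands a positive count back to userspace; the fixed
 * pattern converts any shortfall to -EFAULT. */
static long example_reply_to_user(void __user *arg, const void *info,
				  unsigned long minsz)
{
	return copy_to_user(arg, info, minsz) ? -EFAULT : 0;
}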
1412     diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
1413     index 92f394927f24..6e92917ba77a 100644
1414     --- a/drivers/video/console/fbcon.c
1415     +++ b/drivers/video/console/fbcon.c
1416     @@ -709,6 +709,7 @@ static int con2fb_acquire_newinfo(struct vc_data *vc, struct fb_info *info,
1417     }
1418    
1419     if (!err) {
1420     + ops->cur_blink_jiffies = HZ / 5;
1421     info->fbcon_par = ops;
1422    
1423     if (vc)
1424     @@ -956,6 +957,7 @@ static const char *fbcon_startup(void)
1425     ops->currcon = -1;
1426     ops->graphics = 1;
1427     ops->cur_rotate = -1;
1428     + ops->cur_blink_jiffies = HZ / 5;
1429     info->fbcon_par = ops;
1430     p->con_rotate = initial_rotation;
1431     set_blitting_type(vc, info);
1432     diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
1433     index 3e36e4adc4a3..9aba42b78253 100644
1434     --- a/fs/btrfs/async-thread.c
1435     +++ b/fs/btrfs/async-thread.c
1436     @@ -328,8 +328,8 @@ static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
1437     list_add_tail(&work->ordered_list, &wq->ordered_list);
1438     spin_unlock_irqrestore(&wq->list_lock, flags);
1439     }
1440     - queue_work(wq->normal_wq, &work->normal_work);
1441     trace_btrfs_work_queued(work);
1442     + queue_work(wq->normal_wq, &work->normal_work);
1443     }
1444    
1445     void btrfs_queue_work(struct btrfs_workqueue *wq,
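[Illustrative sketch, not part of the patch.] The two-line swap in __btrfs_queue_work() matters because a queued work item may run, and free itself, as soon as queue_work() returns; touching it afterwards (even just for tracing) is a use-after-free race. A minimal sketch of that ownership rule, with hypothetical names:

#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_work {
	struct work_struct normal_work;
};

static void my_work_fn(struct work_struct *w)
{
	struct my_work *work = container_of(w, struct my_work, normal_work);

	/* ... do the work ... */
	kfree(work);			/* the item owns and frees itself */
}

static void my_queue(struct workqueue_struct *wq, struct my_work *work)
{
	INIT_WORK(&work->normal_work, my_work_fn);
	pr_debug("queueing work %p\n", work);	/* touch 'work' first ...     */
	queue_work(wq, &work->normal_work);	/* ... then hand it off; it may
						 * already be freed after this */
}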
1446     diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
1447     index 35489e7129a7..385b449fd7ed 100644
1448     --- a/fs/btrfs/ctree.h
1449     +++ b/fs/btrfs/ctree.h
1450     @@ -1572,7 +1572,7 @@ struct btrfs_fs_info {
1451    
1452     spinlock_t delayed_iput_lock;
1453     struct list_head delayed_iputs;
1454     - struct rw_semaphore delayed_iput_sem;
1455     + struct mutex cleaner_delayed_iput_mutex;
1456    
1457     /* this protects tree_mod_seq_list */
1458     spinlock_t tree_mod_seq_lock;
1459     diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
1460     index 4958360a44f7..41fb43183406 100644
1461     --- a/fs/btrfs/disk-io.c
1462     +++ b/fs/btrfs/disk-io.c
1463     @@ -1796,7 +1796,10 @@ static int cleaner_kthread(void *arg)
1464     goto sleep;
1465     }
1466    
1467     + mutex_lock(&root->fs_info->cleaner_delayed_iput_mutex);
1468     btrfs_run_delayed_iputs(root);
1469     + mutex_unlock(&root->fs_info->cleaner_delayed_iput_mutex);
1470     +
1471     again = btrfs_clean_one_deleted_snapshot(root);
1472     mutex_unlock(&root->fs_info->cleaner_mutex);
1473    
1474     @@ -2556,8 +2559,8 @@ int open_ctree(struct super_block *sb,
1475     mutex_init(&fs_info->delete_unused_bgs_mutex);
1476     mutex_init(&fs_info->reloc_mutex);
1477     mutex_init(&fs_info->delalloc_root_mutex);
1478     + mutex_init(&fs_info->cleaner_delayed_iput_mutex);
1479     seqlock_init(&fs_info->profiles_lock);
1480     - init_rwsem(&fs_info->delayed_iput_sem);
1481    
1482     INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
1483     INIT_LIST_HEAD(&fs_info->space_info);
1484     diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
1485     index c4661db2b72a..2368cac1115a 100644
1486     --- a/fs/btrfs/extent-tree.c
1487     +++ b/fs/btrfs/extent-tree.c
1488     @@ -4086,8 +4086,10 @@ commit_trans:
1489     !atomic_read(&root->fs_info->open_ioctl_trans)) {
1490     need_commit--;
1491    
1492     - if (need_commit > 0)
1493     + if (need_commit > 0) {
1494     + btrfs_start_delalloc_roots(fs_info, 0, -1);
1495     btrfs_wait_ordered_roots(fs_info, -1);
1496     + }
1497    
1498     trans = btrfs_join_transaction(root);
1499     if (IS_ERR(trans))
1500     @@ -4100,11 +4102,12 @@ commit_trans:
1501     if (ret)
1502     return ret;
1503     /*
1504     - * make sure that all running delayed iput are
1505     - * done
1506     + * The cleaner kthread might still be doing iput
1507     + * operations. Wait for it to finish so that
1508     + * more space is released.
1509     */
1510     - down_write(&root->fs_info->delayed_iput_sem);
1511     - up_write(&root->fs_info->delayed_iput_sem);
1512     + mutex_lock(&root->fs_info->cleaner_delayed_iput_mutex);
1513     + mutex_unlock(&root->fs_info->cleaner_delayed_iput_mutex);
1514     goto again;
1515     } else {
1516     btrfs_end_transaction(trans, root);
1517     diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
1518     index 52fc1b5e9f03..4bc9dbf29a73 100644
1519     --- a/fs/btrfs/inode.c
1520     +++ b/fs/btrfs/inode.c
1521     @@ -3142,8 +3142,6 @@ void btrfs_run_delayed_iputs(struct btrfs_root *root)
1522     if (empty)
1523     return;
1524    
1525     - down_read(&fs_info->delayed_iput_sem);
1526     -
1527     spin_lock(&fs_info->delayed_iput_lock);
1528     list_splice_init(&fs_info->delayed_iputs, &list);
1529     spin_unlock(&fs_info->delayed_iput_lock);
1530     @@ -3154,8 +3152,6 @@ void btrfs_run_delayed_iputs(struct btrfs_root *root)
1531     iput(delayed->inode);
1532     kfree(delayed);
1533     }
1534     -
1535     - up_read(&root->fs_info->delayed_iput_sem);
1536     }
1537    
1538     /*
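[Illustrative sketch, not part of the patch.] The btrfs hunks above replace delayed_iput_sem with a mutex whose only job is to let the space-flushing path wait for the cleaner's in-flight delayed iputs. Taking and immediately releasing a mutex that the worker holds across its whole pass acts as a "wait until that pass finishes" barrier; a minimal sketch with hypothetical names:

#include <linux/mutex.h>

static DEFINE_MUTEX(cleanup_mutex);

/* Worker side: holds the mutex for the whole cleanup pass. */
static void cleanup_pass(void)
{
	mutex_lock(&cleanup_mutex);
	/* ... run delayed iputs, release space ... */
	mutex_unlock(&cleanup_mutex);
}

/* Waiter side: lock/unlock blocks until any in-progress pass completes. */
static void wait_for_cleanup_pass(void)
{
	mutex_lock(&cleanup_mutex);
	mutex_unlock(&cleanup_mutex);
}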
1539     diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
1540     index 7cf8509deda7..2c849b08a91b 100644
1541     --- a/fs/btrfs/root-tree.c
1542     +++ b/fs/btrfs/root-tree.c
1543     @@ -310,8 +310,16 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
1544     set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);
1545    
1546     err = btrfs_insert_fs_root(root->fs_info, root);
1547     + /*
1548     + * The root might have been inserted already, as before we look
1549     + * for orphan roots, log replay might have happened, which
1550     + * triggers a transaction commit and qgroup accounting, which
1551     + * in turn reads and inserts fs roots while doing backref
1552     + * walking.
1553     + */
1554     + if (err == -EEXIST)
1555     + err = 0;
1556     if (err) {
1557     - BUG_ON(err == -EEXIST);
1558     btrfs_free_fs_root(root);
1559     break;
1560     }
1561     diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
1562     index c3cc1609025f..44b3d4280abb 100644
1563     --- a/fs/cifs/cifsfs.h
1564     +++ b/fs/cifs/cifsfs.h
1565     @@ -31,19 +31,15 @@
1566     * so that it will fit. We use hash_64 to convert the value to 31 bits, and
1567     * then add 1, to ensure that we don't end up with a 0 as the value.
1568     */
1569     -#if BITS_PER_LONG == 64
1570     static inline ino_t
1571     cifs_uniqueid_to_ino_t(u64 fileid)
1572     {
1573     + if ((sizeof(ino_t)) < (sizeof(u64)))
1574     + return (ino_t)hash_64(fileid, (sizeof(ino_t) * 8) - 1) + 1;
1575     +
1576     return (ino_t)fileid;
1577     +
1578     }
1579     -#else
1580     -static inline ino_t
1581     -cifs_uniqueid_to_ino_t(u64 fileid)
1582     -{
1583     - return (ino_t)hash_64(fileid, (sizeof(ino_t) * 8) - 1) + 1;
1584     -}
1585     -#endif
1586    
1587     extern struct file_system_type cifs_fs_type;
1588     extern const struct address_space_operations cifs_addr_ops;
1589     diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
1590     index 90b4f9f7de66..76fcb50295a3 100644
1591     --- a/fs/cifs/cifssmb.c
1592     +++ b/fs/cifs/cifssmb.c
1593     @@ -1396,11 +1396,10 @@ openRetry:
1594     * current bigbuf.
1595     */
1596     static int
1597     -cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1598     +discard_remaining_data(struct TCP_Server_Info *server)
1599     {
1600     unsigned int rfclen = get_rfc1002_length(server->smallbuf);
1601     int remaining = rfclen + 4 - server->total_read;
1602     - struct cifs_readdata *rdata = mid->callback_data;
1603    
1604     while (remaining > 0) {
1605     int length;
1606     @@ -1414,10 +1413,20 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1607     remaining -= length;
1608     }
1609    
1610     - dequeue_mid(mid, rdata->result);
1611     return 0;
1612     }
1613    
1614     +static int
1615     +cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1616     +{
1617     + int length;
1618     + struct cifs_readdata *rdata = mid->callback_data;
1619     +
1620     + length = discard_remaining_data(server);
1621     + dequeue_mid(mid, rdata->result);
1622     + return length;
1623     +}
1624     +
1625     int
1626     cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1627     {
1628     @@ -1446,6 +1455,12 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1629     return length;
1630     server->total_read += length;
1631    
1632     + if (server->ops->is_status_pending &&
1633     + server->ops->is_status_pending(buf, server, 0)) {
1634     + discard_remaining_data(server);
1635     + return -1;
1636     + }
1637     +
1638     /* Was the SMB read successful? */
1639     rdata->result = server->ops->map_error(buf, false);
1640     if (rdata->result != 0) {
1641     diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
1642     index 767555518d40..373b5cd1c913 100644
1643     --- a/fs/cifs/smb2pdu.c
1644     +++ b/fs/cifs/smb2pdu.c
1645     @@ -1109,21 +1109,25 @@ parse_lease_state(struct TCP_Server_Info *server, struct smb2_create_rsp *rsp,
1646     {
1647     char *data_offset;
1648     struct create_context *cc;
1649     - unsigned int next = 0;
1650     + unsigned int next;
1651     + unsigned int remaining;
1652     char *name;
1653    
1654     data_offset = (char *)rsp + 4 + le32_to_cpu(rsp->CreateContextsOffset);
1655     + remaining = le32_to_cpu(rsp->CreateContextsLength);
1656     cc = (struct create_context *)data_offset;
1657     - do {
1658     - cc = (struct create_context *)((char *)cc + next);
1659     + while (remaining >= sizeof(struct create_context)) {
1660     name = le16_to_cpu(cc->NameOffset) + (char *)cc;
1661     - if (le16_to_cpu(cc->NameLength) != 4 ||
1662     - strncmp(name, "RqLs", 4)) {
1663     - next = le32_to_cpu(cc->Next);
1664     - continue;
1665     - }
1666     - return server->ops->parse_lease_buf(cc, epoch);
1667     - } while (next != 0);
1668     + if (le16_to_cpu(cc->NameLength) == 4 &&
1669     + strncmp(name, "RqLs", 4) == 0)
1670     + return server->ops->parse_lease_buf(cc, epoch);
1671     +
1672     + next = le32_to_cpu(cc->Next);
1673     + if (!next)
1674     + break;
1675     + remaining -= next;
1676     + cc = (struct create_context *)((char *)cc + next);
1677     + }
1678    
1679     return 0;
1680     }
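[Illustrative sketch, not part of the patch.] The rewritten parse_lease_state() loop above bounds the walk with the CreateContextsLength byte budget and stops on a zero Next offset, instead of trusting the records alone. The same bounded walk over a hypothetical variable-length record layout:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h>

struct record {			/* hypothetical on-the-wire record */
	__le32 next;		/* offset of next record, 0 == last */
	__le16 name_off;	/* offset of name from record start */
	__le16 name_len;
};

static const struct record *find_record(const void *buf, u32 remaining,
					const char *name, u16 name_len)
{
	const struct record *rec = buf;

	while (remaining >= sizeof(*rec)) {
		const char *rname = (const char *)rec +
				    le16_to_cpu(rec->name_off);
		u32 next = le32_to_cpu(rec->next);

		if (le16_to_cpu(rec->name_len) == name_len &&
		    memcmp(rname, name, name_len) == 0)
			return rec;

		if (!next)		/* last record in the chain */
			break;
		remaining -= next;	/* shrink the byte budget   */
		rec = (const struct record *)((const char *)rec + next);
	}
	return NULL;
}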
1681     diff --git a/fs/dcache.c b/fs/dcache.c
1682     index 5c33aeb0f68f..877bcbbd03ff 100644
1683     --- a/fs/dcache.c
1684     +++ b/fs/dcache.c
1685     @@ -269,9 +269,6 @@ static inline int dname_external(const struct dentry *dentry)
1686     return dentry->d_name.name != dentry->d_iname;
1687     }
1688    
1689     -/*
1690     - * Make sure other CPUs see the inode attached before the type is set.
1691     - */
1692     static inline void __d_set_inode_and_type(struct dentry *dentry,
1693     struct inode *inode,
1694     unsigned type_flags)
1695     @@ -279,28 +276,18 @@ static inline void __d_set_inode_and_type(struct dentry *dentry,
1696     unsigned flags;
1697    
1698     dentry->d_inode = inode;
1699     - smp_wmb();
1700     flags = READ_ONCE(dentry->d_flags);
1701     flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
1702     flags |= type_flags;
1703     WRITE_ONCE(dentry->d_flags, flags);
1704     }
1705    
1706     -/*
1707     - * Ideally, we want to make sure that other CPUs see the flags cleared before
1708     - * the inode is detached, but this is really a violation of RCU principles
1709     - * since the ordering suggests we should always set inode before flags.
1710     - *
1711     - * We should instead replace or discard the entire dentry - but that sucks
1712     - * performancewise on mass deletion/rename.
1713     - */
1714     static inline void __d_clear_type_and_inode(struct dentry *dentry)
1715     {
1716     unsigned flags = READ_ONCE(dentry->d_flags);
1717    
1718     flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
1719     WRITE_ONCE(dentry->d_flags, flags);
1720     - smp_wmb();
1721     dentry->d_inode = NULL;
1722     }
1723    
1724     @@ -370,9 +357,11 @@ static void dentry_unlink_inode(struct dentry * dentry)
1725     __releases(dentry->d_inode->i_lock)
1726     {
1727     struct inode *inode = dentry->d_inode;
1728     +
1729     + raw_write_seqcount_begin(&dentry->d_seq);
1730     __d_clear_type_and_inode(dentry);
1731     hlist_del_init(&dentry->d_u.d_alias);
1732     - dentry_rcuwalk_invalidate(dentry);
1733     + raw_write_seqcount_end(&dentry->d_seq);
1734     spin_unlock(&dentry->d_lock);
1735     spin_unlock(&inode->i_lock);
1736     if (!inode->i_nlink)
1737     @@ -1757,8 +1746,9 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1738     spin_lock(&dentry->d_lock);
1739     if (inode)
1740     hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
1741     + raw_write_seqcount_begin(&dentry->d_seq);
1742     __d_set_inode_and_type(dentry, inode, add_flags);
1743     - dentry_rcuwalk_invalidate(dentry);
1744     + raw_write_seqcount_end(&dentry->d_seq);
1745     spin_unlock(&dentry->d_lock);
1746     fsnotify_d_instantiate(dentry, inode);
1747     }
1748     diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
1749     index e5232bbcbe3d..7a8ea1351584 100644
1750     --- a/fs/fs-writeback.c
1751     +++ b/fs/fs-writeback.c
1752     @@ -223,6 +223,9 @@ static void wb_wait_for_completion(struct backing_dev_info *bdi,
1753     #define WB_FRN_HIST_MAX_SLOTS (WB_FRN_HIST_THR_SLOTS / 2 + 1)
1754     /* one round can affect upto 5 slots */
1755    
1756     +static atomic_t isw_nr_in_flight = ATOMIC_INIT(0);
1757     +static struct workqueue_struct *isw_wq;
1758     +
1759     void __inode_attach_wb(struct inode *inode, struct page *page)
1760     {
1761     struct backing_dev_info *bdi = inode_to_bdi(inode);
1762     @@ -317,7 +320,6 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
1763     struct inode_switch_wbs_context *isw =
1764     container_of(work, struct inode_switch_wbs_context, work);
1765     struct inode *inode = isw->inode;
1766     - struct super_block *sb = inode->i_sb;
1767     struct address_space *mapping = inode->i_mapping;
1768     struct bdi_writeback *old_wb = inode->i_wb;
1769     struct bdi_writeback *new_wb = isw->new_wb;
1770     @@ -424,8 +426,9 @@ skip_switch:
1771     wb_put(new_wb);
1772    
1773     iput(inode);
1774     - deactivate_super(sb);
1775     kfree(isw);
1776     +
1777     + atomic_dec(&isw_nr_in_flight);
1778     }
1779    
1780     static void inode_switch_wbs_rcu_fn(struct rcu_head *rcu_head)
1781     @@ -435,7 +438,7 @@ static void inode_switch_wbs_rcu_fn(struct rcu_head *rcu_head)
1782    
1783     /* needs to grab bh-unsafe locks, bounce to work item */
1784     INIT_WORK(&isw->work, inode_switch_wbs_work_fn);
1785     - schedule_work(&isw->work);
1786     + queue_work(isw_wq, &isw->work);
1787     }
1788    
1789     /**
1790     @@ -471,20 +474,20 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
1791    
1792     /* while holding I_WB_SWITCH, no one else can update the association */
1793     spin_lock(&inode->i_lock);
1794     -
1795     - if (inode->i_state & (I_WB_SWITCH | I_FREEING) ||
1796     - inode_to_wb(inode) == isw->new_wb)
1797     - goto out_unlock;
1798     -
1799     - if (!atomic_inc_not_zero(&inode->i_sb->s_active))
1800     - goto out_unlock;
1801     -
1802     + if (!(inode->i_sb->s_flags & MS_ACTIVE) ||
1803     + inode->i_state & (I_WB_SWITCH | I_FREEING) ||
1804     + inode_to_wb(inode) == isw->new_wb) {
1805     + spin_unlock(&inode->i_lock);
1806     + goto out_free;
1807     + }
1808     inode->i_state |= I_WB_SWITCH;
1809     spin_unlock(&inode->i_lock);
1810    
1811     ihold(inode);
1812     isw->inode = inode;
1813    
1814     + atomic_inc(&isw_nr_in_flight);
1815     +
1816     /*
1817     * In addition to synchronizing among switchers, I_WB_SWITCH tells
1818     * the RCU protected stat update paths to grab the mapping's
1819     @@ -494,8 +497,6 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
1820     call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
1821     return;
1822    
1823     -out_unlock:
1824     - spin_unlock(&inode->i_lock);
1825     out_free:
1826     if (isw->new_wb)
1827     wb_put(isw->new_wb);
1828     @@ -849,6 +850,33 @@ restart:
1829     wb_put(last_wb);
1830     }
1831    
1832     +/**
1833     + * cgroup_writeback_umount - flush inode wb switches for umount
1834     + *
1835     + * This function is called when a super_block is about to be destroyed and
1836     + * flushes in-flight inode wb switches. An inode wb switch goes through
1837     + * RCU and then workqueue, so the two need to be flushed in order to ensure
1838     + * that all previously scheduled switches are finished. As wb switches are
1839     + * rare occurrences and synchronize_rcu() can take a while, perform
1840     + * flushing iff wb switches are in flight.
1841     + */
1842     +void cgroup_writeback_umount(void)
1843     +{
1844     + if (atomic_read(&isw_nr_in_flight)) {
1845     + synchronize_rcu();
1846     + flush_workqueue(isw_wq);
1847     + }
1848     +}
1849     +
1850     +static int __init cgroup_writeback_init(void)
1851     +{
1852     + isw_wq = alloc_workqueue("inode_switch_wbs", 0, 0);
1853     + if (!isw_wq)
1854     + return -ENOMEM;
1855     + return 0;
1856     +}
1857     +fs_initcall(cgroup_writeback_init);
1858     +
1859     #else /* CONFIG_CGROUP_WRITEBACK */
1860    
1861     static struct bdi_writeback *
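[Illustrative sketch, not part of the patch.] The writeback hunks above stop pinning the super_block per wb switch and instead count in-flight switches, letting umount drain them: producers atomic_inc() before call_rcu(), the work handler atomic_dec()s when done, and the umount path waits for both stages. The same drain pattern in miniature, with hypothetical names:

#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

static atomic_t nr_in_flight = ATOMIC_INIT(0);
static struct workqueue_struct *switch_wq;

/* Called before tearing down the objects the work items operate on. */
static void drain_in_flight_switches(void)
{
	if (atomic_read(&nr_in_flight)) {
		synchronize_rcu();		/* wait for call_rcu() callbacks
						 * that queue the work items   */
		flush_workqueue(switch_wq);	/* then wait for queued work    */
	}
}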
1862     diff --git a/fs/jffs2/README.Locking b/fs/jffs2/README.Locking
1863     index 3ea36554107f..8918ac905a3b 100644
1864     --- a/fs/jffs2/README.Locking
1865     +++ b/fs/jffs2/README.Locking
1866     @@ -2,10 +2,6 @@
1867     JFFS2 LOCKING DOCUMENTATION
1868     ---------------------------
1869    
1870     -At least theoretically, JFFS2 does not require the Big Kernel Lock
1871     -(BKL), which was always helpfully obtained for it by Linux 2.4 VFS
1872     -code. It has its own locking, as described below.
1873     -
1874     This document attempts to describe the existing locking rules for
1875     JFFS2. It is not expected to remain perfectly up to date, but ought to
1876     be fairly close.
1877     @@ -69,6 +65,7 @@ Ordering constraints:
1878     any f->sem held.
1879     2. Never attempt to lock two file mutexes in one thread.
1880     No ordering rules have been made for doing so.
1881     + 3. Never lock a page cache page with f->sem held.
1882    
1883    
1884     erase_completion_lock spinlock
1885     diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c
1886     index a3750f902adc..c1f04947d7dc 100644
1887     --- a/fs/jffs2/build.c
1888     +++ b/fs/jffs2/build.c
1889     @@ -49,7 +49,8 @@ next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c)
1890    
1891    
1892     static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
1893     - struct jffs2_inode_cache *ic)
1894     + struct jffs2_inode_cache *ic,
1895     + int *dir_hardlinks)
1896     {
1897     struct jffs2_full_dirent *fd;
1898    
1899     @@ -68,19 +69,21 @@ static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
1900     dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n",
1901     fd->name, fd->ino, ic->ino);
1902     jffs2_mark_node_obsolete(c, fd->raw);
1903     + /* Clear the ic/raw union so it doesn't cause problems later. */
1904     + fd->ic = NULL;
1905     continue;
1906     }
1907    
1908     + /* From this point, fd->raw is no longer used so we can set fd->ic */
1909     + fd->ic = child_ic;
1910     + child_ic->pino_nlink++;
1911     + /* If we appear (at this stage) to have hard-linked directories,
1912     + * set a flag to trigger a scan later */
1913     if (fd->type == DT_DIR) {
1914     - if (child_ic->pino_nlink) {
1915     - JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n",
1916     - fd->name, fd->ino, ic->ino);
1917     - /* TODO: What do we do about it? */
1918     - } else {
1919     - child_ic->pino_nlink = ic->ino;
1920     - }
1921     - } else
1922     - child_ic->pino_nlink++;
1923     + child_ic->flags |= INO_FLAGS_IS_DIR;
1924     + if (child_ic->pino_nlink > 1)
1925     + *dir_hardlinks = 1;
1926     + }
1927    
1928     dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino);
1929     /* Can't free scan_dents so far. We might need them in pass 2 */
1930     @@ -94,8 +97,7 @@ static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
1931     */
1932     static int jffs2_build_filesystem(struct jffs2_sb_info *c)
1933     {
1934     - int ret;
1935     - int i;
1936     + int ret, i, dir_hardlinks = 0;
1937     struct jffs2_inode_cache *ic;
1938     struct jffs2_full_dirent *fd;
1939     struct jffs2_full_dirent *dead_fds = NULL;
1940     @@ -119,7 +121,7 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
1941     /* Now scan the directory tree, increasing nlink according to every dirent found. */
1942     for_each_inode(i, c, ic) {
1943     if (ic->scan_dents) {
1944     - jffs2_build_inode_pass1(c, ic);
1945     + jffs2_build_inode_pass1(c, ic, &dir_hardlinks);
1946     cond_resched();
1947     }
1948     }
1949     @@ -155,6 +157,20 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
1950     }
1951    
1952     dbg_fsbuild("pass 2a complete\n");
1953     +
1954     + if (dir_hardlinks) {
1955     + /* If we detected directory hardlinks earlier, *hopefully*
1956     + * they are gone now because some of the links were from
1957     + * dead directories which still had some old dirents lying
1958     + * around and not yet garbage-collected, but which have
1959     + * been discarded above. So clear the pino_nlink field
1960     + * in each directory, so that the final scan below can
1961     + * print appropriate warnings. */
1962     + for_each_inode(i, c, ic) {
1963     + if (ic->flags & INO_FLAGS_IS_DIR)
1964     + ic->pino_nlink = 0;
1965     + }
1966     + }
1967     dbg_fsbuild("freeing temporary data structures\n");
1968    
1969     /* Finally, we can scan again and free the dirent structs */
1970     @@ -162,6 +178,33 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
1971     while(ic->scan_dents) {
1972     fd = ic->scan_dents;
1973     ic->scan_dents = fd->next;
1974     + /* We do use the pino_nlink field to count nlink of
1975     + * directories during fs build, so set it to the
1976     + * parent ino# now. Now that there's hopefully only
1977     + * one. */
1978     + if (fd->type == DT_DIR) {
1979     + if (!fd->ic) {
1980     + /* We'll have complained about it and marked the coresponding
1981     + raw node obsolete already. Just skip it. */
1982     + continue;
1983     + }
1984     +
1985     + /* We *have* to have set this in jffs2_build_inode_pass1() */
1986     + BUG_ON(!(fd->ic->flags & INO_FLAGS_IS_DIR));
1987     +
1988     + /* We clear ic->pino_nlink ∀ directories' ic *only* if dir_hardlinks
1989     + * is set. Otherwise, we know this should never trigger anyway, so
1990     + * we don't do the check. And ic->pino_nlink still contains the nlink
1991     + * value (which is 1). */
1992     + if (dir_hardlinks && fd->ic->pino_nlink) {
1993     + JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u is also hard linked from dir ino #%u\n",
1994     + fd->name, fd->ino, ic->ino, fd->ic->pino_nlink);
1995     + /* Should we unlink it from its previous parent? */
1996     + }
1997     +
1998     + /* For directories, ic->pino_nlink holds that parent inode # */
1999     + fd->ic->pino_nlink = ic->ino;
2000     + }
2001     jffs2_free_full_dirent(fd);
2002     }
2003     ic->scan_dents = NULL;
2004     @@ -240,11 +283,7 @@ static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c,
2005    
2006     /* Reduce nlink of the child. If it's now zero, stick it on the
2007     dead_fds list to be cleaned up later. Else just free the fd */
2008     -
2009     - if (fd->type == DT_DIR)
2010     - child_ic->pino_nlink = 0;
2011     - else
2012     - child_ic->pino_nlink--;
2013     + child_ic->pino_nlink--;
2014    
2015     if (!child_ic->pino_nlink) {
2016     dbg_fsbuild("inode #%u (\"%s\") now has no links; adding to dead_fds list.\n",
2017     diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
2018     index f509f62e12f6..3361979d728c 100644
2019     --- a/fs/jffs2/file.c
2020     +++ b/fs/jffs2/file.c
2021     @@ -137,39 +137,33 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
2022     struct page *pg;
2023     struct inode *inode = mapping->host;
2024     struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
2025     - struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
2026     - struct jffs2_raw_inode ri;
2027     - uint32_t alloc_len = 0;
2028     pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2029     uint32_t pageofs = index << PAGE_CACHE_SHIFT;
2030     int ret = 0;
2031    
2032     - jffs2_dbg(1, "%s()\n", __func__);
2033     -
2034     - if (pageofs > inode->i_size) {
2035     - ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
2036     - ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
2037     - if (ret)
2038     - return ret;
2039     - }
2040     -
2041     - mutex_lock(&f->sem);
2042     pg = grab_cache_page_write_begin(mapping, index, flags);
2043     - if (!pg) {
2044     - if (alloc_len)
2045     - jffs2_complete_reservation(c);
2046     - mutex_unlock(&f->sem);
2047     + if (!pg)
2048     return -ENOMEM;
2049     - }
2050     *pagep = pg;
2051    
2052     - if (alloc_len) {
2053     + jffs2_dbg(1, "%s()\n", __func__);
2054     +
2055     + if (pageofs > inode->i_size) {
2056     /* Make new hole frag from old EOF to new page */
2057     + struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
2058     + struct jffs2_raw_inode ri;
2059     struct jffs2_full_dnode *fn;
2060     + uint32_t alloc_len;
2061    
2062     jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
2063     (unsigned int)inode->i_size, pageofs);
2064    
2065     + ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
2066     + ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
2067     + if (ret)
2068     + goto out_page;
2069     +
2070     + mutex_lock(&f->sem);
2071     memset(&ri, 0, sizeof(ri));
2072    
2073     ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
2074     @@ -196,6 +190,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
2075     if (IS_ERR(fn)) {
2076     ret = PTR_ERR(fn);
2077     jffs2_complete_reservation(c);
2078     + mutex_unlock(&f->sem);
2079     goto out_page;
2080     }
2081     ret = jffs2_add_full_dnode_to_inode(c, f, fn);
2082     @@ -210,10 +205,12 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
2083     jffs2_mark_node_obsolete(c, fn->raw);
2084     jffs2_free_full_dnode(fn);
2085     jffs2_complete_reservation(c);
2086     + mutex_unlock(&f->sem);
2087     goto out_page;
2088     }
2089     jffs2_complete_reservation(c);
2090     inode->i_size = pageofs;
2091     + mutex_unlock(&f->sem);
2092     }
2093    
2094     /*
2095     @@ -222,18 +219,18 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
2096     * case of a short-copy.
2097     */
2098     if (!PageUptodate(pg)) {
2099     + mutex_lock(&f->sem);
2100     ret = jffs2_do_readpage_nolock(inode, pg);
2101     + mutex_unlock(&f->sem);
2102     if (ret)
2103     goto out_page;
2104     }
2105     - mutex_unlock(&f->sem);
2106     jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags);
2107     return ret;
2108    
2109     out_page:
2110     unlock_page(pg);
2111     page_cache_release(pg);
2112     - mutex_unlock(&f->sem);
2113     return ret;
2114     }
2115    
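[Illustrative sketch, not part of the patch.] The jffs2_write_begin() rework above exists to satisfy the new rule added to README.Locking: a page cache page must be locked before f->sem, so space reservation and the hole write now happen after grab_cache_page_write_begin() and take f->sem only around the steps that need it. Stated as a generic ordering sketch (hypothetical helper):

#include <linux/mutex.h>
#include <linux/pagemap.h>

/* Rule after this series: page lock first, then the per-inode mutex.
 * A thread that takes them in the opposite order can deadlock against
 * this one (classic ABBA). */
static void touch_page_then_inode(struct page *pg, struct mutex *f_sem)
{
	lock_page(pg);		/* 1: page cache page  */
	mutex_lock(f_sem);	/* 2: per-inode f->sem */
	/* ... modify page / inode state ... */
	mutex_unlock(f_sem);
	unlock_page(pg);
}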
2116     diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
2117     index 5a2dec2b064c..95d5880a63ee 100644
2118     --- a/fs/jffs2/gc.c
2119     +++ b/fs/jffs2/gc.c
2120     @@ -1296,14 +1296,17 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
2121     BUG_ON(start > orig_start);
2122     }
2123    
2124     - /* First, use readpage() to read the appropriate page into the page cache */
2125     - /* Q: What happens if we actually try to GC the _same_ page for which commit_write()
2126     - * triggered garbage collection in the first place?
2127     - * A: I _think_ it's OK. read_cache_page shouldn't deadlock, we'll write out the
2128     - * page OK. We'll actually write it out again in commit_write, which is a little
2129     - * suboptimal, but at least we're correct.
2130     - */
2131     + /* The rules state that we must obtain the page lock *before* f->sem, so
2132     + * drop f->sem temporarily. Since we also hold c->alloc_sem, nothing's
2133     + * actually going to *change* so we're safe; we only allow reading.
2134     + *
2135     + * It is important to note that jffs2_write_begin() will ensure that its
2136     + * page is marked Uptodate before allocating space. That means that if we
2137     + * end up here trying to GC the *same* page that jffs2_write_begin() is
2138     + * trying to write out, read_cache_page() will not deadlock. */
2139     + mutex_unlock(&f->sem);
2140     pg_ptr = jffs2_gc_fetch_page(c, f, start, &pg);
2141     + mutex_lock(&f->sem);
2142    
2143     if (IS_ERR(pg_ptr)) {
2144     pr_warn("read_cache_page() returned error: %ld\n",
2145     diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h
2146     index fa35ff79ab35..0637271f3770 100644
2147     --- a/fs/jffs2/nodelist.h
2148     +++ b/fs/jffs2/nodelist.h
2149     @@ -194,6 +194,7 @@ struct jffs2_inode_cache {
2150     #define INO_STATE_CLEARING 6 /* In clear_inode() */
2151    
2152     #define INO_FLAGS_XATTR_CHECKED 0x01 /* has no duplicate xattr_ref */
2153     +#define INO_FLAGS_IS_DIR 0x02 /* is a directory */
2154    
2155     #define RAWNODE_CLASS_INODE_CACHE 0
2156     #define RAWNODE_CLASS_XATTR_DATUM 1
2157     @@ -249,7 +250,10 @@ struct jffs2_readinode_info
2158    
2159     struct jffs2_full_dirent
2160     {
2161     - struct jffs2_raw_node_ref *raw;
2162     + union {
2163     + struct jffs2_raw_node_ref *raw;
2164     + struct jffs2_inode_cache *ic; /* Just during part of build */
2165     + };
2166     struct jffs2_full_dirent *next;
2167     uint32_t version;
2168     uint32_t ino; /* == zero for unlink */
2169     diff --git a/fs/super.c b/fs/super.c
2170     index 954aeb80e202..f5f4b328f860 100644
2171     --- a/fs/super.c
2172     +++ b/fs/super.c
2173     @@ -415,6 +415,7 @@ void generic_shutdown_super(struct super_block *sb)
2174     sb->s_flags &= ~MS_ACTIVE;
2175    
2176     fsnotify_unmount_inodes(sb);
2177     + cgroup_writeback_umount();
2178    
2179     evict_inodes(sb);
2180    
2181     diff --git a/include/linux/ata.h b/include/linux/ata.h
2182     index d2992bfa1706..c1a2f345cbe6 100644
2183     --- a/include/linux/ata.h
2184     +++ b/include/linux/ata.h
2185     @@ -487,8 +487,8 @@ enum ata_tf_protocols {
2186     };
2187    
2188     enum ata_ioctls {
2189     - ATA_IOC_GET_IO32 = 0x309,
2190     - ATA_IOC_SET_IO32 = 0x324,
2191     + ATA_IOC_GET_IO32 = 0x309, /* HDIO_GET_32BIT */
2192     + ATA_IOC_SET_IO32 = 0x324, /* HDIO_SET_32BIT */
2193     };
2194    
2195     /* core structures */
2196     diff --git a/include/linux/bio.h b/include/linux/bio.h
2197     index b9b6e046b52e..79cfaeef1b0d 100644
2198     --- a/include/linux/bio.h
2199     +++ b/include/linux/bio.h
2200     @@ -310,6 +310,43 @@ static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
2201     bio->bi_flags &= ~(1U << bit);
2202     }
2203    
2204     +static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
2205     +{
2206     + *bv = bio_iovec(bio);
2207     +}
2208     +
2209     +static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
2210     +{
2211     + struct bvec_iter iter = bio->bi_iter;
2212     + int idx;
2213     +
2214     + if (!bio_flagged(bio, BIO_CLONED)) {
2215     + *bv = bio->bi_io_vec[bio->bi_vcnt - 1];
2216     + return;
2217     + }
2218     +
2219     + if (unlikely(!bio_multiple_segments(bio))) {
2220     + *bv = bio_iovec(bio);
2221     + return;
2222     + }
2223     +
2224     + bio_advance_iter(bio, &iter, iter.bi_size);
2225     +
2226     + if (!iter.bi_bvec_done)
2227     + idx = iter.bi_idx - 1;
2228     + else /* in the middle of bvec */
2229     + idx = iter.bi_idx;
2230     +
2231     + *bv = bio->bi_io_vec[idx];
2232     +
2233     + /*
2234     + * iter.bi_bvec_done records actual length of the last bvec
2235     + * if this bio ends in the middle of one io vector
2236     + */
2237     + if (iter.bi_bvec_done)
2238     + bv->bv_len = iter.bi_bvec_done;
2239     +}
2240     +
2241     enum bip_flags {
2242     BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
2243     BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */
2244     diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
2245     index c70e3588a48c..168755791ec8 100644
2246     --- a/include/linux/blkdev.h
2247     +++ b/include/linux/blkdev.h
2248     @@ -1367,6 +1367,13 @@ static inline void put_dev_sector(Sector p)
2249     page_cache_release(p.v);
2250     }
2251    
2252     +static inline bool __bvec_gap_to_prev(struct request_queue *q,
2253     + struct bio_vec *bprv, unsigned int offset)
2254     +{
2255     + return offset ||
2256     + ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
2257     +}
2258     +
2259     /*
2260     * Check if adding a bio_vec after bprv with offset would create a gap in
2261     * the SG list. Most drivers don't care about this, but some do.
2262     @@ -1376,18 +1383,22 @@ static inline bool bvec_gap_to_prev(struct request_queue *q,
2263     {
2264     if (!queue_virt_boundary(q))
2265     return false;
2266     - return offset ||
2267     - ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
2268     + return __bvec_gap_to_prev(q, bprv, offset);
2269     }
2270    
2271     static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
2272     struct bio *next)
2273     {
2274     - if (!bio_has_data(prev))
2275     - return false;
2276     + if (bio_has_data(prev) && queue_virt_boundary(q)) {
2277     + struct bio_vec pb, nb;
2278     +
2279     + bio_get_last_bvec(prev, &pb);
2280     + bio_get_first_bvec(next, &nb);
2281    
2282     - return bvec_gap_to_prev(q, &prev->bi_io_vec[prev->bi_vcnt - 1],
2283     - next->bi_io_vec[0].bv_offset);
2284     + return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
2285     + }
2286     +
2287     + return false;
2288     }
2289    
2290     static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
2291     diff --git a/include/linux/dcache.h b/include/linux/dcache.h
2292     index d67ae119cf4e..8a2e009c8a5a 100644
2293     --- a/include/linux/dcache.h
2294     +++ b/include/linux/dcache.h
2295     @@ -409,9 +409,7 @@ static inline bool d_mountpoint(const struct dentry *dentry)
2296     */
2297     static inline unsigned __d_entry_type(const struct dentry *dentry)
2298     {
2299     - unsigned type = READ_ONCE(dentry->d_flags);
2300     - smp_rmb();
2301     - return type & DCACHE_ENTRY_TYPE;
2302     + return dentry->d_flags & DCACHE_ENTRY_TYPE;
2303     }
2304    
2305     static inline bool d_is_miss(const struct dentry *dentry)
2306     diff --git a/include/linux/libata.h b/include/linux/libata.h
2307     index 600c1e0626a5..b20a2752f934 100644
2308     --- a/include/linux/libata.h
2309     +++ b/include/linux/libata.h
2310     @@ -718,7 +718,7 @@ struct ata_device {
2311     union {
2312     u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
2313     u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */
2314     - };
2315     + } ____cacheline_aligned;
2316    
2317     /* DEVSLP Timing Variables from Identify Device Data Log */
2318     u8 devslp_timing[ATA_LOG_DEVSLP_SIZE];
2319     diff --git a/include/linux/module.h b/include/linux/module.h
2320     index 3a19c79918e0..b229a9961d02 100644
2321     --- a/include/linux/module.h
2322     +++ b/include/linux/module.h
2323     @@ -302,6 +302,12 @@ struct mod_tree_node {
2324     struct latch_tree_node node;
2325     };
2326    
2327     +struct mod_kallsyms {
2328     + Elf_Sym *symtab;
2329     + unsigned int num_symtab;
2330     + char *strtab;
2331     +};
2332     +
2333     struct module {
2334     enum module_state state;
2335    
2336     @@ -411,14 +417,9 @@ struct module {
2337     #endif
2338    
2339     #ifdef CONFIG_KALLSYMS
2340     - /*
2341     - * We keep the symbol and string tables for kallsyms.
2342     - * The core_* fields below are temporary, loader-only (they
2343     - * could really be discarded after module init).
2344     - */
2345     - Elf_Sym *symtab, *core_symtab;
2346     - unsigned int num_symtab, core_num_syms;
2347     - char *strtab, *core_strtab;
2348     + /* Protected by RCU and/or module_mutex: use rcu_dereference() */
2349     + struct mod_kallsyms *kallsyms;
2350     + struct mod_kallsyms core_kallsyms;
2351    
2352     /* Section attributes */
2353     struct module_sect_attrs *sect_attrs;
2354     diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
2355     index 429fdfc3baf5..925730bc9fc1 100644
2356     --- a/include/linux/trace_events.h
2357     +++ b/include/linux/trace_events.h
2358     @@ -568,6 +568,8 @@ enum {
2359     FILTER_DYN_STRING,
2360     FILTER_PTR_STRING,
2361     FILTER_TRACE_FN,
2362     + FILTER_COMM,
2363     + FILTER_CPU,
2364     };
2365    
2366     extern int trace_event_raw_init(struct trace_event_call *call);
2367     diff --git a/include/linux/writeback.h b/include/linux/writeback.h
2368     index b333c945e571..d0b5ca5d4e08 100644
2369     --- a/include/linux/writeback.h
2370     +++ b/include/linux/writeback.h
2371     @@ -198,6 +198,7 @@ void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
2372     void wbc_detach_inode(struct writeback_control *wbc);
2373     void wbc_account_io(struct writeback_control *wbc, struct page *page,
2374     size_t bytes);
2375     +void cgroup_writeback_umount(void);
2376    
2377     /**
2378     * inode_attach_wb - associate an inode with its wb
2379     @@ -301,6 +302,10 @@ static inline void wbc_account_io(struct writeback_control *wbc,
2380     {
2381     }
2382    
2383     +static inline void cgroup_writeback_umount(void)
2384     +{
2385     +}
2386     +
2387     #endif /* CONFIG_CGROUP_WRITEBACK */
2388    
2389     /*
2390     diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
2391     index 56cf8e485ef2..28ee5c2e6bcd 100644
2392     --- a/include/target/target_core_backend.h
2393     +++ b/include/target/target_core_backend.h
2394     @@ -94,5 +94,8 @@ sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
2395     sense_reason_t (*exec_cmd)(struct se_cmd *cmd));
2396    
2397     bool target_sense_desc_format(struct se_device *dev);
2398     +sector_t target_to_linux_sector(struct se_device *dev, sector_t lb);
2399     +bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
2400     + struct request_queue *q, int block_size);
2401    
2402     #endif /* TARGET_CORE_BACKEND_H */
2403     diff --git a/kernel/module.c b/kernel/module.c
2404     index 14833e6d5e37..0e5c71195f18 100644
2405     --- a/kernel/module.c
2406     +++ b/kernel/module.c
2407     @@ -327,6 +327,9 @@ struct load_info {
2408     struct _ddebug *debug;
2409     unsigned int num_debug;
2410     bool sig_ok;
2411     +#ifdef CONFIG_KALLSYMS
2412     + unsigned long mod_kallsyms_init_off;
2413     +#endif
2414     struct {
2415     unsigned int sym, str, mod, vers, info, pcpu;
2416     } index;
2417     @@ -2492,10 +2495,21 @@ static void layout_symtab(struct module *mod, struct load_info *info)
2418     strsect->sh_flags |= SHF_ALLOC;
2419     strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
2420     info->index.str) | INIT_OFFSET_MASK;
2421     - mod->init_size = debug_align(mod->init_size);
2422     pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
2423     +
2424     + /* We'll tack temporary mod_kallsyms on the end. */
2425     + mod->init_size = ALIGN(mod->init_size,
2426     + __alignof__(struct mod_kallsyms));
2427     + info->mod_kallsyms_init_off = mod->init_size;
2428     + mod->init_size += sizeof(struct mod_kallsyms);
2429     + mod->init_size = debug_align(mod->init_size);
2430     }
2431    
2432     +/*
2433     + * We use the full symtab and strtab which layout_symtab arranged to
2434     + * be appended to the init section. Later we switch to the cut-down
2435     + * core-only ones.
2436     + */
2437     static void add_kallsyms(struct module *mod, const struct load_info *info)
2438     {
2439     unsigned int i, ndst;
2440     @@ -2504,28 +2518,33 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
2441     char *s;
2442     Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
2443    
2444     - mod->symtab = (void *)symsec->sh_addr;
2445     - mod->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
2446     + /* Set up to point into init section. */
2447     + mod->kallsyms = mod->module_init + info->mod_kallsyms_init_off;
2448     +
2449     + mod->kallsyms->symtab = (void *)symsec->sh_addr;
2450     + mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
2451     /* Make sure we get permanent strtab: don't use info->strtab. */
2452     - mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
2453     + mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
2454    
2455     /* Set types up while we still have access to sections. */
2456     - for (i = 0; i < mod->num_symtab; i++)
2457     - mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
2458     -
2459     - mod->core_symtab = dst = mod->module_core + info->symoffs;
2460     - mod->core_strtab = s = mod->module_core + info->stroffs;
2461     - src = mod->symtab;
2462     - for (ndst = i = 0; i < mod->num_symtab; i++) {
2463     + for (i = 0; i < mod->kallsyms->num_symtab; i++)
2464     + mod->kallsyms->symtab[i].st_info
2465     + = elf_type(&mod->kallsyms->symtab[i], info);
2466     +
2467     + /* Now populate the cut down core kallsyms for after init. */
2468     + mod->core_kallsyms.symtab = dst = mod->module_core + info->symoffs;
2469     + mod->core_kallsyms.strtab = s = mod->module_core + info->stroffs;
2470     + src = mod->kallsyms->symtab;
2471     + for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) {
2472     if (i == 0 ||
2473     is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
2474     dst[ndst] = src[i];
2475     - dst[ndst++].st_name = s - mod->core_strtab;
2476     - s += strlcpy(s, &mod->strtab[src[i].st_name],
2477     + dst[ndst++].st_name = s - mod->core_kallsyms.strtab;
2478     + s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name],
2479     KSYM_NAME_LEN) + 1;
2480     }
2481     }
2482     - mod->core_num_syms = ndst;
2483     + mod->core_kallsyms.num_symtab = ndst;
2484     }
2485     #else
2486     static inline void layout_symtab(struct module *mod, struct load_info *info)
2487     @@ -3274,9 +3293,8 @@ static noinline int do_init_module(struct module *mod)
2488     module_put(mod);
2489     trim_init_extable(mod);
2490     #ifdef CONFIG_KALLSYMS
2491     - mod->num_symtab = mod->core_num_syms;
2492     - mod->symtab = mod->core_symtab;
2493     - mod->strtab = mod->core_strtab;
2494     + /* Switch to core kallsyms now init is done: kallsyms may be walking! */
2495     + rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms);
2496     #endif
2497     mod_tree_remove_init(mod);
2498     unset_module_init_ro_nx(mod);
2499     @@ -3646,9 +3664,9 @@ static inline int is_arm_mapping_symbol(const char *str)
2500     && (str[2] == '\0' || str[2] == '.');
2501     }
2502    
2503     -static const char *symname(struct module *mod, unsigned int symnum)
2504     +static const char *symname(struct mod_kallsyms *kallsyms, unsigned int symnum)
2505     {
2506     - return mod->strtab + mod->symtab[symnum].st_name;
2507     + return kallsyms->strtab + kallsyms->symtab[symnum].st_name;
2508     }
2509    
2510     static const char *get_ksymbol(struct module *mod,
2511     @@ -3658,6 +3676,7 @@ static const char *get_ksymbol(struct module *mod,
2512     {
2513     unsigned int i, best = 0;
2514     unsigned long nextval;
2515     + struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
2516    
2517     /* At worse, next value is at end of module */
2518     if (within_module_init(addr, mod))
2519     @@ -3667,32 +3686,32 @@ static const char *get_ksymbol(struct module *mod,
2520    
2521     /* Scan for closest preceding symbol, and next symbol. (ELF
2522     starts real symbols at 1). */
2523     - for (i = 1; i < mod->num_symtab; i++) {
2524     - if (mod->symtab[i].st_shndx == SHN_UNDEF)
2525     + for (i = 1; i < kallsyms->num_symtab; i++) {
2526     + if (kallsyms->symtab[i].st_shndx == SHN_UNDEF)
2527     continue;
2528    
2529     /* We ignore unnamed symbols: they're uninformative
2530     * and inserted at a whim. */
2531     - if (*symname(mod, i) == '\0'
2532     - || is_arm_mapping_symbol(symname(mod, i)))
2533     + if (*symname(kallsyms, i) == '\0'
2534     + || is_arm_mapping_symbol(symname(kallsyms, i)))
2535     continue;
2536    
2537     - if (mod->symtab[i].st_value <= addr
2538     - && mod->symtab[i].st_value > mod->symtab[best].st_value)
2539     + if (kallsyms->symtab[i].st_value <= addr
2540     + && kallsyms->symtab[i].st_value > kallsyms->symtab[best].st_value)
2541     best = i;
2542     - if (mod->symtab[i].st_value > addr
2543     - && mod->symtab[i].st_value < nextval)
2544     - nextval = mod->symtab[i].st_value;
2545     + if (kallsyms->symtab[i].st_value > addr
2546     + && kallsyms->symtab[i].st_value < nextval)
2547     + nextval = kallsyms->symtab[i].st_value;
2548     }
2549    
2550     if (!best)
2551     return NULL;
2552    
2553     if (size)
2554     - *size = nextval - mod->symtab[best].st_value;
2555     + *size = nextval - kallsyms->symtab[best].st_value;
2556     if (offset)
2557     - *offset = addr - mod->symtab[best].st_value;
2558     - return symname(mod, best);
2559     + *offset = addr - kallsyms->symtab[best].st_value;
2560     + return symname(kallsyms, best);
2561     }
2562    
2563     /* For kallsyms to ask for address resolution. NULL means not found. Careful
2564     @@ -3782,18 +3801,21 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
2565    
2566     preempt_disable();
2567     list_for_each_entry_rcu(mod, &modules, list) {
2568     + struct mod_kallsyms *kallsyms;
2569     +
2570     if (mod->state == MODULE_STATE_UNFORMED)
2571     continue;
2572     - if (symnum < mod->num_symtab) {
2573     - *value = mod->symtab[symnum].st_value;
2574     - *type = mod->symtab[symnum].st_info;
2575     - strlcpy(name, symname(mod, symnum), KSYM_NAME_LEN);
2576     + kallsyms = rcu_dereference_sched(mod->kallsyms);
2577     + if (symnum < kallsyms->num_symtab) {
2578     + *value = kallsyms->symtab[symnum].st_value;
2579     + *type = kallsyms->symtab[symnum].st_info;
2580     + strlcpy(name, symname(kallsyms, symnum), KSYM_NAME_LEN);
2581     strlcpy(module_name, mod->name, MODULE_NAME_LEN);
2582     *exported = is_exported(name, *value, mod);
2583     preempt_enable();
2584     return 0;
2585     }
2586     - symnum -= mod->num_symtab;
2587     + symnum -= kallsyms->num_symtab;
2588     }
2589     preempt_enable();
2590     return -ERANGE;
2591     @@ -3802,11 +3824,12 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
2592     static unsigned long mod_find_symname(struct module *mod, const char *name)
2593     {
2594     unsigned int i;
2595     + struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
2596    
2597     - for (i = 0; i < mod->num_symtab; i++)
2598     - if (strcmp(name, symname(mod, i)) == 0 &&
2599     - mod->symtab[i].st_info != 'U')
2600     - return mod->symtab[i].st_value;
2601     + for (i = 0; i < kallsyms->num_symtab; i++)
2602     + if (strcmp(name, symname(kallsyms, i)) == 0 &&
2603     + kallsyms->symtab[i].st_info != 'U')
2604     + return kallsyms->symtab[i].st_value;
2605     return 0;
2606     }
2607    
2608     @@ -3845,11 +3868,14 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
2609     module_assert_mutex();
2610    
2611     list_for_each_entry(mod, &modules, list) {
2612     + /* We hold module_mutex: no need for rcu_dereference_sched */
2613     + struct mod_kallsyms *kallsyms = mod->kallsyms;
2614     +
2615     if (mod->state == MODULE_STATE_UNFORMED)
2616     continue;
2617     - for (i = 0; i < mod->num_symtab; i++) {
2618     - ret = fn(data, symname(mod, i),
2619     - mod, mod->symtab[i].st_value);
2620     + for (i = 0; i < kallsyms->num_symtab; i++) {
2621     + ret = fn(data, symname(kallsyms, i),
2622     + mod, kallsyms->symtab[i].st_value);
2623     if (ret != 0)
2624     return ret;
2625     }
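[Illustrative sketch, not part of the patch.] The kernel/module.c hunks above collapse the symbol/string tables into a single mod->kallsyms pointer so the switch from the init-section copy to the core copy can be published with rcu_assign_pointer() while concurrent kallsyms walkers read it with rcu_dereference_sched() (or plain access under module_mutex). A stripped-down sketch of that publish/read pattern, with hypothetical types:

#include <linux/preempt.h>
#include <linux/rcupdate.h>

struct symtab {
	unsigned int num_symtab;
	/* ... symbol and string tables ... */
};

static struct symtab __rcu *cur_symtab;

/* Writer: publish the new table; readers see either the old or the new
 * table, never a half-updated pointer. */
static void publish_symtab(struct symtab *newtab)
{
	rcu_assign_pointer(cur_symtab, newtab);
}

/* Reader: sched-RCU read side (preemption disabled), matching the
 * preempt_disable() already used around module list walks. */
static unsigned int read_num_symtab(void)
{
	struct symtab *t;
	unsigned int num;

	preempt_disable();
	t = rcu_dereference_sched(cur_symtab);
	num = t ? t->num_symtab : 0;
	preempt_enable();
	return num;
}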
2626     diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
2627     index debf6e878076..d202d991edae 100644
2628     --- a/kernel/trace/trace_events.c
2629     +++ b/kernel/trace/trace_events.c
2630     @@ -97,16 +97,16 @@ trace_find_event_field(struct trace_event_call *call, char *name)
2631     struct ftrace_event_field *field;
2632     struct list_head *head;
2633    
2634     - field = __find_event_field(&ftrace_generic_fields, name);
2635     + head = trace_get_fields(call);
2636     + field = __find_event_field(head, name);
2637     if (field)
2638     return field;
2639    
2640     - field = __find_event_field(&ftrace_common_fields, name);
2641     + field = __find_event_field(&ftrace_generic_fields, name);
2642     if (field)
2643     return field;
2644    
2645     - head = trace_get_fields(call);
2646     - return __find_event_field(head, name);
2647     + return __find_event_field(&ftrace_common_fields, name);
2648     }
2649    
2650     static int __trace_define_field(struct list_head *head, const char *type,
2651     @@ -171,8 +171,10 @@ static int trace_define_generic_fields(void)
2652     {
2653     int ret;
2654    
2655     - __generic_field(int, cpu, FILTER_OTHER);
2656     - __generic_field(char *, comm, FILTER_PTR_STRING);
2657     + __generic_field(int, CPU, FILTER_CPU);
2658     + __generic_field(int, cpu, FILTER_CPU);
2659     + __generic_field(char *, COMM, FILTER_COMM);
2660     + __generic_field(char *, comm, FILTER_COMM);
2661    
2662     return ret;
2663     }
2664     diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
2665     index f93a219b18da..6816302542b2 100644
2666     --- a/kernel/trace/trace_events_filter.c
2667     +++ b/kernel/trace/trace_events_filter.c
2668     @@ -1043,13 +1043,14 @@ static int init_pred(struct filter_parse_state *ps,
2669     return -EINVAL;
2670     }
2671    
2672     - if (is_string_field(field)) {
2673     + if (field->filter_type == FILTER_COMM) {
2674     + filter_build_regex(pred);
2675     + fn = filter_pred_comm;
2676     + pred->regex.field_len = TASK_COMM_LEN;
2677     + } else if (is_string_field(field)) {
2678     filter_build_regex(pred);
2679    
2680     - if (!strcmp(field->name, "comm")) {
2681     - fn = filter_pred_comm;
2682     - pred->regex.field_len = TASK_COMM_LEN;
2683     - } else if (field->filter_type == FILTER_STATIC_STRING) {
2684     + if (field->filter_type == FILTER_STATIC_STRING) {
2685     fn = filter_pred_string;
2686     pred->regex.field_len = field->size;
2687     } else if (field->filter_type == FILTER_DYN_STRING)
2688     @@ -1072,7 +1073,7 @@ static int init_pred(struct filter_parse_state *ps,
2689     }
2690     pred->val = val;
2691    
2692     - if (!strcmp(field->name, "cpu"))
2693     + if (field->filter_type == FILTER_CPU)
2694     fn = filter_pred_cpu;
2695     else
2696     fn = select_comparison_fn(pred->op, field->size,
2697     diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c
2698     index b9c0910fb8c4..0608f216f359 100644
2699     --- a/sound/core/control_compat.c
2700     +++ b/sound/core/control_compat.c
2701     @@ -170,6 +170,19 @@ struct snd_ctl_elem_value32 {
2702     unsigned char reserved[128];
2703     };
2704    
2705     +#ifdef CONFIG_X86_X32
2706     +/* x32 has a different alignment for 64bit values from ia32 */
2707     +struct snd_ctl_elem_value_x32 {
2708     + struct snd_ctl_elem_id id;
2709     + unsigned int indirect; /* bit-field causes misalignment */
2710     + union {
2711     + s32 integer[128];
2712     + unsigned char data[512];
2713     + s64 integer64[64];
2714     + } value;
2715     + unsigned char reserved[128];
2716     +};
2717     +#endif /* CONFIG_X86_X32 */
2718    
2719     /* get the value type and count of the control */
2720     static int get_ctl_type(struct snd_card *card, struct snd_ctl_elem_id *id,
2721     @@ -219,9 +232,11 @@ static int get_elem_size(int type, int count)
2722    
2723     static int copy_ctl_value_from_user(struct snd_card *card,
2724     struct snd_ctl_elem_value *data,
2725     - struct snd_ctl_elem_value32 __user *data32,
2726     + void __user *userdata,
2727     + void __user *valuep,
2728     int *typep, int *countp)
2729     {
2730     + struct snd_ctl_elem_value32 __user *data32 = userdata;
2731     int i, type, size;
2732     int uninitialized_var(count);
2733     unsigned int indirect;
2734     @@ -239,8 +254,9 @@ static int copy_ctl_value_from_user(struct snd_card *card,
2735     if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN ||
2736     type == SNDRV_CTL_ELEM_TYPE_INTEGER) {
2737     for (i = 0; i < count; i++) {
2738     + s32 __user *intp = valuep;
2739     int val;
2740     - if (get_user(val, &data32->value.integer[i]))
2741     + if (get_user(val, &intp[i]))
2742     return -EFAULT;
2743     data->value.integer.value[i] = val;
2744     }
2745     @@ -250,8 +266,7 @@ static int copy_ctl_value_from_user(struct snd_card *card,
2746     dev_err(card->dev, "snd_ioctl32_ctl_elem_value: unknown type %d\n", type);
2747     return -EINVAL;
2748     }
2749     - if (copy_from_user(data->value.bytes.data,
2750     - data32->value.data, size))
2751     + if (copy_from_user(data->value.bytes.data, valuep, size))
2752     return -EFAULT;
2753     }
2754    
2755     @@ -261,7 +276,8 @@ static int copy_ctl_value_from_user(struct snd_card *card,
2756     }
2757    
2758     /* restore the value to 32bit */
2759     -static int copy_ctl_value_to_user(struct snd_ctl_elem_value32 __user *data32,
2760     +static int copy_ctl_value_to_user(void __user *userdata,
2761     + void __user *valuep,
2762     struct snd_ctl_elem_value *data,
2763     int type, int count)
2764     {
2765     @@ -270,22 +286,22 @@ static int copy_ctl_value_to_user(struct snd_ctl_elem_value32 __user *data32,
2766     if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN ||
2767     type == SNDRV_CTL_ELEM_TYPE_INTEGER) {
2768     for (i = 0; i < count; i++) {
2769     + s32 __user *intp = valuep;
2770     int val;
2771     val = data->value.integer.value[i];
2772     - if (put_user(val, &data32->value.integer[i]))
2773     + if (put_user(val, &intp[i]))
2774     return -EFAULT;
2775     }
2776     } else {
2777     size = get_elem_size(type, count);
2778     - if (copy_to_user(data32->value.data,
2779     - data->value.bytes.data, size))
2780     + if (copy_to_user(valuep, data->value.bytes.data, size))
2781     return -EFAULT;
2782     }
2783     return 0;
2784     }
2785    
2786     -static int snd_ctl_elem_read_user_compat(struct snd_card *card,
2787     - struct snd_ctl_elem_value32 __user *data32)
2788     +static int ctl_elem_read_user(struct snd_card *card,
2789     + void __user *userdata, void __user *valuep)
2790     {
2791     struct snd_ctl_elem_value *data;
2792     int err, type, count;
2793     @@ -294,7 +310,9 @@ static int snd_ctl_elem_read_user_compat(struct snd_card *card,
2794     if (data == NULL)
2795     return -ENOMEM;
2796    
2797     - if ((err = copy_ctl_value_from_user(card, data, data32, &type, &count)) < 0)
2798     + err = copy_ctl_value_from_user(card, data, userdata, valuep,
2799     + &type, &count);
2800     + if (err < 0)
2801     goto error;
2802    
2803     snd_power_lock(card);
2804     @@ -303,14 +321,15 @@ static int snd_ctl_elem_read_user_compat(struct snd_card *card,
2805     err = snd_ctl_elem_read(card, data);
2806     snd_power_unlock(card);
2807     if (err >= 0)
2808     - err = copy_ctl_value_to_user(data32, data, type, count);
2809     + err = copy_ctl_value_to_user(userdata, valuep, data,
2810     + type, count);
2811     error:
2812     kfree(data);
2813     return err;
2814     }
2815    
2816     -static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
2817     - struct snd_ctl_elem_value32 __user *data32)
2818     +static int ctl_elem_write_user(struct snd_ctl_file *file,
2819     + void __user *userdata, void __user *valuep)
2820     {
2821     struct snd_ctl_elem_value *data;
2822     struct snd_card *card = file->card;
2823     @@ -320,7 +339,9 @@ static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
2824     if (data == NULL)
2825     return -ENOMEM;
2826    
2827     - if ((err = copy_ctl_value_from_user(card, data, data32, &type, &count)) < 0)
2828     + err = copy_ctl_value_from_user(card, data, userdata, valuep,
2829     + &type, &count);
2830     + if (err < 0)
2831     goto error;
2832    
2833     snd_power_lock(card);
2834     @@ -329,12 +350,39 @@ static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
2835     err = snd_ctl_elem_write(card, file, data);
2836     snd_power_unlock(card);
2837     if (err >= 0)
2838     - err = copy_ctl_value_to_user(data32, data, type, count);
2839     + err = copy_ctl_value_to_user(userdata, valuep, data,
2840     + type, count);
2841     error:
2842     kfree(data);
2843     return err;
2844     }
2845    
2846     +static int snd_ctl_elem_read_user_compat(struct snd_card *card,
2847     + struct snd_ctl_elem_value32 __user *data32)
2848     +{
2849     + return ctl_elem_read_user(card, data32, &data32->value);
2850     +}
2851     +
2852     +static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
2853     + struct snd_ctl_elem_value32 __user *data32)
2854     +{
2855     + return ctl_elem_write_user(file, data32, &data32->value);
2856     +}
2857     +
2858     +#ifdef CONFIG_X86_X32
2859     +static int snd_ctl_elem_read_user_x32(struct snd_card *card,
2860     + struct snd_ctl_elem_value_x32 __user *data32)
2861     +{
2862     + return ctl_elem_read_user(card, data32, &data32->value);
2863     +}
2864     +
2865     +static int snd_ctl_elem_write_user_x32(struct snd_ctl_file *file,
2866     + struct snd_ctl_elem_value_x32 __user *data32)
2867     +{
2868     + return ctl_elem_write_user(file, data32, &data32->value);
2869     +}
2870     +#endif /* CONFIG_X86_X32 */
2871     +
2872     /* add or replace a user control */
2873     static int snd_ctl_elem_add_compat(struct snd_ctl_file *file,
2874     struct snd_ctl_elem_info32 __user *data32,
2875     @@ -393,6 +441,10 @@ enum {
2876     SNDRV_CTL_IOCTL_ELEM_WRITE32 = _IOWR('U', 0x13, struct snd_ctl_elem_value32),
2877     SNDRV_CTL_IOCTL_ELEM_ADD32 = _IOWR('U', 0x17, struct snd_ctl_elem_info32),
2878     SNDRV_CTL_IOCTL_ELEM_REPLACE32 = _IOWR('U', 0x18, struct snd_ctl_elem_info32),
2879     +#ifdef CONFIG_X86_X32
2880     + SNDRV_CTL_IOCTL_ELEM_READ_X32 = _IOWR('U', 0x12, struct snd_ctl_elem_value_x32),
2881     + SNDRV_CTL_IOCTL_ELEM_WRITE_X32 = _IOWR('U', 0x13, struct snd_ctl_elem_value_x32),
2882     +#endif /* CONFIG_X86_X32 */
2883     };
2884    
2885     static inline long snd_ctl_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
2886     @@ -431,6 +483,12 @@ static inline long snd_ctl_ioctl_compat(struct file *file, unsigned int cmd, uns
2887     return snd_ctl_elem_add_compat(ctl, argp, 0);
2888     case SNDRV_CTL_IOCTL_ELEM_REPLACE32:
2889     return snd_ctl_elem_add_compat(ctl, argp, 1);
2890     +#ifdef CONFIG_X86_X32
2891     + case SNDRV_CTL_IOCTL_ELEM_READ_X32:
2892     + return snd_ctl_elem_read_user_x32(ctl->card, argp);
2893     + case SNDRV_CTL_IOCTL_ELEM_WRITE_X32:
2894     + return snd_ctl_elem_write_user_x32(ctl, argp);
2895     +#endif /* CONFIG_X86_X32 */
2896     }
2897    
2898     down_read(&snd_ioctl_rwsem);
2899     diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
2900     index 9630e9f72b7b..1f64ab0c2a95 100644
2901     --- a/sound/core/pcm_compat.c
2902     +++ b/sound/core/pcm_compat.c
2903     @@ -183,6 +183,14 @@ static int snd_pcm_ioctl_channel_info_compat(struct snd_pcm_substream *substream
2904     return err;
2905     }
2906    
2907     +#ifdef CONFIG_X86_X32
2908     +/* X32 ABI has the same struct as x86-64 for snd_pcm_channel_info */
2909     +static int snd_pcm_channel_info_user(struct snd_pcm_substream *substream,
2910     + struct snd_pcm_channel_info __user *src);
2911     +#define snd_pcm_ioctl_channel_info_x32(s, p) \
2912     + snd_pcm_channel_info_user(s, p)
2913     +#endif /* CONFIG_X86_X32 */
2914     +
2915     struct snd_pcm_status32 {
2916     s32 state;
2917     struct compat_timespec trigger_tstamp;
2918     @@ -243,6 +251,71 @@ static int snd_pcm_status_user_compat(struct snd_pcm_substream *substream,
2919     return err;
2920     }
2921    
2922     +#ifdef CONFIG_X86_X32
2923     +/* X32 ABI has 64bit timespec and 64bit alignment */
2924     +struct snd_pcm_status_x32 {
2925     + s32 state;
2926     + u32 rsvd; /* alignment */
2927     + struct timespec trigger_tstamp;
2928     + struct timespec tstamp;
2929     + u32 appl_ptr;
2930     + u32 hw_ptr;
2931     + s32 delay;
2932     + u32 avail;
2933     + u32 avail_max;
2934     + u32 overrange;
2935     + s32 suspended_state;
2936     + u32 audio_tstamp_data;
2937     + struct timespec audio_tstamp;
2938     + struct timespec driver_tstamp;
2939     + u32 audio_tstamp_accuracy;
2940     + unsigned char reserved[52-2*sizeof(struct timespec)];
2941     +} __packed;
2942     +
2943     +#define put_timespec(src, dst) copy_to_user(dst, src, sizeof(*dst))
2944     +
2945     +static int snd_pcm_status_user_x32(struct snd_pcm_substream *substream,
2946     + struct snd_pcm_status_x32 __user *src,
2947     + bool ext)
2948     +{
2949     + struct snd_pcm_status status;
2950     + int err;
2951     +
2952     + memset(&status, 0, sizeof(status));
2953     + /*
2954     + * with extension, parameters are read/write,
2955     + * get audio_tstamp_data from user,
2956     + * ignore rest of status structure
2957     + */
2958     + if (ext && get_user(status.audio_tstamp_data,
2959     + (u32 __user *)(&src->audio_tstamp_data)))
2960     + return -EFAULT;
2961     + err = snd_pcm_status(substream, &status);
2962     + if (err < 0)
2963     + return err;
2964     +
2965     + if (clear_user(src, sizeof(*src)))
2966     + return -EFAULT;
2967     + if (put_user(status.state, &src->state) ||
2968     + put_timespec(&status.trigger_tstamp, &src->trigger_tstamp) ||
2969     + put_timespec(&status.tstamp, &src->tstamp) ||
2970     + put_user(status.appl_ptr, &src->appl_ptr) ||
2971     + put_user(status.hw_ptr, &src->hw_ptr) ||
2972     + put_user(status.delay, &src->delay) ||
2973     + put_user(status.avail, &src->avail) ||
2974     + put_user(status.avail_max, &src->avail_max) ||
2975     + put_user(status.overrange, &src->overrange) ||
2976     + put_user(status.suspended_state, &src->suspended_state) ||
2977     + put_user(status.audio_tstamp_data, &src->audio_tstamp_data) ||
2978     + put_timespec(&status.audio_tstamp, &src->audio_tstamp) ||
2979     + put_timespec(&status.driver_tstamp, &src->driver_tstamp) ||
2980     + put_user(status.audio_tstamp_accuracy, &src->audio_tstamp_accuracy))
2981     + return -EFAULT;
2982     +
2983     + return err;
2984     +}
2985     +#endif /* CONFIG_X86_X32 */
2986     +
2987     /* both for HW_PARAMS and HW_REFINE */
2988     static int snd_pcm_ioctl_hw_params_compat(struct snd_pcm_substream *substream,
2989     int refine,
2990     @@ -469,6 +542,93 @@ static int snd_pcm_ioctl_sync_ptr_compat(struct snd_pcm_substream *substream,
2991     return 0;
2992     }
2993    
2994     +#ifdef CONFIG_X86_X32
2995     +/* X32 ABI has 64bit timespec and 64bit alignment */
2996     +struct snd_pcm_mmap_status_x32 {
2997     + s32 state;
2998     + s32 pad1;
2999     + u32 hw_ptr;
3000     + u32 pad2; /* alignment */
3001     + struct timespec tstamp;
3002     + s32 suspended_state;
3003     + struct timespec audio_tstamp;
3004     +} __packed;
3005     +
3006     +struct snd_pcm_mmap_control_x32 {
3007     + u32 appl_ptr;
3008     + u32 avail_min;
3009     +};
3010     +
3011     +struct snd_pcm_sync_ptr_x32 {
3012     + u32 flags;
3013     + u32 rsvd; /* alignment */
3014     + union {
3015     + struct snd_pcm_mmap_status_x32 status;
3016     + unsigned char reserved[64];
3017     + } s;
3018     + union {
3019     + struct snd_pcm_mmap_control_x32 control;
3020     + unsigned char reserved[64];
3021     + } c;
3022     +} __packed;
3023     +
3024     +static int snd_pcm_ioctl_sync_ptr_x32(struct snd_pcm_substream *substream,
3025     + struct snd_pcm_sync_ptr_x32 __user *src)
3026     +{
3027     + struct snd_pcm_runtime *runtime = substream->runtime;
3028     + volatile struct snd_pcm_mmap_status *status;
3029     + volatile struct snd_pcm_mmap_control *control;
3030     + u32 sflags;
3031     + struct snd_pcm_mmap_control scontrol;
3032     + struct snd_pcm_mmap_status sstatus;
3033     + snd_pcm_uframes_t boundary;
3034     + int err;
3035     +
3036     + if (snd_BUG_ON(!runtime))
3037     + return -EINVAL;
3038     +
3039     + if (get_user(sflags, &src->flags) ||
3040     + get_user(scontrol.appl_ptr, &src->c.control.appl_ptr) ||
3041     + get_user(scontrol.avail_min, &src->c.control.avail_min))
3042     + return -EFAULT;
3043     + if (sflags & SNDRV_PCM_SYNC_PTR_HWSYNC) {
3044     + err = snd_pcm_hwsync(substream);
3045     + if (err < 0)
3046     + return err;
3047     + }
3048     + status = runtime->status;
3049     + control = runtime->control;
3050     + boundary = recalculate_boundary(runtime);
3051     + if (!boundary)
3052     + boundary = 0x7fffffff;
3053     + snd_pcm_stream_lock_irq(substream);
3054     + /* FIXME: we should consider the boundary for the sync from app */
3055     + if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL))
3056     + control->appl_ptr = scontrol.appl_ptr;
3057     + else
3058     + scontrol.appl_ptr = control->appl_ptr % boundary;
3059     + if (!(sflags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
3060     + control->avail_min = scontrol.avail_min;
3061     + else
3062     + scontrol.avail_min = control->avail_min;
3063     + sstatus.state = status->state;
3064     + sstatus.hw_ptr = status->hw_ptr % boundary;
3065     + sstatus.tstamp = status->tstamp;
3066     + sstatus.suspended_state = status->suspended_state;
3067     + sstatus.audio_tstamp = status->audio_tstamp;
3068     + snd_pcm_stream_unlock_irq(substream);
3069     + if (put_user(sstatus.state, &src->s.status.state) ||
3070     + put_user(sstatus.hw_ptr, &src->s.status.hw_ptr) ||
3071     + put_timespec(&sstatus.tstamp, &src->s.status.tstamp) ||
3072     + put_user(sstatus.suspended_state, &src->s.status.suspended_state) ||
3073     + put_timespec(&sstatus.audio_tstamp, &src->s.status.audio_tstamp) ||
3074     + put_user(scontrol.appl_ptr, &src->c.control.appl_ptr) ||
3075     + put_user(scontrol.avail_min, &src->c.control.avail_min))
3076     + return -EFAULT;
3077     +
3078     + return 0;
3079     +}
3080     +#endif /* CONFIG_X86_X32 */
3081    
3082     /*
3083     */
3084     @@ -487,7 +647,12 @@ enum {
3085     SNDRV_PCM_IOCTL_WRITEN_FRAMES32 = _IOW('A', 0x52, struct snd_xfern32),
3086     SNDRV_PCM_IOCTL_READN_FRAMES32 = _IOR('A', 0x53, struct snd_xfern32),
3087     SNDRV_PCM_IOCTL_SYNC_PTR32 = _IOWR('A', 0x23, struct snd_pcm_sync_ptr32),
3088     -
3089     +#ifdef CONFIG_X86_X32
3090     + SNDRV_PCM_IOCTL_CHANNEL_INFO_X32 = _IOR('A', 0x32, struct snd_pcm_channel_info),
3091     + SNDRV_PCM_IOCTL_STATUS_X32 = _IOR('A', 0x20, struct snd_pcm_status_x32),
3092     + SNDRV_PCM_IOCTL_STATUS_EXT_X32 = _IOWR('A', 0x24, struct snd_pcm_status_x32),
3093     + SNDRV_PCM_IOCTL_SYNC_PTR_X32 = _IOWR('A', 0x23, struct snd_pcm_sync_ptr_x32),
3094     +#endif /* CONFIG_X86_X32 */
3095     };
3096    
3097     static long snd_pcm_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
3098     @@ -559,6 +724,16 @@ static long snd_pcm_ioctl_compat(struct file *file, unsigned int cmd, unsigned l
3099     return snd_pcm_ioctl_rewind_compat(substream, argp);
3100     case SNDRV_PCM_IOCTL_FORWARD32:
3101     return snd_pcm_ioctl_forward_compat(substream, argp);
3102     +#ifdef CONFIG_X86_X32
3103     + case SNDRV_PCM_IOCTL_STATUS_X32:
3104     + return snd_pcm_status_user_x32(substream, argp, false);
3105     + case SNDRV_PCM_IOCTL_STATUS_EXT_X32:
3106     + return snd_pcm_status_user_x32(substream, argp, true);
3107     + case SNDRV_PCM_IOCTL_SYNC_PTR_X32:
3108     + return snd_pcm_ioctl_sync_ptr_x32(substream, argp);
3109     + case SNDRV_PCM_IOCTL_CHANNEL_INFO_X32:
3110     + return snd_pcm_ioctl_channel_info_x32(substream, argp);
3111     +#endif /* CONFIG_X86_X32 */
3112     }
3113    
3114     return -ENOIOCTLCMD;
3115     diff --git a/sound/core/rawmidi_compat.c b/sound/core/rawmidi_compat.c
3116     index 5268c1f58c25..09a89094dcf7 100644
3117     --- a/sound/core/rawmidi_compat.c
3118     +++ b/sound/core/rawmidi_compat.c
3119     @@ -94,9 +94,58 @@ static int snd_rawmidi_ioctl_status_compat(struct snd_rawmidi_file *rfile,
3120     return 0;
3121     }
3122    
3123     +#ifdef CONFIG_X86_X32
3124     +/* X32 ABI has 64bit timespec and 64bit alignment */
3125     +struct snd_rawmidi_status_x32 {
3126     + s32 stream;
3127     + u32 rsvd; /* alignment */
3128     + struct timespec tstamp;
3129     + u32 avail;
3130     + u32 xruns;
3131     + unsigned char reserved[16];
3132     +} __attribute__((packed));
3133     +
3134     +#define put_timespec(src, dst) copy_to_user(dst, src, sizeof(*dst))
3135     +
3136     +static int snd_rawmidi_ioctl_status_x32(struct snd_rawmidi_file *rfile,
3137     + struct snd_rawmidi_status_x32 __user *src)
3138     +{
3139     + int err;
3140     + struct snd_rawmidi_status status;
3141     +
3142     + if (rfile->output == NULL)
3143     + return -EINVAL;
3144     + if (get_user(status.stream, &src->stream))
3145     + return -EFAULT;
3146     +
3147     + switch (status.stream) {
3148     + case SNDRV_RAWMIDI_STREAM_OUTPUT:
3149     + err = snd_rawmidi_output_status(rfile->output, &status);
3150     + break;
3151     + case SNDRV_RAWMIDI_STREAM_INPUT:
3152     + err = snd_rawmidi_input_status(rfile->input, &status);
3153     + break;
3154     + default:
3155     + return -EINVAL;
3156     + }
3157     + if (err < 0)
3158     + return err;
3159     +
3160     + if (put_timespec(&status.tstamp, &src->tstamp) ||
3161     + put_user(status.avail, &src->avail) ||
3162     + put_user(status.xruns, &src->xruns))
3163     + return -EFAULT;
3164     +
3165     + return 0;
3166     +}
3167     +#endif /* CONFIG_X86_X32 */
3168     +
3169     enum {
3170     SNDRV_RAWMIDI_IOCTL_PARAMS32 = _IOWR('W', 0x10, struct snd_rawmidi_params32),
3171     SNDRV_RAWMIDI_IOCTL_STATUS32 = _IOWR('W', 0x20, struct snd_rawmidi_status32),
3172     +#ifdef CONFIG_X86_X32
3173     + SNDRV_RAWMIDI_IOCTL_STATUS_X32 = _IOWR('W', 0x20, struct snd_rawmidi_status_x32),
3174     +#endif /* CONFIG_X86_X32 */
3175     };
3176    
3177     static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
3178     @@ -115,6 +164,10 @@ static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd, unsign
3179     return snd_rawmidi_ioctl_params_compat(rfile, argp);
3180     case SNDRV_RAWMIDI_IOCTL_STATUS32:
3181     return snd_rawmidi_ioctl_status_compat(rfile, argp);
3182     +#ifdef CONFIG_X86_X32
3183     + case SNDRV_RAWMIDI_IOCTL_STATUS_X32:
3184     + return snd_rawmidi_ioctl_status_x32(rfile, argp);
3185     +#endif /* CONFIG_X86_X32 */
3186     }
3187     return -ENOIOCTLCMD;
3188     }
3189     diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c
3190     index 7354b8bed860..cb23899100ee 100644
3191     --- a/sound/core/seq/oss/seq_oss.c
3192     +++ b/sound/core/seq/oss/seq_oss.c
3193     @@ -148,8 +148,6 @@ odev_release(struct inode *inode, struct file *file)
3194     if ((dp = file->private_data) == NULL)
3195     return 0;
3196    
3197     - snd_seq_oss_drain_write(dp);
3198     -
3199     mutex_lock(&register_mutex);
3200     snd_seq_oss_release(dp);
3201     mutex_unlock(&register_mutex);
3202     diff --git a/sound/core/seq/oss/seq_oss_device.h b/sound/core/seq/oss/seq_oss_device.h
3203     index b43924325249..d7b4d016b547 100644
3204     --- a/sound/core/seq/oss/seq_oss_device.h
3205     +++ b/sound/core/seq/oss/seq_oss_device.h
3206     @@ -127,7 +127,6 @@ int snd_seq_oss_write(struct seq_oss_devinfo *dp, const char __user *buf, int co
3207     unsigned int snd_seq_oss_poll(struct seq_oss_devinfo *dp, struct file *file, poll_table * wait);
3208    
3209     void snd_seq_oss_reset(struct seq_oss_devinfo *dp);
3210     -void snd_seq_oss_drain_write(struct seq_oss_devinfo *dp);
3211    
3212     /* */
3213     void snd_seq_oss_process_queue(struct seq_oss_devinfo *dp, abstime_t time);
3214     diff --git a/sound/core/seq/oss/seq_oss_init.c b/sound/core/seq/oss/seq_oss_init.c
3215     index 6779e82b46dd..92c96a95a903 100644
3216     --- a/sound/core/seq/oss/seq_oss_init.c
3217     +++ b/sound/core/seq/oss/seq_oss_init.c
3218     @@ -436,22 +436,6 @@ snd_seq_oss_release(struct seq_oss_devinfo *dp)
3219    
3220    
3221     /*
3222     - * Wait until the queue is empty (if we don't have nonblock)
3223     - */
3224     -void
3225     -snd_seq_oss_drain_write(struct seq_oss_devinfo *dp)
3226     -{
3227     - if (! dp->timer->running)
3228     - return;
3229     - if (is_write_mode(dp->file_mode) && !is_nonblock_mode(dp->file_mode) &&
3230     - dp->writeq) {
3231     - while (snd_seq_oss_writeq_sync(dp->writeq))
3232     - ;
3233     - }
3234     -}
3235     -
3236     -
3237     -/*
3238     * reset sequencer devices
3239     */
3240     void
3241     diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c
3242     index e05802ae6e1b..2e908225d754 100644
3243     --- a/sound/core/timer_compat.c
3244     +++ b/sound/core/timer_compat.c
3245     @@ -70,13 +70,14 @@ static int snd_timer_user_status_compat(struct file *file,
3246     struct snd_timer_status32 __user *_status)
3247     {
3248     struct snd_timer_user *tu;
3249     - struct snd_timer_status status;
3250     + struct snd_timer_status32 status;
3251    
3252     tu = file->private_data;
3253     if (snd_BUG_ON(!tu->timeri))
3254     return -ENXIO;
3255     memset(&status, 0, sizeof(status));
3256     - status.tstamp = tu->tstamp;
3257     + status.tstamp.tv_sec = tu->tstamp.tv_sec;
3258     + status.tstamp.tv_nsec = tu->tstamp.tv_nsec;
3259     status.resolution = snd_timer_resolution(tu->timeri);
3260     status.lost = tu->timeri->lost;
3261     status.overrun = tu->overrun;
3262     @@ -88,12 +89,21 @@ static int snd_timer_user_status_compat(struct file *file,
3263     return 0;
3264     }
3265    
3266     +#ifdef CONFIG_X86_X32
3267     +/* X32 ABI has the same struct as x86-64 */
3268     +#define snd_timer_user_status_x32(file, s) \
3269     + snd_timer_user_status(file, s)
3270     +#endif /* CONFIG_X86_X32 */
3271     +
3272     /*
3273     */
3274    
3275     enum {
3276     SNDRV_TIMER_IOCTL_INFO32 = _IOR('T', 0x11, struct snd_timer_info32),
3277     SNDRV_TIMER_IOCTL_STATUS32 = _IOW('T', 0x14, struct snd_timer_status32),
3278     +#ifdef CONFIG_X86_X32
3279     + SNDRV_TIMER_IOCTL_STATUS_X32 = _IOW('T', 0x14, struct snd_timer_status),
3280     +#endif /* CONFIG_X86_X32 */
3281     };
3282    
3283     static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
3284     @@ -122,6 +132,10 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, uns
3285     return snd_timer_user_info_compat(file, argp);
3286     case SNDRV_TIMER_IOCTL_STATUS32:
3287     return snd_timer_user_status_compat(file, argp);
3288     +#ifdef CONFIG_X86_X32
3289     + case SNDRV_TIMER_IOCTL_STATUS_X32:
3290     + return snd_timer_user_status_x32(file, argp);
3291     +#endif /* CONFIG_X86_X32 */
3292     }
3293     return -ENOIOCTLCMD;
3294     }
3295     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
3296     index 72fa58dd7723..c2430b36e1ce 100644
3297     --- a/sound/pci/hda/patch_realtek.c
3298     +++ b/sound/pci/hda/patch_realtek.c
3299     @@ -5386,6 +5386,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3300     SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
3301     SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK),
3302     SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
3303     + SND_PCI_QUIRK(0x1025, 0x0762, "Acer Aspire E1-472", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
3304     SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
3305     SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
3306     SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
3307     diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
3308     index 2875b4f6d8c9..7c8941b8b2de 100644
3309     --- a/sound/pci/rme9652/hdsp.c
3310     +++ b/sound/pci/rme9652/hdsp.c
3311     @@ -2879,7 +2879,7 @@ static int snd_hdsp_get_dds_offset(struct snd_kcontrol *kcontrol, struct snd_ctl
3312     {
3313     struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
3314    
3315     - ucontrol->value.enumerated.item[0] = hdsp_dds_offset(hdsp);
3316     + ucontrol->value.integer.value[0] = hdsp_dds_offset(hdsp);
3317     return 0;
3318     }
3319    
3320     @@ -2891,7 +2891,7 @@ static int snd_hdsp_put_dds_offset(struct snd_kcontrol *kcontrol, struct snd_ctl
3321    
3322     if (!snd_hdsp_use_is_exclusive(hdsp))
3323     return -EBUSY;
3324     - val = ucontrol->value.enumerated.item[0];
3325     + val = ucontrol->value.integer.value[0];
3326     spin_lock_irq(&hdsp->lock);
3327     if (val != hdsp_dds_offset(hdsp))
3328     change = (hdsp_set_dds_offset(hdsp, val) == 0) ? 1 : 0;
3329     diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
3330     index 8bc8016c173d..a4a999a0317e 100644
3331     --- a/sound/pci/rme9652/hdspm.c
3332     +++ b/sound/pci/rme9652/hdspm.c
3333     @@ -1601,6 +1601,9 @@ static void hdspm_set_dds_value(struct hdspm *hdspm, int rate)
3334     {
3335     u64 n;
3336    
3337     + if (snd_BUG_ON(rate <= 0))
3338     + return;
3339     +
3340     if (rate >= 112000)
3341     rate /= 4;
3342     else if (rate >= 56000)
3343     @@ -2215,6 +2218,8 @@ static int hdspm_get_system_sample_rate(struct hdspm *hdspm)
3344     } else {
3345     /* slave mode, return external sample rate */
3346     rate = hdspm_external_sample_rate(hdspm);
3347     + if (!rate)
3348     + rate = hdspm->system_sample_rate;
3349     }
3350     }
3351    
3352     @@ -2260,8 +2265,11 @@ static int snd_hdspm_put_system_sample_rate(struct snd_kcontrol *kcontrol,
3353     ucontrol)
3354     {
3355     struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
3356     + int rate = ucontrol->value.integer.value[0];
3357    
3358     - hdspm_set_dds_value(hdspm, ucontrol->value.enumerated.item[0]);
3359     + if (rate < 27000 || rate > 207000)
3360     + return -EINVAL;
3361     + hdspm_set_dds_value(hdspm, ucontrol->value.integer.value[0]);
3362     return 0;
3363     }
3364    
3365     @@ -4449,7 +4457,7 @@ static int snd_hdspm_get_tco_word_term(struct snd_kcontrol *kcontrol,
3366     {
3367     struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
3368    
3369     - ucontrol->value.enumerated.item[0] = hdspm->tco->term;
3370     + ucontrol->value.integer.value[0] = hdspm->tco->term;
3371    
3372     return 0;
3373     }
3374     @@ -4460,8 +4468,8 @@ static int snd_hdspm_put_tco_word_term(struct snd_kcontrol *kcontrol,
3375     {
3376     struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
3377    
3378     - if (hdspm->tco->term != ucontrol->value.enumerated.item[0]) {
3379     - hdspm->tco->term = ucontrol->value.enumerated.item[0];
3380     + if (hdspm->tco->term != ucontrol->value.integer.value[0]) {
3381     + hdspm->tco->term = ucontrol->value.integer.value[0];
3382    
3383     hdspm_tco_write(hdspm);
3384    
3385     diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
3386     index 4f6ce1cac8e2..c458d60d5030 100644
3387     --- a/sound/usb/quirks.c
3388     +++ b/sound/usb/quirks.c
3389     @@ -1124,6 +1124,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
3390     case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
3391     case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
3392     case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
3393     + case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
3394     case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
3395     case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
3396     case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */