Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.1/0119-4.1.20-all-fixes.patch

Revision 2776
Thu Apr 7 12:10:09 2016 UTC by niro
File size: 135332 byte(s)
-linux-4.1.20
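
Note: several hunks below (arch/arm/kvm/guest.c, arch/arm64/kvm/guest.c, arch/mips/kvm/mips.c) apply the same recurring fix: copy_to_user()/copy_from_user() return the number of bytes left uncopied, never a negative errno, so returning their value directly can hand a positive byte count back to the caller instead of -EFAULT. The userspace sketch below only illustrates that idiom; fake_copy_to_user() is a hypothetical stand-in for the kernel helper, while the one-line "fixed" pattern mirrors the actual hunks in this patch.

/* Minimal userspace sketch of the copy_to_user return-value fix applied
 * in several hunks of this patch.  fake_copy_to_user() is a stand-in,
 * not the real kernel API: like copy_to_user(), it returns the number
 * of bytes it could NOT copy (0 on success). */
#include <errno.h>
#include <stdio.h>
#include <string.h>

static unsigned long fake_copy_to_user(void *dst, const void *src, unsigned long n)
{
	if (!dst)               /* pretend the user pointer faulted */
		return n;
	memcpy(dst, src, n);
	return 0;
}

/* Buggy pattern removed by the patch: propagates a positive byte count. */
static int get_reg_buggy(void *uaddr, const unsigned long *val)
{
	return fake_copy_to_user(uaddr, val, sizeof(*val));
}

/* Fixed pattern, as in the arm/arm64/mips KVM hunks after this patch. */
static int get_reg_fixed(void *uaddr, const unsigned long *val)
{
	return fake_copy_to_user(uaddr, val, sizeof(*val)) ? -EFAULT : 0;
}

int main(void)
{
	unsigned long val = 42;

	printf("buggy: %d\n", get_reg_buggy(NULL, &val)); /* uncopied byte count (> 0), not an errno */
	printf("fixed: %d\n", get_reg_fixed(NULL, &val)); /* -EFAULT, a proper negative errno */
	return 0;
}
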
1 niro 2776 diff --git a/Makefile b/Makefile
2     index 06107f683bbe..39be1bbd373a 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 4
7     PATCHLEVEL = 1
8     -SUBLEVEL = 19
9     +SUBLEVEL = 20
10     EXTRAVERSION =
11     NAME = Series 4800
12    
13     diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
14     index d503fbb787d3..88993cc95e8e 100644
15     --- a/arch/arm/kvm/guest.c
16     +++ b/arch/arm/kvm/guest.c
17     @@ -155,7 +155,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
18     u64 val;
19    
20     val = kvm_arm_timer_get_reg(vcpu, reg->id);
21     - return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
22     + return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
23     }
24    
25     static unsigned long num_core_regs(void)
26     diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
27     index 9535bd555d1d..d4e04d2237c4 100644
28     --- a/arch/arm64/kvm/guest.c
29     +++ b/arch/arm64/kvm/guest.c
30     @@ -184,7 +184,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
31     u64 val;
32    
33     val = kvm_arm_timer_get_reg(vcpu, reg->id);
34     - return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
35     + return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
36     }
37    
38     /**
39     diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
40     index 5f5f44edc77d..54923d6b7e16 100644
41     --- a/arch/mips/kernel/traps.c
42     +++ b/arch/mips/kernel/traps.c
43     @@ -693,15 +693,15 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
44     asmlinkage void do_ov(struct pt_regs *regs)
45     {
46     enum ctx_state prev_state;
47     - siginfo_t info;
48     + siginfo_t info = {
49     + .si_signo = SIGFPE,
50     + .si_code = FPE_INTOVF,
51     + .si_addr = (void __user *)regs->cp0_epc,
52     + };
53    
54     prev_state = exception_enter();
55     die_if_kernel("Integer overflow", regs);
56    
57     - info.si_code = FPE_INTOVF;
58     - info.si_signo = SIGFPE;
59     - info.si_errno = 0;
60     - info.si_addr = (void __user *) regs->cp0_epc;
61     force_sig_info(SIGFPE, &info, current);
62     exception_exit(prev_state);
63     }
64     @@ -877,7 +877,7 @@ out:
65     void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
66     const char *str)
67     {
68     - siginfo_t info;
69     + siginfo_t info = { 0 };
70     char b[40];
71    
72     #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
73     @@ -905,7 +905,6 @@ void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
74     else
75     info.si_code = FPE_INTOVF;
76     info.si_signo = SIGFPE;
77     - info.si_errno = 0;
78     info.si_addr = (void __user *) regs->cp0_epc;
79     force_sig_info(SIGFPE, &info, current);
80     break;
81     diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
82     index 22ee0afc7d5d..ace4ed7d41c6 100644
83     --- a/arch/mips/kvm/mips.c
84     +++ b/arch/mips/kvm/mips.c
85     @@ -700,7 +700,7 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
86     } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
87     void __user *uaddr = (void __user *)(long)reg->addr;
88    
89     - return copy_to_user(uaddr, vs, 16);
90     + return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
91     } else {
92     return -EINVAL;
93     }
94     @@ -730,7 +730,7 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
95     } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
96     void __user *uaddr = (void __user *)(long)reg->addr;
97    
98     - return copy_from_user(vs, uaddr, 16);
99     + return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0;
100     } else {
101     return -EINVAL;
102     }
103     diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
104     index 9585c81f755f..ce0b2b4075c7 100644
105     --- a/arch/parisc/kernel/ptrace.c
106     +++ b/arch/parisc/kernel/ptrace.c
107     @@ -269,14 +269,19 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
108    
109     long do_syscall_trace_enter(struct pt_regs *regs)
110     {
111     - long ret = 0;
112     -
113     /* Do the secure computing check first. */
114     secure_computing_strict(regs->gr[20]);
115    
116     if (test_thread_flag(TIF_SYSCALL_TRACE) &&
117     - tracehook_report_syscall_entry(regs))
118     - ret = -1L;
119     + tracehook_report_syscall_entry(regs)) {
120     + /*
121     + * Tracing decided this syscall should not happen or the
122     + * debugger stored an invalid system call number. Skip
123     + * the system call and the system call restart handling.
124     + */
125     + regs->gr[20] = -1UL;
126     + goto out;
127     + }
128    
129     #ifdef CONFIG_64BIT
130     if (!is_compat_task())
131     @@ -290,7 +295,8 @@ long do_syscall_trace_enter(struct pt_regs *regs)
132     regs->gr[24] & 0xffffffff,
133     regs->gr[23] & 0xffffffff);
134    
135     - return ret ? : regs->gr[20];
136     +out:
137     + return regs->gr[20];
138     }
139    
140     void do_syscall_trace_exit(struct pt_regs *regs)
141     diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
142     index 0b8d26d3ba43..02cf40c96fe3 100644
143     --- a/arch/parisc/kernel/syscall.S
144     +++ b/arch/parisc/kernel/syscall.S
145     @@ -343,7 +343,7 @@ tracesys_next:
146     #endif
147    
148     comiclr,>>= __NR_Linux_syscalls, %r20, %r0
149     - b,n .Lsyscall_nosys
150     + b,n .Ltracesys_nosys
151    
152     LDREGX %r20(%r19), %r19
153    
154     @@ -359,6 +359,9 @@ tracesys_next:
155     be 0(%sr7,%r19)
156     ldo R%tracesys_exit(%r2),%r2
157    
158     +.Ltracesys_nosys:
159     + ldo -ENOSYS(%r0),%r28 /* set errno */
160     +
161     /* Do *not* call this function on the gateway page, because it
162     makes a direct call to syscall_trace. */
163    
164     diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
165     index c78ba51ae285..24b7e554db27 100644
166     --- a/arch/s390/kernel/compat_signal.c
167     +++ b/arch/s390/kernel/compat_signal.c
168     @@ -293,7 +293,7 @@ static int restore_sigregs_ext32(struct pt_regs *regs,
169    
170     /* Restore high gprs from signal stack */
171     if (__copy_from_user(&gprs_high, &sregs_ext->gprs_high,
172     - sizeof(&sregs_ext->gprs_high)))
173     + sizeof(sregs_ext->gprs_high)))
174     return -EFAULT;
175     for (i = 0; i < NUM_GPRS; i++)
176     *(__u32 *)&regs->gprs[i] = gprs_high[i];
177     diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
178     index d1daead5fcdd..adb3eaf8fe2a 100644
179     --- a/arch/x86/kernel/acpi/sleep.c
180     +++ b/arch/x86/kernel/acpi/sleep.c
181     @@ -16,6 +16,7 @@
182     #include <asm/cacheflush.h>
183     #include <asm/realmode.h>
184    
185     +#include <linux/ftrace.h>
186     #include "../../realmode/rm/wakeup.h"
187     #include "sleep.h"
188    
189     @@ -107,7 +108,13 @@ int x86_acpi_suspend_lowlevel(void)
190     saved_magic = 0x123456789abcdef0L;
191     #endif /* CONFIG_64BIT */
192    
193     + /*
194     + * Pause/unpause graph tracing around do_suspend_lowlevel as it has
195     + * inconsistent call/return info after it jumps to the wakeup vector.
196     + */
197     + pause_graph_tracing();
198     do_suspend_lowlevel();
199     + unpause_graph_tracing();
200     return 0;
201     }
202    
203     diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
204     index 6e6d115fe9b5..d537c9badeb6 100644
205     --- a/arch/x86/kvm/paging_tmpl.h
206     +++ b/arch/x86/kvm/paging_tmpl.h
207     @@ -257,7 +257,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
208     return ret;
209    
210     mark_page_dirty(vcpu->kvm, table_gfn);
211     - walker->ptes[level] = pte;
212     + walker->ptes[level - 1] = pte;
213     }
214     return 0;
215     }
216     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
217     index fed4c84eac44..41a3fb4ed346 100644
218     --- a/arch/x86/kvm/x86.c
219     +++ b/arch/x86/kvm/x86.c
220     @@ -2117,6 +2117,8 @@ static void accumulate_steal_time(struct kvm_vcpu *vcpu)
221    
222     static void record_steal_time(struct kvm_vcpu *vcpu)
223     {
224     + accumulate_steal_time(vcpu);
225     +
226     if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
227     return;
228    
229     @@ -2262,12 +2264,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
230     if (!(data & KVM_MSR_ENABLED))
231     break;
232    
233     - vcpu->arch.st.last_steal = current->sched_info.run_delay;
234     -
235     - preempt_disable();
236     - accumulate_steal_time(vcpu);
237     - preempt_enable();
238     -
239     kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
240    
241     break;
242     @@ -2966,7 +2962,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
243     vcpu->cpu = cpu;
244     }
245    
246     - accumulate_steal_time(vcpu);
247     kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
248     }
249    
250     @@ -6371,12 +6366,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
251     * KVM_DEBUGREG_WONT_EXIT again.
252     */
253     if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
254     - int i;
255     -
256     WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
257     kvm_x86_ops->sync_dirty_debug_regs(vcpu);
258     - for (i = 0; i < KVM_NR_DB_REGS; i++)
259     - vcpu->arch.eff_db[i] = vcpu->arch.db[i];
260     + kvm_update_dr0123(vcpu);
261     + kvm_update_dr6(vcpu);
262     + kvm_update_dr7(vcpu);
263     + vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
264     }
265    
266     /*
267     diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
268     index f738c61bc891..6a3c774eaff6 100644
269     --- a/arch/x86/mm/mpx.c
270     +++ b/arch/x86/mm/mpx.c
271     @@ -142,7 +142,7 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
272     break;
273     }
274    
275     - if (regno > nr_registers) {
276     + if (regno >= nr_registers) {
277     WARN_ONCE(1, "decoded an instruction with an invalid register");
278     return -EINVAL;
279     }
280     diff --git a/drivers/android/binder.c b/drivers/android/binder.c
281     index 6607f3c6ace1..f1a26d937d98 100644
282     --- a/drivers/android/binder.c
283     +++ b/drivers/android/binder.c
284     @@ -2074,7 +2074,7 @@ static int binder_thread_write(struct binder_proc *proc,
285     if (get_user(cookie, (binder_uintptr_t __user *)ptr))
286     return -EFAULT;
287    
288     - ptr += sizeof(void *);
289     + ptr += sizeof(cookie);
290     list_for_each_entry(w, &proc->delivered_death, entry) {
291     struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
292    
293     diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
294     index 666fd8a1500a..34825d63d483 100644
295     --- a/drivers/ata/ahci.c
296     +++ b/drivers/ata/ahci.c
297     @@ -332,6 +332,16 @@ static const struct pci_device_id ahci_pci_tbl[] = {
298     { PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */
299     { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */
300     { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */
301     + { PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
302     + { PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
303     + { PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
304     + { PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
305     + { PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
306     + { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
307     + { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
308     + { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
309     + { PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
310     + { PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
311     { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */
312     { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */
313     { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
314     @@ -362,6 +372,22 @@ static const struct pci_device_id ahci_pci_tbl[] = {
315     { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
316     { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
317     { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
318     + { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
319     + { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/
320     + { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
321     + { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Lewisburg RAID*/
322     + { PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
323     + { PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
324     + { PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
325     + { PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
326     + { PCI_VDEVICE(INTEL, 0xa1d2), board_ahci }, /* Lewisburg RAID*/
327     + { PCI_VDEVICE(INTEL, 0xa1d6), board_ahci }, /* Lewisburg RAID*/
328     + { PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
329     + { PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
330     + { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
331     + { PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
332     + { PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/
333     + { PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/
334    
335     /* JMicron 360/1/3/5/6, match class to avoid IDE function */
336     { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
337     diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
338     index 0d7f0da3a269..ae7cfcb562dc 100644
339     --- a/drivers/ata/libata-scsi.c
340     +++ b/drivers/ata/libata-scsi.c
341     @@ -675,19 +675,18 @@ static int ata_ioc32(struct ata_port *ap)
342     int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
343     int cmd, void __user *arg)
344     {
345     - int val = -EINVAL, rc = -EINVAL;
346     + unsigned long val;
347     + int rc = -EINVAL;
348     unsigned long flags;
349    
350     switch (cmd) {
351     - case ATA_IOC_GET_IO32:
352     + case HDIO_GET_32BIT:
353     spin_lock_irqsave(ap->lock, flags);
354     val = ata_ioc32(ap);
355     spin_unlock_irqrestore(ap->lock, flags);
356     - if (copy_to_user(arg, &val, 1))
357     - return -EFAULT;
358     - return 0;
359     + return put_user(val, (unsigned long __user *)arg);
360    
361     - case ATA_IOC_SET_IO32:
362     + case HDIO_SET_32BIT:
363     val = (unsigned long) arg;
364     rc = 0;
365     spin_lock_irqsave(ap->lock, flags);
366     diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
367     index 035dacc93382..fd5c5f3370f6 100644
368     --- a/drivers/gpu/drm/ast/ast_main.c
369     +++ b/drivers/gpu/drm/ast/ast_main.c
370     @@ -227,7 +227,7 @@ static int ast_get_dram_info(struct drm_device *dev)
371     } while (ast_read32(ast, 0x10000) != 0x01);
372     data = ast_read32(ast, 0x10004);
373    
374     - if (data & 0x400)
375     + if (data & 0x40)
376     ast->dram_bus_width = 16;
377     else
378     ast->dram_bus_width = 32;
379     diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
380     index 91764320c56f..a56eab7f0ab1 100644
381     --- a/drivers/gpu/drm/radeon/radeon_pm.c
382     +++ b/drivers/gpu/drm/radeon/radeon_pm.c
383     @@ -1079,12 +1079,6 @@ force:
384    
385     /* update display watermarks based on new power state */
386     radeon_bandwidth_update(rdev);
387     - /* update displays */
388     - radeon_dpm_display_configuration_changed(rdev);
389     -
390     - rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
391     - rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
392     - rdev->pm.dpm.single_display = single_display;
393    
394     /* wait for the rings to drain */
395     for (i = 0; i < RADEON_NUM_RINGS; i++) {
396     @@ -1101,6 +1095,13 @@ force:
397    
398     radeon_dpm_post_set_power_state(rdev);
399    
400     + /* update displays */
401     + radeon_dpm_display_configuration_changed(rdev);
402     +
403     + rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
404     + rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
405     + rdev->pm.dpm.single_display = single_display;
406     +
407     if (rdev->asic->dpm.force_performance_level) {
408     if (rdev->pm.dpm.thermal_active) {
409     enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
410     diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
411     index f155b8380481..2b3105c8aed3 100644
412     --- a/drivers/hwmon/ads1015.c
413     +++ b/drivers/hwmon/ads1015.c
414     @@ -126,7 +126,7 @@ static int ads1015_reg_to_mv(struct i2c_client *client, unsigned int channel,
415     struct ads1015_data *data = i2c_get_clientdata(client);
416     unsigned int pga = data->channel_data[channel].pga;
417     int fullscale = fullscale_table[pga];
418     - const unsigned mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
419     + const int mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
420    
421     return DIV_ROUND_CLOSEST(reg * fullscale, mask);
422     }
423     diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
424     index a3dae6d0082a..83ea8c8039fa 100644
425     --- a/drivers/hwmon/gpio-fan.c
426     +++ b/drivers/hwmon/gpio-fan.c
427     @@ -406,16 +406,11 @@ static int gpio_fan_get_cur_state(struct thermal_cooling_device *cdev,
428     unsigned long *state)
429     {
430     struct gpio_fan_data *fan_data = cdev->devdata;
431     - int r;
432    
433     if (!fan_data)
434     return -EINVAL;
435    
436     - r = get_fan_speed_index(fan_data);
437     - if (r < 0)
438     - return r;
439     -
440     - *state = r;
441     + *state = fan_data->speed_index;
442     return 0;
443     }
444    
445     diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
446     index 450ef5001a65..1750db0ef61c 100644
447     --- a/drivers/iommu/amd_iommu_init.c
448     +++ b/drivers/iommu/amd_iommu_init.c
449     @@ -227,6 +227,10 @@ static enum iommu_init_state init_state = IOMMU_START_STATE;
450     static int amd_iommu_enable_interrupts(void);
451     static int __init iommu_go_to_state(enum iommu_init_state state);
452    
453     +static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
454     + u8 bank, u8 cntr, u8 fxn,
455     + u64 *value, bool is_write);
456     +
457     static inline void update_last_devid(u16 devid)
458     {
459     if (devid > amd_iommu_last_bdf)
460     @@ -1066,6 +1070,34 @@ static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
461     }
462    
463     /*
464     + * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
465     + * Workaround:
466     + * BIOS should enable ATS write permission check by setting
467     + * L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
468     + */
469     +static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
470     +{
471     + u32 value;
472     +
473     + if ((boot_cpu_data.x86 != 0x15) ||
474     + (boot_cpu_data.x86_model < 0x30) ||
475     + (boot_cpu_data.x86_model > 0x3f))
476     + return;
477     +
478     + /* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
479     + value = iommu_read_l2(iommu, 0x47);
480     +
481     + if (value & BIT(0))
482     + return;
483     +
484     + /* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
485     + iommu_write_l2(iommu, 0x47, value | BIT(0));
486     +
487     + pr_info("AMD-Vi: Applying ATS write check workaround for IOMMU at %s\n",
488     + dev_name(&iommu->dev->dev));
489     +}
490     +
491     +/*
492     * This function clues the initialization function for one IOMMU
493     * together and also allocates the command buffer and programs the
494     * hardware. It does NOT enable the IOMMU. This is done afterwards.
495     @@ -1192,8 +1224,8 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu)
496     amd_iommu_pc_present = true;
497    
498     /* Check if the performance counters can be written to */
499     - if ((0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val, true)) ||
500     - (0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val2, false)) ||
501     + if ((0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val, true)) ||
502     + (0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val2, false)) ||
503     (val != val2)) {
504     pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n");
505     amd_iommu_pc_present = false;
506     @@ -1339,6 +1371,7 @@ static int iommu_init_pci(struct amd_iommu *iommu)
507     }
508    
509     amd_iommu_erratum_746_workaround(iommu);
510     + amd_iommu_ats_write_check_workaround(iommu);
511    
512     iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu,
513     amd_iommu_groups, "ivhd%d",
514     @@ -2362,22 +2395,15 @@ u8 amd_iommu_pc_get_max_counters(u16 devid)
515     }
516     EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
517    
518     -int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
519     +static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
520     + u8 bank, u8 cntr, u8 fxn,
521     u64 *value, bool is_write)
522     {
523     - struct amd_iommu *iommu;
524     u32 offset;
525     u32 max_offset_lim;
526    
527     - /* Make sure the IOMMU PC resource is available */
528     - if (!amd_iommu_pc_present)
529     - return -ENODEV;
530     -
531     - /* Locate the iommu associated with the device ID */
532     - iommu = amd_iommu_rlookup_table[devid];
533     -
534     /* Check for valid iommu and pc register indexing */
535     - if (WARN_ON((iommu == NULL) || (fxn > 0x28) || (fxn & 7)))
536     + if (WARN_ON((fxn > 0x28) || (fxn & 7)))
537     return -ENODEV;
538    
539     offset = (u32)(((0x40|bank) << 12) | (cntr << 8) | fxn);
540     @@ -2401,3 +2427,16 @@ int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
541     return 0;
542     }
543     EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val);
544     +
545     +int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
546     + u64 *value, bool is_write)
547     +{
548     + struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
549     +
550     + /* Make sure the IOMMU PC resource is available */
551     + if (!amd_iommu_pc_present || iommu == NULL)
552     + return -ENODEV;
553     +
554     + return iommu_pc_get_set_reg_val(iommu, bank, cntr, fxn,
555     + value, is_write);
556     +}
557     diff --git a/drivers/md/dm.c b/drivers/md/dm.c
558     index 8b72ceee0f61..62610aafaac7 100644
559     --- a/drivers/md/dm.c
560     +++ b/drivers/md/dm.c
561     @@ -1146,6 +1146,8 @@ static void dm_unprep_request(struct request *rq)
562    
563     if (clone)
564     free_rq_clone(clone);
565     + else if (!tio->md->queue->mq_ops)
566     + free_rq_tio(tio);
567     }
568    
569     /*
570     diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
571     index 60ffcf098bef..5f92ec23bb07 100644
572     --- a/drivers/media/i2c/adv7604.c
573     +++ b/drivers/media/i2c/adv7604.c
574     @@ -1911,10 +1911,9 @@ static int adv76xx_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
575     }
576    
577     /* tx 5v detect */
578     - tx_5v = io_read(sd, 0x70) & info->cable_det_mask;
579     + tx_5v = irq_reg_0x70 & info->cable_det_mask;
580     if (tx_5v) {
581     v4l2_dbg(1, debug, sd, "%s: tx_5v: 0x%x\n", __func__, tx_5v);
582     - io_write(sd, 0x71, tx_5v);
583     adv76xx_s_detect_tx_5v_ctrl(sd);
584     if (handled)
585     *handled = true;
586     diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
587     index 2a1b6e037e1a..0134ba32a057 100644
588     --- a/drivers/mtd/ubi/upd.c
589     +++ b/drivers/mtd/ubi/upd.c
590     @@ -193,7 +193,7 @@ int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
591     vol->changing_leb = 1;
592     vol->ch_lnum = req->lnum;
593    
594     - vol->upd_buf = vmalloc(req->bytes);
595     + vol->upd_buf = vmalloc(ALIGN((int)req->bytes, ubi->min_io_size));
596     if (!vol->upd_buf)
597     return -ENOMEM;
598    
599     diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
600     index 866bac0ae7e9..339b0c5ce60c 100644
601     --- a/drivers/net/can/usb/ems_usb.c
602     +++ b/drivers/net/can/usb/ems_usb.c
603     @@ -117,6 +117,9 @@ MODULE_LICENSE("GPL v2");
604     */
605     #define EMS_USB_ARM7_CLOCK 8000000
606    
607     +#define CPC_TX_QUEUE_TRIGGER_LOW 25
608     +#define CPC_TX_QUEUE_TRIGGER_HIGH 35
609     +
610     /*
611     * CAN-Message representation in a CPC_MSG. Message object type is
612     * CPC_MSG_TYPE_CAN_FRAME or CPC_MSG_TYPE_RTR_FRAME or
613     @@ -278,6 +281,11 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
614     switch (urb->status) {
615     case 0:
616     dev->free_slots = dev->intr_in_buffer[1];
617     + if(dev->free_slots > CPC_TX_QUEUE_TRIGGER_HIGH){
618     + if (netif_queue_stopped(netdev)){
619     + netif_wake_queue(netdev);
620     + }
621     + }
622     break;
623    
624     case -ECONNRESET: /* unlink */
625     @@ -529,8 +537,6 @@ static void ems_usb_write_bulk_callback(struct urb *urb)
626     /* Release context */
627     context->echo_index = MAX_TX_URBS;
628    
629     - if (netif_queue_stopped(netdev))
630     - netif_wake_queue(netdev);
631     }
632    
633     /*
634     @@ -590,7 +596,7 @@ static int ems_usb_start(struct ems_usb *dev)
635     int err, i;
636    
637     dev->intr_in_buffer[0] = 0;
638     - dev->free_slots = 15; /* initial size */
639     + dev->free_slots = 50; /* initial size */
640    
641     for (i = 0; i < MAX_RX_URBS; i++) {
642     struct urb *urb = NULL;
643     @@ -838,7 +844,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
644    
645     /* Slow down tx path */
646     if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS ||
647     - dev->free_slots < 5) {
648     + dev->free_slots < CPC_TX_QUEUE_TRIGGER_LOW) {
649     netif_stop_queue(netdev);
650     }
651     }
652     diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
653     index 9779c1e5688c..90e8b662e44d 100644
654     --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
655     +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
656     @@ -2797,6 +2797,10 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
657     struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
658     int ret;
659    
660     + /* we don't support "match all" in the firmware */
661     + if (!req->n_match_sets)
662     + return -EOPNOTSUPP;
663     +
664     if (!(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
665     ret = iwl_mvm_cancel_scan_wait_notif(mvm, IWL_MVM_SCAN_OS);
666     if (ret)
667     diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
668     index 7cfd2db02deb..914655e89677 100644
669     --- a/drivers/pci/xen-pcifront.c
670     +++ b/drivers/pci/xen-pcifront.c
671     @@ -52,7 +52,7 @@ struct pcifront_device {
672     };
673    
674     struct pcifront_sd {
675     - int domain;
676     + struct pci_sysdata sd;
677     struct pcifront_device *pdev;
678     };
679    
680     @@ -66,7 +66,9 @@ static inline void pcifront_init_sd(struct pcifront_sd *sd,
681     unsigned int domain, unsigned int bus,
682     struct pcifront_device *pdev)
683     {
684     - sd->domain = domain;
685     + /* Because we do not expose that information via XenBus. */
686     + sd->sd.node = first_online_node;
687     + sd->sd.domain = domain;
688     sd->pdev = pdev;
689     }
690    
691     @@ -464,8 +466,8 @@ static int pcifront_scan_root(struct pcifront_device *pdev,
692     dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
693     domain, bus);
694    
695     - bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL);
696     - sd = kmalloc(sizeof(*sd), GFP_KERNEL);
697     + bus_entry = kzalloc(sizeof(*bus_entry), GFP_KERNEL);
698     + sd = kzalloc(sizeof(*sd), GFP_KERNEL);
699     if (!bus_entry || !sd) {
700     err = -ENOMEM;
701     goto err_out;
702     diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
703     index 2e58279fab60..6f50e9d958de 100644
704     --- a/drivers/target/iscsi/iscsi_target.c
705     +++ b/drivers/target/iscsi/iscsi_target.c
706     @@ -4095,6 +4095,17 @@ reject:
707     return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
708     }
709    
710     +static bool iscsi_target_check_conn_state(struct iscsi_conn *conn)
711     +{
712     + bool ret;
713     +
714     + spin_lock_bh(&conn->state_lock);
715     + ret = (conn->conn_state != TARG_CONN_STATE_LOGGED_IN);
716     + spin_unlock_bh(&conn->state_lock);
717     +
718     + return ret;
719     +}
720     +
721     int iscsi_target_rx_thread(void *arg)
722     {
723     int ret, rc;
724     @@ -4112,7 +4123,7 @@ int iscsi_target_rx_thread(void *arg)
725     * incoming iscsi/tcp socket I/O, and/or failing the connection.
726     */
727     rc = wait_for_completion_interruptible(&conn->rx_login_comp);
728     - if (rc < 0)
729     + if (rc < 0 || iscsi_target_check_conn_state(conn))
730     return 0;
731    
732     if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
733     diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
734     index f9cde9141836..9a96f1712b7a 100644
735     --- a/drivers/target/iscsi/iscsi_target_nego.c
736     +++ b/drivers/target/iscsi/iscsi_target_nego.c
737     @@ -393,6 +393,7 @@ err:
738     if (login->login_complete) {
739     if (conn->rx_thread && conn->rx_thread_active) {
740     send_sig(SIGINT, conn->rx_thread, 1);
741     + complete(&conn->rx_login_comp);
742     kthread_stop(conn->rx_thread);
743     }
744     if (conn->tx_thread && conn->tx_thread_active) {
745     diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
746     index 08aa7cc58694..57fd4e14d4eb 100644
747     --- a/drivers/target/target_core_pr.c
748     +++ b/drivers/target/target_core_pr.c
749     @@ -329,7 +329,7 @@ static int core_scsi3_pr_seq_non_holder(
750     * RESERVATION CONFLICT on some CDBs */
751    
752     if (!se_sess->se_node_acl->device_list)
753     - return;
754     + return 0;
755    
756     se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
757     /*
758     diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
759     index 733824e3825f..46b966d09af2 100644
760     --- a/drivers/target/target_core_sbc.c
761     +++ b/drivers/target/target_core_sbc.c
762     @@ -321,7 +321,8 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
763     return 0;
764     }
765    
766     -static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success)
767     +static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success,
768     + int *post_ret)
769     {
770     unsigned char *buf, *addr;
771     struct scatterlist *sg;
772     @@ -385,7 +386,8 @@ sbc_execute_rw(struct se_cmd *cmd)
773     cmd->data_direction);
774     }
775    
776     -static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
777     +static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
778     + int *post_ret)
779     {
780     struct se_device *dev = cmd->se_dev;
781    
782     @@ -395,8 +397,10 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
783     * sent to the backend driver.
784     */
785     spin_lock_irq(&cmd->t_state_lock);
786     - if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
787     + if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) {
788     cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
789     + *post_ret = 1;
790     + }
791     spin_unlock_irq(&cmd->t_state_lock);
792    
793     /*
794     @@ -408,7 +412,8 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
795     return TCM_NO_SENSE;
796     }
797    
798     -static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success)
799     +static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
800     + int *post_ret)
801     {
802     struct se_device *dev = cmd->se_dev;
803     struct scatterlist *write_sg = NULL, *sg;
804     @@ -504,11 +509,11 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
805    
806     if (block_size < PAGE_SIZE) {
807     sg_set_page(&write_sg[i], m.page, block_size,
808     - block_size);
809     + m.piter.sg->offset + block_size);
810     } else {
811     sg_miter_next(&m);
812     sg_set_page(&write_sg[i], m.page, block_size,
813     - 0);
814     + m.piter.sg->offset);
815     }
816     len -= block_size;
817     i++;
818     diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
819     index adb8016955c4..ad48837ead42 100644
820     --- a/drivers/target/target_core_tmr.c
821     +++ b/drivers/target/target_core_tmr.c
822     @@ -78,16 +78,18 @@ void core_tmr_release_req(struct se_tmr_req *tmr)
823     kfree(tmr);
824     }
825    
826     -static void core_tmr_handle_tas_abort(
827     - struct se_node_acl *tmr_nacl,
828     - struct se_cmd *cmd,
829     - int tas)
830     +static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
831     {
832     - bool remove = true;
833     + unsigned long flags;
834     + bool remove = true, send_tas;
835     /*
836     * TASK ABORTED status (TAS) bit support
837     */
838     - if ((tmr_nacl && (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) {
839     + spin_lock_irqsave(&cmd->t_state_lock, flags);
840     + send_tas = (cmd->transport_state & CMD_T_TAS);
841     + spin_unlock_irqrestore(&cmd->t_state_lock, flags);
842     +
843     + if (send_tas) {
844     remove = false;
845     transport_send_task_abort(cmd);
846     }
847     @@ -110,6 +112,47 @@ static int target_check_cdb_and_preempt(struct list_head *list,
848     return 1;
849     }
850    
851     +static bool __target_check_io_state(struct se_cmd *se_cmd,
852     + struct se_session *tmr_sess, int tas)
853     +{
854     + struct se_session *sess = se_cmd->se_sess;
855     +
856     + assert_spin_locked(&sess->sess_cmd_lock);
857     + WARN_ON_ONCE(!irqs_disabled());
858     + /*
859     + * If command already reached CMD_T_COMPLETE state within
860     + * target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown,
861     + * this se_cmd has been passed to fabric driver and will
862     + * not be aborted.
863     + *
864     + * Otherwise, obtain a local se_cmd->cmd_kref now for TMR
865     + * ABORT_TASK + LUN_RESET for CMD_T_ABORTED processing as
866     + * long as se_cmd->cmd_kref is still active unless zero.
867     + */
868     + spin_lock(&se_cmd->t_state_lock);
869     + if (se_cmd->transport_state & (CMD_T_COMPLETE | CMD_T_FABRIC_STOP)) {
870     + pr_debug("Attempted to abort io tag: %u already complete or"
871     + " fabric stop, skipping\n",
872     + se_cmd->se_tfo->get_task_tag(se_cmd));
873     + spin_unlock(&se_cmd->t_state_lock);
874     + return false;
875     + }
876     + if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
877     + pr_debug("Attempted to abort io tag: %u already shutdown,"
878     + " skipping\n", se_cmd->se_tfo->get_task_tag(se_cmd));
879     + spin_unlock(&se_cmd->t_state_lock);
880     + return false;
881     + }
882     + se_cmd->transport_state |= CMD_T_ABORTED;
883     +
884     + if ((tmr_sess != se_cmd->se_sess) && tas)
885     + se_cmd->transport_state |= CMD_T_TAS;
886     +
887     + spin_unlock(&se_cmd->t_state_lock);
888     +
889     + return kref_get_unless_zero(&se_cmd->cmd_kref);
890     +}
891     +
892     void core_tmr_abort_task(
893     struct se_device *dev,
894     struct se_tmr_req *tmr,
895     @@ -136,25 +179,20 @@ void core_tmr_abort_task(
896     printk("ABORT_TASK: Found referenced %s task_tag: %u\n",
897     se_cmd->se_tfo->get_fabric_name(), ref_tag);
898    
899     - spin_lock(&se_cmd->t_state_lock);
900     - if (se_cmd->transport_state & CMD_T_COMPLETE) {
901     - printk("ABORT_TASK: ref_tag: %u already complete, skipping\n", ref_tag);
902     - spin_unlock(&se_cmd->t_state_lock);
903     + if (!__target_check_io_state(se_cmd, se_sess, 0)) {
904     spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
905     + target_put_sess_cmd(se_cmd);
906     goto out;
907     }
908     - se_cmd->transport_state |= CMD_T_ABORTED;
909     - spin_unlock(&se_cmd->t_state_lock);
910    
911     list_del_init(&se_cmd->se_cmd_list);
912     - kref_get(&se_cmd->cmd_kref);
913     spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
914    
915     cancel_work_sync(&se_cmd->work);
916     transport_wait_for_tasks(se_cmd);
917    
918     - target_put_sess_cmd(se_cmd);
919     transport_cmd_finish_abort(se_cmd, true);
920     + target_put_sess_cmd(se_cmd);
921    
922     printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
923     " ref_tag: %d\n", ref_tag);
924     @@ -211,7 +249,8 @@ static void core_tmr_drain_tmr_list(
925    
926     spin_lock(&sess->sess_cmd_lock);
927     spin_lock(&cmd->t_state_lock);
928     - if (!(cmd->transport_state & CMD_T_ACTIVE)) {
929     + if (!(cmd->transport_state & CMD_T_ACTIVE) ||
930     + (cmd->transport_state & CMD_T_FABRIC_STOP)) {
931     spin_unlock(&cmd->t_state_lock);
932     spin_unlock(&sess->sess_cmd_lock);
933     continue;
934     @@ -221,15 +260,22 @@ static void core_tmr_drain_tmr_list(
935     spin_unlock(&sess->sess_cmd_lock);
936     continue;
937     }
938     + if (sess->sess_tearing_down || cmd->cmd_wait_set) {
939     + spin_unlock(&cmd->t_state_lock);
940     + spin_unlock(&sess->sess_cmd_lock);
941     + continue;
942     + }
943     cmd->transport_state |= CMD_T_ABORTED;
944     spin_unlock(&cmd->t_state_lock);
945    
946     rc = kref_get_unless_zero(&cmd->cmd_kref);
947     - spin_unlock(&sess->sess_cmd_lock);
948     if (!rc) {
949     printk("LUN_RESET TMR: non-zero kref_get_unless_zero\n");
950     + spin_unlock(&sess->sess_cmd_lock);
951     continue;
952     }
953     + spin_unlock(&sess->sess_cmd_lock);
954     +
955     list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
956     }
957     spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
958     @@ -254,13 +300,15 @@ static void core_tmr_drain_tmr_list(
959     static void core_tmr_drain_state_list(
960     struct se_device *dev,
961     struct se_cmd *prout_cmd,
962     - struct se_node_acl *tmr_nacl,
963     + struct se_session *tmr_sess,
964     int tas,
965     struct list_head *preempt_and_abort_list)
966     {
967     LIST_HEAD(drain_task_list);
968     + struct se_session *sess;
969     struct se_cmd *cmd, *next;
970     unsigned long flags;
971     + int rc;
972    
973     /*
974     * Complete outstanding commands with TASK_ABORTED SAM status.
975     @@ -299,6 +347,16 @@ static void core_tmr_drain_state_list(
976     if (prout_cmd == cmd)
977     continue;
978    
979     + sess = cmd->se_sess;
980     + if (WARN_ON_ONCE(!sess))
981     + continue;
982     +
983     + spin_lock(&sess->sess_cmd_lock);
984     + rc = __target_check_io_state(cmd, tmr_sess, tas);
985     + spin_unlock(&sess->sess_cmd_lock);
986     + if (!rc)
987     + continue;
988     +
989     list_move_tail(&cmd->state_list, &drain_task_list);
990     cmd->state_active = false;
991     }
992     @@ -306,7 +364,7 @@ static void core_tmr_drain_state_list(
993    
994     while (!list_empty(&drain_task_list)) {
995     cmd = list_entry(drain_task_list.next, struct se_cmd, state_list);
996     - list_del(&cmd->state_list);
997     + list_del_init(&cmd->state_list);
998    
999     pr_debug("LUN_RESET: %s cmd: %p"
1000     " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state: %d"
1001     @@ -330,16 +388,11 @@ static void core_tmr_drain_state_list(
1002     * loop above, but we do it down here given that
1003     * cancel_work_sync may block.
1004     */
1005     - if (cmd->t_state == TRANSPORT_COMPLETE)
1006     - cancel_work_sync(&cmd->work);
1007     -
1008     - spin_lock_irqsave(&cmd->t_state_lock, flags);
1009     - target_stop_cmd(cmd, &flags);
1010     -
1011     - cmd->transport_state |= CMD_T_ABORTED;
1012     - spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1013     + cancel_work_sync(&cmd->work);
1014     + transport_wait_for_tasks(cmd);
1015    
1016     - core_tmr_handle_tas_abort(tmr_nacl, cmd, tas);
1017     + core_tmr_handle_tas_abort(cmd, tas);
1018     + target_put_sess_cmd(cmd);
1019     }
1020     }
1021    
1022     @@ -351,6 +404,7 @@ int core_tmr_lun_reset(
1023     {
1024     struct se_node_acl *tmr_nacl = NULL;
1025     struct se_portal_group *tmr_tpg = NULL;
1026     + struct se_session *tmr_sess = NULL;
1027     int tas;
1028     /*
1029     * TASK_ABORTED status bit, this is configurable via ConfigFS
1030     @@ -369,8 +423,9 @@ int core_tmr_lun_reset(
1031     * or struct se_device passthrough..
1032     */
1033     if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
1034     - tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
1035     - tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
1036     + tmr_sess = tmr->task_cmd->se_sess;
1037     + tmr_nacl = tmr_sess->se_node_acl;
1038     + tmr_tpg = tmr_sess->se_tpg;
1039     if (tmr_nacl && tmr_tpg) {
1040     pr_debug("LUN_RESET: TMR caller fabric: %s"
1041     " initiator port %s\n",
1042     @@ -383,7 +438,7 @@ int core_tmr_lun_reset(
1043     dev->transport->name, tas);
1044    
1045     core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
1046     - core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas,
1047     + core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas,
1048     preempt_and_abort_list);
1049    
1050     /*
1051     diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
1052     index 3881504b40d8..be12b9d84052 100644
1053     --- a/drivers/target/target_core_transport.c
1054     +++ b/drivers/target/target_core_transport.c
1055     @@ -555,9 +555,6 @@ void transport_deregister_session(struct se_session *se_sess)
1056     }
1057     EXPORT_SYMBOL(transport_deregister_session);
1058    
1059     -/*
1060     - * Called with cmd->t_state_lock held.
1061     - */
1062     static void target_remove_from_state_list(struct se_cmd *cmd)
1063     {
1064     struct se_device *dev = cmd->se_dev;
1065     @@ -582,10 +579,6 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
1066     {
1067     unsigned long flags;
1068    
1069     - spin_lock_irqsave(&cmd->t_state_lock, flags);
1070     - if (write_pending)
1071     - cmd->t_state = TRANSPORT_WRITE_PENDING;
1072     -
1073     if (remove_from_lists) {
1074     target_remove_from_state_list(cmd);
1075    
1076     @@ -595,6 +588,10 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
1077     cmd->se_lun = NULL;
1078     }
1079    
1080     + spin_lock_irqsave(&cmd->t_state_lock, flags);
1081     + if (write_pending)
1082     + cmd->t_state = TRANSPORT_WRITE_PENDING;
1083     +
1084     /*
1085     * Determine if frontend context caller is requesting the stopping of
1086     * this command for frontend exceptions.
1087     @@ -649,6 +646,8 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
1088    
1089     void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
1090     {
1091     + bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
1092     +
1093     if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
1094     transport_lun_remove_cmd(cmd);
1095     /*
1096     @@ -660,7 +659,7 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
1097    
1098     if (transport_cmd_check_stop_to_fabric(cmd))
1099     return;
1100     - if (remove)
1101     + if (remove && ack_kref)
1102     transport_put_cmd(cmd);
1103     }
1104    
1105     @@ -728,7 +727,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
1106     * Check for case where an explicit ABORT_TASK has been received
1107     * and transport_wait_for_tasks() will be waiting for completion..
1108     */
1109     - if (cmd->transport_state & CMD_T_ABORTED &&
1110     + if (cmd->transport_state & CMD_T_ABORTED ||
1111     cmd->transport_state & CMD_T_STOP) {
1112     spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1113     complete_all(&cmd->t_transport_stop_comp);
1114     @@ -1638,7 +1637,7 @@ bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
1115     void transport_generic_request_failure(struct se_cmd *cmd,
1116     sense_reason_t sense_reason)
1117     {
1118     - int ret = 0;
1119     + int ret = 0, post_ret = 0;
1120    
1121     pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
1122     " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
1123     @@ -1661,7 +1660,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
1124     */
1125     if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
1126     cmd->transport_complete_callback)
1127     - cmd->transport_complete_callback(cmd, false);
1128     + cmd->transport_complete_callback(cmd, false, &post_ret);
1129    
1130     switch (sense_reason) {
1131     case TCM_NON_EXISTENT_LUN:
1132     @@ -1836,19 +1835,21 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
1133     return true;
1134     }
1135    
1136     +static int __transport_check_aborted_status(struct se_cmd *, int);
1137     +
1138     void target_execute_cmd(struct se_cmd *cmd)
1139     {
1140     /*
1141     - * If the received CDB has aleady been aborted stop processing it here.
1142     - */
1143     - if (transport_check_aborted_status(cmd, 1))
1144     - return;
1145     -
1146     - /*
1147     * Determine if frontend context caller is requesting the stopping of
1148     * this command for frontend exceptions.
1149     + *
1150     + * If the received CDB has aleady been aborted stop processing it here.
1151     */
1152     spin_lock_irq(&cmd->t_state_lock);
1153     + if (__transport_check_aborted_status(cmd, 1)) {
1154     + spin_unlock_irq(&cmd->t_state_lock);
1155     + return;
1156     + }
1157     if (cmd->transport_state & CMD_T_STOP) {
1158     pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
1159     __func__, __LINE__,
1160     @@ -2056,11 +2057,13 @@ static void target_complete_ok_work(struct work_struct *work)
1161     */
1162     if (cmd->transport_complete_callback) {
1163     sense_reason_t rc;
1164     + bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE);
1165     + bool zero_dl = !(cmd->data_length);
1166     + int post_ret = 0;
1167    
1168     - rc = cmd->transport_complete_callback(cmd, true);
1169     - if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) {
1170     - if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
1171     - !cmd->data_length)
1172     + rc = cmd->transport_complete_callback(cmd, true, &post_ret);
1173     + if (!rc && !post_ret) {
1174     + if (caw && zero_dl)
1175     goto queue_rsp;
1176    
1177     return;
1178     @@ -2209,20 +2212,14 @@ static inline void transport_free_pages(struct se_cmd *cmd)
1179     }
1180    
1181     /**
1182     - * transport_release_cmd - free a command
1183     - * @cmd: command to free
1184     + * transport_put_cmd - release a reference to a command
1185     + * @cmd: command to release
1186     *
1187     - * This routine unconditionally frees a command, and reference counting
1188     - * or list removal must be done in the caller.
1189     + * This routine releases our reference to the command and frees it if possible.
1190     */
1191     -static int transport_release_cmd(struct se_cmd *cmd)
1192     +static int transport_put_cmd(struct se_cmd *cmd)
1193     {
1194     BUG_ON(!cmd->se_tfo);
1195     -
1196     - if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
1197     - core_tmr_release_req(cmd->se_tmr_req);
1198     - if (cmd->t_task_cdb != cmd->__t_task_cdb)
1199     - kfree(cmd->t_task_cdb);
1200     /*
1201     * If this cmd has been setup with target_get_sess_cmd(), drop
1202     * the kref and call ->release_cmd() in kref callback.
1203     @@ -2230,18 +2227,6 @@ static int transport_release_cmd(struct se_cmd *cmd)
1204     return target_put_sess_cmd(cmd);
1205     }
1206    
1207     -/**
1208     - * transport_put_cmd - release a reference to a command
1209     - * @cmd: command to release
1210     - *
1211     - * This routine releases our reference to the command and frees it if possible.
1212     - */
1213     -static int transport_put_cmd(struct se_cmd *cmd)
1214     -{
1215     - transport_free_pages(cmd);
1216     - return transport_release_cmd(cmd);
1217     -}
1218     -
1219     void *transport_kmap_data_sg(struct se_cmd *cmd)
1220     {
1221     struct scatterlist *sg = cmd->t_data_sg;
1222     @@ -2437,34 +2422,59 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
1223     }
1224     }
1225    
1226     -int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
1227     +static bool
1228     +__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
1229     + unsigned long *flags);
1230     +
1231     +static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
1232     {
1233     unsigned long flags;
1234     +
1235     + spin_lock_irqsave(&cmd->t_state_lock, flags);
1236     + __transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
1237     + spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1238     +}
1239     +
1240     +int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
1241     +{
1242     int ret = 0;
1243     + bool aborted = false, tas = false;
1244    
1245     if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
1246     if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
1247     - transport_wait_for_tasks(cmd);
1248     + target_wait_free_cmd(cmd, &aborted, &tas);
1249    
1250     - ret = transport_release_cmd(cmd);
1251     + if (!aborted || tas)
1252     + ret = transport_put_cmd(cmd);
1253     } else {
1254     if (wait_for_tasks)
1255     - transport_wait_for_tasks(cmd);
1256     + target_wait_free_cmd(cmd, &aborted, &tas);
1257     /*
1258     * Handle WRITE failure case where transport_generic_new_cmd()
1259     * has already added se_cmd to state_list, but fabric has
1260     * failed command before I/O submission.
1261     */
1262     - if (cmd->state_active) {
1263     - spin_lock_irqsave(&cmd->t_state_lock, flags);
1264     + if (cmd->state_active)
1265     target_remove_from_state_list(cmd);
1266     - spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1267     - }
1268    
1269     if (cmd->se_lun)
1270     transport_lun_remove_cmd(cmd);
1271    
1272     - ret = transport_put_cmd(cmd);
1273     + if (!aborted || tas)
1274     + ret = transport_put_cmd(cmd);
1275     + }
1276     + /*
1277     + * If the task has been internally aborted due to TMR ABORT_TASK
1278     + * or LUN_RESET, target_core_tmr.c is responsible for performing
1279     + * the remaining calls to target_put_sess_cmd(), and not the
1280     + * callers of this function.
1281     + */
1282     + if (aborted) {
1283     + pr_debug("Detected CMD_T_ABORTED for ITT: %u\n",
1284     + cmd->se_tfo->get_task_tag(cmd));
1285     + wait_for_completion(&cmd->cmd_wait_comp);
1286     + cmd->se_tfo->release_cmd(cmd);
1287     + ret = 1;
1288     }
1289     return ret;
1290     }
1291     @@ -2504,25 +2514,45 @@ out:
1292     }
1293     EXPORT_SYMBOL(target_get_sess_cmd);
1294    
1295     +static void target_free_cmd_mem(struct se_cmd *cmd)
1296     +{
1297     + transport_free_pages(cmd);
1298     +
1299     + if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
1300     + core_tmr_release_req(cmd->se_tmr_req);
1301     + if (cmd->t_task_cdb != cmd->__t_task_cdb)
1302     + kfree(cmd->t_task_cdb);
1303     +}
1304     +
1305     static void target_release_cmd_kref(struct kref *kref)
1306     __releases(&se_cmd->se_sess->sess_cmd_lock)
1307     {
1308     struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
1309     struct se_session *se_sess = se_cmd->se_sess;
1310     + bool fabric_stop;
1311    
1312     if (list_empty(&se_cmd->se_cmd_list)) {
1313     spin_unlock(&se_sess->sess_cmd_lock);
1314     + target_free_cmd_mem(se_cmd);
1315     se_cmd->se_tfo->release_cmd(se_cmd);
1316     return;
1317     }
1318     - if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
1319     +
1320     + spin_lock(&se_cmd->t_state_lock);
1321     + fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP);
1322     + spin_unlock(&se_cmd->t_state_lock);
1323     +
1324     + if (se_cmd->cmd_wait_set || fabric_stop) {
1325     + list_del_init(&se_cmd->se_cmd_list);
1326     spin_unlock(&se_sess->sess_cmd_lock);
1327     + target_free_cmd_mem(se_cmd);
1328     complete(&se_cmd->cmd_wait_comp);
1329     return;
1330     }
1331     - list_del(&se_cmd->se_cmd_list);
1332     + list_del_init(&se_cmd->se_cmd_list);
1333     spin_unlock(&se_sess->sess_cmd_lock);
1334    
1335     + target_free_cmd_mem(se_cmd);
1336     se_cmd->se_tfo->release_cmd(se_cmd);
1337     }
1338    
1339     @@ -2534,6 +2564,7 @@ int target_put_sess_cmd(struct se_cmd *se_cmd)
1340     struct se_session *se_sess = se_cmd->se_sess;
1341    
1342     if (!se_sess) {
1343     + target_free_cmd_mem(se_cmd);
1344     se_cmd->se_tfo->release_cmd(se_cmd);
1345     return 1;
1346     }
1347     @@ -2551,6 +2582,7 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
1348     {
1349     struct se_cmd *se_cmd;
1350     unsigned long flags;
1351     + int rc;
1352    
1353     spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
1354     if (se_sess->sess_tearing_down) {
1355     @@ -2560,8 +2592,15 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
1356     se_sess->sess_tearing_down = 1;
1357     list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
1358    
1359     - list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
1360     - se_cmd->cmd_wait_set = 1;
1361     + list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) {
1362     + rc = kref_get_unless_zero(&se_cmd->cmd_kref);
1363     + if (rc) {
1364     + se_cmd->cmd_wait_set = 1;
1365     + spin_lock(&se_cmd->t_state_lock);
1366     + se_cmd->transport_state |= CMD_T_FABRIC_STOP;
1367     + spin_unlock(&se_cmd->t_state_lock);
1368     + }
1369     + }
1370    
1371     spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
1372     }
1373     @@ -2574,15 +2613,25 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
1374     {
1375     struct se_cmd *se_cmd, *tmp_cmd;
1376     unsigned long flags;
1377     + bool tas;
1378    
1379     list_for_each_entry_safe(se_cmd, tmp_cmd,
1380     &se_sess->sess_wait_list, se_cmd_list) {
1381     - list_del(&se_cmd->se_cmd_list);
1382     + list_del_init(&se_cmd->se_cmd_list);
1383    
1384     pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
1385     " %d\n", se_cmd, se_cmd->t_state,
1386     se_cmd->se_tfo->get_cmd_state(se_cmd));
1387    
1388     + spin_lock_irqsave(&se_cmd->t_state_lock, flags);
1389     + tas = (se_cmd->transport_state & CMD_T_TAS);
1390     + spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
1391     +
1392     + if (!target_put_sess_cmd(se_cmd)) {
1393     + if (tas)
1394     + target_put_sess_cmd(se_cmd);
1395     + }
1396     +
1397     wait_for_completion(&se_cmd->cmd_wait_comp);
1398     pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
1399     " fabric state: %d\n", se_cmd, se_cmd->t_state,
1400     @@ -2625,34 +2674,38 @@ int transport_clear_lun_ref(struct se_lun *lun)
1401     return 0;
1402     }
1403    
1404     -/**
1405     - * transport_wait_for_tasks - wait for completion to occur
1406     - * @cmd: command to wait
1407     - *
1408     - * Called from frontend fabric context to wait for storage engine
1409     - * to pause and/or release frontend generated struct se_cmd.
1410     - */
1411     -bool transport_wait_for_tasks(struct se_cmd *cmd)
1412     +static bool
1413     +__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
1414     + bool *aborted, bool *tas, unsigned long *flags)
1415     + __releases(&cmd->t_state_lock)
1416     + __acquires(&cmd->t_state_lock)
1417     {
1418     - unsigned long flags;
1419    
1420     - spin_lock_irqsave(&cmd->t_state_lock, flags);
1421     + assert_spin_locked(&cmd->t_state_lock);
1422     + WARN_ON_ONCE(!irqs_disabled());
1423     +
1424     + if (fabric_stop)
1425     + cmd->transport_state |= CMD_T_FABRIC_STOP;
1426     +
1427     + if (cmd->transport_state & CMD_T_ABORTED)
1428     + *aborted = true;
1429     +
1430     + if (cmd->transport_state & CMD_T_TAS)
1431     + *tas = true;
1432     +
1433     if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
1434     - !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
1435     - spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1436     + !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
1437     return false;
1438     - }
1439    
1440     if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
1441     - !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
1442     - spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1443     + !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
1444     return false;
1445     - }
1446    
1447     - if (!(cmd->transport_state & CMD_T_ACTIVE)) {
1448     - spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1449     + if (!(cmd->transport_state & CMD_T_ACTIVE))
1450     + return false;
1451     +
1452     + if (fabric_stop && *aborted)
1453     return false;
1454     - }
1455    
1456     cmd->transport_state |= CMD_T_STOP;
1457    
1458     @@ -2661,20 +2714,37 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
1459     cmd, cmd->se_tfo->get_task_tag(cmd),
1460     cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
1461    
1462     - spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1463     + spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
1464    
1465     wait_for_completion(&cmd->t_transport_stop_comp);
1466    
1467     - spin_lock_irqsave(&cmd->t_state_lock, flags);
1468     + spin_lock_irqsave(&cmd->t_state_lock, *flags);
1469     cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
1470    
1471     pr_debug("wait_for_tasks: Stopped wait_for_completion("
1472     "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
1473     cmd->se_tfo->get_task_tag(cmd));
1474    
1475     + return true;
1476     +}
1477     +
1478     +/**
1479     + * transport_wait_for_tasks - wait for completion to occur
1480     + * @cmd: command to wait
1481     + *
1482     + * Called from frontend fabric context to wait for storage engine
1483     + * to pause and/or release frontend generated struct se_cmd.
1484     + */
1485     +bool transport_wait_for_tasks(struct se_cmd *cmd)
1486     +{
1487     + unsigned long flags;
1488     + bool ret, aborted = false, tas = false;
1489     +
1490     + spin_lock_irqsave(&cmd->t_state_lock, flags);
1491     + ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
1492     spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1493    
1494     - return true;
1495     + return ret;
1496     }
1497     EXPORT_SYMBOL(transport_wait_for_tasks);
1498    
1499     @@ -2960,8 +3030,13 @@ after_reason:
1500     }
1501     EXPORT_SYMBOL(transport_send_check_condition_and_sense);
1502    
1503     -int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
1504     +static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
1505     + __releases(&cmd->t_state_lock)
1506     + __acquires(&cmd->t_state_lock)
1507     {
1508     + assert_spin_locked(&cmd->t_state_lock);
1509     + WARN_ON_ONCE(!irqs_disabled());
1510     +
1511     if (!(cmd->transport_state & CMD_T_ABORTED))
1512     return 0;
1513    
1514     @@ -2969,19 +3044,37 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
1515     * If cmd has been aborted but either no status is to be sent or it has
1516     * already been sent, just return
1517     */
1518     - if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS))
1519     + if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) {
1520     + if (send_status)
1521     + cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
1522     return 1;
1523     + }
1524    
1525     - pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08x\n",
1526     - cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd));
1527     + pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:"
1528     + " 0x%02x ITT: 0x%08x\n", cmd->t_task_cdb[0],
1529     + cmd->se_tfo->get_task_tag(cmd));
1530    
1531     cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
1532     cmd->scsi_status = SAM_STAT_TASK_ABORTED;
1533     trace_target_cmd_complete(cmd);
1534     +
1535     + spin_unlock_irq(&cmd->t_state_lock);
1536     cmd->se_tfo->queue_status(cmd);
1537     + spin_lock_irq(&cmd->t_state_lock);
1538    
1539     return 1;
1540     }
1541     +
1542     +int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
1543     +{
1544     + int ret;
1545     +
1546     + spin_lock_irq(&cmd->t_state_lock);
1547     + ret = __transport_check_aborted_status(cmd, send_status);
1548     + spin_unlock_irq(&cmd->t_state_lock);
1549     +
1550     + return ret;
1551     +}
1552     EXPORT_SYMBOL(transport_check_aborted_status);
1553    
1554     void transport_send_task_abort(struct se_cmd *cmd)
1555     @@ -3003,11 +3096,17 @@ void transport_send_task_abort(struct se_cmd *cmd)
1556     */
1557     if (cmd->data_direction == DMA_TO_DEVICE) {
1558     if (cmd->se_tfo->write_pending_status(cmd) != 0) {
1559     - cmd->transport_state |= CMD_T_ABORTED;
1560     + spin_lock_irqsave(&cmd->t_state_lock, flags);
1561     + if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) {
1562     + spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1563     + goto send_abort;
1564     + }
1565     cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
1566     + spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1567     return;
1568     }
1569     }
1570     +send_abort:
1571     cmd->scsi_status = SAM_STAT_TASK_ABORTED;
1572    
1573     transport_lun_remove_cmd(cmd);
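The teardown hunks above only flag a command after kref_get_unless_zero(&se_cmd->cmd_kref) succeeds, so a command whose refcount has already hit zero is skipped instead of being revived. A rough user-space analogue of that "reference only if still live" idiom, with a plain C11 atomic counter standing in for struct kref (the names ref_get_unless_zero/ref_put are invented for this sketch):

#include <stdio.h>
#include <stdatomic.h>

/* Stand-in for struct kref: just an atomic refcount. */
struct ref {
        atomic_int count;
};

/* Analogue of kref_get_unless_zero(): take a reference only while the count
 * is still non-zero, i.e. only while nobody has started freeing the object. */
static int ref_get_unless_zero(struct ref *r)
{
        int old = atomic_load(&r->count);

        while (old != 0) {
                if (atomic_compare_exchange_weak(&r->count, &old, old + 1))
                        return 1;       /* reference taken */
        }
        return 0;                       /* already dying, leave it alone */
}

/* Analogue of kref_put(): returns 1 when the last reference was dropped. */
static int ref_put(struct ref *r)
{
        return atomic_fetch_sub(&r->count, 1) == 1;
}

int main(void)
{
        struct ref live, dying;

        atomic_init(&live.count, 1);    /* command still owned by the fabric */
        atomic_init(&dying.count, 0);   /* command whose last ref is already gone */

        if (ref_get_unless_zero(&live)) {
                /* safe to flag it (cmd_wait_set / CMD_T_FABRIC_STOP in the patch) */
                puts("live command: extra reference taken");
                ref_put(&live);         /* the waiter's put, later */
        }
        if (!ref_get_unless_zero(&dying))
                puts("dying command: skipped, never touched");

        if (ref_put(&live))
                puts("last reference dropped, object would be freed here");
        return 0;
}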
1574     diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
1575     index ad6c87a4653c..fbc6285905a6 100644
1576     --- a/drivers/usb/chipidea/otg.c
1577     +++ b/drivers/usb/chipidea/otg.c
1578     @@ -118,7 +118,7 @@ static void ci_otg_work(struct work_struct *work)
1579     int ci_hdrc_otg_init(struct ci_hdrc *ci)
1580     {
1581     INIT_WORK(&ci->work, ci_otg_work);
1582     - ci->wq = create_singlethread_workqueue("ci_otg");
1583     + ci->wq = create_freezable_workqueue("ci_otg");
1584     if (!ci->wq) {
1585     dev_err(ci->dev, "can't create workqueue\n");
1586     return -ENODEV;
1587     diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
1588     index 173edd4ca20e..be245d073f15 100644
1589     --- a/drivers/usb/dwc3/core.h
1590     +++ b/drivers/usb/dwc3/core.h
1591     @@ -829,7 +829,6 @@ struct dwc3 {
1592     unsigned pullups_connected:1;
1593     unsigned resize_fifos:1;
1594     unsigned setup_packet_pending:1;
1595     - unsigned start_config_issued:1;
1596     unsigned three_stage_setup:1;
1597     unsigned usb3_lpm_capable:1;
1598    
1599     diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
1600     index 06ecd1e6871c..00f2c456f94b 100644
1601     --- a/drivers/usb/dwc3/ep0.c
1602     +++ b/drivers/usb/dwc3/ep0.c
1603     @@ -545,7 +545,6 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
1604     int ret;
1605     u32 reg;
1606    
1607     - dwc->start_config_issued = false;
1608     cfg = le16_to_cpu(ctrl->wValue);
1609    
1610     switch (state) {
1611     @@ -727,10 +726,6 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
1612     dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY");
1613     ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
1614     break;
1615     - case USB_REQ_SET_INTERFACE:
1616     - dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_INTERFACE");
1617     - dwc->start_config_issued = false;
1618     - /* Fall through */
1619     default:
1620     dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver");
1621     ret = dwc3_ep0_delegate_req(dwc, ctrl);
1622     diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
1623     index 6fbf461d523c..b886226be241 100644
1624     --- a/drivers/usb/dwc3/gadget.c
1625     +++ b/drivers/usb/dwc3/gadget.c
1626     @@ -388,24 +388,66 @@ static void dwc3_free_trb_pool(struct dwc3_ep *dep)
1627     dep->trb_pool_dma = 0;
1628     }
1629    
1630     +static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);
1631     +
1632     +/**
1633     + * dwc3_gadget_start_config - Configure EP resources
1634     + * @dwc: pointer to our controller context structure
1635     + * @dep: endpoint that is being enabled
1636     + *
1637     + * The assignment of transfer resources cannot perfectly follow the
1638     + * data book due to the fact that the controller driver does not have
1639     + * all knowledge of the configuration in advance. It is given this
1640     + * information piecemeal by the composite gadget framework after every
1641     + * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook
1642     + * programming model in this scenario can cause errors. For two
1643     + * reasons:
1644     + *
1645     + * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION
1646     + * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of
1647     + * multiple interfaces.
1648     + *
1649     + * 2) The databook does not mention doing more DEPXFERCFG for new
1650     + * endpoint on alt setting (8.1.6).
1651     + *
1652     + * The following simplified method is used instead:
1653     + *
1654     + * All hardware endpoints can be assigned a transfer resource and this
1655     + * setting will stay persistent until either a core reset or
1656     + * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and
1657     + * do DEPXFERCFG for every hardware endpoint as well. We are
1658     + * guaranteed that there are as many transfer resources as endpoints.
1659     + *
1660     + * This function is called for each endpoint when it is being enabled
1661     + * but is triggered only when called for EP0-out, which always happens
1662     + * first, and which should only happen in one of the above conditions.
1663     + */
1664     static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
1665     {
1666     struct dwc3_gadget_ep_cmd_params params;
1667     u32 cmd;
1668     + int i;
1669     + int ret;
1670     +
1671     + if (dep->number)
1672     + return 0;
1673    
1674     memset(&params, 0x00, sizeof(params));
1675     + cmd = DWC3_DEPCMD_DEPSTARTCFG;
1676    
1677     - if (dep->number != 1) {
1678     - cmd = DWC3_DEPCMD_DEPSTARTCFG;
1679     - /* XferRscIdx == 0 for ep0 and 2 for the remaining */
1680     - if (dep->number > 1) {
1681     - if (dwc->start_config_issued)
1682     - return 0;
1683     - dwc->start_config_issued = true;
1684     - cmd |= DWC3_DEPCMD_PARAM(2);
1685     - }
1686     + ret = dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
1687     + if (ret)
1688     + return ret;
1689    
1690     - return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
1691     + for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
1692     + struct dwc3_ep *dep = dwc->eps[i];
1693     +
1694     + if (!dep)
1695     + continue;
1696     +
1697     + ret = dwc3_gadget_set_xfer_resource(dwc, dep);
1698     + if (ret)
1699     + return ret;
1700     }
1701    
1702     return 0;
1703     @@ -519,10 +561,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
1704     struct dwc3_trb *trb_st_hw;
1705     struct dwc3_trb *trb_link;
1706    
1707     - ret = dwc3_gadget_set_xfer_resource(dwc, dep);
1708     - if (ret)
1709     - return ret;
1710     -
1711     dep->endpoint.desc = desc;
1712     dep->comp_desc = comp_desc;
1713     dep->type = usb_endpoint_type(desc);
1714     @@ -1589,8 +1627,6 @@ static int dwc3_gadget_start(struct usb_gadget *g,
1715     }
1716     dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1717    
1718     - dwc->start_config_issued = false;
1719     -
1720     /* Start with SuperSpeed Default */
1721     dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1722    
1723     @@ -2167,7 +2203,6 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
1724     dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1725    
1726     dwc3_disconnect_gadget(dwc);
1727     - dwc->start_config_issued = false;
1728    
1729     dwc->gadget.speed = USB_SPEED_UNKNOWN;
1730     dwc->setup_packet_pending = false;
1731     @@ -2218,7 +2253,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
1732    
1733     dwc3_stop_active_transfers(dwc);
1734     dwc3_clear_stall_all_ep(dwc);
1735     - dwc->start_config_issued = false;
1736    
1737     /* Reset device address to zero */
1738     reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1739     diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
1740     index 1dd9919081f8..7a76fe4c2f9e 100644
1741     --- a/drivers/usb/serial/cp210x.c
1742     +++ b/drivers/usb/serial/cp210x.c
1743     @@ -162,6 +162,9 @@ static const struct usb_device_id id_table[] = {
1744     { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
1745     { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
1746     { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
1747     + { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
1748     + { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
1749     + { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
1750     { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
1751     { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
1752     { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
1753     diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1754     index 88540596973f..ce3d6af977b7 100644
1755     --- a/drivers/usb/serial/option.c
1756     +++ b/drivers/usb/serial/option.c
1757     @@ -273,6 +273,7 @@ static void option_instat_callback(struct urb *urb);
1758     #define TELIT_PRODUCT_UE910_V2 0x1012
1759     #define TELIT_PRODUCT_LE922_USBCFG0 0x1042
1760     #define TELIT_PRODUCT_LE922_USBCFG3 0x1043
1761     +#define TELIT_PRODUCT_LE922_USBCFG5 0x1045
1762     #define TELIT_PRODUCT_LE920 0x1200
1763     #define TELIT_PRODUCT_LE910 0x1201
1764    
1765     @@ -318,6 +319,7 @@ static void option_instat_callback(struct urb *urb);
1766     #define TOSHIBA_PRODUCT_G450 0x0d45
1767    
1768     #define ALINK_VENDOR_ID 0x1e0e
1769     +#define SIMCOM_PRODUCT_SIM7100E 0x9001 /* Yes, ALINK_VENDOR_ID */
1770     #define ALINK_PRODUCT_PH300 0x9100
1771     #define ALINK_PRODUCT_3GU 0x9200
1772    
1773     @@ -610,6 +612,10 @@ static const struct option_blacklist_info zte_1255_blacklist = {
1774     .reserved = BIT(3) | BIT(4),
1775     };
1776    
1777     +static const struct option_blacklist_info simcom_sim7100e_blacklist = {
1778     + .reserved = BIT(5) | BIT(6),
1779     +};
1780     +
1781     static const struct option_blacklist_info telit_le910_blacklist = {
1782     .sendsetup = BIT(0),
1783     .reserved = BIT(1) | BIT(2),
1784     @@ -1130,6 +1136,8 @@ static const struct usb_device_id option_ids[] = {
1785     { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
1786     { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
1787     { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
1788     + { USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */
1789     + .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
1790     { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
1791     { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
1792     { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
1793     @@ -1137,6 +1145,8 @@ static const struct usb_device_id option_ids[] = {
1794     .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC73xx */
1795     { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x9041, 0xff),
1796     .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC7305/MC7355 */
1797     + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9003), /* Quectel UC20 */
1798     + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1799     { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
1800     { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
1801     { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
1802     @@ -1188,6 +1198,8 @@ static const struct usb_device_id option_ids[] = {
1803     .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
1804     { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
1805     .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
1806     + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
1807     + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
1808     { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
1809     .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
1810     { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
1811     @@ -1657,6 +1669,8 @@ static const struct usb_device_id option_ids[] = {
1812     { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
1813     { USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) },
1814     { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
1815     + { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
1816     + .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist },
1817     { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
1818     .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
1819     },
1820     diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
1821     index 514fa91cf74e..f0a2ad15a992 100644
1822     --- a/drivers/usb/serial/qcserial.c
1823     +++ b/drivers/usb/serial/qcserial.c
1824     @@ -155,14 +155,17 @@ static const struct usb_device_id id_table[] = {
1825     {DEVICE_SWI(0x1199, 0x9056)}, /* Sierra Wireless Modem */
1826     {DEVICE_SWI(0x1199, 0x9060)}, /* Sierra Wireless Modem */
1827     {DEVICE_SWI(0x1199, 0x9061)}, /* Sierra Wireless Modem */
1828     - {DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx/EM74xx */
1829     - {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx/EM74xx */
1830     + {DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx */
1831     + {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */
1832     + {DEVICE_SWI(0x1199, 0x9078)}, /* Sierra Wireless EM74xx */
1833     + {DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */
1834     {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
1835     {DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
1836     {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
1837     {DEVICE_SWI(0x413c, 0x81a8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
1838     {DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
1839     {DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
1840     + {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
1841    
1842     /* Huawei devices */
1843     {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
1844     diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
1845     index e9851add6f4e..c0f4ab83aaa8 100644
1846     --- a/drivers/vfio/pci/vfio_pci.c
1847     +++ b/drivers/vfio/pci/vfio_pci.c
1848     @@ -446,7 +446,8 @@ static long vfio_pci_ioctl(void *device_data,
1849     info.num_regions = VFIO_PCI_NUM_REGIONS;
1850     info.num_irqs = VFIO_PCI_NUM_IRQS;
1851    
1852     - return copy_to_user((void __user *)arg, &info, minsz);
1853     + return copy_to_user((void __user *)arg, &info, minsz) ?
1854     + -EFAULT : 0;
1855    
1856     } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
1857     struct pci_dev *pdev = vdev->pdev;
1858     @@ -520,7 +521,8 @@ static long vfio_pci_ioctl(void *device_data,
1859     return -EINVAL;
1860     }
1861    
1862     - return copy_to_user((void __user *)arg, &info, minsz);
1863     + return copy_to_user((void __user *)arg, &info, minsz) ?
1864     + -EFAULT : 0;
1865    
1866     } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
1867     struct vfio_irq_info info;
1868     @@ -555,7 +557,8 @@ static long vfio_pci_ioctl(void *device_data,
1869     else
1870     info.flags |= VFIO_IRQ_INFO_NORESIZE;
1871    
1872     - return copy_to_user((void __user *)arg, &info, minsz);
1873     + return copy_to_user((void __user *)arg, &info, minsz) ?
1874     + -EFAULT : 0;
1875    
1876     } else if (cmd == VFIO_DEVICE_SET_IRQS) {
1877     struct vfio_irq_set hdr;
1878     diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
1879     index abcff7a1aa66..973b24ffe332 100644
1880     --- a/drivers/vfio/platform/vfio_platform_common.c
1881     +++ b/drivers/vfio/platform/vfio_platform_common.c
1882     @@ -163,7 +163,8 @@ static long vfio_platform_ioctl(void *device_data,
1883     info.num_regions = vdev->num_regions;
1884     info.num_irqs = vdev->num_irqs;
1885    
1886     - return copy_to_user((void __user *)arg, &info, minsz);
1887     + return copy_to_user((void __user *)arg, &info, minsz) ?
1888     + -EFAULT : 0;
1889    
1890     } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
1891     struct vfio_region_info info;
1892     @@ -184,7 +185,8 @@ static long vfio_platform_ioctl(void *device_data,
1893     info.size = vdev->regions[info.index].size;
1894     info.flags = vdev->regions[info.index].flags;
1895    
1896     - return copy_to_user((void __user *)arg, &info, minsz);
1897     + return copy_to_user((void __user *)arg, &info, minsz) ?
1898     + -EFAULT : 0;
1899    
1900     } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
1901     struct vfio_irq_info info;
1902     @@ -203,7 +205,8 @@ static long vfio_platform_ioctl(void *device_data,
1903     info.flags = vdev->irqs[info.index].flags;
1904     info.count = vdev->irqs[info.index].count;
1905    
1906     - return copy_to_user((void __user *)arg, &info, minsz);
1907     + return copy_to_user((void __user *)arg, &info, minsz) ?
1908     + -EFAULT : 0;
1909    
1910     } else if (cmd == VFIO_DEVICE_SET_IRQS) {
1911     struct vfio_irq_set hdr;
1912     diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
1913     index 57d8c37a002b..092216540756 100644
1914     --- a/drivers/vfio/vfio_iommu_type1.c
1915     +++ b/drivers/vfio/vfio_iommu_type1.c
1916     @@ -986,7 +986,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
1917    
1918     info.iova_pgsizes = vfio_pgsize_bitmap(iommu);
1919    
1920     - return copy_to_user((void __user *)arg, &info, minsz);
1921     + return copy_to_user((void __user *)arg, &info, minsz) ?
1922     + -EFAULT : 0;
1923    
1924     } else if (cmd == VFIO_IOMMU_MAP_DMA) {
1925     struct vfio_iommu_type1_dma_map map;
1926     @@ -1019,7 +1020,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
1927     if (ret)
1928     return ret;
1929    
1930     - return copy_to_user((void __user *)arg, &unmap, minsz);
1931     + return copy_to_user((void __user *)arg, &unmap, minsz) ?
1932     + -EFAULT : 0;
1933     }
1934    
1935     return -ENOTTY;
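All of the vfio hunks rewrite "return copy_to_user(...)" as "copy_to_user(...) ? -EFAULT : 0" because copy_to_user() reports the number of bytes it failed to copy rather than an errno, so the old code could hand a positive byte count back as the ioctl result. A minimal user-space sketch of the idiom; fake_copy_to_user() and its fault flag are stand-ins invented here, not kernel interfaces:

#include <stdio.h>
#include <string.h>

#define EFAULT 14

/* Stand-in for copy_to_user(): like the kernel helper, it returns the number
 * of bytes it could NOT copy (0 on success).  The 'fault' flag lets this
 * sketch simulate an unwritable user pointer. */
static unsigned long fake_copy_to_user(void *dst, const void *src,
                                       unsigned long n, int fault)
{
        if (fault)
                return n;               /* nothing copied */
        memcpy(dst, src, n);
        return 0;
}

/* Old shape: the positive "bytes not copied" count escapes as the ioctl result. */
static long get_info_old(void *dst, const void *src, unsigned long n, int fault)
{
        return fake_copy_to_user(dst, src, n, fault);
}

/* New shape used by the vfio hunks: any shortfall becomes -EFAULT, success is 0. */
static long get_info_new(void *dst, const void *src, unsigned long n, int fault)
{
        return fake_copy_to_user(dst, src, n, fault) ? -EFAULT : 0;
}

int main(void)
{
        char src[16] = "region info", dst[16];

        printf("faulting copy, old return: %ld\n",
               get_info_old(dst, src, sizeof(src), 1));
        printf("faulting copy, new return: %ld\n",
               get_info_new(dst, src, sizeof(src), 1));
        return 0;
}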
1936     diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
1937     index 58e38d586f52..4d529f3e40df 100644
1938     --- a/drivers/xen/xen-pciback/pciback.h
1939     +++ b/drivers/xen/xen-pciback/pciback.h
1940     @@ -37,6 +37,7 @@ struct xen_pcibk_device {
1941     struct xen_pci_sharedinfo *sh_info;
1942     unsigned long flags;
1943     struct work_struct op_work;
1944     + struct xen_pci_op op;
1945     };
1946    
1947     struct xen_pcibk_dev_data {
1948     diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
1949     index c4a0666de6f5..9cf4653b6bd7 100644
1950     --- a/drivers/xen/xen-pciback/pciback_ops.c
1951     +++ b/drivers/xen/xen-pciback/pciback_ops.c
1952     @@ -197,13 +197,27 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
1953     struct xen_pcibk_dev_data *dev_data;
1954     int i, result;
1955     struct msix_entry *entries;
1956     + u16 cmd;
1957    
1958     if (unlikely(verbose_request))
1959     printk(KERN_DEBUG DRV_NAME ": %s: enable MSI-X\n",
1960     pci_name(dev));
1961     +
1962     if (op->value > SH_INFO_MAX_VEC)
1963     return -EINVAL;
1964    
1965     + if (dev->msix_enabled)
1966     + return -EALREADY;
1967     +
1968     + /*
1969     + * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able
1970     + * to access the BARs where the MSI-X entries reside.
1971     + * But VF devices are unique in that the PF needs to be checked.
1972     + */
1973     + pci_read_config_word(pci_physfn(dev), PCI_COMMAND, &cmd);
1974     + if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY))
1975     + return -ENXIO;
1976     +
1977     entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL);
1978     if (entries == NULL)
1979     return -ENOMEM;
1980     @@ -298,9 +312,14 @@ void xen_pcibk_do_op(struct work_struct *data)
1981     container_of(data, struct xen_pcibk_device, op_work);
1982     struct pci_dev *dev;
1983     struct xen_pcibk_dev_data *dev_data = NULL;
1984     - struct xen_pci_op *op = &pdev->sh_info->op;
1985     + struct xen_pci_op *op = &pdev->op;
1986     int test_intx = 0;
1987     +#ifdef CONFIG_PCI_MSI
1988     + unsigned int nr = 0;
1989     +#endif
1990    
1991     + *op = pdev->sh_info->op;
1992     + barrier();
1993     dev = xen_pcibk_get_pci_dev(pdev, op->domain, op->bus, op->devfn);
1994    
1995     if (dev == NULL)
1996     @@ -326,6 +345,7 @@ void xen_pcibk_do_op(struct work_struct *data)
1997     op->err = xen_pcibk_disable_msi(pdev, dev, op);
1998     break;
1999     case XEN_PCI_OP_enable_msix:
2000     + nr = op->value;
2001     op->err = xen_pcibk_enable_msix(pdev, dev, op);
2002     break;
2003     case XEN_PCI_OP_disable_msix:
2004     @@ -342,6 +362,17 @@ void xen_pcibk_do_op(struct work_struct *data)
2005     if ((dev_data->enable_intx != test_intx))
2006     xen_pcibk_control_isr(dev, 0 /* no reset */);
2007     }
2008     + pdev->sh_info->op.err = op->err;
2009     + pdev->sh_info->op.value = op->value;
2010     +#ifdef CONFIG_PCI_MSI
2011     + if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) {
2012     + unsigned int i;
2013     +
2014     + for (i = 0; i < nr; i++)
2015     + pdev->sh_info->op.msix_entries[i].vector =
2016     + op->msix_entries[i].vector;
2017     + }
2018     +#endif
2019     /* Tell the driver domain that we're done. */
2020     wmb();
2021     clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
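The pciback change processes a private copy of the request: *op = pdev->sh_info->op snapshots the guest-visible structure before anything is validated or used, so the frontend cannot change, say, the MSI-X entry count between the range check and the allocation, and only the result fields are copied back. A rough sketch of that snapshot-then-validate pattern, assuming a made-up struct fake_op in place of xen_pci_op and a compiler barrier in place of barrier():

#include <stdio.h>
#include <string.h>

#define MAX_VEC 8

/* Made-up stand-in for xen_pci_op; in pciback the real one lives in a
 * grant-mapped page that the frontend can rewrite at any time. */
struct fake_op {
        unsigned int cmd;
        unsigned int value;     /* e.g. number of MSI-X entries requested */
        int err;
};

static struct fake_op shared_page;      /* pretend this is guest-writable */

static int handle_op(void)
{
        struct fake_op op = shared_page;        /* snapshot, like *op = pdev->sh_info->op */

        __asm__ volatile("" ::: "memory");      /* compiler barrier, like barrier() */

        /* Every check and use from here on sees one consistent copy. */
        if (op.value > MAX_VEC)
                return -22;                     /* -EINVAL */

        /* ... allocate op.value entries and process the request on 'op' ... */
        op.err = 0;

        /* Copy back only the result fields the backend is meant to publish. */
        shared_page.err = op.err;
        return 0;
}

int main(void)
{
        shared_page.cmd = 1;
        shared_page.value = 4;
        printf("handle_op() = %d\n", handle_op());
        return 0;
}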
2022     diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
2023     index b7f51504f85a..c561d530be2e 100644
2024     --- a/drivers/xen/xen-scsiback.c
2025     +++ b/drivers/xen/xen-scsiback.c
2026     @@ -941,12 +941,12 @@ out:
2027     spin_unlock_irqrestore(&info->v2p_lock, flags);
2028    
2029     out_free:
2030     - mutex_lock(&tpg->tv_tpg_mutex);
2031     - tpg->tv_tpg_fe_count--;
2032     - mutex_unlock(&tpg->tv_tpg_mutex);
2033     -
2034     - if (err)
2035     + if (err) {
2036     + mutex_lock(&tpg->tv_tpg_mutex);
2037     + tpg->tv_tpg_fe_count--;
2038     + mutex_unlock(&tpg->tv_tpg_mutex);
2039     kfree(new);
2040     + }
2041    
2042     return err;
2043     }
2044     diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
2045     index 252f5c15806b..78a7b1d73354 100644
2046     --- a/fs/cifs/cifsfs.h
2047     +++ b/fs/cifs/cifsfs.h
2048     @@ -31,19 +31,15 @@
2049     * so that it will fit. We use hash_64 to convert the value to 31 bits, and
2050     * then add 1, to ensure that we don't end up with a 0 as the value.
2051     */
2052     -#if BITS_PER_LONG == 64
2053     static inline ino_t
2054     cifs_uniqueid_to_ino_t(u64 fileid)
2055     {
2056     + if ((sizeof(ino_t)) < (sizeof(u64)))
2057     + return (ino_t)hash_64(fileid, (sizeof(ino_t) * 8) - 1) + 1;
2058     +
2059     return (ino_t)fileid;
2060     +
2061     }
2062     -#else
2063     -static inline ino_t
2064     -cifs_uniqueid_to_ino_t(u64 fileid)
2065     -{
2066     - return (ino_t)hash_64(fileid, (sizeof(ino_t) * 8) - 1) + 1;
2067     -}
2068     -#endif
2069    
2070     extern struct file_system_type cifs_fs_type;
2071     extern const struct address_space_operations cifs_addr_ops;
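The cifsfs.h hunk drops the BITS_PER_LONG preprocessor split in favour of a run-time sizeof() check, so any build where ino_t is narrower than u64 hashes the 64-bit uniqueid down to 31 bits and adds 1 to avoid returning 0. A small sketch of the same shape; toy_hash_64() is an arbitrary stand-in for the kernel's hash_64(), and ino32_t just simulates a 32-bit ino_t:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t ino32_t;       /* pretend ino_t is 32-bit on this build */

/* Toy stand-in for hash_64(val, bits): fold a u64 down to 'bits' bits. */
static uint64_t toy_hash_64(uint64_t val, unsigned int bits)
{
        uint64_t hash = val * 0x9e37fffffffc0001ULL;    /* arbitrary odd multiplier */
        return hash >> (64 - bits);
}

/* Same shape as cifs_uniqueid_to_ino_t() after the patch: if ino_t is narrower
 * than u64, hash down to 31 bits and add 1 so the result is never 0. */
static ino32_t uniqueid_to_ino(uint64_t fileid)
{
        if (sizeof(ino32_t) < sizeof(uint64_t))
                return (ino32_t)toy_hash_64(fileid, sizeof(ino32_t) * 8 - 1) + 1;
        return (ino32_t)fileid;
}

int main(void)
{
        uint64_t fileid = 0x123456789abcdef0ULL;

        printf("fileid %llx -> ino %u\n",
               (unsigned long long)fileid, uniqueid_to_ino(fileid));
        return 0;
}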
2072     diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
2073     index f26ffbfc64d8..f1a5067d5494 100644
2074     --- a/fs/cifs/cifssmb.c
2075     +++ b/fs/cifs/cifssmb.c
2076     @@ -1395,11 +1395,10 @@ openRetry:
2077     * current bigbuf.
2078     */
2079     static int
2080     -cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
2081     +discard_remaining_data(struct TCP_Server_Info *server)
2082     {
2083     unsigned int rfclen = get_rfc1002_length(server->smallbuf);
2084     int remaining = rfclen + 4 - server->total_read;
2085     - struct cifs_readdata *rdata = mid->callback_data;
2086    
2087     while (remaining > 0) {
2088     int length;
2089     @@ -1413,10 +1412,20 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
2090     remaining -= length;
2091     }
2092    
2093     - dequeue_mid(mid, rdata->result);
2094     return 0;
2095     }
2096    
2097     +static int
2098     +cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
2099     +{
2100     + int length;
2101     + struct cifs_readdata *rdata = mid->callback_data;
2102     +
2103     + length = discard_remaining_data(server);
2104     + dequeue_mid(mid, rdata->result);
2105     + return length;
2106     +}
2107     +
2108     int
2109     cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
2110     {
2111     @@ -1445,6 +1454,12 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
2112     return length;
2113     server->total_read += length;
2114    
2115     + if (server->ops->is_status_pending &&
2116     + server->ops->is_status_pending(buf, server, 0)) {
2117     + discard_remaining_data(server);
2118     + return -1;
2119     + }
2120     +
2121     /* Was the SMB read successful? */
2122     rdata->result = server->ops->map_error(buf, false);
2123     if (rdata->result != 0) {
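Factoring cifs_readv_discard() into discard_remaining_data() lets the receive path drain the rest of the current PDU when an interim STATUS_PENDING response arrives, without dequeuing the mid, so the next read still starts on a message boundary. A minimal sketch of that drain loop over a pipe; the 8-byte "header" split is only illustrative:

#include <stdio.h>
#include <unistd.h>

/* Drain 'remaining' bytes from fd so the next read starts at the next message
 * boundary; this mirrors the shape of discard_remaining_data(). */
static int discard_remaining(int fd, size_t remaining)
{
        char scratch[4096];

        while (remaining > 0) {
                size_t chunk = remaining < sizeof(scratch) ? remaining : sizeof(scratch);
                ssize_t n = read(fd, scratch, chunk);

                if (n <= 0)
                        return -1;      /* transport error or EOF mid-PDU */
                remaining -= (size_t)n;
        }
        return 0;
}

int main(void)
{
        int pipefd[2];
        char pdu[32] = "header...payload to be skipped";
        char hdr[8];

        if (pipe(pipefd))
                return 1;
        if (write(pipefd[1], pdu, sizeof(pdu)) != (ssize_t)sizeof(pdu))
                return 1;

        /* Parse only the header, then skip the rest of this PDU. */
        if (read(pipefd[0], hdr, sizeof(hdr)) != (ssize_t)sizeof(hdr))
                return 1;
        printf("discarded rest of PDU: %s\n",
               discard_remaining(pipefd[0], sizeof(pdu) - sizeof(hdr)) ? "error" : "ok");
        return 0;
}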
2124     diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
2125     index 894f259d3989..657a9c5c4fff 100644
2126     --- a/fs/cifs/smb2pdu.c
2127     +++ b/fs/cifs/smb2pdu.c
2128     @@ -1042,21 +1042,25 @@ parse_lease_state(struct TCP_Server_Info *server, struct smb2_create_rsp *rsp,
2129     {
2130     char *data_offset;
2131     struct create_context *cc;
2132     - unsigned int next = 0;
2133     + unsigned int next;
2134     + unsigned int remaining;
2135     char *name;
2136    
2137     data_offset = (char *)rsp + 4 + le32_to_cpu(rsp->CreateContextsOffset);
2138     + remaining = le32_to_cpu(rsp->CreateContextsLength);
2139     cc = (struct create_context *)data_offset;
2140     - do {
2141     - cc = (struct create_context *)((char *)cc + next);
2142     + while (remaining >= sizeof(struct create_context)) {
2143     name = le16_to_cpu(cc->NameOffset) + (char *)cc;
2144     - if (le16_to_cpu(cc->NameLength) != 4 ||
2145     - strncmp(name, "RqLs", 4)) {
2146     - next = le32_to_cpu(cc->Next);
2147     - continue;
2148     - }
2149     - return server->ops->parse_lease_buf(cc, epoch);
2150     - } while (next != 0);
2151     + if (le16_to_cpu(cc->NameLength) == 4 &&
2152     + strncmp(name, "RqLs", 4) == 0)
2153     + return server->ops->parse_lease_buf(cc, epoch);
2154     +
2155     + next = le32_to_cpu(cc->Next);
2156     + if (!next)
2157     + break;
2158     + remaining -= next;
2159     + cc = (struct create_context *)((char *)cc + next);
2160     + }
2161    
2162     return 0;
2163     }
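parse_lease_state() now walks the create contexts with an explicit remaining-bytes budget and stops when Next is 0, so a malformed reply can no longer push the old do/while loop past the end of the buffer. A compact user-space sketch of the same bounded walk over chained records, using a simplified struct ctx invented here in place of struct create_context:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Simplified stand-in for struct create_context: chained records where 'next'
 * is the byte offset of the following record and 0 marks the last one. */
struct ctx {
        uint32_t next;
        uint32_t tag;
};

/* Walk the chain without trusting it beyond 'len' bytes; returns 1 if found. */
static int find_ctx(const uint8_t *buf, size_t len, uint32_t tag)
{
        size_t remaining = len;
        const uint8_t *p = buf;

        while (remaining >= sizeof(struct ctx)) {
                struct ctx cur;

                memcpy(&cur, p, sizeof(cur));   /* no alignment assumptions */
                if (cur.tag == tag)
                        return 1;
                if (cur.next == 0 || cur.next > remaining)
                        break;                  /* end of chain or bogus offset */
                remaining -= cur.next;
                p += cur.next;
        }
        return 0;
}

int main(void)
{
        uint8_t buf[2 * sizeof(struct ctx)];
        struct ctx a = { sizeof(struct ctx), 0x1111 };
        struct ctx b = { 0, 0x2222 };           /* last record in the chain */

        memcpy(buf, &a, sizeof(a));
        memcpy(buf + sizeof(a), &b, sizeof(b));
        printf("found 0x2222: %s\n", find_ctx(buf, sizeof(buf), 0x2222) ? "yes" : "no");
        return 0;
}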
2164     diff --git a/fs/dcache.c b/fs/dcache.c
2165     index 0046ab7d4f3d..10bce74c427f 100644
2166     --- a/fs/dcache.c
2167     +++ b/fs/dcache.c
2168     @@ -269,9 +269,6 @@ static inline int dname_external(const struct dentry *dentry)
2169     return dentry->d_name.name != dentry->d_iname;
2170     }
2171    
2172     -/*
2173     - * Make sure other CPUs see the inode attached before the type is set.
2174     - */
2175     static inline void __d_set_inode_and_type(struct dentry *dentry,
2176     struct inode *inode,
2177     unsigned type_flags)
2178     @@ -279,28 +276,18 @@ static inline void __d_set_inode_and_type(struct dentry *dentry,
2179     unsigned flags;
2180    
2181     dentry->d_inode = inode;
2182     - smp_wmb();
2183     flags = READ_ONCE(dentry->d_flags);
2184     flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
2185     flags |= type_flags;
2186     WRITE_ONCE(dentry->d_flags, flags);
2187     }
2188    
2189     -/*
2190     - * Ideally, we want to make sure that other CPUs see the flags cleared before
2191     - * the inode is detached, but this is really a violation of RCU principles
2192     - * since the ordering suggests we should always set inode before flags.
2193     - *
2194     - * We should instead replace or discard the entire dentry - but that sucks
2195     - * performancewise on mass deletion/rename.
2196     - */
2197     static inline void __d_clear_type_and_inode(struct dentry *dentry)
2198     {
2199     unsigned flags = READ_ONCE(dentry->d_flags);
2200    
2201     flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
2202     WRITE_ONCE(dentry->d_flags, flags);
2203     - smp_wmb();
2204     dentry->d_inode = NULL;
2205     }
2206    
2207     @@ -322,17 +309,17 @@ static void dentry_free(struct dentry *dentry)
2208     }
2209    
2210     /**
2211     - * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups
2212     + * dentry_rcuwalk_invalidate - invalidate in-progress rcu-walk lookups
2213     * @dentry: the target dentry
2214     * After this call, in-progress rcu-walk path lookup will fail. This
2215     * should be called after unhashing, and after changing d_inode (if
2216     * the dentry has not already been unhashed).
2217     */
2218     -static inline void dentry_rcuwalk_barrier(struct dentry *dentry)
2219     +static inline void dentry_rcuwalk_invalidate(struct dentry *dentry)
2220     {
2221     - assert_spin_locked(&dentry->d_lock);
2222     - /* Go through a barrier */
2223     - write_seqcount_barrier(&dentry->d_seq);
2224     + lockdep_assert_held(&dentry->d_lock);
2225     + /* Go through an invalidation barrier */
2226     + write_seqcount_invalidate(&dentry->d_seq);
2227     }
2228    
2229     /*
2230     @@ -370,9 +357,11 @@ static void dentry_unlink_inode(struct dentry * dentry)
2231     __releases(dentry->d_inode->i_lock)
2232     {
2233     struct inode *inode = dentry->d_inode;
2234     +
2235     + raw_write_seqcount_begin(&dentry->d_seq);
2236     __d_clear_type_and_inode(dentry);
2237     hlist_del_init(&dentry->d_u.d_alias);
2238     - dentry_rcuwalk_barrier(dentry);
2239     + raw_write_seqcount_end(&dentry->d_seq);
2240     spin_unlock(&dentry->d_lock);
2241     spin_unlock(&inode->i_lock);
2242     if (!inode->i_nlink)
2243     @@ -494,7 +483,7 @@ void __d_drop(struct dentry *dentry)
2244     __hlist_bl_del(&dentry->d_hash);
2245     dentry->d_hash.pprev = NULL;
2246     hlist_bl_unlock(b);
2247     - dentry_rcuwalk_barrier(dentry);
2248     + dentry_rcuwalk_invalidate(dentry);
2249     }
2250     }
2251     EXPORT_SYMBOL(__d_drop);
2252     @@ -1757,8 +1746,9 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
2253     spin_lock(&dentry->d_lock);
2254     if (inode)
2255     hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
2256     + raw_write_seqcount_begin(&dentry->d_seq);
2257     __d_set_inode_and_type(dentry, inode, add_flags);
2258     - dentry_rcuwalk_barrier(dentry);
2259     + raw_write_seqcount_end(&dentry->d_seq);
2260     spin_unlock(&dentry->d_lock);
2261     fsnotify_d_instantiate(dentry, inode);
2262     }
2263     diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
2264     index 9e92c9c2d319..b5f3cc7274f6 100644
2265     --- a/fs/hpfs/namei.c
2266     +++ b/fs/hpfs/namei.c
2267     @@ -377,12 +377,11 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry)
2268     struct inode *inode = d_inode(dentry);
2269     dnode_secno dno;
2270     int r;
2271     - int rep = 0;
2272     int err;
2273    
2274     hpfs_lock(dir->i_sb);
2275     hpfs_adjust_length(name, &len);
2276     -again:
2277     +
2278     err = -ENOENT;
2279     de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh);
2280     if (!de)
2281     @@ -402,33 +401,9 @@ again:
2282     hpfs_error(dir->i_sb, "there was error when removing dirent");
2283     err = -EFSERROR;
2284     break;
2285     - case 2: /* no space for deleting, try to truncate file */
2286     -
2287     + case 2: /* no space for deleting */
2288     err = -ENOSPC;
2289     - if (rep++)
2290     - break;
2291     -
2292     - dentry_unhash(dentry);
2293     - if (!d_unhashed(dentry)) {
2294     - hpfs_unlock(dir->i_sb);
2295     - return -ENOSPC;
2296     - }
2297     - if (generic_permission(inode, MAY_WRITE) ||
2298     - !S_ISREG(inode->i_mode) ||
2299     - get_write_access(inode)) {
2300     - d_rehash(dentry);
2301     - } else {
2302     - struct iattr newattrs;
2303     - /*pr_info("truncating file before delete.\n");*/
2304     - newattrs.ia_size = 0;
2305     - newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
2306     - err = notify_change(dentry, &newattrs, NULL);
2307     - put_write_access(inode);
2308     - if (!err)
2309     - goto again;
2310     - }
2311     - hpfs_unlock(dir->i_sb);
2312     - return -ENOSPC;
2313     + break;
2314     default:
2315     drop_nlink(inode);
2316     err = 0;
2317     diff --git a/fs/jffs2/README.Locking b/fs/jffs2/README.Locking
2318     index 3ea36554107f..8918ac905a3b 100644
2319     --- a/fs/jffs2/README.Locking
2320     +++ b/fs/jffs2/README.Locking
2321     @@ -2,10 +2,6 @@
2322     JFFS2 LOCKING DOCUMENTATION
2323     ---------------------------
2324    
2325     -At least theoretically, JFFS2 does not require the Big Kernel Lock
2326     -(BKL), which was always helpfully obtained for it by Linux 2.4 VFS
2327     -code. It has its own locking, as described below.
2328     -
2329     This document attempts to describe the existing locking rules for
2330     JFFS2. It is not expected to remain perfectly up to date, but ought to
2331     be fairly close.
2332     @@ -69,6 +65,7 @@ Ordering constraints:
2333     any f->sem held.
2334     2. Never attempt to lock two file mutexes in one thread.
2335     No ordering rules have been made for doing so.
2336     + 3. Never lock a page cache page with f->sem held.
2337    
2338    
2339     erase_completion_lock spinlock
2340     diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c
2341     index a3750f902adc..c1f04947d7dc 100644
2342     --- a/fs/jffs2/build.c
2343     +++ b/fs/jffs2/build.c
2344     @@ -49,7 +49,8 @@ next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c)
2345    
2346    
2347     static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
2348     - struct jffs2_inode_cache *ic)
2349     + struct jffs2_inode_cache *ic,
2350     + int *dir_hardlinks)
2351     {
2352     struct jffs2_full_dirent *fd;
2353    
2354     @@ -68,19 +69,21 @@ static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
2355     dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n",
2356     fd->name, fd->ino, ic->ino);
2357     jffs2_mark_node_obsolete(c, fd->raw);
2358     + /* Clear the ic/raw union so it doesn't cause problems later. */
2359     + fd->ic = NULL;
2360     continue;
2361     }
2362    
2363     + /* From this point, fd->raw is no longer used so we can set fd->ic */
2364     + fd->ic = child_ic;
2365     + child_ic->pino_nlink++;
2366     + /* If we appear (at this stage) to have hard-linked directories,
2367     + * set a flag to trigger a scan later */
2368     if (fd->type == DT_DIR) {
2369     - if (child_ic->pino_nlink) {
2370     - JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n",
2371     - fd->name, fd->ino, ic->ino);
2372     - /* TODO: What do we do about it? */
2373     - } else {
2374     - child_ic->pino_nlink = ic->ino;
2375     - }
2376     - } else
2377     - child_ic->pino_nlink++;
2378     + child_ic->flags |= INO_FLAGS_IS_DIR;
2379     + if (child_ic->pino_nlink > 1)
2380     + *dir_hardlinks = 1;
2381     + }
2382    
2383     dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino);
2384     /* Can't free scan_dents so far. We might need them in pass 2 */
2385     @@ -94,8 +97,7 @@ static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
2386     */
2387     static int jffs2_build_filesystem(struct jffs2_sb_info *c)
2388     {
2389     - int ret;
2390     - int i;
2391     + int ret, i, dir_hardlinks = 0;
2392     struct jffs2_inode_cache *ic;
2393     struct jffs2_full_dirent *fd;
2394     struct jffs2_full_dirent *dead_fds = NULL;
2395     @@ -119,7 +121,7 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
2396     /* Now scan the directory tree, increasing nlink according to every dirent found. */
2397     for_each_inode(i, c, ic) {
2398     if (ic->scan_dents) {
2399     - jffs2_build_inode_pass1(c, ic);
2400     + jffs2_build_inode_pass1(c, ic, &dir_hardlinks);
2401     cond_resched();
2402     }
2403     }
2404     @@ -155,6 +157,20 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
2405     }
2406    
2407     dbg_fsbuild("pass 2a complete\n");
2408     +
2409     + if (dir_hardlinks) {
2410     + /* If we detected directory hardlinks earlier, *hopefully*
2411     + * they are gone now because some of the links were from
2412     + * dead directories which still had some old dirents lying
2413     + * around and not yet garbage-collected, but which have
2414     + * been discarded above. So clear the pino_nlink field
2415     + * in each directory, so that the final scan below can
2416     + * print appropriate warnings. */
2417     + for_each_inode(i, c, ic) {
2418     + if (ic->flags & INO_FLAGS_IS_DIR)
2419     + ic->pino_nlink = 0;
2420     + }
2421     + }
2422     dbg_fsbuild("freeing temporary data structures\n");
2423    
2424     /* Finally, we can scan again and free the dirent structs */
2425     @@ -162,6 +178,33 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
2426     while(ic->scan_dents) {
2427     fd = ic->scan_dents;
2428     ic->scan_dents = fd->next;
2429     + /* We do use the pino_nlink field to count nlink of
2430     + * directories during fs build, so set it to the
2431     + * parent ino# now. Now that there's hopefully only
2432     + * one. */
2433     + if (fd->type == DT_DIR) {
2434     + if (!fd->ic) {
2435     + /* We'll have complained about it and marked the coresponding
2436     + raw node obsolete already. Just skip it. */
2437     + continue;
2438     + }
2439     +
2440     + /* We *have* to have set this in jffs2_build_inode_pass1() */
2441     + BUG_ON(!(fd->ic->flags & INO_FLAGS_IS_DIR));
2442     +
2443     + /* We clear ic->pino_nlink ∀ directories' ic *only* if dir_hardlinks
2444     + * is set. Otherwise, we know this should never trigger anyway, so
2445     + * we don't do the check. And ic->pino_nlink still contains the nlink
2446     + * value (which is 1). */
2447     + if (dir_hardlinks && fd->ic->pino_nlink) {
2448     + JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u is also hard linked from dir ino #%u\n",
2449     + fd->name, fd->ino, ic->ino, fd->ic->pino_nlink);
2450     + /* Should we unlink it from its previous parent? */
2451     + }
2452     +
2453     + /* For directories, ic->pino_nlink holds that parent inode # */
2454     + fd->ic->pino_nlink = ic->ino;
2455     + }
2456     jffs2_free_full_dirent(fd);
2457     }
2458     ic->scan_dents = NULL;
2459     @@ -240,11 +283,7 @@ static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c,
2460    
2461     /* Reduce nlink of the child. If it's now zero, stick it on the
2462     dead_fds list to be cleaned up later. Else just free the fd */
2463     -
2464     - if (fd->type == DT_DIR)
2465     - child_ic->pino_nlink = 0;
2466     - else
2467     - child_ic->pino_nlink--;
2468     + child_ic->pino_nlink--;
2469    
2470     if (!child_ic->pino_nlink) {
2471     dbg_fsbuild("inode #%u (\"%s\") now has no links; adding to dead_fds list.\n",
2472     diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
2473     index f509f62e12f6..3361979d728c 100644
2474     --- a/fs/jffs2/file.c
2475     +++ b/fs/jffs2/file.c
2476     @@ -137,39 +137,33 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
2477     struct page *pg;
2478     struct inode *inode = mapping->host;
2479     struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
2480     - struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
2481     - struct jffs2_raw_inode ri;
2482     - uint32_t alloc_len = 0;
2483     pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2484     uint32_t pageofs = index << PAGE_CACHE_SHIFT;
2485     int ret = 0;
2486    
2487     - jffs2_dbg(1, "%s()\n", __func__);
2488     -
2489     - if (pageofs > inode->i_size) {
2490     - ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
2491     - ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
2492     - if (ret)
2493     - return ret;
2494     - }
2495     -
2496     - mutex_lock(&f->sem);
2497     pg = grab_cache_page_write_begin(mapping, index, flags);
2498     - if (!pg) {
2499     - if (alloc_len)
2500     - jffs2_complete_reservation(c);
2501     - mutex_unlock(&f->sem);
2502     + if (!pg)
2503     return -ENOMEM;
2504     - }
2505     *pagep = pg;
2506    
2507     - if (alloc_len) {
2508     + jffs2_dbg(1, "%s()\n", __func__);
2509     +
2510     + if (pageofs > inode->i_size) {
2511     /* Make new hole frag from old EOF to new page */
2512     + struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
2513     + struct jffs2_raw_inode ri;
2514     struct jffs2_full_dnode *fn;
2515     + uint32_t alloc_len;
2516    
2517     jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
2518     (unsigned int)inode->i_size, pageofs);
2519    
2520     + ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
2521     + ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
2522     + if (ret)
2523     + goto out_page;
2524     +
2525     + mutex_lock(&f->sem);
2526     memset(&ri, 0, sizeof(ri));
2527    
2528     ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
2529     @@ -196,6 +190,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
2530     if (IS_ERR(fn)) {
2531     ret = PTR_ERR(fn);
2532     jffs2_complete_reservation(c);
2533     + mutex_unlock(&f->sem);
2534     goto out_page;
2535     }
2536     ret = jffs2_add_full_dnode_to_inode(c, f, fn);
2537     @@ -210,10 +205,12 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
2538     jffs2_mark_node_obsolete(c, fn->raw);
2539     jffs2_free_full_dnode(fn);
2540     jffs2_complete_reservation(c);
2541     + mutex_unlock(&f->sem);
2542     goto out_page;
2543     }
2544     jffs2_complete_reservation(c);
2545     inode->i_size = pageofs;
2546     + mutex_unlock(&f->sem);
2547     }
2548    
2549     /*
2550     @@ -222,18 +219,18 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
2551     * case of a short-copy.
2552     */
2553     if (!PageUptodate(pg)) {
2554     + mutex_lock(&f->sem);
2555     ret = jffs2_do_readpage_nolock(inode, pg);
2556     + mutex_unlock(&f->sem);
2557     if (ret)
2558     goto out_page;
2559     }
2560     - mutex_unlock(&f->sem);
2561     jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags);
2562     return ret;
2563    
2564     out_page:
2565     unlock_page(pg);
2566     page_cache_release(pg);
2567     - mutex_unlock(&f->sem);
2568     return ret;
2569     }
2570    
2571     diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
2572     index 5a2dec2b064c..95d5880a63ee 100644
2573     --- a/fs/jffs2/gc.c
2574     +++ b/fs/jffs2/gc.c
2575     @@ -1296,14 +1296,17 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
2576     BUG_ON(start > orig_start);
2577     }
2578    
2579     - /* First, use readpage() to read the appropriate page into the page cache */
2580     - /* Q: What happens if we actually try to GC the _same_ page for which commit_write()
2581     - * triggered garbage collection in the first place?
2582     - * A: I _think_ it's OK. read_cache_page shouldn't deadlock, we'll write out the
2583     - * page OK. We'll actually write it out again in commit_write, which is a little
2584     - * suboptimal, but at least we're correct.
2585     - */
2586     + /* The rules state that we must obtain the page lock *before* f->sem, so
2587     + * drop f->sem temporarily. Since we also hold c->alloc_sem, nothing's
2588     + * actually going to *change* so we're safe; we only allow reading.
2589     + *
2590     + * It is important to note that jffs2_write_begin() will ensure that its
2591     + * page is marked Uptodate before allocating space. That means that if we
2592     + * end up here trying to GC the *same* page that jffs2_write_begin() is
2593     + * trying to write out, read_cache_page() will not deadlock. */
2594     + mutex_unlock(&f->sem);
2595     pg_ptr = jffs2_gc_fetch_page(c, f, start, &pg);
2596     + mutex_lock(&f->sem);
2597    
2598     if (IS_ERR(pg_ptr)) {
2599     pr_warn("read_cache_page() returned error: %ld\n",
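Both jffs2 hunks enforce the new locking rule quoted above: never take a page-cache page lock while holding f->sem. In the GC path that means dropping f->sem around jffs2_gc_fetch_page() and re-taking it afterwards, which is safe because c->alloc_sem keeps the data stable. A toy pthread sketch of that drop-reorder-retake shape (page_lock and f_sem are stand-in mutexes):

#include <stdio.h>
#include <pthread.h>

/* Lock ordering rule from README.Locking: page lock before f->sem. */
static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t f_sem     = PTHREAD_MUTEX_INITIALIZER;

/* GC path: called with f->sem held but needing the page.  Taking the page
 * lock here would invert the documented order, so drop f->sem around the
 * page access, as the jffs2_garbage_collect_dnode() hunk does. */
static void gc_fetch_page(void)
{
        pthread_mutex_unlock(&f_sem);           /* give up the inner lock */
        pthread_mutex_lock(&page_lock);         /* take the page in the legal order */
        /* ... read the page; higher-level locks keep the data stable ... */
        pthread_mutex_unlock(&page_lock);
        pthread_mutex_lock(&f_sem);             /* re-take f->sem before returning */
}

int main(void)
{
        pthread_mutex_lock(&f_sem);
        gc_fetch_page();
        pthread_mutex_unlock(&f_sem);
        puts("no lock-order inversion");
        return 0;
}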
2600     diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h
2601     index fa35ff79ab35..0637271f3770 100644
2602     --- a/fs/jffs2/nodelist.h
2603     +++ b/fs/jffs2/nodelist.h
2604     @@ -194,6 +194,7 @@ struct jffs2_inode_cache {
2605     #define INO_STATE_CLEARING 6 /* In clear_inode() */
2606    
2607     #define INO_FLAGS_XATTR_CHECKED 0x01 /* has no duplicate xattr_ref */
2608     +#define INO_FLAGS_IS_DIR 0x02 /* is a directory */
2609    
2610     #define RAWNODE_CLASS_INODE_CACHE 0
2611     #define RAWNODE_CLASS_XATTR_DATUM 1
2612     @@ -249,7 +250,10 @@ struct jffs2_readinode_info
2613    
2614     struct jffs2_full_dirent
2615     {
2616     - struct jffs2_raw_node_ref *raw;
2617     + union {
2618     + struct jffs2_raw_node_ref *raw;
2619     + struct jffs2_inode_cache *ic; /* Just during part of build */
2620     + };
2621     struct jffs2_full_dirent *next;
2622     uint32_t version;
2623     uint32_t ino; /* == zero for unlink */
2624     diff --git a/fs/namei.c b/fs/namei.c
2625     index ccd7f98d85b9..f3cc848da8bc 100644
2626     --- a/fs/namei.c
2627     +++ b/fs/namei.c
2628     @@ -1619,10 +1619,10 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
2629     if (err < 0)
2630     goto out_err;
2631    
2632     - inode = path->dentry->d_inode;
2633     err = -ENOENT;
2634     if (d_is_negative(path->dentry))
2635     goto out_path_put;
2636     + inode = path->dentry->d_inode;
2637     }
2638    
2639     if (should_follow_link(path->dentry, follow)) {
2640     @@ -3078,6 +3078,7 @@ retry_lookup:
2641     path_to_nameidata(path, nd);
2642     goto out;
2643     }
2644     + inode = path->dentry->d_inode;
2645     finish_lookup:
2646     /* we _can_ be in RCU mode here */
2647     if (should_follow_link(path->dentry, !symlink_ok)) {
2648     @@ -3152,6 +3153,10 @@ opened:
2649     goto exit_fput;
2650     }
2651     out:
2652     + if (unlikely(error > 0)) {
2653     + WARN_ON(1);
2654     + error = -EINVAL;
2655     + }
2656     if (got_write)
2657     mnt_drop_write(nd->path.mnt);
2658     path_put(&save_parent);
2659     diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
2660     index 2c4f41c34366..84706204cc33 100644
2661     --- a/fs/nfs/nfs4proc.c
2662     +++ b/fs/nfs/nfs4proc.c
2663     @@ -2331,9 +2331,9 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
2664     dentry = d_add_unique(dentry, igrab(state->inode));
2665     if (dentry == NULL) {
2666     dentry = opendata->dentry;
2667     - } else if (dentry != ctx->dentry) {
2668     + } else {
2669     dput(ctx->dentry);
2670     - ctx->dentry = dget(dentry);
2671     + ctx->dentry = dentry;
2672     }
2673     nfs_set_verifier(dentry,
2674     nfs_save_change_attribute(d_inode(opendata->dir)));
2675     diff --git a/include/linux/ata.h b/include/linux/ata.h
2676     index 5dfbcd8887bb..2e5fb1c31251 100644
2677     --- a/include/linux/ata.h
2678     +++ b/include/linux/ata.h
2679     @@ -487,8 +487,8 @@ enum ata_tf_protocols {
2680     };
2681    
2682     enum ata_ioctls {
2683     - ATA_IOC_GET_IO32 = 0x309,
2684     - ATA_IOC_SET_IO32 = 0x324,
2685     + ATA_IOC_GET_IO32 = 0x309, /* HDIO_GET_32BIT */
2686     + ATA_IOC_SET_IO32 = 0x324, /* HDIO_SET_32BIT */
2687     };
2688    
2689     /* core structures */
2690     diff --git a/include/linux/dcache.h b/include/linux/dcache.h
2691     index 167ec0934049..ca9df4521734 100644
2692     --- a/include/linux/dcache.h
2693     +++ b/include/linux/dcache.h
2694     @@ -408,9 +408,7 @@ static inline bool d_mountpoint(const struct dentry *dentry)
2695     */
2696     static inline unsigned __d_entry_type(const struct dentry *dentry)
2697     {
2698     - unsigned type = READ_ONCE(dentry->d_flags);
2699     - smp_rmb();
2700     - return type & DCACHE_ENTRY_TYPE;
2701     + return dentry->d_flags & DCACHE_ENTRY_TYPE;
2702     }
2703    
2704     static inline bool d_is_miss(const struct dentry *dentry)
2705     diff --git a/include/linux/libata.h b/include/linux/libata.h
2706     index e0e33787c485..11c2dd114732 100644
2707     --- a/include/linux/libata.h
2708     +++ b/include/linux/libata.h
2709     @@ -717,7 +717,7 @@ struct ata_device {
2710     union {
2711     u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
2712     u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */
2713     - };
2714     + } ____cacheline_aligned;
2715    
2716     /* DEVSLP Timing Variables from Identify Device Data Log */
2717     u8 devslp_timing[ATA_LOG_DEVSLP_SIZE];
2718     diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
2719     index b95f914ce083..150f43a9149c 100644
2720     --- a/include/linux/nfs_fs.h
2721     +++ b/include/linux/nfs_fs.h
2722     @@ -540,9 +540,7 @@ extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
2723    
2724     static inline loff_t nfs_size_to_loff_t(__u64 size)
2725     {
2726     - if (size > (__u64) OFFSET_MAX - 1)
2727     - return OFFSET_MAX - 1;
2728     - return (loff_t) size;
2729     + return min_t(u64, size, OFFSET_MAX);
2730     }
2731    
2732     static inline ino_t
2733     diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
2734     index 5f68d0a391ce..c07e3a536099 100644
2735     --- a/include/linux/seqlock.h
2736     +++ b/include/linux/seqlock.h
2737     @@ -266,13 +266,13 @@ static inline void write_seqcount_end(seqcount_t *s)
2738     }
2739    
2740     /**
2741     - * write_seqcount_barrier - invalidate in-progress read-side seq operations
2742     + * write_seqcount_invalidate - invalidate in-progress read-side seq operations
2743     * @s: pointer to seqcount_t
2744     *
2745     - * After write_seqcount_barrier, no read-side seq operations will complete
2746     + * After write_seqcount_invalidate, no read-side seq operations will complete
2747     * successfully and see data older than this.
2748     */
2749     -static inline void write_seqcount_barrier(seqcount_t *s)
2750     +static inline void write_seqcount_invalidate(seqcount_t *s)
2751     {
2752     smp_wmb();
2753     s->sequence+=2;
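write_seqcount_invalidate() is the renamed write_seqcount_barrier(): it bumps the sequence by 2, so the count stays even (no writer in progress) yet no longer matches what an in-flight reader sampled, forcing that reader to retry. A single-threaded sketch of the counter arithmetic only; it deliberately omits the smp_wmb() and the odd/even writer check of the real seqcount API:

#include <stdio.h>

/* Minimal model of a seqcount, just to show why "+= 2" invalidates readers. */
struct seq {
        unsigned int sequence;
};

static unsigned int read_begin(const struct seq *s)
{
        return s->sequence;             /* reader samples the count */
}

static int read_retry(const struct seq *s, unsigned int start)
{
        return s->sequence != start;    /* kernel version also rechecks the low bit */
}

static void invalidate(struct seq *s)
{
        s->sequence += 2;               /* still even, but no longer matches */
}

int main(void)
{
        struct seq s = { 0 };
        unsigned int start = read_begin(&s);    /* rcu-walk lookup in progress */

        invalidate(&s);                         /* e.g. the __d_drop() path */
        printf("reader must retry: %d\n", read_retry(&s, start));
        return 0;
}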
2754     diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
2755     index 480e9f82dfea..2b40a1fab293 100644
2756     --- a/include/target/target_core_base.h
2757     +++ b/include/target/target_core_base.h
2758     @@ -167,6 +167,7 @@ enum se_cmd_flags_table {
2759     SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000,
2760     SCF_COMPARE_AND_WRITE = 0x00080000,
2761     SCF_COMPARE_AND_WRITE_POST = 0x00100000,
2762     + SCF_ACK_KREF = 0x00400000,
2763     };
2764    
2765     /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
2766     @@ -522,7 +523,7 @@ struct se_cmd {
2767     sense_reason_t (*execute_cmd)(struct se_cmd *);
2768     sense_reason_t (*execute_rw)(struct se_cmd *, struct scatterlist *,
2769     u32, enum dma_data_direction);
2770     - sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool);
2771     + sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool, int *);
2772    
2773     unsigned char *t_task_cdb;
2774     unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
2775     @@ -537,6 +538,8 @@ struct se_cmd {
2776     #define CMD_T_DEV_ACTIVE (1 << 7)
2777     #define CMD_T_REQUEST_STOP (1 << 8)
2778     #define CMD_T_BUSY (1 << 9)
2779     +#define CMD_T_TAS (1 << 10)
2780     +#define CMD_T_FABRIC_STOP (1 << 11)
2781     spinlock_t t_state_lock;
2782     struct completion t_transport_stop_comp;
2783    
2784     diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
2785     index c4de47fc5cca..f69ec1295b0b 100644
2786     --- a/kernel/trace/trace_events.c
2787     +++ b/kernel/trace/trace_events.c
2788     @@ -683,7 +683,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
2789     * The ftrace subsystem is for showing formats only.
2790     * They can not be enabled or disabled via the event files.
2791     */
2792     - if (call->class && call->class->reg)
2793     + if (call->class && call->class->reg &&
2794     + !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
2795     return file;
2796     }
2797    
2798     diff --git a/mm/memory.c b/mm/memory.c
2799     index 2a9e09870c20..701d9ad45c46 100644
2800     --- a/mm/memory.c
2801     +++ b/mm/memory.c
2802     @@ -3363,8 +3363,18 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2803     if (unlikely(pmd_none(*pmd)) &&
2804     unlikely(__pte_alloc(mm, vma, pmd, address)))
2805     return VM_FAULT_OOM;
2806     - /* if an huge pmd materialized from under us just retry later */
2807     - if (unlikely(pmd_trans_huge(*pmd)))
2808     + /*
2809     + * If a huge pmd materialized under us just retry later. Use
2810     + * pmd_trans_unstable() instead of pmd_trans_huge() to ensure the pmd
2811     + * didn't become pmd_trans_huge under us and then back to pmd_none, as
2812     + * a result of MADV_DONTNEED running immediately after a huge pmd fault
2813     + * in a different thread of this mm, in turn leading to a misleading
2814     + * pmd_trans_huge() retval. All we have to ensure is that it is a
2815     + * regular pmd that we can walk with pte_offset_map() and we can do that
2816     + * through an atomic read in C, which is what pmd_trans_unstable()
2817     + * provides.
2818     + */
2819     + if (unlikely(pmd_trans_unstable(pmd)))
2820     return 0;
2821     /*
2822     * A regular pmd is established and it can't morph into a huge pmd
2823     diff --git a/mm/migrate.c b/mm/migrate.c
2824     index 2c37b1a44a8c..8c4841a6dc4c 100644
2825     --- a/mm/migrate.c
2826     +++ b/mm/migrate.c
2827     @@ -1557,7 +1557,7 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
2828     (GFP_HIGHUSER_MOVABLE |
2829     __GFP_THISNODE | __GFP_NOMEMALLOC |
2830     __GFP_NORETRY | __GFP_NOWARN) &
2831     - ~GFP_IOFS, 0);
2832     + ~__GFP_WAIT, 0);
2833    
2834     return newpage;
2835     }
2836     diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
2837     index e51af69c61bf..84201c21705e 100644
2838     --- a/net/ceph/messenger.c
2839     +++ b/net/ceph/messenger.c
2840     @@ -1203,6 +1203,13 @@ static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
2841     return new_piece;
2842     }
2843    
2844     +static size_t sizeof_footer(struct ceph_connection *con)
2845     +{
2846     + return (con->peer_features & CEPH_FEATURE_MSG_AUTH) ?
2847     + sizeof(struct ceph_msg_footer) :
2848     + sizeof(struct ceph_msg_footer_old);
2849     +}
2850     +
2851     static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
2852     {
2853     BUG_ON(!msg);
2854     @@ -2326,9 +2333,9 @@ static int read_partial_message(struct ceph_connection *con)
2855     ceph_pr_addr(&con->peer_addr.in_addr),
2856     seq, con->in_seq + 1);
2857     con->in_base_pos = -front_len - middle_len - data_len -
2858     - sizeof(m->footer);
2859     + sizeof_footer(con);
2860     con->in_tag = CEPH_MSGR_TAG_READY;
2861     - return 0;
2862     + return 1;
2863     } else if ((s64)seq - (s64)con->in_seq > 1) {
2864     pr_err("read_partial_message bad seq %lld expected %lld\n",
2865     seq, con->in_seq + 1);
2866     @@ -2358,10 +2365,10 @@ static int read_partial_message(struct ceph_connection *con)
2867     /* skip this message */
2868     dout("alloc_msg said skip message\n");
2869     con->in_base_pos = -front_len - middle_len - data_len -
2870     - sizeof(m->footer);
2871     + sizeof_footer(con);
2872     con->in_tag = CEPH_MSGR_TAG_READY;
2873     con->in_seq++;
2874     - return 0;
2875     + return 1;
2876     }
2877    
2878     BUG_ON(!con->in_msg);
2879     diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
2880     index 36b9ac48b8fb..06bf4010d3ed 100644
2881     --- a/net/ipv6/ip6_output.c
2882     +++ b/net/ipv6/ip6_output.c
2883     @@ -376,6 +376,9 @@ int ip6_forward(struct sk_buff *skb)
2884     if (skb->pkt_type != PACKET_HOST)
2885     goto drop;
2886    
2887     + if (unlikely(skb->sk))
2888     + goto drop;
2889     +
2890     if (skb_warn_if_lro(skb))
2891     goto drop;
2892    
2893     diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
2894     index 2928afffbb81..8d79e70bd978 100644
2895     --- a/net/sunrpc/cache.c
2896     +++ b/net/sunrpc/cache.c
2897     @@ -1218,7 +1218,7 @@ int qword_get(char **bpp, char *dest, int bufsize)
2898     if (bp[0] == '\\' && bp[1] == 'x') {
2899     /* HEX STRING */
2900     bp += 2;
2901     - while (len < bufsize) {
2902     + while (len < bufsize - 1) {
2903     int h, l;
2904    
2905     h = hex_to_bin(bp[0]);
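A standalone sketch, not from this patch, of the tightened bound above: the hex branch writes one byte per \xHH pair, and the destination is assumed to be NUL-terminated afterwards, so the loop has to stop one byte early; looping while len < bufsize could fill the whole buffer and leave no room for the terminator. nibble() below is a hypothetical stand-in for hex_to_bin().

    /* hypothetical stand-in for hex_to_bin(): returns 0..15 or -1 */
    static int nibble(char c)
    {
            if (c >= '0' && c <= '9') return c - '0';
            if (c >= 'a' && c <= 'f') return c - 'a' + 10;
            if (c >= 'A' && c <= 'F') return c - 'A' + 10;
            return -1;
    }

    /* decode "\xHH..." into dest, always leaving room for the trailing NUL */
    static int hex_decode(const char *bp, char *dest, int bufsize)
    {
            int len = 0;

            while (len < bufsize - 1) {          /* reserve the last byte */
                    int h = nibble(bp[0]), l = nibble(bp[1]);
                    if (h < 0 || l < 0)
                            break;
                    dest[len++] = (char)((h << 4) | l);
                    bp += 2;
            }
            dest[len] = '\0';                    /* safe: len <= bufsize - 1 */
            return len;
    }
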
2906     diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c
2907     index b9c0910fb8c4..0608f216f359 100644
2908     --- a/sound/core/control_compat.c
2909     +++ b/sound/core/control_compat.c
2910     @@ -170,6 +170,19 @@ struct snd_ctl_elem_value32 {
2911     unsigned char reserved[128];
2912     };
2913    
2914     +#ifdef CONFIG_X86_X32
2915     +/* x32 has a different alignment for 64bit values from ia32 */
2916     +struct snd_ctl_elem_value_x32 {
2917     + struct snd_ctl_elem_id id;
2918     + unsigned int indirect; /* bit-field causes misalignment */
2919     + union {
2920     + s32 integer[128];
2921     + unsigned char data[512];
2922     + s64 integer64[64];
2923     + } value;
2924     + unsigned char reserved[128];
2925     +};
2926     +#endif /* CONFIG_X86_X32 */
2927    
2928     /* get the value type and count of the control */
2929     static int get_ctl_type(struct snd_card *card, struct snd_ctl_elem_id *id,
2930     @@ -219,9 +232,11 @@ static int get_elem_size(int type, int count)
2931    
2932     static int copy_ctl_value_from_user(struct snd_card *card,
2933     struct snd_ctl_elem_value *data,
2934     - struct snd_ctl_elem_value32 __user *data32,
2935     + void __user *userdata,
2936     + void __user *valuep,
2937     int *typep, int *countp)
2938     {
2939     + struct snd_ctl_elem_value32 __user *data32 = userdata;
2940     int i, type, size;
2941     int uninitialized_var(count);
2942     unsigned int indirect;
2943     @@ -239,8 +254,9 @@ static int copy_ctl_value_from_user(struct snd_card *card,
2944     if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN ||
2945     type == SNDRV_CTL_ELEM_TYPE_INTEGER) {
2946     for (i = 0; i < count; i++) {
2947     + s32 __user *intp = valuep;
2948     int val;
2949     - if (get_user(val, &data32->value.integer[i]))
2950     + if (get_user(val, &intp[i]))
2951     return -EFAULT;
2952     data->value.integer.value[i] = val;
2953     }
2954     @@ -250,8 +266,7 @@ static int copy_ctl_value_from_user(struct snd_card *card,
2955     dev_err(card->dev, "snd_ioctl32_ctl_elem_value: unknown type %d\n", type);
2956     return -EINVAL;
2957     }
2958     - if (copy_from_user(data->value.bytes.data,
2959     - data32->value.data, size))
2960     + if (copy_from_user(data->value.bytes.data, valuep, size))
2961     return -EFAULT;
2962     }
2963    
2964     @@ -261,7 +276,8 @@ static int copy_ctl_value_from_user(struct snd_card *card,
2965     }
2966    
2967     /* restore the value to 32bit */
2968     -static int copy_ctl_value_to_user(struct snd_ctl_elem_value32 __user *data32,
2969     +static int copy_ctl_value_to_user(void __user *userdata,
2970     + void __user *valuep,
2971     struct snd_ctl_elem_value *data,
2972     int type, int count)
2973     {
2974     @@ -270,22 +286,22 @@ static int copy_ctl_value_to_user(struct snd_ctl_elem_value32 __user *data32,
2975     if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN ||
2976     type == SNDRV_CTL_ELEM_TYPE_INTEGER) {
2977     for (i = 0; i < count; i++) {
2978     + s32 __user *intp = valuep;
2979     int val;
2980     val = data->value.integer.value[i];
2981     - if (put_user(val, &data32->value.integer[i]))
2982     + if (put_user(val, &intp[i]))
2983     return -EFAULT;
2984     }
2985     } else {
2986     size = get_elem_size(type, count);
2987     - if (copy_to_user(data32->value.data,
2988     - data->value.bytes.data, size))
2989     + if (copy_to_user(valuep, data->value.bytes.data, size))
2990     return -EFAULT;
2991     }
2992     return 0;
2993     }
2994    
2995     -static int snd_ctl_elem_read_user_compat(struct snd_card *card,
2996     - struct snd_ctl_elem_value32 __user *data32)
2997     +static int ctl_elem_read_user(struct snd_card *card,
2998     + void __user *userdata, void __user *valuep)
2999     {
3000     struct snd_ctl_elem_value *data;
3001     int err, type, count;
3002     @@ -294,7 +310,9 @@ static int snd_ctl_elem_read_user_compat(struct snd_card *card,
3003     if (data == NULL)
3004     return -ENOMEM;
3005    
3006     - if ((err = copy_ctl_value_from_user(card, data, data32, &type, &count)) < 0)
3007     + err = copy_ctl_value_from_user(card, data, userdata, valuep,
3008     + &type, &count);
3009     + if (err < 0)
3010     goto error;
3011    
3012     snd_power_lock(card);
3013     @@ -303,14 +321,15 @@ static int snd_ctl_elem_read_user_compat(struct snd_card *card,
3014     err = snd_ctl_elem_read(card, data);
3015     snd_power_unlock(card);
3016     if (err >= 0)
3017     - err = copy_ctl_value_to_user(data32, data, type, count);
3018     + err = copy_ctl_value_to_user(userdata, valuep, data,
3019     + type, count);
3020     error:
3021     kfree(data);
3022     return err;
3023     }
3024    
3025     -static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
3026     - struct snd_ctl_elem_value32 __user *data32)
3027     +static int ctl_elem_write_user(struct snd_ctl_file *file,
3028     + void __user *userdata, void __user *valuep)
3029     {
3030     struct snd_ctl_elem_value *data;
3031     struct snd_card *card = file->card;
3032     @@ -320,7 +339,9 @@ static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
3033     if (data == NULL)
3034     return -ENOMEM;
3035    
3036     - if ((err = copy_ctl_value_from_user(card, data, data32, &type, &count)) < 0)
3037     + err = copy_ctl_value_from_user(card, data, userdata, valuep,
3038     + &type, &count);
3039     + if (err < 0)
3040     goto error;
3041    
3042     snd_power_lock(card);
3043     @@ -329,12 +350,39 @@ static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
3044     err = snd_ctl_elem_write(card, file, data);
3045     snd_power_unlock(card);
3046     if (err >= 0)
3047     - err = copy_ctl_value_to_user(data32, data, type, count);
3048     + err = copy_ctl_value_to_user(userdata, valuep, data,
3049     + type, count);
3050     error:
3051     kfree(data);
3052     return err;
3053     }
3054    
3055     +static int snd_ctl_elem_read_user_compat(struct snd_card *card,
3056     + struct snd_ctl_elem_value32 __user *data32)
3057     +{
3058     + return ctl_elem_read_user(card, data32, &data32->value);
3059     +}
3060     +
3061     +static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
3062     + struct snd_ctl_elem_value32 __user *data32)
3063     +{
3064     + return ctl_elem_write_user(file, data32, &data32->value);
3065     +}
3066     +
3067     +#ifdef CONFIG_X86_X32
3068     +static int snd_ctl_elem_read_user_x32(struct snd_card *card,
3069     + struct snd_ctl_elem_value_x32 __user *data32)
3070     +{
3071     + return ctl_elem_read_user(card, data32, &data32->value);
3072     +}
3073     +
3074     +static int snd_ctl_elem_write_user_x32(struct snd_ctl_file *file,
3075     + struct snd_ctl_elem_value_x32 __user *data32)
3076     +{
3077     + return ctl_elem_write_user(file, data32, &data32->value);
3078     +}
3079     +#endif /* CONFIG_X86_X32 */
3080     +
3081     /* add or replace a user control */
3082     static int snd_ctl_elem_add_compat(struct snd_ctl_file *file,
3083     struct snd_ctl_elem_info32 __user *data32,
3084     @@ -393,6 +441,10 @@ enum {
3085     SNDRV_CTL_IOCTL_ELEM_WRITE32 = _IOWR('U', 0x13, struct snd_ctl_elem_value32),
3086     SNDRV_CTL_IOCTL_ELEM_ADD32 = _IOWR('U', 0x17, struct snd_ctl_elem_info32),
3087     SNDRV_CTL_IOCTL_ELEM_REPLACE32 = _IOWR('U', 0x18, struct snd_ctl_elem_info32),
3088     +#ifdef CONFIG_X86_X32
3089     + SNDRV_CTL_IOCTL_ELEM_READ_X32 = _IOWR('U', 0x12, struct snd_ctl_elem_value_x32),
3090     + SNDRV_CTL_IOCTL_ELEM_WRITE_X32 = _IOWR('U', 0x13, struct snd_ctl_elem_value_x32),
3091     +#endif /* CONFIG_X86_X32 */
3092     };
3093    
3094     static inline long snd_ctl_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
3095     @@ -431,6 +483,12 @@ static inline long snd_ctl_ioctl_compat(struct file *file, unsigned int cmd, uns
3096     return snd_ctl_elem_add_compat(ctl, argp, 0);
3097     case SNDRV_CTL_IOCTL_ELEM_REPLACE32:
3098     return snd_ctl_elem_add_compat(ctl, argp, 1);
3099     +#ifdef CONFIG_X86_X32
3100     + case SNDRV_CTL_IOCTL_ELEM_READ_X32:
3101     + return snd_ctl_elem_read_user_x32(ctl->card, argp);
3102     + case SNDRV_CTL_IOCTL_ELEM_WRITE_X32:
3103     + return snd_ctl_elem_write_user_x32(ctl, argp);
3104     +#endif /* CONFIG_X86_X32 */
3105     }
3106    
3107     down_read(&snd_ioctl_rwsem);
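A sketch, not from this patch, of why a dedicated x32 struct and dedicated ioctl numbers are needed at all: the i386 compat layout aligns 64-bit members to 4 bytes, while the x32 ABI uses natural 8-byte alignment, so a union holding s64 values starts at a different offset and the struct has a different size (and _IOWR() folds that size into the command number). The two structs below only model the alignment difference; they are not the ALSA definitions.

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* model of the ia32 layout: 64-bit members forced down to 4-byte alignment */
    struct val_ia32 {
            uint32_t indirect;
            union { int64_t integer64[64]; unsigned char data[512]; } value;
    } __attribute__((packed, aligned(4)));

    /* model of the x32/x86-64 layout: natural 8-byte alignment for int64_t */
    struct val_x32 {
            uint32_t indirect;
            union { int64_t integer64[64]; unsigned char data[512]; } value;
    };

    int main(void)
    {
            printf("ia32 value offset: %zu\n", offsetof(struct val_ia32, value)); /* 4 */
            printf("x32  value offset: %zu\n", offsetof(struct val_x32,  value)); /* 8 */
            return 0;
    }
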
3108     diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
3109     index 9630e9f72b7b..1f64ab0c2a95 100644
3110     --- a/sound/core/pcm_compat.c
3111     +++ b/sound/core/pcm_compat.c
3112     @@ -183,6 +183,14 @@ static int snd_pcm_ioctl_channel_info_compat(struct snd_pcm_substream *substream
3113     return err;
3114     }
3115    
3116     +#ifdef CONFIG_X86_X32
3117     +/* X32 ABI has the same struct as x86-64 for snd_pcm_channel_info */
3118     +static int snd_pcm_channel_info_user(struct snd_pcm_substream *substream,
3119     + struct snd_pcm_channel_info __user *src);
3120     +#define snd_pcm_ioctl_channel_info_x32(s, p) \
3121     + snd_pcm_channel_info_user(s, p)
3122     +#endif /* CONFIG_X86_X32 */
3123     +
3124     struct snd_pcm_status32 {
3125     s32 state;
3126     struct compat_timespec trigger_tstamp;
3127     @@ -243,6 +251,71 @@ static int snd_pcm_status_user_compat(struct snd_pcm_substream *substream,
3128     return err;
3129     }
3130    
3131     +#ifdef CONFIG_X86_X32
3132     +/* X32 ABI has 64bit timespec and 64bit alignment */
3133     +struct snd_pcm_status_x32 {
3134     + s32 state;
3135     + u32 rsvd; /* alignment */
3136     + struct timespec trigger_tstamp;
3137     + struct timespec tstamp;
3138     + u32 appl_ptr;
3139     + u32 hw_ptr;
3140     + s32 delay;
3141     + u32 avail;
3142     + u32 avail_max;
3143     + u32 overrange;
3144     + s32 suspended_state;
3145     + u32 audio_tstamp_data;
3146     + struct timespec audio_tstamp;
3147     + struct timespec driver_tstamp;
3148     + u32 audio_tstamp_accuracy;
3149     + unsigned char reserved[52-2*sizeof(struct timespec)];
3150     +} __packed;
3151     +
3152     +#define put_timespec(src, dst) copy_to_user(dst, src, sizeof(*dst))
3153     +
3154     +static int snd_pcm_status_user_x32(struct snd_pcm_substream *substream,
3155     + struct snd_pcm_status_x32 __user *src,
3156     + bool ext)
3157     +{
3158     + struct snd_pcm_status status;
3159     + int err;
3160     +
3161     + memset(&status, 0, sizeof(status));
3162     + /*
3163     + * with extension, parameters are read/write,
3164     + * get audio_tstamp_data from user,
3165     + * ignore rest of status structure
3166     + */
3167     + if (ext && get_user(status.audio_tstamp_data,
3168     + (u32 __user *)(&src->audio_tstamp_data)))
3169     + return -EFAULT;
3170     + err = snd_pcm_status(substream, &status);
3171     + if (err < 0)
3172     + return err;
3173     +
3174     + if (clear_user(src, sizeof(*src)))
3175     + return -EFAULT;
3176     + if (put_user(status.state, &src->state) ||
3177     + put_timespec(&status.trigger_tstamp, &src->trigger_tstamp) ||
3178     + put_timespec(&status.tstamp, &src->tstamp) ||
3179     + put_user(status.appl_ptr, &src->appl_ptr) ||
3180     + put_user(status.hw_ptr, &src->hw_ptr) ||
3181     + put_user(status.delay, &src->delay) ||
3182     + put_user(status.avail, &src->avail) ||
3183     + put_user(status.avail_max, &src->avail_max) ||
3184     + put_user(status.overrange, &src->overrange) ||
3185     + put_user(status.suspended_state, &src->suspended_state) ||
3186     + put_user(status.audio_tstamp_data, &src->audio_tstamp_data) ||
3187     + put_timespec(&status.audio_tstamp, &src->audio_tstamp) ||
3188     + put_timespec(&status.driver_tstamp, &src->driver_tstamp) ||
3189     + put_user(status.audio_tstamp_accuracy, &src->audio_tstamp_accuracy))
3190     + return -EFAULT;
3191     +
3192     + return err;
3193     +}
3194     +#endif /* CONFIG_X86_X32 */
3195     +
3196     /* both for HW_PARAMS and HW_REFINE */
3197     static int snd_pcm_ioctl_hw_params_compat(struct snd_pcm_substream *substream,
3198     int refine,
3199     @@ -469,6 +542,93 @@ static int snd_pcm_ioctl_sync_ptr_compat(struct snd_pcm_substream *substream,
3200     return 0;
3201     }
3202    
3203     +#ifdef CONFIG_X86_X32
3204     +/* X32 ABI has 64bit timespec and 64bit alignment */
3205     +struct snd_pcm_mmap_status_x32 {
3206     + s32 state;
3207     + s32 pad1;
3208     + u32 hw_ptr;
3209     + u32 pad2; /* alignment */
3210     + struct timespec tstamp;
3211     + s32 suspended_state;
3212     + struct timespec audio_tstamp;
3213     +} __packed;
3214     +
3215     +struct snd_pcm_mmap_control_x32 {
3216     + u32 appl_ptr;
3217     + u32 avail_min;
3218     +};
3219     +
3220     +struct snd_pcm_sync_ptr_x32 {
3221     + u32 flags;
3222     + u32 rsvd; /* alignment */
3223     + union {
3224     + struct snd_pcm_mmap_status_x32 status;
3225     + unsigned char reserved[64];
3226     + } s;
3227     + union {
3228     + struct snd_pcm_mmap_control_x32 control;
3229     + unsigned char reserved[64];
3230     + } c;
3231     +} __packed;
3232     +
3233     +static int snd_pcm_ioctl_sync_ptr_x32(struct snd_pcm_substream *substream,
3234     + struct snd_pcm_sync_ptr_x32 __user *src)
3235     +{
3236     + struct snd_pcm_runtime *runtime = substream->runtime;
3237     + volatile struct snd_pcm_mmap_status *status;
3238     + volatile struct snd_pcm_mmap_control *control;
3239     + u32 sflags;
3240     + struct snd_pcm_mmap_control scontrol;
3241     + struct snd_pcm_mmap_status sstatus;
3242     + snd_pcm_uframes_t boundary;
3243     + int err;
3244     +
3245     + if (snd_BUG_ON(!runtime))
3246     + return -EINVAL;
3247     +
3248     + if (get_user(sflags, &src->flags) ||
3249     + get_user(scontrol.appl_ptr, &src->c.control.appl_ptr) ||
3250     + get_user(scontrol.avail_min, &src->c.control.avail_min))
3251     + return -EFAULT;
3252     + if (sflags & SNDRV_PCM_SYNC_PTR_HWSYNC) {
3253     + err = snd_pcm_hwsync(substream);
3254     + if (err < 0)
3255     + return err;
3256     + }
3257     + status = runtime->status;
3258     + control = runtime->control;
3259     + boundary = recalculate_boundary(runtime);
3260     + if (!boundary)
3261     + boundary = 0x7fffffff;
3262     + snd_pcm_stream_lock_irq(substream);
3263     + /* FIXME: we should consider the boundary for the sync from app */
3264     + if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL))
3265     + control->appl_ptr = scontrol.appl_ptr;
3266     + else
3267     + scontrol.appl_ptr = control->appl_ptr % boundary;
3268     + if (!(sflags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
3269     + control->avail_min = scontrol.avail_min;
3270     + else
3271     + scontrol.avail_min = control->avail_min;
3272     + sstatus.state = status->state;
3273     + sstatus.hw_ptr = status->hw_ptr % boundary;
3274     + sstatus.tstamp = status->tstamp;
3275     + sstatus.suspended_state = status->suspended_state;
3276     + sstatus.audio_tstamp = status->audio_tstamp;
3277     + snd_pcm_stream_unlock_irq(substream);
3278     + if (put_user(sstatus.state, &src->s.status.state) ||
3279     + put_user(sstatus.hw_ptr, &src->s.status.hw_ptr) ||
3280     + put_timespec(&sstatus.tstamp, &src->s.status.tstamp) ||
3281     + put_user(sstatus.suspended_state, &src->s.status.suspended_state) ||
3282     + put_timespec(&sstatus.audio_tstamp, &src->s.status.audio_tstamp) ||
3283     + put_user(scontrol.appl_ptr, &src->c.control.appl_ptr) ||
3284     + put_user(scontrol.avail_min, &src->c.control.avail_min))
3285     + return -EFAULT;
3286     +
3287     + return 0;
3288     +}
3289     +#endif /* CONFIG_X86_X32 */
3290    
3291     /*
3292     */
3293     @@ -487,7 +647,12 @@ enum {
3294     SNDRV_PCM_IOCTL_WRITEN_FRAMES32 = _IOW('A', 0x52, struct snd_xfern32),
3295     SNDRV_PCM_IOCTL_READN_FRAMES32 = _IOR('A', 0x53, struct snd_xfern32),
3296     SNDRV_PCM_IOCTL_SYNC_PTR32 = _IOWR('A', 0x23, struct snd_pcm_sync_ptr32),
3297     -
3298     +#ifdef CONFIG_X86_X32
3299     + SNDRV_PCM_IOCTL_CHANNEL_INFO_X32 = _IOR('A', 0x32, struct snd_pcm_channel_info),
3300     + SNDRV_PCM_IOCTL_STATUS_X32 = _IOR('A', 0x20, struct snd_pcm_status_x32),
3301     + SNDRV_PCM_IOCTL_STATUS_EXT_X32 = _IOWR('A', 0x24, struct snd_pcm_status_x32),
3302     + SNDRV_PCM_IOCTL_SYNC_PTR_X32 = _IOWR('A', 0x23, struct snd_pcm_sync_ptr_x32),
3303     +#endif /* CONFIG_X86_X32 */
3304     };
3305    
3306     static long snd_pcm_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
3307     @@ -559,6 +724,16 @@ static long snd_pcm_ioctl_compat(struct file *file, unsigned int cmd, unsigned l
3308     return snd_pcm_ioctl_rewind_compat(substream, argp);
3309     case SNDRV_PCM_IOCTL_FORWARD32:
3310     return snd_pcm_ioctl_forward_compat(substream, argp);
3311     +#ifdef CONFIG_X86_X32
3312     + case SNDRV_PCM_IOCTL_STATUS_X32:
3313     + return snd_pcm_status_user_x32(substream, argp, false);
3314     + case SNDRV_PCM_IOCTL_STATUS_EXT_X32:
3315     + return snd_pcm_status_user_x32(substream, argp, true);
3316     + case SNDRV_PCM_IOCTL_SYNC_PTR_X32:
3317     + return snd_pcm_ioctl_sync_ptr_x32(substream, argp);
3318     + case SNDRV_PCM_IOCTL_CHANNEL_INFO_X32:
3319     + return snd_pcm_ioctl_channel_info_x32(substream, argp);
3320     +#endif /* CONFIG_X86_X32 */
3321     }
3322    
3323     return -ENOIOCTLCMD;
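A sketch, not from this patch, of why the *_X32 entries above get distinct command values even though they reuse the same ('A', nr) pairs: _IOR()/_IOWR() encode sizeof() of the argument type into the ioctl number (nr in bits 0-7, type in 8-15, size in 16-29, direction in 30-31, per asm-generic/ioctl.h), so a layout with 64-bit timespecs produces a different number for the compat switch to match. The two sizes used below are made-up illustration values.

    #include <stdio.h>

    /* sketch of the asm-generic/ioctl.h packing: dir|size|type|nr */
    #define SK_IOC(dir, type, nr, size) \
            (((unsigned)(dir) << 30) | ((unsigned)(size) << 16) | ((unsigned)(type) << 8) | (unsigned)(nr))

    int main(void)
    {
            /* same ('A', 0x20) pair, two different struct sizes -> two command numbers */
            unsigned a = SK_IOC(2, 'A', 0x20, 136);  /* hypothetical 32-bit layout size */
            unsigned b = SK_IOC(2, 'A', 0x20, 144);  /* hypothetical x32 layout size */

            printf("%#x %#x -> %s\n", a, b, a == b ? "collide" : "distinct");
            return 0;
    }
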
3324     diff --git a/sound/core/rawmidi_compat.c b/sound/core/rawmidi_compat.c
3325     index 5268c1f58c25..09a89094dcf7 100644
3326     --- a/sound/core/rawmidi_compat.c
3327     +++ b/sound/core/rawmidi_compat.c
3328     @@ -94,9 +94,58 @@ static int snd_rawmidi_ioctl_status_compat(struct snd_rawmidi_file *rfile,
3329     return 0;
3330     }
3331    
3332     +#ifdef CONFIG_X86_X32
3333     +/* X32 ABI has 64bit timespec and 64bit alignment */
3334     +struct snd_rawmidi_status_x32 {
3335     + s32 stream;
3336     + u32 rsvd; /* alignment */
3337     + struct timespec tstamp;
3338     + u32 avail;
3339     + u32 xruns;
3340     + unsigned char reserved[16];
3341     +} __attribute__((packed));
3342     +
3343     +#define put_timespec(src, dst) copy_to_user(dst, src, sizeof(*dst))
3344     +
3345     +static int snd_rawmidi_ioctl_status_x32(struct snd_rawmidi_file *rfile,
3346     + struct snd_rawmidi_status_x32 __user *src)
3347     +{
3348     + int err;
3349     + struct snd_rawmidi_status status;
3350     +
3351     + if (rfile->output == NULL)
3352     + return -EINVAL;
3353     + if (get_user(status.stream, &src->stream))
3354     + return -EFAULT;
3355     +
3356     + switch (status.stream) {
3357     + case SNDRV_RAWMIDI_STREAM_OUTPUT:
3358     + err = snd_rawmidi_output_status(rfile->output, &status);
3359     + break;
3360     + case SNDRV_RAWMIDI_STREAM_INPUT:
3361     + err = snd_rawmidi_input_status(rfile->input, &status);
3362     + break;
3363     + default:
3364     + return -EINVAL;
3365     + }
3366     + if (err < 0)
3367     + return err;
3368     +
3369     + if (put_timespec(&status.tstamp, &src->tstamp) ||
3370     + put_user(status.avail, &src->avail) ||
3371     + put_user(status.xruns, &src->xruns))
3372     + return -EFAULT;
3373     +
3374     + return 0;
3375     +}
3376     +#endif /* CONFIG_X86_X32 */
3377     +
3378     enum {
3379     SNDRV_RAWMIDI_IOCTL_PARAMS32 = _IOWR('W', 0x10, struct snd_rawmidi_params32),
3380     SNDRV_RAWMIDI_IOCTL_STATUS32 = _IOWR('W', 0x20, struct snd_rawmidi_status32),
3381     +#ifdef CONFIG_X86_X32
3382     + SNDRV_RAWMIDI_IOCTL_STATUS_X32 = _IOWR('W', 0x20, struct snd_rawmidi_status_x32),
3383     +#endif /* CONFIG_X86_X32 */
3384     };
3385    
3386     static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
3387     @@ -115,6 +164,10 @@ static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd, unsign
3388     return snd_rawmidi_ioctl_params_compat(rfile, argp);
3389     case SNDRV_RAWMIDI_IOCTL_STATUS32:
3390     return snd_rawmidi_ioctl_status_compat(rfile, argp);
3391     +#ifdef CONFIG_X86_X32
3392     + case SNDRV_RAWMIDI_IOCTL_STATUS_X32:
3393     + return snd_rawmidi_ioctl_status_x32(rfile, argp);
3394     +#endif /* CONFIG_X86_X32 */
3395     }
3396     return -ENOIOCTLCMD;
3397     }
3398     diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c
3399     index 72873a46afeb..4b53b8f2330f 100644
3400     --- a/sound/core/seq/oss/seq_oss.c
3401     +++ b/sound/core/seq/oss/seq_oss.c
3402     @@ -148,8 +148,6 @@ odev_release(struct inode *inode, struct file *file)
3403     if ((dp = file->private_data) == NULL)
3404     return 0;
3405    
3406     - snd_seq_oss_drain_write(dp);
3407     -
3408     mutex_lock(&register_mutex);
3409     snd_seq_oss_release(dp);
3410     mutex_unlock(&register_mutex);
3411     diff --git a/sound/core/seq/oss/seq_oss_device.h b/sound/core/seq/oss/seq_oss_device.h
3412     index b43924325249..d7b4d016b547 100644
3413     --- a/sound/core/seq/oss/seq_oss_device.h
3414     +++ b/sound/core/seq/oss/seq_oss_device.h
3415     @@ -127,7 +127,6 @@ int snd_seq_oss_write(struct seq_oss_devinfo *dp, const char __user *buf, int co
3416     unsigned int snd_seq_oss_poll(struct seq_oss_devinfo *dp, struct file *file, poll_table * wait);
3417    
3418     void snd_seq_oss_reset(struct seq_oss_devinfo *dp);
3419     -void snd_seq_oss_drain_write(struct seq_oss_devinfo *dp);
3420    
3421     /* */
3422     void snd_seq_oss_process_queue(struct seq_oss_devinfo *dp, abstime_t time);
3423     diff --git a/sound/core/seq/oss/seq_oss_init.c b/sound/core/seq/oss/seq_oss_init.c
3424     index dad5b1123e46..0b9c18b2e45f 100644
3425     --- a/sound/core/seq/oss/seq_oss_init.c
3426     +++ b/sound/core/seq/oss/seq_oss_init.c
3427     @@ -436,22 +436,6 @@ snd_seq_oss_release(struct seq_oss_devinfo *dp)
3428    
3429    
3430     /*
3431     - * Wait until the queue is empty (if we don't have nonblock)
3432     - */
3433     -void
3434     -snd_seq_oss_drain_write(struct seq_oss_devinfo *dp)
3435     -{
3436     - if (! dp->timer->running)
3437     - return;
3438     - if (is_write_mode(dp->file_mode) && !is_nonblock_mode(dp->file_mode) &&
3439     - dp->writeq) {
3440     - while (snd_seq_oss_writeq_sync(dp->writeq))
3441     - ;
3442     - }
3443     -}
3444     -
3445     -
3446     -/*
3447     * reset sequencer devices
3448     */
3449     void
3450     diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c
3451     index e05802ae6e1b..2e908225d754 100644
3452     --- a/sound/core/timer_compat.c
3453     +++ b/sound/core/timer_compat.c
3454     @@ -70,13 +70,14 @@ static int snd_timer_user_status_compat(struct file *file,
3455     struct snd_timer_status32 __user *_status)
3456     {
3457     struct snd_timer_user *tu;
3458     - struct snd_timer_status status;
3459     + struct snd_timer_status32 status;
3460    
3461     tu = file->private_data;
3462     if (snd_BUG_ON(!tu->timeri))
3463     return -ENXIO;
3464     memset(&status, 0, sizeof(status));
3465     - status.tstamp = tu->tstamp;
3466     + status.tstamp.tv_sec = tu->tstamp.tv_sec;
3467     + status.tstamp.tv_nsec = tu->tstamp.tv_nsec;
3468     status.resolution = snd_timer_resolution(tu->timeri);
3469     status.lost = tu->timeri->lost;
3470     status.overrun = tu->overrun;
3471     @@ -88,12 +89,21 @@ static int snd_timer_user_status_compat(struct file *file,
3472     return 0;
3473     }
3474    
3475     +#ifdef CONFIG_X86_X32
3476     +/* X32 ABI has the same struct as x86-64 */
3477     +#define snd_timer_user_status_x32(file, s) \
3478     + snd_timer_user_status(file, s)
3479     +#endif /* CONFIG_X86_X32 */
3480     +
3481     /*
3482     */
3483    
3484     enum {
3485     SNDRV_TIMER_IOCTL_INFO32 = _IOR('T', 0x11, struct snd_timer_info32),
3486     SNDRV_TIMER_IOCTL_STATUS32 = _IOW('T', 0x14, struct snd_timer_status32),
3487     +#ifdef CONFIG_X86_X32
3488     + SNDRV_TIMER_IOCTL_STATUS_X32 = _IOW('T', 0x14, struct snd_timer_status),
3489     +#endif /* CONFIG_X86_X32 */
3490     };
3491    
3492     static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
3493     @@ -122,6 +132,10 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, uns
3494     return snd_timer_user_info_compat(file, argp);
3495     case SNDRV_TIMER_IOCTL_STATUS32:
3496     return snd_timer_user_status_compat(file, argp);
3497     +#ifdef CONFIG_X86_X32
3498     + case SNDRV_TIMER_IOCTL_STATUS_X32:
3499     + return snd_timer_user_status_x32(file, argp);
3500     +#endif /* CONFIG_X86_X32 */
3501     }
3502     return -ENOIOCTLCMD;
3503     }
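A sketch, not from this patch, of the per-field timestamp copy introduced above: the 32-bit status struct carries a compat timespec whose fields are narrower than the native ones, so the whole struct cannot simply be assigned; copying tv_sec and tv_nsec individually performs the narrowing one field at a time. The two structs are illustrative stand-ins.

    #include <stdint.h>

    struct ts_native { int64_t tv_sec; long tv_nsec; };     /* stand-in for struct timespec */
    struct ts_compat { int32_t tv_sec; int32_t tv_nsec; };  /* stand-in for the 32-bit layout */

    /* copy one field at a time, as the hunk above does for status.tstamp */
    static void ts_to_compat(struct ts_compat *dst, const struct ts_native *src)
    {
            dst->tv_sec  = (int32_t)src->tv_sec;   /* narrowed per field */
            dst->tv_nsec = (int32_t)src->tv_nsec;
    }
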
3504     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
3505     index df34c78a6ced..91cc6897d595 100644
3506     --- a/sound/pci/hda/patch_realtek.c
3507     +++ b/sound/pci/hda/patch_realtek.c
3508     @@ -3477,6 +3477,29 @@ static void gpio2_mic_hotkey_event(struct hda_codec *codec,
3509     input_sync(spec->kb_dev);
3510     }
3511    
3512     +static int alc_register_micmute_input_device(struct hda_codec *codec)
3513     +{
3514     + struct alc_spec *spec = codec->spec;
3515     +
3516     + spec->kb_dev = input_allocate_device();
3517     + if (!spec->kb_dev) {
3518     + codec_err(codec, "Out of memory (input_allocate_device)\n");
3519     + return -ENOMEM;
3520     + }
3521     + spec->kb_dev->name = "Microphone Mute Button";
3522     + spec->kb_dev->evbit[0] = BIT_MASK(EV_KEY);
3523     + spec->kb_dev->keybit[BIT_WORD(KEY_MICMUTE)] = BIT_MASK(KEY_MICMUTE);
3524     +
3525     + if (input_register_device(spec->kb_dev)) {
3526     + codec_err(codec, "input_register_device failed\n");
3527     + input_free_device(spec->kb_dev);
3528     + spec->kb_dev = NULL;
3529     + return -ENOMEM;
3530     + }
3531     +
3532     + return 0;
3533     +}
3534     +
3535     static void alc280_fixup_hp_gpio2_mic_hotkey(struct hda_codec *codec,
3536     const struct hda_fixup *fix, int action)
3537     {
3538     @@ -3494,20 +3517,8 @@ static void alc280_fixup_hp_gpio2_mic_hotkey(struct hda_codec *codec,
3539     struct alc_spec *spec = codec->spec;
3540    
3541     if (action == HDA_FIXUP_ACT_PRE_PROBE) {
3542     - spec->kb_dev = input_allocate_device();
3543     - if (!spec->kb_dev) {
3544     - codec_err(codec, "Out of memory (input_allocate_device)\n");
3545     + if (alc_register_micmute_input_device(codec) != 0)
3546     return;
3547     - }
3548     - spec->kb_dev->name = "Microphone Mute Button";
3549     - spec->kb_dev->evbit[0] = BIT_MASK(EV_KEY);
3550     - spec->kb_dev->keybit[BIT_WORD(KEY_MICMUTE)] = BIT_MASK(KEY_MICMUTE);
3551     - if (input_register_device(spec->kb_dev)) {
3552     - codec_err(codec, "input_register_device failed\n");
3553     - input_free_device(spec->kb_dev);
3554     - spec->kb_dev = NULL;
3555     - return;
3556     - }
3557    
3558     snd_hda_add_verbs(codec, gpio_init);
3559     snd_hda_codec_write_cache(codec, codec->core.afg, 0,
3560     @@ -3537,6 +3548,47 @@ static void alc280_fixup_hp_gpio2_mic_hotkey(struct hda_codec *codec,
3561     }
3562     }
3563    
3564     +static void alc233_fixup_lenovo_line2_mic_hotkey(struct hda_codec *codec,
3565     + const struct hda_fixup *fix, int action)
3566     +{
3567     + /* Line2 = mic mute hotkey
3568     + GPIO2 = mic mute LED */
3569     + static const struct hda_verb gpio_init[] = {
3570     + { 0x01, AC_VERB_SET_GPIO_MASK, 0x04 },
3571     + { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x04 },
3572     + {}
3573     + };
3574     +
3575     + struct alc_spec *spec = codec->spec;
3576     +
3577     + if (action == HDA_FIXUP_ACT_PRE_PROBE) {
3578     + if (alc_register_micmute_input_device(codec) != 0)
3579     + return;
3580     +
3581     + snd_hda_add_verbs(codec, gpio_init);
3582     + snd_hda_jack_detect_enable_callback(codec, 0x1b,
3583     + gpio2_mic_hotkey_event);
3584     +
3585     + spec->gen.cap_sync_hook = alc_fixup_gpio_mic_mute_hook;
3586     + spec->gpio_led = 0;
3587     + spec->mute_led_polarity = 0;
3588     + spec->gpio_mic_led_mask = 0x04;
3589     + return;
3590     + }
3591     +
3592     + if (!spec->kb_dev)
3593     + return;
3594     +
3595     + switch (action) {
3596     + case HDA_FIXUP_ACT_PROBE:
3597     + spec->init_amp = ALC_INIT_DEFAULT;
3598     + break;
3599     + case HDA_FIXUP_ACT_FREE:
3600     + input_unregister_device(spec->kb_dev);
3601     + spec->kb_dev = NULL;
3602     + }
3603     +}
3604     +
3605     static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec,
3606     const struct hda_fixup *fix, int action)
3607     {
3608     @@ -3720,6 +3772,10 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
3609    
3610     static void alc_headset_mode_default(struct hda_codec *codec)
3611     {
3612     + static struct coef_fw coef0225[] = {
3613     + UPDATE_COEF(0x45, 0x3f<<10, 0x34<<10),
3614     + {}
3615     + };
3616     static struct coef_fw coef0255[] = {
3617     WRITE_COEF(0x45, 0xc089),
3618     WRITE_COEF(0x45, 0xc489),
3619     @@ -3761,6 +3817,9 @@ static void alc_headset_mode_default(struct hda_codec *codec)
3620     };
3621    
3622     switch (codec->core.vendor_id) {
3623     + case 0x10ec0225:
3624     + alc_process_coef_fw(codec, coef0225);
3625     + break;
3626     case 0x10ec0255:
3627     case 0x10ec0256:
3628     alc_process_coef_fw(codec, coef0255);
3629     @@ -4570,6 +4629,14 @@ enum {
3630     ALC288_FIXUP_DISABLE_AAMIX,
3631     ALC292_FIXUP_DELL_E7X,
3632     ALC292_FIXUP_DISABLE_AAMIX,
3633     + ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
3634     + ALC275_FIXUP_DELL_XPS,
3635     + ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE,
3636     + ALC293_FIXUP_LENOVO_SPK_NOISE,
3637     + ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
3638     + ALC255_FIXUP_DELL_SPK_NOISE,
3639     + ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
3640     + ALC280_FIXUP_HP_HEADSET_MIC,
3641     };
3642    
3643     static const struct hda_fixup alc269_fixups[] = {
3644     @@ -5131,6 +5198,71 @@ static const struct hda_fixup alc269_fixups[] = {
3645     .chained = true,
3646     .chain_id = ALC292_FIXUP_DISABLE_AAMIX
3647     },
3648     + [ALC298_FIXUP_DELL1_MIC_NO_PRESENCE] = {
3649     + .type = HDA_FIXUP_PINS,
3650     + .v.pins = (const struct hda_pintbl[]) {
3651     + { 0x18, 0x01a1913c }, /* use as headset mic, without its own jack detect */
3652     + { 0x1a, 0x01a1913d }, /* use as headphone mic, without its own jack detect */
3653     + { }
3654     + },
3655     + .chained = true,
3656     + .chain_id = ALC269_FIXUP_HEADSET_MODE
3657     + },
3658     + [ALC275_FIXUP_DELL_XPS] = {
3659     + .type = HDA_FIXUP_VERBS,
3660     + .v.verbs = (const struct hda_verb[]) {
3661     + /* Enables internal speaker */
3662     + {0x20, AC_VERB_SET_COEF_INDEX, 0x1f},
3663     + {0x20, AC_VERB_SET_PROC_COEF, 0x00c0},
3664     + {0x20, AC_VERB_SET_COEF_INDEX, 0x30},
3665     + {0x20, AC_VERB_SET_PROC_COEF, 0x00b1},
3666     + {}
3667     + }
3668     + },
3669     + [ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE] = {
3670     + .type = HDA_FIXUP_VERBS,
3671     + .v.verbs = (const struct hda_verb[]) {
3672     + /* Disable pass-through path for FRONT 14h */
3673     + {0x20, AC_VERB_SET_COEF_INDEX, 0x36},
3674     + {0x20, AC_VERB_SET_PROC_COEF, 0x1737},
3675     + {}
3676     + },
3677     + .chained = true,
3678     + .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
3679     + },
3680     + [ALC293_FIXUP_LENOVO_SPK_NOISE] = {
3681     + .type = HDA_FIXUP_FUNC,
3682     + .v.func = alc_fixup_disable_aamix,
3683     + .chained = true,
3684     + .chain_id = ALC269_FIXUP_THINKPAD_ACPI
3685     + },
3686     + [ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY] = {
3687     + .type = HDA_FIXUP_FUNC,
3688     + .v.func = alc233_fixup_lenovo_line2_mic_hotkey,
3689     + },
3690     + [ALC255_FIXUP_DELL_SPK_NOISE] = {
3691     + .type = HDA_FIXUP_FUNC,
3692     + .v.func = alc_fixup_disable_aamix,
3693     + .chained = true,
3694     + .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
3695     + },
3696     + [ALC225_FIXUP_DELL1_MIC_NO_PRESENCE] = {
3697     + .type = HDA_FIXUP_VERBS,
3698     + .v.verbs = (const struct hda_verb[]) {
3699     + /* Disable pass-through path for FRONT 14h */
3700     + { 0x20, AC_VERB_SET_COEF_INDEX, 0x36 },
3701     + { 0x20, AC_VERB_SET_PROC_COEF, 0x57d7 },
3702     + {}
3703     + },
3704     + .chained = true,
3705     + .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE
3706     + },
3707     + [ALC280_FIXUP_HP_HEADSET_MIC] = {
3708     + .type = HDA_FIXUP_FUNC,
3709     + .v.func = alc_fixup_disable_aamix,
3710     + .chained = true,
3711     + .chain_id = ALC269_FIXUP_HEADSET_MIC,
3712     + },
3713     };
3714    
3715     static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3716     @@ -5142,10 +5274,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3717     SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
3718     SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK),
3719     SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
3720     + SND_PCI_QUIRK(0x1025, 0x0762, "Acer Aspire E1-472", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
3721     SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
3722     SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
3723     SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
3724     SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
3725     + SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
3726     SND_PCI_QUIRK(0x1028, 0x05ca, "Dell Latitude E7240", ALC292_FIXUP_DELL_E7X),
3727     SND_PCI_QUIRK(0x1028, 0x05cb, "Dell Latitude E7440", ALC292_FIXUP_DELL_E7X),
3728     SND_PCI_QUIRK(0x1028, 0x05da, "Dell Vostro 5460", ALC290_FIXUP_SUBWOOFER),
3729     @@ -5169,6 +5303,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3730     SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
3731     SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
3732     SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
3733     + SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
3734     + SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
3735     SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
3736     SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
3737     SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
3738     @@ -5228,6 +5364,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3739     SND_PCI_QUIRK(0x103c, 0x2335, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
3740     SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
3741     SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
3742     + SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
3743     SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
3744     SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
3745     SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
3746     @@ -5275,6 +5412,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3747     SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK),
3748     SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
3749     SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
3750     + SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
3751     + SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
3752     SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
3753     SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
3754     SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
3755     @@ -5284,6 +5423,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3756     SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
3757     SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
3758     SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
3759     + SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
3760     SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
3761     SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
3762     SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
3763     @@ -5365,6 +5505,9 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
3764     {.id = ALC292_FIXUP_TPT440, .name = "tpt440"},
3765     {}
3766     };
3767     +#define ALC225_STANDARD_PINS \
3768     + {0x12, 0xb7a60130}, \
3769     + {0x21, 0x04211020}
3770    
3771     #define ALC255_STANDARD_PINS \
3772     {0x18, 0x411111f0}, \
3773     @@ -5414,7 +5557,20 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
3774     {0x1d, 0x40700001}, \
3775     {0x1e, 0x411111f0}
3776    
3777     +#define ALC298_STANDARD_PINS \
3778     + {0x18, 0x411111f0}, \
3779     + {0x19, 0x411111f0}, \
3780     + {0x1a, 0x411111f0}, \
3781     + {0x1e, 0x411111f0}, \
3782     + {0x1f, 0x411111f0}
3783     +
3784     static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
3785     + SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
3786     + ALC225_STANDARD_PINS,
3787     + {0x14, 0x901701a0}),
3788     + SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
3789     + ALC225_STANDARD_PINS,
3790     + {0x14, 0x901701b0}),
3791     SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
3792     ALC255_STANDARD_PINS,
3793     {0x12, 0x40300000},
3794     @@ -5708,6 +5864,14 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
3795     {0x16, 0x411111f0},
3796     {0x18, 0x411111f0},
3797     {0x19, 0x411111f0}),
3798     + SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
3799     + ALC298_STANDARD_PINS,
3800     + {0x12, 0x90a60130},
3801     + {0x13, 0x40000000},
3802     + {0x14, 0x411111f0},
3803     + {0x17, 0x90170140},
3804     + {0x1d, 0x4068a36d},
3805     + {0x21, 0x03211020}),
3806     {}
3807     };
3808    
3809     diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
3810     index c19e021ccf66..11246280945d 100644
3811     --- a/sound/pci/rme9652/hdsp.c
3812     +++ b/sound/pci/rme9652/hdsp.c
3813     @@ -2878,7 +2878,7 @@ static int snd_hdsp_get_dds_offset(struct snd_kcontrol *kcontrol, struct snd_ctl
3814     {
3815     struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
3816    
3817     - ucontrol->value.enumerated.item[0] = hdsp_dds_offset(hdsp);
3818     + ucontrol->value.integer.value[0] = hdsp_dds_offset(hdsp);
3819     return 0;
3820     }
3821    
3822     @@ -2890,7 +2890,7 @@ static int snd_hdsp_put_dds_offset(struct snd_kcontrol *kcontrol, struct snd_ctl
3823    
3824     if (!snd_hdsp_use_is_exclusive(hdsp))
3825     return -EBUSY;
3826     - val = ucontrol->value.enumerated.item[0];
3827     + val = ucontrol->value.integer.value[0];
3828     spin_lock_irq(&hdsp->lock);
3829     if (val != hdsp_dds_offset(hdsp))
3830     change = (hdsp_set_dds_offset(hdsp, val) == 0) ? 1 : 0;
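A sketch, not from this patch, of why the enumerated member is the wrong one for these integer controls: the control value is a union, integer controls travel through the long-sized integer array, and the enumerated array holds unsigned int, so indexing the wrong member reads a different width at a different stride. The union below is a simplified model of snd_ctl_elem_value.

    #include <stdio.h>

    /* simplified model of the value union inside snd_ctl_elem_value */
    union ctl_value {
            struct { long value[128]; } integer;
            struct { unsigned int item[128]; } enumerated;
    };

    int main(void)
    {
            union ctl_value v = { .integer.value = { 123, 456 } };

            /* the driver must read the member user space actually filled in */
            printf("integer.value[1]   = %ld\n", v.integer.value[1]);   /* 456 */
            printf("enumerated.item[1] = %u\n",  v.enumerated.item[1]); /* overlaps value[0]'s high half on LP64 */
            return 0;
    }
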
3831     diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
3832     index cb666c73712d..7f6190606f5e 100644
3833     --- a/sound/pci/rme9652/hdspm.c
3834     +++ b/sound/pci/rme9652/hdspm.c
3835     @@ -1601,6 +1601,9 @@ static void hdspm_set_dds_value(struct hdspm *hdspm, int rate)
3836     {
3837     u64 n;
3838    
3839     + if (snd_BUG_ON(rate <= 0))
3840     + return;
3841     +
3842     if (rate >= 112000)
3843     rate /= 4;
3844     else if (rate >= 56000)
3845     @@ -2215,6 +2218,8 @@ static int hdspm_get_system_sample_rate(struct hdspm *hdspm)
3846     } else {
3847     /* slave mode, return external sample rate */
3848     rate = hdspm_external_sample_rate(hdspm);
3849     + if (!rate)
3850     + rate = hdspm->system_sample_rate;
3851     }
3852     }
3853    
3854     @@ -2260,8 +2265,11 @@ static int snd_hdspm_put_system_sample_rate(struct snd_kcontrol *kcontrol,
3855     ucontrol)
3856     {
3857     struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
3858     + int rate = ucontrol->value.integer.value[0];
3859    
3860     - hdspm_set_dds_value(hdspm, ucontrol->value.enumerated.item[0]);
3861     + if (rate < 27000 || rate > 207000)
3862     + return -EINVAL;
3863     + hdspm_set_dds_value(hdspm, ucontrol->value.integer.value[0]);
3864     return 0;
3865     }
3866    
3867     @@ -4449,7 +4457,7 @@ static int snd_hdspm_get_tco_word_term(struct snd_kcontrol *kcontrol,
3868     {
3869     struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
3870    
3871     - ucontrol->value.enumerated.item[0] = hdspm->tco->term;
3872     + ucontrol->value.integer.value[0] = hdspm->tco->term;
3873    
3874     return 0;
3875     }
3876     @@ -4460,8 +4468,8 @@ static int snd_hdspm_put_tco_word_term(struct snd_kcontrol *kcontrol,
3877     {
3878     struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
3879    
3880     - if (hdspm->tco->term != ucontrol->value.enumerated.item[0]) {
3881     - hdspm->tco->term = ucontrol->value.enumerated.item[0];
3882     + if (hdspm->tco->term != ucontrol->value.integer.value[0]) {
3883     + hdspm->tco->term = ucontrol->value.integer.value[0];
3884    
3885     hdspm_tco_write(hdspm);
3886    
3887     diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
3888     index 37d8ababfc04..a4d03e5da3e0 100644
3889     --- a/sound/usb/quirks.c
3890     +++ b/sound/usb/quirks.c
3891     @@ -1121,6 +1121,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
3892     case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
3893     case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
3894     case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
3895     + case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
3896     case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
3897     case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
3898     case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
3899     diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
3900     index 11ccbb22ea2b..13d0458afc71 100644
3901     --- a/tools/perf/MANIFEST
3902     +++ b/tools/perf/MANIFEST
3903     @@ -28,24 +28,20 @@ include/asm-generic/bitops/const_hweight.h
3904     include/asm-generic/bitops/fls64.h
3905     include/asm-generic/bitops/__fls.h
3906     include/asm-generic/bitops/fls.h
3907     -include/linux/const.h
3908     include/linux/perf_event.h
3909     include/linux/rbtree.h
3910     include/linux/list.h
3911     include/linux/hash.h
3912     include/linux/stringify.h
3913     -lib/find_next_bit.c
3914     lib/hweight.c
3915     lib/rbtree.c
3916     include/linux/swab.h
3917     arch/*/include/asm/unistd*.h
3918     -arch/*/include/asm/perf_regs.h
3919     arch/*/include/uapi/asm/unistd*.h
3920     arch/*/include/uapi/asm/perf_regs.h
3921     arch/*/lib/memcpy*.S
3922     arch/*/lib/memset*.S
3923     include/linux/poison.h
3924     -include/linux/magic.h
3925     include/linux/hw_breakpoint.h
3926     include/linux/rbtree_augmented.h
3927     include/uapi/linux/perf_event.h
3928     diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
3929     index 950064a0942d..934d56f6803c 100644
3930     --- a/virt/kvm/arm/vgic.c
3931     +++ b/virt/kvm/arm/vgic.c
3932     @@ -1602,8 +1602,8 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
3933     static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
3934     {
3935     struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
3936     -
3937     - int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
3938     + int nr_longs = BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);
3939     + int sz = nr_longs * sizeof(unsigned long);
3940     vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
3941     vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL);
3942     vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL);
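A quick worked check, not part of this patch, of the new sizing: these bitmaps are arrays of unsigned long, so the byte count must round up to whole longs; dividing the bit count by 8 rounds down and under-allocates whenever it is not a multiple of BITS_PER_LONG. BITS_TO_LONGS() is modelled below as a round-up division, and the 36-bit count is a made-up example.

    #include <stdio.h>

    #define BITS_PER_LONG_SK    (8 * sizeof(unsigned long))
    #define BITS_TO_LONGS_SK(n) (((n) + BITS_PER_LONG_SK - 1) / BITS_PER_LONG_SK)

    int main(void)
    {
            unsigned int nr_shared = 36;  /* hypothetical: not a multiple of 64 */

            printf("old: %u bytes\n", nr_shared / 8);                                        /* 4: too small for 36 bits */
            printf("new: %zu bytes\n", BITS_TO_LONGS_SK(nr_shared) * sizeof(unsigned long)); /* 8 on LP64 */
            return 0;
    }
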
3943     diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
3944     index 44660aee335f..f84f5856520a 100644
3945     --- a/virt/kvm/async_pf.c
3946     +++ b/virt/kvm/async_pf.c
3947     @@ -169,7 +169,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
3948     * do alloc nowait since if we are going to sleep anyway we
3949     * may as well sleep faulting in page
3950     */
3951     - work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT);
3952     + work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
3953     if (!work)
3954     return 0;
3955