Magellan Linux

Annotation of /trunk/kernel-alx-legacy/patches-4.9/0128-4.9.29-all-fixes.patch

Parent Directory | Revision Log


Revision 3608 - (hide annotations) (download)
Fri Aug 14 07:34:29 2020 UTC (3 years, 8 months ago) by niro
File size: 99092 byte(s)
-added kernel-alx-legacy pkg
1 niro 3608 diff --git a/Makefile b/Makefile
2     index 9460a63087b8..c12e501a18b8 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 4
7     PATCHLEVEL = 9
8     -SUBLEVEL = 28
9     +SUBLEVEL = 29
10     EXTRAVERSION =
11     NAME = Roaring Lionus
12    
13     diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
14     index c2b131527a64..a08d7a93aebb 100644
15     --- a/arch/arm/kvm/psci.c
16     +++ b/arch/arm/kvm/psci.c
17     @@ -208,9 +208,10 @@ int kvm_psci_version(struct kvm_vcpu *vcpu)
18    
19     static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
20     {
21     - int ret = 1;
22     + struct kvm *kvm = vcpu->kvm;
23     unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
24     unsigned long val;
25     + int ret = 1;
26    
27     switch (psci_fn) {
28     case PSCI_0_2_FN_PSCI_VERSION:
29     @@ -230,7 +231,9 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
30     break;
31     case PSCI_0_2_FN_CPU_ON:
32     case PSCI_0_2_FN64_CPU_ON:
33     + mutex_lock(&kvm->lock);
34     val = kvm_psci_vcpu_on(vcpu);
35     + mutex_unlock(&kvm->lock);
36     break;
37     case PSCI_0_2_FN_AFFINITY_INFO:
38     case PSCI_0_2_FN64_AFFINITY_INFO:
39     @@ -279,6 +282,7 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
40    
41     static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
42     {
43     + struct kvm *kvm = vcpu->kvm;
44     unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
45     unsigned long val;
46    
47     @@ -288,7 +292,9 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
48     val = PSCI_RET_SUCCESS;
49     break;
50     case KVM_PSCI_FN_CPU_ON:
51     + mutex_lock(&kvm->lock);
52     val = kvm_psci_vcpu_on(vcpu);
53     + mutex_unlock(&kvm->lock);
54     break;
55     default:
56     val = PSCI_RET_NOT_SUPPORTED;
57     diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
58     index f5ea0ba70f07..fe39e6841326 100644
59     --- a/arch/arm64/include/asm/kvm_emulate.h
60     +++ b/arch/arm64/include/asm/kvm_emulate.h
61     @@ -240,6 +240,12 @@ static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
62     return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
63     }
64    
65     +static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
66     +{
67     + u32 esr = kvm_vcpu_get_hsr(vcpu);
68     + return (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
69     +}
70     +
71     static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
72     {
73     return vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
74     diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
75     index 87e7e6608cd8..7cee552ce0bf 100644
76     --- a/arch/arm64/kvm/sys_regs.c
77     +++ b/arch/arm64/kvm/sys_regs.c
78     @@ -1573,8 +1573,8 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
79     {
80     struct sys_reg_params params;
81     u32 hsr = kvm_vcpu_get_hsr(vcpu);
82     - int Rt = (hsr >> 5) & 0xf;
83     - int Rt2 = (hsr >> 10) & 0xf;
84     + int Rt = kvm_vcpu_sys_get_rt(vcpu);
85     + int Rt2 = (hsr >> 10) & 0x1f;
86    
87     params.is_aarch32 = true;
88     params.is_32bit = false;
89     @@ -1625,7 +1625,7 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
90     {
91     struct sys_reg_params params;
92     u32 hsr = kvm_vcpu_get_hsr(vcpu);
93     - int Rt = (hsr >> 5) & 0xf;
94     + int Rt = kvm_vcpu_sys_get_rt(vcpu);
95    
96     params.is_aarch32 = true;
97     params.is_32bit = true;
98     @@ -1740,7 +1740,7 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
99     {
100     struct sys_reg_params params;
101     unsigned long esr = kvm_vcpu_get_hsr(vcpu);
102     - int Rt = (esr >> 5) & 0x1f;
103     + int Rt = kvm_vcpu_sys_get_rt(vcpu);
104     int ret;
105    
106     trace_kvm_handle_sys_reg(esr);
107     diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
108     index 34d2c595de23..73622673eee3 100644
109     --- a/arch/powerpc/kernel/nvram_64.c
110     +++ b/arch/powerpc/kernel/nvram_64.c
111     @@ -561,6 +561,7 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
112     static struct pstore_info nvram_pstore_info = {
113     .owner = THIS_MODULE,
114     .name = "nvram",
115     + .flags = PSTORE_FLAGS_DMESG,
116     .open = nvram_pstore_open,
117     .read = nvram_pstore_read,
118     .write = nvram_pstore_write,
119     diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
120     index e5612f3e3b57..d7ac721a8a96 100644
121     --- a/arch/x86/boot/boot.h
122     +++ b/arch/x86/boot/boot.h
123     @@ -16,7 +16,7 @@
124     #ifndef BOOT_BOOT_H
125     #define BOOT_BOOT_H
126    
127     -#define STACK_SIZE 512 /* Minimum number of bytes for stack */
128     +#define STACK_SIZE 1024 /* Minimum number of bytes for stack */
129    
130     #ifndef __ASSEMBLY__
131    
132     diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
133     index 0a535cea8ff3..8b902b67342a 100644
134     --- a/arch/x86/events/intel/rapl.c
135     +++ b/arch/x86/events/intel/rapl.c
136     @@ -759,7 +759,7 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
137    
138     X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE, hsw_rapl_init),
139     X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E, hsw_rapl_init),
140     - X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, hsw_rapl_init),
141     + X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, hsx_rapl_init),
142     X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, hsw_rapl_init),
143    
144     X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_rapl_init),
145     diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
146     index 529bb4a6487a..e2904373010d 100644
147     --- a/arch/x86/include/asm/pmem.h
148     +++ b/arch/x86/include/asm/pmem.h
149     @@ -103,7 +103,7 @@ static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
150    
151     if (bytes < 8) {
152     if (!IS_ALIGNED(dest, 4) || (bytes != 4))
153     - arch_wb_cache_pmem(addr, 1);
154     + arch_wb_cache_pmem(addr, bytes);
155     } else {
156     if (!IS_ALIGNED(dest, 8)) {
157     dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
158     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
159     index e5bc139d1ba7..43c152853969 100644
160     --- a/arch/x86/kvm/x86.c
161     +++ b/arch/x86/kvm/x86.c
162     @@ -3051,6 +3051,12 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
163     (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
164     return -EINVAL;
165    
166     + /* INITs are latched while in SMM */
167     + if (events->flags & KVM_VCPUEVENT_VALID_SMM &&
168     + (events->smi.smm || events->smi.pending) &&
169     + vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
170     + return -EINVAL;
171     +
172     process_nmi(vcpu);
173     vcpu->arch.exception.pending = events->exception.injected;
174     vcpu->arch.exception.nr = events->exception.nr;
175     @@ -7162,6 +7168,12 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
176     mp_state->mp_state != KVM_MP_STATE_RUNNABLE)
177     return -EINVAL;
178    
179     + /* INITs are latched while in SMM */
180     + if ((is_smm(vcpu) || vcpu->arch.smi_pending) &&
181     + (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED ||
182     + mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED))
183     + return -EINVAL;
184     +
185     if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
186     vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
187     set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
188     diff --git a/arch/x86/um/ptrace_64.c b/arch/x86/um/ptrace_64.c
189     index e30202b1716e..7c1601798169 100644
190     --- a/arch/x86/um/ptrace_64.c
191     +++ b/arch/x86/um/ptrace_64.c
192     @@ -125,7 +125,7 @@ int poke_user(struct task_struct *child, long addr, long data)
193     else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
194     (addr <= offsetof(struct user, u_debugreg[7]))) {
195     addr -= offsetof(struct user, u_debugreg[0]);
196     - addr = addr >> 2;
197     + addr = addr >> 3;
198     if ((addr == 4) || (addr == 5))
199     return -EIO;
200     child->thread.arch.debugregs[addr] = data;
201     diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
202     index 7d5afdb417cc..418f1b8576cf 100644
203     --- a/arch/x86/xen/mmu.c
204     +++ b/arch/x86/xen/mmu.c
205     @@ -2028,7 +2028,8 @@ static unsigned long __init xen_read_phys_ulong(phys_addr_t addr)
206    
207     /*
208     * Translate a virtual address to a physical one without relying on mapped
209     - * page tables.
210     + * page tables. Don't rely on big pages being aligned in (guest) physical
211     + * space!
212     */
213     static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
214     {
215     @@ -2049,7 +2050,7 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
216     sizeof(pud)));
217     if (!pud_present(pud))
218     return 0;
219     - pa = pud_pfn(pud) << PAGE_SHIFT;
220     + pa = pud_val(pud) & PTE_PFN_MASK;
221     if (pud_large(pud))
222     return pa + (vaddr & ~PUD_MASK);
223    
224     @@ -2057,7 +2058,7 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
225     sizeof(pmd)));
226     if (!pmd_present(pmd))
227     return 0;
228     - pa = pmd_pfn(pmd) << PAGE_SHIFT;
229     + pa = pmd_val(pmd) & PTE_PFN_MASK;
230     if (pmd_large(pmd))
231     return pa + (vaddr & ~PMD_MASK);
232    
233     diff --git a/block/blk-integrity.c b/block/blk-integrity.c
234     index 319f2e4f4a8b..478f572cb1e7 100644
235     --- a/block/blk-integrity.c
236     +++ b/block/blk-integrity.c
237     @@ -412,7 +412,8 @@ void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template
238    
239     bi->flags = BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE |
240     template->flags;
241     - bi->interval_exp = ilog2(queue_logical_block_size(disk->queue));
242     + bi->interval_exp = template->interval_exp ? :
243     + ilog2(queue_logical_block_size(disk->queue));
244     bi->profile = template->profile ? template->profile : &nop_profile;
245     bi->tuple_size = template->tuple_size;
246     bi->tag_size = template->tag_size;
247     diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
248     index fde8d885f7b6..6c11537ca404 100644
249     --- a/crypto/algif_aead.c
250     +++ b/crypto/algif_aead.c
251     @@ -44,6 +44,11 @@ struct aead_async_req {
252     char iv[];
253     };
254    
255     +struct aead_tfm {
256     + struct crypto_aead *aead;
257     + bool has_key;
258     +};
259     +
260     struct aead_ctx {
261     struct aead_sg_list tsgl;
262     struct aead_async_rsgl first_rsgl;
263     @@ -732,24 +737,146 @@ static struct proto_ops algif_aead_ops = {
264     .poll = aead_poll,
265     };
266    
267     +static int aead_check_key(struct socket *sock)
268     +{
269     + int err = 0;
270     + struct sock *psk;
271     + struct alg_sock *pask;
272     + struct aead_tfm *tfm;
273     + struct sock *sk = sock->sk;
274     + struct alg_sock *ask = alg_sk(sk);
275     +
276     + lock_sock(sk);
277     + if (ask->refcnt)
278     + goto unlock_child;
279     +
280     + psk = ask->parent;
281     + pask = alg_sk(ask->parent);
282     + tfm = pask->private;
283     +
284     + err = -ENOKEY;
285     + lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
286     + if (!tfm->has_key)
287     + goto unlock;
288     +
289     + if (!pask->refcnt++)
290     + sock_hold(psk);
291     +
292     + ask->refcnt = 1;
293     + sock_put(psk);
294     +
295     + err = 0;
296     +
297     +unlock:
298     + release_sock(psk);
299     +unlock_child:
300     + release_sock(sk);
301     +
302     + return err;
303     +}
304     +
305     +static int aead_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
306     + size_t size)
307     +{
308     + int err;
309     +
310     + err = aead_check_key(sock);
311     + if (err)
312     + return err;
313     +
314     + return aead_sendmsg(sock, msg, size);
315     +}
316     +
317     +static ssize_t aead_sendpage_nokey(struct socket *sock, struct page *page,
318     + int offset, size_t size, int flags)
319     +{
320     + int err;
321     +
322     + err = aead_check_key(sock);
323     + if (err)
324     + return err;
325     +
326     + return aead_sendpage(sock, page, offset, size, flags);
327     +}
328     +
329     +static int aead_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
330     + size_t ignored, int flags)
331     +{
332     + int err;
333     +
334     + err = aead_check_key(sock);
335     + if (err)
336     + return err;
337     +
338     + return aead_recvmsg(sock, msg, ignored, flags);
339     +}
340     +
341     +static struct proto_ops algif_aead_ops_nokey = {
342     + .family = PF_ALG,
343     +
344     + .connect = sock_no_connect,
345     + .socketpair = sock_no_socketpair,
346     + .getname = sock_no_getname,
347     + .ioctl = sock_no_ioctl,
348     + .listen = sock_no_listen,
349     + .shutdown = sock_no_shutdown,
350     + .getsockopt = sock_no_getsockopt,
351     + .mmap = sock_no_mmap,
352     + .bind = sock_no_bind,
353     + .accept = sock_no_accept,
354     + .setsockopt = sock_no_setsockopt,
355     +
356     + .release = af_alg_release,
357     + .sendmsg = aead_sendmsg_nokey,
358     + .sendpage = aead_sendpage_nokey,
359     + .recvmsg = aead_recvmsg_nokey,
360     + .poll = aead_poll,
361     +};
362     +
363     static void *aead_bind(const char *name, u32 type, u32 mask)
364     {
365     - return crypto_alloc_aead(name, type, mask);
366     + struct aead_tfm *tfm;
367     + struct crypto_aead *aead;
368     +
369     + tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
370     + if (!tfm)
371     + return ERR_PTR(-ENOMEM);
372     +
373     + aead = crypto_alloc_aead(name, type, mask);
374     + if (IS_ERR(aead)) {
375     + kfree(tfm);
376     + return ERR_CAST(aead);
377     + }
378     +
379     + tfm->aead = aead;
380     +
381     + return tfm;
382     }
383    
384     static void aead_release(void *private)
385     {
386     - crypto_free_aead(private);
387     + struct aead_tfm *tfm = private;
388     +
389     + crypto_free_aead(tfm->aead);
390     + kfree(tfm);
391     }
392    
393     static int aead_setauthsize(void *private, unsigned int authsize)
394     {
395     - return crypto_aead_setauthsize(private, authsize);
396     + struct aead_tfm *tfm = private;
397     +
398     + return crypto_aead_setauthsize(tfm->aead, authsize);
399     }
400    
401     static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
402     {
403     - return crypto_aead_setkey(private, key, keylen);
404     + struct aead_tfm *tfm = private;
405     + int err;
406     +
407     + err = crypto_aead_setkey(tfm->aead, key, keylen);
408     + tfm->has_key = !err;
409     +
410     + return err;
411     }
412    
413     static void aead_sock_destruct(struct sock *sk)
414     @@ -766,12 +893,14 @@ static void aead_sock_destruct(struct sock *sk)
415     af_alg_release_parent(sk);
416     }
417    
418     -static int aead_accept_parent(void *private, struct sock *sk)
419     +static int aead_accept_parent_nokey(void *private, struct sock *sk)
420     {
421     struct aead_ctx *ctx;
422     struct alg_sock *ask = alg_sk(sk);
423     - unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(private);
424     - unsigned int ivlen = crypto_aead_ivsize(private);
425     + struct aead_tfm *tfm = private;
426     + struct crypto_aead *aead = tfm->aead;
427     + unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(aead);
428     + unsigned int ivlen = crypto_aead_ivsize(aead);
429    
430     ctx = sock_kmalloc(sk, len, GFP_KERNEL);
431     if (!ctx)
432     @@ -798,7 +927,7 @@ static int aead_accept_parent(void *private, struct sock *sk)
433    
434     ask->private = ctx;
435    
436     - aead_request_set_tfm(&ctx->aead_req, private);
437     + aead_request_set_tfm(&ctx->aead_req, aead);
438     aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
439     af_alg_complete, &ctx->completion);
440    
441     @@ -807,13 +936,25 @@ static int aead_accept_parent(void *private, struct sock *sk)
442     return 0;
443     }
444    
445     +static int aead_accept_parent(void *private, struct sock *sk)
446     +{
447     + struct aead_tfm *tfm = private;
448     +
449     + if (!tfm->has_key)
450     + return -ENOKEY;
451     +
452     + return aead_accept_parent_nokey(private, sk);
453     +}
454     +
455     static const struct af_alg_type algif_type_aead = {
456     .bind = aead_bind,
457     .release = aead_release,
458     .setkey = aead_setkey,
459     .setauthsize = aead_setauthsize,
460     .accept = aead_accept_parent,
461     + .accept_nokey = aead_accept_parent_nokey,
462     .ops = &algif_aead_ops,
463     + .ops_nokey = &algif_aead_ops_nokey,
464     .name = "aead",
465     .owner = THIS_MODULE
466     };
467     diff --git a/drivers/Makefile b/drivers/Makefile
468     index 194d20bee7dc..733bf0b2613f 100644
469     --- a/drivers/Makefile
470     +++ b/drivers/Makefile
471     @@ -101,6 +101,7 @@ obj-$(CONFIG_USB_PHY) += usb/
472     obj-$(CONFIG_USB) += usb/
473     obj-$(CONFIG_PCI) += usb/
474     obj-$(CONFIG_USB_GADGET) += usb/
475     +obj-$(CONFIG_OF) += usb/
476     obj-$(CONFIG_SERIO) += input/serio/
477     obj-$(CONFIG_GAMEPORT) += input/gameport/
478     obj-$(CONFIG_INPUT) += input/
479     diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
480     index 8f6c23c20c52..deed58013555 100644
481     --- a/drivers/bluetooth/hci_bcm.c
482     +++ b/drivers/bluetooth/hci_bcm.c
483     @@ -287,6 +287,9 @@ static int bcm_open(struct hci_uart *hu)
484    
485     hu->priv = bcm;
486    
487     + if (!hu->tty->dev)
488     + goto out;
489     +
490     mutex_lock(&bcm_device_lock);
491     list_for_each(p, &bcm_device_list) {
492     struct bcm_device *dev = list_entry(p, struct bcm_device, list);
493     @@ -307,7 +310,7 @@ static int bcm_open(struct hci_uart *hu)
494     }
495    
496     mutex_unlock(&bcm_device_lock);
497     -
498     +out:
499     return 0;
500     }
501    
502     diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
503     index 9e271286c5e5..73306384af6c 100644
504     --- a/drivers/bluetooth/hci_intel.c
505     +++ b/drivers/bluetooth/hci_intel.c
506     @@ -307,6 +307,9 @@ static int intel_set_power(struct hci_uart *hu, bool powered)
507     struct list_head *p;
508     int err = -ENODEV;
509    
510     + if (!hu->tty->dev)
511     + return err;
512     +
513     mutex_lock(&intel_device_list_lock);
514    
515     list_for_each(p, &intel_device_list) {
516     @@ -379,6 +382,9 @@ static void intel_busy_work(struct work_struct *work)
517     struct intel_data *intel = container_of(work, struct intel_data,
518     busy_work);
519    
520     + if (!intel->hu->tty->dev)
521     + return;
522     +
523     /* Link is busy, delay the suspend */
524     mutex_lock(&intel_device_list_lock);
525     list_for_each(p, &intel_device_list) {
526     @@ -889,6 +895,8 @@ static int intel_setup(struct hci_uart *hu)
527     list_for_each(p, &intel_device_list) {
528     struct intel_device *dev = list_entry(p, struct intel_device,
529     list);
530     + if (!hu->tty->dev)
531     + break;
532     if (hu->tty->dev->parent == dev->pdev->dev.parent) {
533     if (device_may_wakeup(&dev->pdev->dev)) {
534     set_bit(STATE_LPM_ENABLED, &intel->flags);
535     @@ -1056,6 +1064,9 @@ static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb)
536    
537     BT_DBG("hu %p skb %p", hu, skb);
538    
539     + if (!hu->tty->dev)
540     + goto out_enqueue;
541     +
542     /* Be sure our controller is resumed and potential LPM transaction
543     * completed before enqueuing any packet.
544     */
545     @@ -1072,7 +1083,7 @@ static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb)
546     }
547     }
548     mutex_unlock(&intel_device_list_lock);
549     -
550     +out_enqueue:
551     skb_queue_tail(&intel->txq, skb);
552    
553     return 0;
554     diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
555     index 5673ffff00be..6958b5ce9145 100644
556     --- a/drivers/char/ipmi/ipmi_ssif.c
557     +++ b/drivers/char/ipmi/ipmi_ssif.c
558     @@ -892,6 +892,7 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
559     * for details on the intricacies of this.
560     */
561     int left;
562     + unsigned char *data_to_send;
563    
564     ssif_inc_stat(ssif_info, sent_messages_parts);
565    
566     @@ -900,6 +901,7 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
567     left = 32;
568     /* Length byte. */
569     ssif_info->multi_data[ssif_info->multi_pos] = left;
570     + data_to_send = ssif_info->multi_data + ssif_info->multi_pos;
571     ssif_info->multi_pos += left;
572     if (left < 32)
573     /*
574     @@ -913,7 +915,7 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
575     rv = ssif_i2c_send(ssif_info, msg_written_handler,
576     I2C_SMBUS_WRITE,
577     SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE,
578     - ssif_info->multi_data + ssif_info->multi_pos,
579     + data_to_send,
580     I2C_SMBUS_BLOCK_DATA);
581     if (rv < 0) {
582     /* request failed, just return the error. */
583     diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
584     index 8d2dbacc6161..e68966bbfa58 100644
585     --- a/drivers/crypto/ccp/ccp-dev-v3.c
586     +++ b/drivers/crypto/ccp/ccp-dev-v3.c
587     @@ -315,17 +315,73 @@ static int ccp_perform_ecc(struct ccp_op *op)
588     return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
589     }
590    
591     +static void ccp_disable_queue_interrupts(struct ccp_device *ccp)
592     +{
593     + iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
594     +}
595     +
596     +static void ccp_enable_queue_interrupts(struct ccp_device *ccp)
597     +{
598     + iowrite32(ccp->qim, ccp->io_regs + IRQ_MASK_REG);
599     +}
600     +
601     +static void ccp_irq_bh(unsigned long data)
602     +{
603     + struct ccp_device *ccp = (struct ccp_device *)data;
604     + struct ccp_cmd_queue *cmd_q;
605     + u32 q_int, status;
606     + unsigned int i;
607     +
608     + status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
609     +
610     + for (i = 0; i < ccp->cmd_q_count; i++) {
611     + cmd_q = &ccp->cmd_q[i];
612     +
613     + q_int = status & (cmd_q->int_ok | cmd_q->int_err);
614     + if (q_int) {
615     + cmd_q->int_status = status;
616     + cmd_q->q_status = ioread32(cmd_q->reg_status);
617     + cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
618     +
619     + /* On error, only save the first error value */
620     + if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
621     + cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
622     +
623     + cmd_q->int_rcvd = 1;
624     +
625     + /* Acknowledge the interrupt and wake the kthread */
626     + iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
627     + wake_up_interruptible(&cmd_q->int_queue);
628     + }
629     + }
630     + ccp_enable_queue_interrupts(ccp);
631     +}
632     +
633     +static irqreturn_t ccp_irq_handler(int irq, void *data)
634     +{
635     + struct device *dev = data;
636     + struct ccp_device *ccp = dev_get_drvdata(dev);
637     +
638     + ccp_disable_queue_interrupts(ccp);
639     + if (ccp->use_tasklet)
640     + tasklet_schedule(&ccp->irq_tasklet);
641     + else
642     + ccp_irq_bh((unsigned long)ccp);
643     +
644     + return IRQ_HANDLED;
645     +}
646     +
647     static int ccp_init(struct ccp_device *ccp)
648     {
649     struct device *dev = ccp->dev;
650     struct ccp_cmd_queue *cmd_q;
651     struct dma_pool *dma_pool;
652     char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
653     - unsigned int qmr, qim, i;
654     + unsigned int qmr, i;
655     int ret;
656    
657     /* Find available queues */
658     - qim = 0;
659     + ccp->qim = 0;
660     qmr = ioread32(ccp->io_regs + Q_MASK_REG);
661     for (i = 0; i < MAX_HW_QUEUES; i++) {
662     if (!(qmr & (1 << i)))
663     @@ -370,7 +426,7 @@ static int ccp_init(struct ccp_device *ccp)
664     init_waitqueue_head(&cmd_q->int_queue);
665    
666     /* Build queue interrupt mask (two interrupts per queue) */
667     - qim |= cmd_q->int_ok | cmd_q->int_err;
668     + ccp->qim |= cmd_q->int_ok | cmd_q->int_err;
669    
670     #ifdef CONFIG_ARM64
671     /* For arm64 set the recommended queue cache settings */
672     @@ -388,14 +444,14 @@ static int ccp_init(struct ccp_device *ccp)
673     dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);
674    
675     /* Disable and clear interrupts until ready */
676     - iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
677     + ccp_disable_queue_interrupts(ccp);
678     for (i = 0; i < ccp->cmd_q_count; i++) {
679     cmd_q = &ccp->cmd_q[i];
680    
681     ioread32(cmd_q->reg_int_status);
682     ioread32(cmd_q->reg_status);
683     }
684     - iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
685     + iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);
686    
687     /* Request an irq */
688     ret = ccp->get_irq(ccp);
689     @@ -408,6 +464,11 @@ static int ccp_init(struct ccp_device *ccp)
690     init_waitqueue_head(&ccp->sb_queue);
691     init_waitqueue_head(&ccp->suspend_queue);
692    
693     + /* Initialize the ISR tasklet? */
694     + if (ccp->use_tasklet)
695     + tasklet_init(&ccp->irq_tasklet, ccp_irq_bh,
696     + (unsigned long)ccp);
697     +
698     dev_dbg(dev, "Starting threads...\n");
699     /* Create a kthread for each queue */
700     for (i = 0; i < ccp->cmd_q_count; i++) {
701     @@ -430,7 +491,7 @@ static int ccp_init(struct ccp_device *ccp)
702    
703     dev_dbg(dev, "Enabling interrupts...\n");
704     /* Enable interrupts */
705     - iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);
706     + ccp_enable_queue_interrupts(ccp);
707    
708     dev_dbg(dev, "Registering device...\n");
709     ccp_add_device(ccp);
710     @@ -467,7 +528,7 @@ static void ccp_destroy(struct ccp_device *ccp)
711     {
712     struct ccp_cmd_queue *cmd_q;
713     struct ccp_cmd *cmd;
714     - unsigned int qim, i;
715     + unsigned int i;
716    
717     /* Unregister the DMA engine */
718     ccp_dmaengine_unregister(ccp);
719     @@ -478,22 +539,15 @@ static void ccp_destroy(struct ccp_device *ccp)
720     /* Remove this device from the list of available units */
721     ccp_del_device(ccp);
722    
723     - /* Build queue interrupt mask (two interrupt masks per queue) */
724     - qim = 0;
725     - for (i = 0; i < ccp->cmd_q_count; i++) {
726     - cmd_q = &ccp->cmd_q[i];
727     - qim |= cmd_q->int_ok | cmd_q->int_err;
728     - }
729     -
730     /* Disable and clear interrupts */
731     - iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
732     + ccp_disable_queue_interrupts(ccp);
733     for (i = 0; i < ccp->cmd_q_count; i++) {
734     cmd_q = &ccp->cmd_q[i];
735    
736     ioread32(cmd_q->reg_int_status);
737     ioread32(cmd_q->reg_status);
738     }
739     - iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
740     + iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);
741    
742     /* Stop the queue kthreads */
743     for (i = 0; i < ccp->cmd_q_count; i++)
744     @@ -520,40 +574,6 @@ static void ccp_destroy(struct ccp_device *ccp)
745     }
746     }
747    
748     -static irqreturn_t ccp_irq_handler(int irq, void *data)
749     -{
750     - struct device *dev = data;
751     - struct ccp_device *ccp = dev_get_drvdata(dev);
752     - struct ccp_cmd_queue *cmd_q;
753     - u32 q_int, status;
754     - unsigned int i;
755     -
756     - status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
757     -
758     - for (i = 0; i < ccp->cmd_q_count; i++) {
759     - cmd_q = &ccp->cmd_q[i];
760     -
761     - q_int = status & (cmd_q->int_ok | cmd_q->int_err);
762     - if (q_int) {
763     - cmd_q->int_status = status;
764     - cmd_q->q_status = ioread32(cmd_q->reg_status);
765     - cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
766     -
767     - /* On error, only save the first error value */
768     - if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
769     - cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
770     -
771     - cmd_q->int_rcvd = 1;
772     -
773     - /* Acknowledge the interrupt and wake the kthread */
774     - iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
775     - wake_up_interruptible(&cmd_q->int_queue);
776     - }
777     - }
778     -
779     - return IRQ_HANDLED;
780     -}
781     -
782     static const struct ccp_actions ccp3_actions = {
783     .aes = ccp_perform_aes,
784     .xts_aes = ccp_perform_xts_aes,
785     diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
786     index a388bf2d67f4..2c0ce5f605b3 100644
787     --- a/drivers/crypto/ccp/ccp-dev-v5.c
788     +++ b/drivers/crypto/ccp/ccp-dev-v5.c
789     @@ -644,6 +644,65 @@ static int ccp_assign_lsbs(struct ccp_device *ccp)
790     return rc;
791     }
792    
793     +static void ccp5_disable_queue_interrupts(struct ccp_device *ccp)
794     +{
795     + unsigned int i;
796     +
797     + for (i = 0; i < ccp->cmd_q_count; i++)
798     + iowrite32(0x0, ccp->cmd_q[i].reg_int_enable);
799     +}
800     +
801     +static void ccp5_enable_queue_interrupts(struct ccp_device *ccp)
802     +{
803     + unsigned int i;
804     +
805     + for (i = 0; i < ccp->cmd_q_count; i++)
806     + iowrite32(SUPPORTED_INTERRUPTS, ccp->cmd_q[i].reg_int_enable);
807     +}
808     +
809     +static void ccp5_irq_bh(unsigned long data)
810     +{
811     + struct ccp_device *ccp = (struct ccp_device *)data;
812     + u32 status;
813     + unsigned int i;
814     +
815     + for (i = 0; i < ccp->cmd_q_count; i++) {
816     + struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];
817     +
818     + status = ioread32(cmd_q->reg_interrupt_status);
819     +
820     + if (status) {
821     + cmd_q->int_status = status;
822     + cmd_q->q_status = ioread32(cmd_q->reg_status);
823     + cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
824     +
825     + /* On error, only save the first error value */
826     + if ((status & INT_ERROR) && !cmd_q->cmd_error)
827     + cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
828     +
829     + cmd_q->int_rcvd = 1;
830     +
831     + /* Acknowledge the interrupt and wake the kthread */
832     + iowrite32(status, cmd_q->reg_interrupt_status);
833     + wake_up_interruptible(&cmd_q->int_queue);
834     + }
835     + }
836     + ccp5_enable_queue_interrupts(ccp);
837     +}
838     +
839     +static irqreturn_t ccp5_irq_handler(int irq, void *data)
840     +{
841     + struct device *dev = data;
842     + struct ccp_device *ccp = dev_get_drvdata(dev);
843     +
844     + ccp5_disable_queue_interrupts(ccp);
845     + if (ccp->use_tasklet)
846     + tasklet_schedule(&ccp->irq_tasklet);
847     + else
848     + ccp5_irq_bh((unsigned long)ccp);
849     + return IRQ_HANDLED;
850     +}
851     +
852     static int ccp5_init(struct ccp_device *ccp)
853     {
854     struct device *dev = ccp->dev;
855     @@ -728,19 +787,18 @@ static int ccp5_init(struct ccp_device *ccp)
856     dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);
857    
858     /* Turn off the queues and disable interrupts until ready */
859     + ccp5_disable_queue_interrupts(ccp);
860     for (i = 0; i < ccp->cmd_q_count; i++) {
861     cmd_q = &ccp->cmd_q[i];
862    
863     cmd_q->qcontrol = 0; /* Start with nothing */
864     iowrite32(cmd_q->qcontrol, cmd_q->reg_control);
865    
866     - /* Disable the interrupts */
867     - iowrite32(0x00, cmd_q->reg_int_enable);
868     ioread32(cmd_q->reg_int_status);
869     ioread32(cmd_q->reg_status);
870    
871     - /* Clear the interrupts */
872     - iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status);
873     + /* Clear the interrupt status */
874     + iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
875     }
876    
877     dev_dbg(dev, "Requesting an IRQ...\n");
878     @@ -750,6 +808,10 @@ static int ccp5_init(struct ccp_device *ccp)
879     dev_err(dev, "unable to allocate an IRQ\n");
880     goto e_pool;
881     }
882     + /* Initialize the ISR tasklet */
883     + if (ccp->use_tasklet)
884     + tasklet_init(&ccp->irq_tasklet, ccp5_irq_bh,
885     + (unsigned long)ccp);
886    
887     /* Initialize the queue used to suspend */
888     init_waitqueue_head(&ccp->suspend_queue);
889     @@ -821,11 +883,7 @@ static int ccp5_init(struct ccp_device *ccp)
890     }
891    
892     dev_dbg(dev, "Enabling interrupts...\n");
893     - /* Enable interrupts */
894     - for (i = 0; i < ccp->cmd_q_count; i++) {
895     - cmd_q = &ccp->cmd_q[i];
896     - iowrite32(ALL_INTERRUPTS, cmd_q->reg_int_enable);
897     - }
898     + ccp5_enable_queue_interrupts(ccp);
899    
900     dev_dbg(dev, "Registering device...\n");
901     /* Put this on the unit list to make it available */
902     @@ -877,17 +935,15 @@ static void ccp5_destroy(struct ccp_device *ccp)
903     ccp_del_device(ccp);
904    
905     /* Disable and clear interrupts */
906     + ccp5_disable_queue_interrupts(ccp);
907     for (i = 0; i < ccp->cmd_q_count; i++) {
908     cmd_q = &ccp->cmd_q[i];
909    
910     /* Turn off the run bit */
911     iowrite32(cmd_q->qcontrol & ~CMD5_Q_RUN, cmd_q->reg_control);
912    
913     - /* Disable the interrupts */
914     - iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status);
915     -
916     /* Clear the interrupt status */
917     - iowrite32(0x00, cmd_q->reg_int_enable);
918     + iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
919     ioread32(cmd_q->reg_int_status);
920     ioread32(cmd_q->reg_status);
921     }
922     @@ -920,38 +976,6 @@ static void ccp5_destroy(struct ccp_device *ccp)
923     }
924     }
925    
926     -static irqreturn_t ccp5_irq_handler(int irq, void *data)
927     -{
928     - struct device *dev = data;
929     - struct ccp_device *ccp = dev_get_drvdata(dev);
930     - u32 status;
931     - unsigned int i;
932     -
933     - for (i = 0; i < ccp->cmd_q_count; i++) {
934     - struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];
935     -
936     - status = ioread32(cmd_q->reg_interrupt_status);
937     -
938     - if (status) {
939     - cmd_q->int_status = status;
940     - cmd_q->q_status = ioread32(cmd_q->reg_status);
941     - cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
942     -
943     - /* On error, only save the first error value */
944     - if ((status & INT_ERROR) && !cmd_q->cmd_error)
945     - cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
946     -
947     - cmd_q->int_rcvd = 1;
948     -
949     - /* Acknowledge the interrupt and wake the kthread */
950     - iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status);
951     - wake_up_interruptible(&cmd_q->int_queue);
952     - }
953     - }
954     -
955     - return IRQ_HANDLED;
956     -}
957     -
958     static void ccp5_config(struct ccp_device *ccp)
959     {
960     /* Public side */
961     diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
962     index 340aef14d616..8ac7ae17e1f4 100644
963     --- a/drivers/crypto/ccp/ccp-dev.h
964     +++ b/drivers/crypto/ccp/ccp-dev.h
965     @@ -109,9 +109,8 @@
966     #define INT_COMPLETION 0x1
967     #define INT_ERROR 0x2
968     #define INT_QUEUE_STOPPED 0x4
969     -#define ALL_INTERRUPTS (INT_COMPLETION| \
970     - INT_ERROR| \
971     - INT_QUEUE_STOPPED)
972     +#define INT_EMPTY_QUEUE 0x8
973     +#define SUPPORTED_INTERRUPTS (INT_COMPLETION | INT_ERROR)
974    
975     #define LSB_REGION_WIDTH 5
976     #define MAX_LSB_CNT 8
977     @@ -333,7 +332,10 @@ struct ccp_device {
978     void *dev_specific;
979     int (*get_irq)(struct ccp_device *ccp);
980     void (*free_irq)(struct ccp_device *ccp);
981     + unsigned int qim;
982     unsigned int irq;
983     + bool use_tasklet;
984     + struct tasklet_struct irq_tasklet;
985    
986     /* I/O area used for device communication. The register mapping
987     * starts at an offset into the mapped bar.
988     diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c
989     index 28a9996c1085..e880d4cf4ada 100644
990     --- a/drivers/crypto/ccp/ccp-pci.c
991     +++ b/drivers/crypto/ccp/ccp-pci.c
992     @@ -69,6 +69,7 @@ static int ccp_get_msix_irqs(struct ccp_device *ccp)
993     goto e_irq;
994     }
995     }
996     + ccp->use_tasklet = true;
997    
998     return 0;
999    
1000     @@ -100,6 +101,7 @@ static int ccp_get_msi_irq(struct ccp_device *ccp)
1001     dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
1002     goto e_msi;
1003     }
1004     + ccp->use_tasklet = true;
1005    
1006     return 0;
1007    
1008     diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
1009     index 193224889e41..586f9543de73 100644
1010     --- a/drivers/dax/dax.c
1011     +++ b/drivers/dax/dax.c
1012     @@ -553,13 +553,10 @@ static void dax_dev_release(struct device *dev)
1013     kfree(dax_dev);
1014     }
1015    
1016     -static void unregister_dax_dev(void *dev)
1017     +static void kill_dax_dev(struct dax_dev *dax_dev)
1018     {
1019     - struct dax_dev *dax_dev = to_dax_dev(dev);
1020     struct cdev *cdev = &dax_dev->cdev;
1021    
1022     - dev_dbg(dev, "%s\n", __func__);
1023     -
1024     /*
1025     * Note, rcu is not protecting the liveness of dax_dev, rcu is
1026     * ensuring that any fault handlers that might have seen
1027     @@ -571,6 +568,15 @@ static void unregister_dax_dev(void *dev)
1028     synchronize_srcu(&dax_srcu);
1029     unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1);
1030     cdev_del(cdev);
1031     +}
1032     +
1033     +static void unregister_dax_dev(void *dev)
1034     +{
1035     + struct dax_dev *dax_dev = to_dax_dev(dev);
1036     +
1037     + dev_dbg(dev, "%s\n", __func__);
1038     +
1039     + kill_dax_dev(dax_dev);
1040     device_unregister(dev);
1041     }
1042    
1043     @@ -647,6 +653,7 @@ struct dax_dev *devm_create_dax_dev(struct dax_region *dax_region,
1044     dev_set_name(dev, "dax%d.%d", dax_region->id, dax_dev->id);
1045     rc = device_add(dev);
1046     if (rc) {
1047     + kill_dax_dev(dax_dev);
1048     put_device(dev);
1049     return ERR_PTR(rc);
1050     }
1051     diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
1052     index c1fb545e8d78..42de5f22da93 100644
1053     --- a/drivers/infiniband/core/sysfs.c
1054     +++ b/drivers/infiniband/core/sysfs.c
1055     @@ -1301,7 +1301,7 @@ int ib_device_register_sysfs(struct ib_device *device,
1056     free_port_list_attributes(device);
1057    
1058     err_unregister:
1059     - device_unregister(class_dev);
1060     + device_del(class_dev);
1061    
1062     err:
1063     return ret;
1064     diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
1065     index 83687646da68..0e64b52af5b2 100644
1066     --- a/drivers/infiniband/core/verbs.c
1067     +++ b/drivers/infiniband/core/verbs.c
1068     @@ -1516,7 +1516,9 @@ int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
1069    
1070     if (!qp->device->attach_mcast)
1071     return -ENOSYS;
1072     - if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
1073     + if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD ||
1074     + lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
1075     + lid == be16_to_cpu(IB_LID_PERMISSIVE))
1076     return -EINVAL;
1077    
1078     ret = qp->device->attach_mcast(qp, gid, lid);
1079     @@ -1532,7 +1534,9 @@ int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
1080    
1081     if (!qp->device->detach_mcast)
1082     return -ENOSYS;
1083     - if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
1084     + if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD ||
1085     + lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
1086     + lid == be16_to_cpu(IB_LID_PERMISSIVE))
1087     return -EINVAL;
1088    
1089     ret = qp->device->detach_mcast(qp, gid, lid);
1090     diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
1091     index a1576aea4756..9f768b48321f 100644
1092     --- a/drivers/infiniband/hw/hfi1/ruc.c
1093     +++ b/drivers/infiniband/hw/hfi1/ruc.c
1094     @@ -1,5 +1,5 @@
1095     /*
1096     - * Copyright(c) 2015, 2016 Intel Corporation.
1097     + * Copyright(c) 2015 - 2017 Intel Corporation.
1098     *
1099     * This file is provided under a dual BSD/GPLv2 license. When using or
1100     * redistributing this file, you may do so under either license.
1101     @@ -833,23 +833,29 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
1102     /* when sending, force a reschedule every one of these periods */
1103     #define SEND_RESCHED_TIMEOUT (5 * HZ) /* 5s in jiffies */
1104    
1105     +void hfi1_do_send_from_rvt(struct rvt_qp *qp)
1106     +{
1107     + hfi1_do_send(qp, false);
1108     +}
1109     +
1110     void _hfi1_do_send(struct work_struct *work)
1111     {
1112     struct iowait *wait = container_of(work, struct iowait, iowork);
1113     struct rvt_qp *qp = iowait_to_qp(wait);
1114    
1115     - hfi1_do_send(qp);
1116     + hfi1_do_send(qp, true);
1117     }
1118    
1119     /**
1120     * hfi1_do_send - perform a send on a QP
1121     * @work: contains a pointer to the QP
1122     + * @in_thread: true if in a workqueue thread
1123     *
1124     * Process entries in the send work queue until credit or queue is
1125     * exhausted. Only allow one CPU to send a packet per QP.
1126     * Otherwise, two threads could send packets out of order.
1127     */
1128     -void hfi1_do_send(struct rvt_qp *qp)
1129     +void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
1130     {
1131     struct hfi1_pkt_state ps;
1132     struct hfi1_qp_priv *priv = qp->priv;
1133     @@ -917,8 +923,10 @@ void hfi1_do_send(struct rvt_qp *qp)
1134     qp->s_hdrwords = 0;
1135     /* allow other tasks to run */
1136     if (unlikely(time_after(jiffies, timeout))) {
1137     - if (workqueue_congested(cpu,
1138     - ps.ppd->hfi1_wq)) {
1139     + if (!in_thread ||
1140     + workqueue_congested(
1141     + cpu,
1142     + ps.ppd->hfi1_wq)) {
1143     spin_lock_irqsave(
1144     &qp->s_lock,
1145     ps.flags);
1146     @@ -931,11 +939,9 @@ void hfi1_do_send(struct rvt_qp *qp)
1147     *ps.ppd->dd->send_schedule);
1148     return;
1149     }
1150     - if (!irqs_disabled()) {
1151     - cond_resched();
1152     - this_cpu_inc(
1153     - *ps.ppd->dd->send_schedule);
1154     - }
1155     + cond_resched();
1156     + this_cpu_inc(
1157     + *ps.ppd->dd->send_schedule);
1158     timeout = jiffies + (timeout_int) / 8;
1159     }
1160     spin_lock_irqsave(&qp->s_lock, ps.flags);
1161     diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
1162     index 4b7a16ceb362..01a380efea6b 100644
1163     --- a/drivers/infiniband/hw/hfi1/verbs.c
1164     +++ b/drivers/infiniband/hw/hfi1/verbs.c
1165     @@ -1,5 +1,5 @@
1166     /*
1167     - * Copyright(c) 2015, 2016 Intel Corporation.
1168     + * Copyright(c) 2015 - 2017 Intel Corporation.
1169     *
1170     * This file is provided under a dual BSD/GPLv2 license. When using or
1171     * redistributing this file, you may do so under either license.
1172     @@ -1697,7 +1697,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
1173     dd->verbs_dev.rdi.driver_f.qp_priv_free = qp_priv_free;
1174     dd->verbs_dev.rdi.driver_f.free_all_qps = free_all_qps;
1175     dd->verbs_dev.rdi.driver_f.notify_qp_reset = notify_qp_reset;
1176     - dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send;
1177     + dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send_from_rvt;
1178     dd->verbs_dev.rdi.driver_f.schedule_send = hfi1_schedule_send;
1179     dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _hfi1_schedule_send;
1180     dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = get_pmtu_from_attr;
1181     diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
1182     index 1c3815d89eb7..bac84f820a54 100644
1183     --- a/drivers/infiniband/hw/hfi1/verbs.h
1184     +++ b/drivers/infiniband/hw/hfi1/verbs.h
1185     @@ -1,5 +1,5 @@
1186     /*
1187     - * Copyright(c) 2015, 2016 Intel Corporation.
1188     + * Copyright(c) 2015 - 2017 Intel Corporation.
1189     *
1190     * This file is provided under a dual BSD/GPLv2 license. When using or
1191     * redistributing this file, you may do so under either license.
1192     @@ -372,7 +372,9 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
1193    
1194     void _hfi1_do_send(struct work_struct *work);
1195    
1196     -void hfi1_do_send(struct rvt_qp *qp);
1197     +void hfi1_do_send_from_rvt(struct rvt_qp *qp);
1198     +
1199     +void hfi1_do_send(struct rvt_qp *qp, bool in_thread);
1200    
1201     void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
1202     enum ib_wc_status status);
1203     diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
1204     index 46ad99595fd2..f2a885eee4bb 100644
1205     --- a/drivers/infiniband/hw/mlx4/main.c
1206     +++ b/drivers/infiniband/hw/mlx4/main.c
1207     @@ -2926,6 +2926,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
1208     mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
1209    
1210     err_map:
1211     + mlx4_ib_free_eqs(dev, ibdev);
1212     iounmap(ibdev->uar_map);
1213    
1214     err_uar:
1215     diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
1216     index a21d37f02f35..e6ea81c9644a 100644
1217     --- a/drivers/infiniband/hw/mlx4/mcg.c
1218     +++ b/drivers/infiniband/hw/mlx4/mcg.c
1219     @@ -1102,7 +1102,8 @@ static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy
1220     while ((p = rb_first(&ctx->mcg_table)) != NULL) {
1221     group = rb_entry(p, struct mcast_group, node);
1222     if (atomic_read(&group->refcount))
1223     - mcg_warn_group(group, "group refcount %d!!! (pointer %p)\n", atomic_read(&group->refcount), group);
1224     + mcg_debug_group(group, "group refcount %d!!! (pointer %p)\n",
1225     + atomic_read(&group->refcount), group);
1226    
1227     force_clean_group(group);
1228     }
1229     diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
1230     index 6bd5740e2691..09396bd7b02d 100644
1231     --- a/drivers/infiniband/ulp/ipoib/ipoib_fs.c
1232     +++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
1233     @@ -281,8 +281,11 @@ void ipoib_delete_debug_files(struct net_device *dev)
1234     {
1235     struct ipoib_dev_priv *priv = netdev_priv(dev);
1236    
1237     + WARN_ONCE(!priv->mcg_dentry, "null mcg debug file\n");
1238     + WARN_ONCE(!priv->path_dentry, "null path debug file\n");
1239     debugfs_remove(priv->mcg_dentry);
1240     debugfs_remove(priv->path_dentry);
1241     + priv->mcg_dentry = priv->path_dentry = NULL;
1242     }
1243    
1244     int ipoib_register_debugfs(void)
1245     diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
1246     index 3ef7b8f049c4..08c4b0287304 100644
1247     --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
1248     +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
1249     @@ -108,6 +108,33 @@ static struct ib_client ipoib_client = {
1250     .get_net_dev_by_params = ipoib_get_net_dev_by_params,
1251     };
1252    
1253     +#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
1254     +static int ipoib_netdev_event(struct notifier_block *this,
1255     + unsigned long event, void *ptr)
1256     +{
1257     + struct netdev_notifier_info *ni = ptr;
1258     + struct net_device *dev = ni->dev;
1259     +
1260     + if (dev->netdev_ops->ndo_open != ipoib_open)
1261     + return NOTIFY_DONE;
1262     +
1263     + switch (event) {
1264     + case NETDEV_REGISTER:
1265     + ipoib_create_debug_files(dev);
1266     + break;
1267     + case NETDEV_CHANGENAME:
1268     + ipoib_delete_debug_files(dev);
1269     + ipoib_create_debug_files(dev);
1270     + break;
1271     + case NETDEV_UNREGISTER:
1272     + ipoib_delete_debug_files(dev);
1273     + break;
1274     + }
1275     +
1276     + return NOTIFY_DONE;
1277     +}
1278     +#endif
1279     +
1280     int ipoib_open(struct net_device *dev)
1281     {
1282     struct ipoib_dev_priv *priv = netdev_priv(dev);
1283     @@ -1655,8 +1682,6 @@ void ipoib_dev_cleanup(struct net_device *dev)
1284    
1285     ASSERT_RTNL();
1286    
1287     - ipoib_delete_debug_files(dev);
1288     -
1289     /* Delete any child interfaces first */
1290     list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
1291     /* Stop GC on child */
1292     @@ -2074,8 +2099,6 @@ static struct net_device *ipoib_add_port(const char *format,
1293     goto register_failed;
1294     }
1295    
1296     - ipoib_create_debug_files(priv->dev);
1297     -
1298     if (ipoib_cm_add_mode_attr(priv->dev))
1299     goto sysfs_failed;
1300     if (ipoib_add_pkey_attr(priv->dev))
1301     @@ -2090,7 +2113,6 @@ static struct net_device *ipoib_add_port(const char *format,
1302     return priv->dev;
1303    
1304     sysfs_failed:
1305     - ipoib_delete_debug_files(priv->dev);
1306     unregister_netdev(priv->dev);
1307    
1308     register_failed:
1309     @@ -2175,6 +2197,12 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
1310     kfree(dev_list);
1311     }
1312    
1313     +#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
1314     +static struct notifier_block ipoib_netdev_notifier = {
1315     + .notifier_call = ipoib_netdev_event,
1316     +};
1317     +#endif
1318     +
1319     static int __init ipoib_init_module(void)
1320     {
1321     int ret;
1322     @@ -2227,6 +2255,9 @@ static int __init ipoib_init_module(void)
1323     if (ret)
1324     goto err_client;
1325    
1326     +#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
1327     + register_netdevice_notifier(&ipoib_netdev_notifier);
1328     +#endif
1329     return 0;
1330    
1331     err_client:
1332     @@ -2244,6 +2275,9 @@ static int __init ipoib_init_module(void)
1333    
1334     static void __exit ipoib_cleanup_module(void)
1335     {
1336     +#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
1337     + unregister_netdevice_notifier(&ipoib_netdev_notifier);
1338     +#endif
1339     ipoib_netlink_fini();
1340     ib_unregister_client(&ipoib_client);
1341     ib_sa_unregister_client(&ipoib_sa_client);
1342     diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
1343     index a2f9f29c6ab5..57eadd2b7a71 100644
1344     --- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
1345     +++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
1346     @@ -87,8 +87,6 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
1347     goto register_failed;
1348     }
1349    
1350     - ipoib_create_debug_files(priv->dev);
1351     -
1352     /* RTNL childs don't need proprietary sysfs entries */
1353     if (type == IPOIB_LEGACY_CHILD) {
1354     if (ipoib_cm_add_mode_attr(priv->dev))
1355     @@ -109,7 +107,6 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
1356    
1357     sysfs_failed:
1358     result = -ENOMEM;
1359     - ipoib_delete_debug_files(priv->dev);
1360     unregister_netdevice(priv->dev);
1361    
1362     register_failed:
1363     diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
1364     index bf2b2676cb8a..80e3df1f1f7d 100644
1365     --- a/drivers/md/dm-era-target.c
1366     +++ b/drivers/md/dm-era-target.c
1367     @@ -961,15 +961,15 @@ static int metadata_commit(struct era_metadata *md)
1368     }
1369     }
1370    
1371     - r = save_sm_root(md);
1372     + r = dm_tm_pre_commit(md->tm);
1373     if (r) {
1374     - DMERR("%s: save_sm_root failed", __func__);
1375     + DMERR("%s: pre commit failed", __func__);
1376     return r;
1377     }
1378    
1379     - r = dm_tm_pre_commit(md->tm);
1380     + r = save_sm_root(md);
1381     if (r) {
1382     - DMERR("%s: pre commit failed", __func__);
1383     + DMERR("%s: save_sm_root failed", __func__);
1384     return r;
1385     }
1386    
1387     diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
1388     index 2c965424d383..ba7c4c685db3 100644
1389     --- a/drivers/md/dm-rq.c
1390     +++ b/drivers/md/dm-rq.c
1391     @@ -997,10 +997,14 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
1392     dm_init_md_queue(md);
1393    
1394     /* backfill 'mq' sysfs registration normally done in blk_register_queue */
1395     - blk_mq_register_dev(disk_to_dev(md->disk), q);
1396     + err = blk_mq_register_dev(disk_to_dev(md->disk), q);
1397     + if (err)
1398     + goto out_cleanup_queue;
1399    
1400     return 0;
1401    
1402     +out_cleanup_queue:
1403     + blk_cleanup_queue(q);
1404     out_tag_set:
1405     blk_mq_free_tag_set(md->tag_set);
1406     out_kfree_tag_set:
1407     diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
1408     index d1c05c12a9db..be869a990e38 100644
1409     --- a/drivers/md/dm-thin.c
1410     +++ b/drivers/md/dm-thin.c
1411     @@ -1070,6 +1070,7 @@ static void passdown_endio(struct bio *bio)
1412     * to unmap (we ignore err).
1413     */
1414     queue_passdown_pt2(bio->bi_private);
1415     + bio_put(bio);
1416     }
1417    
1418     static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
1419     diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
1420     index 6c033c9a2f06..78cb3e2359bd 100644
1421     --- a/drivers/nvdimm/pfn_devs.c
1422     +++ b/drivers/nvdimm/pfn_devs.c
1423     @@ -538,7 +538,8 @@ static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
1424     nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
1425     altmap = NULL;
1426     } else if (nd_pfn->mode == PFN_MODE_PMEM) {
1427     - nd_pfn->npfns = (resource_size(res) - offset) / PAGE_SIZE;
1428     + nd_pfn->npfns = PFN_SECTION_ALIGN_UP((resource_size(res)
1429     + - offset) / PAGE_SIZE);
1430     if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
1431     dev_info(&nd_pfn->dev,
1432     "number of pfns truncated from %lld to %ld\n",
1433     @@ -625,7 +626,8 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
1434     */
1435     start += start_pad;
1436     size = resource_size(&nsio->res);
1437     - npfns = (size - start_pad - end_trunc - SZ_8K) / SZ_4K;
1438     + npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - SZ_8K)
1439     + / PAGE_SIZE);
1440     if (nd_pfn->mode == PFN_MODE_PMEM) {
1441     /*
1442     * vmemmap_populate_hugepages() allocates the memmap array in
1443     diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
1444     index 24618431a14b..b4808590870c 100644
1445     --- a/drivers/nvdimm/pmem.c
1446     +++ b/drivers/nvdimm/pmem.c
1447     @@ -383,12 +383,12 @@ static void nd_pmem_shutdown(struct device *dev)
1448    
1449     static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
1450     {
1451     - struct pmem_device *pmem = dev_get_drvdata(dev);
1452     - struct nd_region *nd_region = to_region(pmem);
1453     + struct nd_region *nd_region;
1454     resource_size_t offset = 0, end_trunc = 0;
1455     struct nd_namespace_common *ndns;
1456     struct nd_namespace_io *nsio;
1457     struct resource res;
1458     + struct badblocks *bb;
1459    
1460     if (event != NVDIMM_REVALIDATE_POISON)
1461     return;
1462     @@ -397,20 +397,33 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
1463     struct nd_btt *nd_btt = to_nd_btt(dev);
1464    
1465     ndns = nd_btt->ndns;
1466     - } else if (is_nd_pfn(dev)) {
1467     - struct nd_pfn *nd_pfn = to_nd_pfn(dev);
1468     - struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
1469     + nd_region = to_nd_region(ndns->dev.parent);
1470     + nsio = to_nd_namespace_io(&ndns->dev);
1471     + bb = &nsio->bb;
1472     + } else {
1473     + struct pmem_device *pmem = dev_get_drvdata(dev);
1474    
1475     - ndns = nd_pfn->ndns;
1476     - offset = pmem->data_offset + __le32_to_cpu(pfn_sb->start_pad);
1477     - end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
1478     - } else
1479     - ndns = to_ndns(dev);
1480     + nd_region = to_region(pmem);
1481     + bb = &pmem->bb;
1482     +
1483     + if (is_nd_pfn(dev)) {
1484     + struct nd_pfn *nd_pfn = to_nd_pfn(dev);
1485     + struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
1486     +
1487     + ndns = nd_pfn->ndns;
1488     + offset = pmem->data_offset +
1489     + __le32_to_cpu(pfn_sb->start_pad);
1490     + end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
1491     + } else {
1492     + ndns = to_ndns(dev);
1493     + }
1494     +
1495     + nsio = to_nd_namespace_io(&ndns->dev);
1496     + }
1497    
1498     - nsio = to_nd_namespace_io(&ndns->dev);
1499     res.start = nsio->res.start + offset;
1500     res.end = nsio->res.end - end_trunc;
1501     - nvdimm_badblocks_populate(nd_region, &pmem->bb, &res);
1502     + nvdimm_badblocks_populate(nd_region, bb, &res);
1503     }
1504    
1505     MODULE_ALIAS("pmem");
1506     diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
1507     index 9cf6f1a88fce..f62306246ca4 100644
1508     --- a/drivers/nvdimm/region_devs.c
1509     +++ b/drivers/nvdimm/region_devs.c
1510     @@ -968,17 +968,20 @@ EXPORT_SYMBOL_GPL(nvdimm_flush);
1511     */
1512     int nvdimm_has_flush(struct nd_region *nd_region)
1513     {
1514     - struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
1515     int i;
1516    
1517     /* no nvdimm == flushing capability unknown */
1518     if (nd_region->ndr_mappings == 0)
1519     return -ENXIO;
1520    
1521     - for (i = 0; i < nd_region->ndr_mappings; i++)
1522     - /* flush hints present, flushing required */
1523     - if (ndrd_get_flush_wpq(ndrd, i, 0))
1524     + for (i = 0; i < nd_region->ndr_mappings; i++) {
1525     + struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1526     + struct nvdimm *nvdimm = nd_mapping->nvdimm;
1527     +
1528     + /* flush hints present / available */
1529     + if (nvdimm->num_flush)
1530     return 1;
1531     + }
1532    
1533     /*
1534     * The platform defines dimm devices without hints, assume
1535     diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
1536     index 70390de66e0e..eb0a095efe9c 100644
1537     --- a/drivers/staging/comedi/drivers/jr3_pci.c
1538     +++ b/drivers/staging/comedi/drivers/jr3_pci.c
1539     @@ -611,7 +611,7 @@ static void jr3_pci_poll_dev(unsigned long data)
1540     s = &dev->subdevices[i];
1541     spriv = s->private;
1542    
1543     - if (now > spriv->next_time_min) {
1544     + if (time_after_eq(now, spriv->next_time_min)) {
1545     struct jr3_pci_poll_delay sub_delay;
1546    
1547     sub_delay = jr3_pci_poll_subdevice(s);
1548     @@ -727,11 +727,12 @@ static int jr3_pci_auto_attach(struct comedi_device *dev,
1549     s->insn_read = jr3_pci_ai_insn_read;
1550    
1551     spriv = jr3_pci_alloc_spriv(dev, s);
1552     - if (spriv) {
1553     - /* Channel specific range and maxdata */
1554     - s->range_table_list = spriv->range_table_list;
1555     - s->maxdata_list = spriv->maxdata_list;
1556     - }
1557     + if (!spriv)
1558     + return -ENOMEM;
1559     +
1560     + /* Channel specific range and maxdata */
1561     + s->range_table_list = spriv->range_table_list;
1562     + s->maxdata_list = spriv->maxdata_list;
1563     }
1564    
1565     /* Reset DSP card */
1566     diff --git a/drivers/staging/gdm724x/gdm_mux.c b/drivers/staging/gdm724x/gdm_mux.c
1567     index 400969170d1c..f03e43b1b5f6 100644
1568     --- a/drivers/staging/gdm724x/gdm_mux.c
1569     +++ b/drivers/staging/gdm724x/gdm_mux.c
1570     @@ -664,9 +664,8 @@ static int __init gdm_usb_mux_init(void)
1571    
1572     static void __exit gdm_usb_mux_exit(void)
1573     {
1574     - unregister_lte_tty_driver();
1575     -
1576     usb_deregister(&gdm_mux_driver);
1577     + unregister_lte_tty_driver();
1578     }
1579    
1580     module_init(gdm_usb_mux_init);
1581     diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c
1582     index e9b6b21f7422..f759aa8a342d 100644
1583     --- a/drivers/staging/vt6656/usbpipe.c
1584     +++ b/drivers/staging/vt6656/usbpipe.c
1585     @@ -47,15 +47,25 @@ int vnt_control_out(struct vnt_private *priv, u8 request, u16 value,
1586     u16 index, u16 length, u8 *buffer)
1587     {
1588     int status = 0;
1589     + u8 *usb_buffer;
1590    
1591     if (test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags))
1592     return STATUS_FAILURE;
1593    
1594     mutex_lock(&priv->usb_lock);
1595    
1596     + usb_buffer = kmemdup(buffer, length, GFP_KERNEL);
1597     + if (!usb_buffer) {
1598     + mutex_unlock(&priv->usb_lock);
1599     + return -ENOMEM;
1600     + }
1601     +
1602     status = usb_control_msg(priv->usb,
1603     - usb_sndctrlpipe(priv->usb, 0), request, 0x40, value,
1604     - index, buffer, length, USB_CTL_WAIT);
1605     + usb_sndctrlpipe(priv->usb, 0),
1606     + request, 0x40, value,
1607     + index, usb_buffer, length, USB_CTL_WAIT);
1608     +
1609     + kfree(usb_buffer);
1610    
1611     mutex_unlock(&priv->usb_lock);
1612    
1613     @@ -75,15 +85,28 @@ int vnt_control_in(struct vnt_private *priv, u8 request, u16 value,
1614     u16 index, u16 length, u8 *buffer)
1615     {
1616     int status;
1617     + u8 *usb_buffer;
1618    
1619     if (test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags))
1620     return STATUS_FAILURE;
1621    
1622     mutex_lock(&priv->usb_lock);
1623    
1624     + usb_buffer = kmalloc(length, GFP_KERNEL);
1625     + if (!usb_buffer) {
1626     + mutex_unlock(&priv->usb_lock);
1627     + return -ENOMEM;
1628     + }
1629     +
1630     status = usb_control_msg(priv->usb,
1631     - usb_rcvctrlpipe(priv->usb, 0), request, 0xc0, value,
1632     - index, buffer, length, USB_CTL_WAIT);
1633     + usb_rcvctrlpipe(priv->usb, 0),
1634     + request, 0xc0, value,
1635     + index, usb_buffer, length, USB_CTL_WAIT);
1636     +
1637     + if (status == length)
1638     + memcpy(buffer, usb_buffer, length);
1639     +
1640     + kfree(usb_buffer);
1641    
1642     mutex_unlock(&priv->usb_lock);
1643    
1644     diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
1645     index b7d747e92c7a..40e50f2d209d 100644
1646     --- a/drivers/target/iscsi/iscsi_target.c
1647     +++ b/drivers/target/iscsi/iscsi_target.c
1648     @@ -4671,6 +4671,7 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
1649     continue;
1650     }
1651     atomic_set(&sess->session_reinstatement, 1);
1652     + atomic_set(&sess->session_fall_back_to_erl0, 1);
1653     spin_unlock(&sess->conn_lock);
1654    
1655     list_move_tail(&se_sess->sess_list, &free_list);
1656     diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
1657     index e980e2d0c2db..7e70fe849f0d 100644
1658     --- a/drivers/target/iscsi/iscsi_target_configfs.c
1659     +++ b/drivers/target/iscsi/iscsi_target_configfs.c
1660     @@ -1530,6 +1530,7 @@ static void lio_tpg_close_session(struct se_session *se_sess)
1661     return;
1662     }
1663     atomic_set(&sess->session_reinstatement, 1);
1664     + atomic_set(&sess->session_fall_back_to_erl0, 1);
1665     spin_unlock(&sess->conn_lock);
1666    
1667     iscsit_stop_time2retain_timer(sess);
1668     diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
1669     index 15f79a2ca34a..96c55bc10ac9 100644
1670     --- a/drivers/target/iscsi/iscsi_target_login.c
1671     +++ b/drivers/target/iscsi/iscsi_target_login.c
1672     @@ -204,6 +204,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
1673     initiatorname_param->value) &&
1674     (sess_p->sess_ops->SessionType == sessiontype))) {
1675     atomic_set(&sess_p->session_reinstatement, 1);
1676     + atomic_set(&sess_p->session_fall_back_to_erl0, 1);
1677     spin_unlock(&sess_p->conn_lock);
1678     iscsit_inc_session_usage_count(sess_p);
1679     iscsit_stop_time2retain_timer(sess_p);
1680     diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
1681     index d545993df18b..29f807b29e74 100644
1682     --- a/drivers/target/target_core_file.c
1683     +++ b/drivers/target/target_core_file.c
1684     @@ -594,8 +594,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
1685     if (ret < 0)
1686     return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1687    
1688     - if (ret)
1689     - target_complete_cmd(cmd, SAM_STAT_GOOD);
1690     + target_complete_cmd(cmd, SAM_STAT_GOOD);
1691     return 0;
1692     }
1693    
1694     diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
1695     index a53fb23a0411..b3b1461ec60d 100644
1696     --- a/drivers/target/target_core_sbc.c
1697     +++ b/drivers/target/target_core_sbc.c
1698     @@ -506,8 +506,11 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
1699     * been failed with a non-zero SCSI status.
1700     */
1701     if (cmd->scsi_status) {
1702     - pr_err("compare_and_write_callback: non zero scsi_status:"
1703     + pr_debug("compare_and_write_callback: non zero scsi_status:"
1704     " 0x%02x\n", cmd->scsi_status);
1705     + *post_ret = 1;
1706     + if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
1707     + ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1708     goto out;
1709     }
1710    
1711     diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
1712     index a23fa5ed1d67..2b907385b4a8 100644
1713     --- a/drivers/tty/pty.c
1714     +++ b/drivers/tty/pty.c
1715     @@ -216,16 +216,11 @@ static int pty_signal(struct tty_struct *tty, int sig)
1716     static void pty_flush_buffer(struct tty_struct *tty)
1717     {
1718     struct tty_struct *to = tty->link;
1719     - struct tty_ldisc *ld;
1720    
1721     if (!to)
1722     return;
1723    
1724     - ld = tty_ldisc_ref(to);
1725     - tty_buffer_flush(to, ld);
1726     - if (ld)
1727     - tty_ldisc_deref(ld);
1728     -
1729     + tty_buffer_flush(to, NULL);
1730     if (to->packet) {
1731     spin_lock_irq(&tty->ctrl_lock);
1732     tty->ctrl_status |= TIOCPKT_FLUSHWRITE;
1733     diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
1734     index a2a529994ba5..44e5b5bf713b 100644
1735     --- a/drivers/tty/serial/omap-serial.c
1736     +++ b/drivers/tty/serial/omap-serial.c
1737     @@ -1712,7 +1712,8 @@ static int serial_omap_probe(struct platform_device *pdev)
1738     return 0;
1739    
1740     err_add_port:
1741     - pm_runtime_put(&pdev->dev);
1742     + pm_runtime_dont_use_autosuspend(&pdev->dev);
1743     + pm_runtime_put_sync(&pdev->dev);
1744     pm_runtime_disable(&pdev->dev);
1745     pm_qos_remove_request(&up->pm_qos_request);
1746     device_init_wakeup(up->dev, false);
1747     @@ -1725,9 +1726,13 @@ static int serial_omap_remove(struct platform_device *dev)
1748     {
1749     struct uart_omap_port *up = platform_get_drvdata(dev);
1750    
1751     + pm_runtime_get_sync(up->dev);
1752     +
1753     + uart_remove_one_port(&serial_omap_reg, &up->port);
1754     +
1755     + pm_runtime_dont_use_autosuspend(up->dev);
1756     pm_runtime_put_sync(up->dev);
1757     pm_runtime_disable(up->dev);
1758     - uart_remove_one_port(&serial_omap_reg, &up->port);
1759     pm_qos_remove_request(&up->pm_qos_request);
1760     device_init_wakeup(&dev->dev, false);
1761    
1762     diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
1763     index 3e2ef4fd7382..d65f92bcd0f1 100644
1764     --- a/drivers/tty/serial/samsung.c
1765     +++ b/drivers/tty/serial/samsung.c
1766     @@ -906,14 +906,13 @@ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
1767     return -ENOMEM;
1768     }
1769    
1770     - dma->rx_addr = dma_map_single(dma->rx_chan->device->dev, dma->rx_buf,
1771     + dma->rx_addr = dma_map_single(p->port.dev, dma->rx_buf,
1772     dma->rx_size, DMA_FROM_DEVICE);
1773    
1774     spin_lock_irqsave(&p->port.lock, flags);
1775    
1776     /* TX buffer */
1777     - dma->tx_addr = dma_map_single(dma->tx_chan->device->dev,
1778     - p->port.state->xmit.buf,
1779     + dma->tx_addr = dma_map_single(p->port.dev, p->port.state->xmit.buf,
1780     UART_XMIT_SIZE, DMA_TO_DEVICE);
1781    
1782     spin_unlock_irqrestore(&p->port.lock, flags);
1783     @@ -927,7 +926,7 @@ static void s3c24xx_serial_release_dma(struct s3c24xx_uart_port *p)
1784    
1785     if (dma->rx_chan) {
1786     dmaengine_terminate_all(dma->rx_chan);
1787     - dma_unmap_single(dma->rx_chan->device->dev, dma->rx_addr,
1788     + dma_unmap_single(p->port.dev, dma->rx_addr,
1789     dma->rx_size, DMA_FROM_DEVICE);
1790     kfree(dma->rx_buf);
1791     dma_release_channel(dma->rx_chan);
1792     @@ -936,7 +935,7 @@ static void s3c24xx_serial_release_dma(struct s3c24xx_uart_port *p)
1793    
1794     if (dma->tx_chan) {
1795     dmaengine_terminate_all(dma->tx_chan);
1796     - dma_unmap_single(dma->tx_chan->device->dev, dma->tx_addr,
1797     + dma_unmap_single(p->port.dev, dma->tx_addr,
1798     UART_XMIT_SIZE, DMA_TO_DEVICE);
1799     dma_release_channel(dma->tx_chan);
1800     dma->tx_chan = NULL;
1801     diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
1802     index 0a6369510f2d..0b845e550fbd 100644
1803     --- a/drivers/usb/class/cdc-wdm.c
1804     +++ b/drivers/usb/class/cdc-wdm.c
1805     @@ -58,7 +58,6 @@ MODULE_DEVICE_TABLE (usb, wdm_ids);
1806     #define WDM_SUSPENDING 8
1807     #define WDM_RESETTING 9
1808     #define WDM_OVERFLOW 10
1809     -#define WDM_DRAIN_ON_OPEN 11
1810    
1811     #define WDM_MAX 16
1812    
1813     @@ -182,7 +181,7 @@ static void wdm_in_callback(struct urb *urb)
1814     "nonzero urb status received: -ESHUTDOWN\n");
1815     goto skip_error;
1816     case -EPIPE:
1817     - dev_dbg(&desc->intf->dev,
1818     + dev_err(&desc->intf->dev,
1819     "nonzero urb status received: -EPIPE\n");
1820     break;
1821     default:
1822     @@ -210,25 +209,6 @@ static void wdm_in_callback(struct urb *urb)
1823     desc->reslength = length;
1824     }
1825     }
1826     -
1827     - /*
1828     - * Handling devices with the WDM_DRAIN_ON_OPEN flag set:
1829     - * If desc->resp_count is unset, then the urb was submitted
1830     - * without a prior notification. If the device returned any
1831     - * data, then this implies that it had messages queued without
1832     - * notifying us. Continue reading until that queue is flushed.
1833     - */
1834     - if (!desc->resp_count) {
1835     - if (!length) {
1836     - /* do not propagate the expected -EPIPE */
1837     - desc->rerr = 0;
1838     - goto unlock;
1839     - }
1840     - dev_dbg(&desc->intf->dev, "got %d bytes without notification\n", length);
1841     - set_bit(WDM_RESPONDING, &desc->flags);
1842     - usb_submit_urb(desc->response, GFP_ATOMIC);
1843     - }
1844     -
1845     skip_error:
1846     set_bit(WDM_READ, &desc->flags);
1847     wake_up(&desc->wait);
1848     @@ -243,7 +223,6 @@ static void wdm_in_callback(struct urb *urb)
1849     service_outstanding_interrupt(desc);
1850     }
1851    
1852     -unlock:
1853     spin_unlock(&desc->iuspin);
1854     }
1855    
1856     @@ -686,17 +665,6 @@ static int wdm_open(struct inode *inode, struct file *file)
1857     dev_err(&desc->intf->dev,
1858     "Error submitting int urb - %d\n", rv);
1859     rv = usb_translate_errors(rv);
1860     - } else if (test_bit(WDM_DRAIN_ON_OPEN, &desc->flags)) {
1861     - /*
1862     - * Some devices keep pending messages queued
1863     - * without resending notifications. We must
1864     - * flush the message queue before we can
1865     - * assume a one-to-one relationship between
1866     - * notifications and messages in the queue
1867     - */
1868     - dev_dbg(&desc->intf->dev, "draining queued data\n");
1869     - set_bit(WDM_RESPONDING, &desc->flags);
1870     - rv = usb_submit_urb(desc->response, GFP_KERNEL);
1871     }
1872     } else {
1873     rv = 0;
1874     @@ -803,8 +771,7 @@ static void wdm_rxwork(struct work_struct *work)
1875     /* --- hotplug --- */
1876    
1877     static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor *ep,
1878     - u16 bufsize, int (*manage_power)(struct usb_interface *, int),
1879     - bool drain_on_open)
1880     + u16 bufsize, int (*manage_power)(struct usb_interface *, int))
1881     {
1882     int rv = -ENOMEM;
1883     struct wdm_device *desc;
1884     @@ -891,68 +858,6 @@ static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor
1885    
1886     desc->manage_power = manage_power;
1887    
1888     - /*
1889     - * "drain_on_open" enables a hack to work around a firmware
1890     - * issue observed on network functions, in particular MBIM
1891     - * functions.
1892     - *
1893     - * Quoting section 7 of the CDC-WMC r1.1 specification:
1894     - *
1895     - * "The firmware shall interpret GetEncapsulatedResponse as a
1896     - * request to read response bytes. The firmware shall send
1897     - * the next wLength bytes from the response. The firmware
1898     - * shall allow the host to retrieve data using any number of
1899     - * GetEncapsulatedResponse requests. The firmware shall
1900     - * return a zero- length reply if there are no data bytes
1901     - * available.
1902     - *
1903     - * The firmware shall send ResponseAvailable notifications
1904     - * periodically, using any appropriate algorithm, to inform
1905     - * the host that there is data available in the reply
1906     - * buffer. The firmware is allowed to send ResponseAvailable
1907     - * notifications even if there is no data available, but
1908     - * this will obviously reduce overall performance."
1909     - *
1910     - * These requirements, although they make equally sense, are
1911     - * often not implemented by network functions. Some firmwares
1912     - * will queue data indefinitely, without ever resending a
1913     - * notification. The result is that the driver and firmware
1914     - * loses "syncronization" if the driver ever fails to respond
1915     - * to a single notification, something which easily can happen
1916     - * on release(). When this happens, the driver will appear to
1917     - * never receive notifications for the most current data. Each
1918     - * notification will only cause a single read, which returns
1919     - * the oldest data in the firmware's queue.
1920     - *
1921     - * The "drain_on_open" hack resolves the situation by draining
1922     - * data from the firmware until none is returned, without a
1923     - * prior notification.
1924     - *
1925     - * This will inevitably race with the firmware, risking that
1926     - * we read data from the device before handling the associated
1927     - * notification. To make things worse, some of the devices
1928     - * needing the hack do not implement the "return zero if no
1929     - * data is available" requirement either. Instead they return
1930     - * an error on the subsequent read in this case. This means
1931     - * that "winning" the race can cause an unexpected EIO to
1932     - * userspace.
1933     - *
1934     - * "winning" the race is more likely on resume() than on
1935     - * open(), and the unexpected error is more harmful in the
1936     - * middle of an open session. The hack is therefore only
1937     - * applied on open(), and not on resume() where it logically
1938     - * would be equally necessary. So we define open() as the only
1939     - * driver <-> device "syncronization point". Should we happen
1940     - * to lose a notification after open(), then syncronization
1941     - * will be lost until release()
1942     - *
1943     - * The hack should not be enabled for CDC WDM devices
1944     - * conforming to the CDC-WMC r1.1 specification. This is
1945     - * ensured by setting drain_on_open to false in wdm_probe().
1946     - */
1947     - if (drain_on_open)
1948     - set_bit(WDM_DRAIN_ON_OPEN, &desc->flags);
1949     -
1950     spin_lock(&wdm_device_list_lock);
1951     list_add(&desc->device_list, &wdm_device_list);
1952     spin_unlock(&wdm_device_list_lock);
1953     @@ -1006,7 +911,7 @@ static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id)
1954     goto err;
1955     ep = &iface->endpoint[0].desc;
1956    
1957     - rv = wdm_create(intf, ep, maxcom, &wdm_manage_power, false);
1958     + rv = wdm_create(intf, ep, maxcom, &wdm_manage_power);
1959    
1960     err:
1961     return rv;
1962     @@ -1038,7 +943,7 @@ struct usb_driver *usb_cdc_wdm_register(struct usb_interface *intf,
1963     {
1964     int rv = -EINVAL;
1965    
1966     - rv = wdm_create(intf, ep, bufsize, manage_power, true);
1967     + rv = wdm_create(intf, ep, bufsize, manage_power);
1968     if (rv < 0)
1969     goto err;
1970    
1971     diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
1972     index dadd1e8dfe09..0bb380a9fcf7 100644
1973     --- a/drivers/usb/core/driver.c
1974     +++ b/drivers/usb/core/driver.c
1975     @@ -1328,6 +1328,24 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
1976     */
1977     if (udev->parent && !PMSG_IS_AUTO(msg))
1978     status = 0;
1979     +
1980     + /*
1981     + * If the device is inaccessible, don't try to resume
1982     + * suspended interfaces and just return the error.
1983     + */
1984     + if (status && status != -EBUSY) {
1985     + int err;
1986     + u16 devstat;
1987     +
1988     + err = usb_get_status(udev, USB_RECIP_DEVICE, 0,
1989     + &devstat);
1990     + if (err) {
1991     + dev_err(&udev->dev,
1992     + "Failed to suspend device, error %d\n",
1993     + status);
1994     + goto done;
1995     + }
1996     + }
1997     }
1998    
1999     /* If the suspend failed, resume interfaces that did get suspended */
2000     @@ -1760,6 +1778,9 @@ static int autosuspend_check(struct usb_device *udev)
2001     int w, i;
2002     struct usb_interface *intf;
2003    
2004     + if (udev->state == USB_STATE_NOTATTACHED)
2005     + return -ENODEV;
2006     +
2007     /* Fail if autosuspend is disabled, or any interfaces are in use, or
2008     * any interface drivers require remote wakeup but it isn't available.
2009     */
2010     diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
2011     index 822ced9639aa..422ce7b20d73 100644
2012     --- a/drivers/usb/core/file.c
2013     +++ b/drivers/usb/core/file.c
2014     @@ -27,6 +27,7 @@
2015     #define MAX_USB_MINORS 256
2016     static const struct file_operations *usb_minors[MAX_USB_MINORS];
2017     static DECLARE_RWSEM(minor_rwsem);
2018     +static DEFINE_MUTEX(init_usb_class_mutex);
2019    
2020     static int usb_open(struct inode *inode, struct file *file)
2021     {
2022     @@ -109,8 +110,9 @@ static void release_usb_class(struct kref *kref)
2023    
2024     static void destroy_usb_class(void)
2025     {
2026     - if (usb_class)
2027     - kref_put(&usb_class->kref, release_usb_class);
2028     + mutex_lock(&init_usb_class_mutex);
2029     + kref_put(&usb_class->kref, release_usb_class);
2030     + mutex_unlock(&init_usb_class_mutex);
2031     }
2032    
2033     int usb_major_init(void)
2034     @@ -171,7 +173,10 @@ int usb_register_dev(struct usb_interface *intf,
2035     if (intf->minor >= 0)
2036     return -EADDRINUSE;
2037    
2038     + mutex_lock(&init_usb_class_mutex);
2039     retval = init_usb_class();
2040     + mutex_unlock(&init_usb_class_mutex);
2041     +
2042     if (retval)
2043     return retval;
2044    
2045     diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
2046     index 35fb2bef0e45..579900640faa 100644
2047     --- a/drivers/usb/core/hub.c
2048     +++ b/drivers/usb/core/hub.c
2049     @@ -1064,6 +1064,9 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
2050    
2051     portstatus = portchange = 0;
2052     status = hub_port_status(hub, port1, &portstatus, &portchange);
2053     + if (status)
2054     + goto abort;
2055     +
2056     if (udev || (portstatus & USB_PORT_STAT_CONNECTION))
2057     dev_dbg(&port_dev->dev, "status %04x change %04x\n",
2058     portstatus, portchange);
2059     @@ -1196,7 +1199,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
2060    
2061     /* Scan all ports that need attention */
2062     kick_hub_wq(hub);
2063     -
2064     + abort:
2065     if (type == HUB_INIT2 || type == HUB_INIT3) {
2066     /* Allow autosuspend if it was suppressed */
2067     disconnected:
2068     @@ -2082,6 +2085,12 @@ void usb_disconnect(struct usb_device **pdev)
2069     dev_info(&udev->dev, "USB disconnect, device number %d\n",
2070     udev->devnum);
2071    
2072     + /*
2073     + * Ensure that the pm runtime code knows that the USB device
2074     + * is in the process of being disconnected.
2075     + */
2076     + pm_runtime_barrier(&udev->dev);
2077     +
2078     usb_lock_device(udev);
2079    
2080     hub_disconnect_children(udev);
2081     diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
2082     index 8ad203296079..f3ee80ece682 100644
2083     --- a/drivers/usb/gadget/Kconfig
2084     +++ b/drivers/usb/gadget/Kconfig
2085     @@ -460,6 +460,7 @@ config USB_CONFIGFS_F_TCM
2086     choice
2087     tristate "USB Gadget Drivers"
2088     default USB_ETH
2089     + optional
2090     help
2091     A Linux "Gadget Driver" talks to the USB Peripheral Controller
2092     driver through the abstract "gadget" API. Some other operating
2093     diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
2094     index 7064892ff4a6..709b52841c87 100644
2095     --- a/drivers/usb/host/xhci-mem.c
2096     +++ b/drivers/usb/host/xhci-mem.c
2097     @@ -1494,6 +1494,17 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
2098     */
2099     max_esit_payload = xhci_get_max_esit_payload(udev, ep);
2100     interval = xhci_get_endpoint_interval(udev, ep);
2101     +
2102     + /* Periodic endpoint bInterval limit quirk */
2103     + if (usb_endpoint_xfer_int(&ep->desc) ||
2104     + usb_endpoint_xfer_isoc(&ep->desc)) {
2105     + if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_7) &&
2106     + udev->speed >= USB_SPEED_HIGH &&
2107     + interval >= 7) {
2108     + interval = 6;
2109     + }
2110     + }
2111     +
2112     mult = xhci_get_endpoint_mult(udev, ep);
2113     max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
2114     max_burst = xhci_get_endpoint_max_burst(udev, ep);
2115     @@ -2475,7 +2486,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
2116     (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
2117     xhci->cmd_ring->cycle_state;
2118     xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2119     - "// Setting command ring address to 0x%x", val);
2120     + "// Setting command ring address to 0x%016llx", val_64);
2121     xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
2122     xhci_dbg_cmd_ptrs(xhci);
2123    
2124     diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
2125     index 954abfd5014d..93f566cb968b 100644
2126     --- a/drivers/usb/host/xhci-pci.c
2127     +++ b/drivers/usb/host/xhci-pci.c
2128     @@ -199,6 +199,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
2129     pdev->device == 0x1042)
2130     xhci->quirks |= XHCI_BROKEN_STREAMS;
2131    
2132     + if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241)
2133     + xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7;
2134     +
2135     if (xhci->quirks & XHCI_RESET_ON_RESUME)
2136     xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2137     "QUIRK: Resetting on resume");
2138     diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
2139     index f97b009ffc40..328fe06752ec 100644
2140     --- a/drivers/usb/host/xhci.h
2141     +++ b/drivers/usb/host/xhci.h
2142     @@ -1659,6 +1659,7 @@ struct xhci_hcd {
2143     #define XHCI_MISSING_CAS (1 << 24)
2144     /* For controller with a broken Port Disable implementation */
2145     #define XHCI_BROKEN_PORT_PED (1 << 25)
2146     +#define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26)
2147    
2148     unsigned int num_active_eps;
2149     unsigned int limit_active_eps;
2150     diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
2151     index 5c8210dc6fd9..d94927e5623b 100644
2152     --- a/drivers/usb/misc/usbtest.c
2153     +++ b/drivers/usb/misc/usbtest.c
2154     @@ -159,6 +159,7 @@ get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf)
2155     case USB_ENDPOINT_XFER_INT:
2156     if (dev->info->intr)
2157     goto try_intr;
2158     + continue;
2159     case USB_ENDPOINT_XFER_ISOC:
2160     if (dev->info->iso)
2161     goto try_iso;
2162     diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
2163     index d8d13eede6d9..1dc75db16cbd 100644
2164     --- a/drivers/usb/serial/ftdi_sio.c
2165     +++ b/drivers/usb/serial/ftdi_sio.c
2166     @@ -873,6 +873,7 @@ static const struct usb_device_id id_table_combined[] = {
2167     { USB_DEVICE_AND_INTERFACE_INFO(MICROCHIP_VID, MICROCHIP_USB_BOARD_PID,
2168     USB_CLASS_VENDOR_SPEC,
2169     USB_SUBCLASS_VENDOR_SPEC, 0x00) },
2170     + { USB_DEVICE_INTERFACE_NUMBER(ACTEL_VID, MICROSEMI_ARROW_SF2PLUS_BOARD_PID, 2) },
2171     { USB_DEVICE(JETI_VID, JETI_SPC1201_PID) },
2172     { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID),
2173     .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
2174     diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
2175     index 48ee04c94a75..71fb9e59db71 100644
2176     --- a/drivers/usb/serial/ftdi_sio_ids.h
2177     +++ b/drivers/usb/serial/ftdi_sio_ids.h
2178     @@ -873,6 +873,12 @@
2179     #define FIC_VID 0x1457
2180     #define FIC_NEO1973_DEBUG_PID 0x5118
2181    
2182     +/*
2183     + * Actel / Microsemi
2184     + */
2185     +#define ACTEL_VID 0x1514
2186     +#define MICROSEMI_ARROW_SF2PLUS_BOARD_PID 0x2008
2187     +
2188     /* Olimex */
2189     #define OLIMEX_VID 0x15BA
2190     #define OLIMEX_ARM_USB_OCD_PID 0x0003
2191     diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
2192     index 2ba19424e4a1..1d48e62f4f52 100644
2193     --- a/drivers/vfio/vfio_iommu_type1.c
2194     +++ b/drivers/vfio/vfio_iommu_type1.c
2195     @@ -130,57 +130,36 @@ static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old)
2196     rb_erase(&old->node, &iommu->dma_list);
2197     }
2198    
2199     -struct vwork {
2200     - struct mm_struct *mm;
2201     - long npage;
2202     - struct work_struct work;
2203     -};
2204     -
2205     -/* delayed decrement/increment for locked_vm */
2206     -static void vfio_lock_acct_bg(struct work_struct *work)
2207     +static int vfio_lock_acct(long npage, bool *lock_cap)
2208     {
2209     - struct vwork *vwork = container_of(work, struct vwork, work);
2210     - struct mm_struct *mm;
2211     -
2212     - mm = vwork->mm;
2213     - down_write(&mm->mmap_sem);
2214     - mm->locked_vm += vwork->npage;
2215     - up_write(&mm->mmap_sem);
2216     - mmput(mm);
2217     - kfree(vwork);
2218     -}
2219     + int ret;
2220    
2221     -static void vfio_lock_acct(long npage)
2222     -{
2223     - struct vwork *vwork;
2224     - struct mm_struct *mm;
2225     + if (!npage)
2226     + return 0;
2227    
2228     - if (!current->mm || !npage)
2229     - return; /* process exited or nothing to do */
2230     + if (!current->mm)
2231     + return -ESRCH; /* process exited */
2232     +
2233     + ret = down_write_killable(&current->mm->mmap_sem);
2234     + if (!ret) {
2235     + if (npage > 0) {
2236     + if (lock_cap ? !*lock_cap : !capable(CAP_IPC_LOCK)) {
2237     + unsigned long limit;
2238     +
2239     + limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
2240     +
2241     + if (current->mm->locked_vm + npage > limit)
2242     + ret = -ENOMEM;
2243     + }
2244     + }
2245     +
2246     + if (!ret)
2247     + current->mm->locked_vm += npage;
2248    
2249     - if (down_write_trylock(&current->mm->mmap_sem)) {
2250     - current->mm->locked_vm += npage;
2251     up_write(&current->mm->mmap_sem);
2252     - return;
2253     }
2254    
2255     - /*
2256     - * Couldn't get mmap_sem lock, so must setup to update
2257     - * mm->locked_vm later. If locked_vm were atomic, we
2258     - * wouldn't need this silliness
2259     - */
2260     - vwork = kmalloc(sizeof(struct vwork), GFP_KERNEL);
2261     - if (!vwork)
2262     - return;
2263     - mm = get_task_mm(current);
2264     - if (!mm) {
2265     - kfree(vwork);
2266     - return;
2267     - }
2268     - INIT_WORK(&vwork->work, vfio_lock_acct_bg);
2269     - vwork->mm = mm;
2270     - vwork->npage = npage;
2271     - schedule_work(&vwork->work);
2272     + return ret;
2273     }
2274    
2275     /*
2276     @@ -262,9 +241,9 @@ static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn)
2277     static long vfio_pin_pages(unsigned long vaddr, long npage,
2278     int prot, unsigned long *pfn_base)
2279     {
2280     - unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
2281     + unsigned long pfn = 0, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
2282     bool lock_cap = capable(CAP_IPC_LOCK);
2283     - long ret, i;
2284     + long ret, i = 1;
2285     bool rsvd;
2286    
2287     if (!current->mm)
2288     @@ -283,16 +262,11 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
2289     return -ENOMEM;
2290     }
2291    
2292     - if (unlikely(disable_hugepages)) {
2293     - if (!rsvd)
2294     - vfio_lock_acct(1);
2295     - return 1;
2296     - }
2297     + if (unlikely(disable_hugepages))
2298     + goto out;
2299    
2300     /* Lock all the consecutive pages from pfn_base */
2301     - for (i = 1, vaddr += PAGE_SIZE; i < npage; i++, vaddr += PAGE_SIZE) {
2302     - unsigned long pfn = 0;
2303     -
2304     + for (vaddr += PAGE_SIZE; i < npage; i++, vaddr += PAGE_SIZE) {
2305     ret = vaddr_get_pfn(vaddr, prot, &pfn);
2306     if (ret)
2307     break;
2308     @@ -308,12 +282,24 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
2309     put_pfn(pfn, prot);
2310     pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
2311     __func__, limit << PAGE_SHIFT);
2312     - break;
2313     + ret = -ENOMEM;
2314     + goto unpin_out;
2315     }
2316     }
2317    
2318     +out:
2319     if (!rsvd)
2320     - vfio_lock_acct(i);
2321     + ret = vfio_lock_acct(i, &lock_cap);
2322     +
2323     +unpin_out:
2324     + if (ret) {
2325     + if (!rsvd) {
2326     + for (pfn = *pfn_base ; i ; pfn++, i--)
2327     + put_pfn(pfn, prot);
2328     + }
2329     +
2330     + return ret;
2331     + }
2332    
2333     return i;
2334     }
2335     @@ -328,7 +314,7 @@ static long vfio_unpin_pages(unsigned long pfn, long npage,
2336     unlocked += put_pfn(pfn++, prot);
2337    
2338     if (do_accounting)
2339     - vfio_lock_acct(-unlocked);
2340     + vfio_lock_acct(-unlocked, NULL);
2341    
2342     return unlocked;
2343     }
2344     @@ -390,7 +376,7 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
2345     cond_resched();
2346     }
2347    
2348     - vfio_lock_acct(-unlocked);
2349     + vfio_lock_acct(-unlocked, NULL);
2350     }
2351    
2352     static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
2353     diff --git a/fs/block_dev.c b/fs/block_dev.c
2354     index 9ad527ff9974..2924bddb4a94 100644
2355     --- a/fs/block_dev.c
2356     +++ b/fs/block_dev.c
2357     @@ -102,12 +102,11 @@ void invalidate_bdev(struct block_device *bdev)
2358     {
2359     struct address_space *mapping = bdev->bd_inode->i_mapping;
2360    
2361     - if (mapping->nrpages == 0)
2362     - return;
2363     -
2364     - invalidate_bh_lrus();
2365     - lru_add_drain_all(); /* make sure all lru add caches are flushed */
2366     - invalidate_mapping_pages(mapping, 0, -1);
2367     + if (mapping->nrpages) {
2368     + invalidate_bh_lrus();
2369     + lru_add_drain_all(); /* make sure all lru add caches are flushed */
2370     + invalidate_mapping_pages(mapping, 0, -1);
2371     + }
2372     /* 99% of the time, we don't need to flush the cleancache on the bdev.
2373     * But, for the strange corners, lets be cautious
2374     */
2375     diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
2376     index febc28f9e2c2..75267cdd5dfd 100644
2377     --- a/fs/ceph/xattr.c
2378     +++ b/fs/ceph/xattr.c
2379     @@ -392,6 +392,7 @@ static int __set_xattr(struct ceph_inode_info *ci,
2380    
2381     if (update_xattr) {
2382     int err = 0;
2383     +
2384     if (xattr && (flags & XATTR_CREATE))
2385     err = -EEXIST;
2386     else if (!xattr && (flags & XATTR_REPLACE))
2387     @@ -399,12 +400,14 @@ static int __set_xattr(struct ceph_inode_info *ci,
2388     if (err) {
2389     kfree(name);
2390     kfree(val);
2391     + kfree(*newxattr);
2392     return err;
2393     }
2394     if (update_xattr < 0) {
2395     if (xattr)
2396     __remove_xattr(ci, xattr);
2397     kfree(name);
2398     + kfree(*newxattr);
2399     return 0;
2400     }
2401     }
2402     diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
2403     index 02b071bf3732..a0b3e7d1be48 100644
2404     --- a/fs/cifs/cifs_unicode.c
2405     +++ b/fs/cifs/cifs_unicode.c
2406     @@ -83,6 +83,9 @@ convert_sfm_char(const __u16 src_char, char *target)
2407     case SFM_COLON:
2408     *target = ':';
2409     break;
2410     + case SFM_DOUBLEQUOTE:
2411     + *target = '"';
2412     + break;
2413     case SFM_ASTERISK:
2414     *target = '*';
2415     break;
2416     @@ -418,6 +421,9 @@ static __le16 convert_to_sfm_char(char src_char, bool end_of_string)
2417     case ':':
2418     dest_char = cpu_to_le16(SFM_COLON);
2419     break;
2420     + case '"':
2421     + dest_char = cpu_to_le16(SFM_DOUBLEQUOTE);
2422     + break;
2423     case '*':
2424     dest_char = cpu_to_le16(SFM_ASTERISK);
2425     break;
2426     diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h
2427     index 479bc0a941f3..07ade707fa60 100644
2428     --- a/fs/cifs/cifs_unicode.h
2429     +++ b/fs/cifs/cifs_unicode.h
2430     @@ -57,6 +57,7 @@
2431     * not conflict (although almost does) with the mapping above.
2432     */
2433    
2434     +#define SFM_DOUBLEQUOTE ((__u16) 0xF020)
2435     #define SFM_ASTERISK ((__u16) 0xF021)
2436     #define SFM_QUESTION ((__u16) 0xF025)
2437     #define SFM_COLON ((__u16) 0xF022)
2438     @@ -64,8 +65,8 @@
2439     #define SFM_LESSTHAN ((__u16) 0xF023)
2440     #define SFM_PIPE ((__u16) 0xF027)
2441     #define SFM_SLASH ((__u16) 0xF026)
2442     -#define SFM_PERIOD ((__u16) 0xF028)
2443     -#define SFM_SPACE ((__u16) 0xF029)
2444     +#define SFM_SPACE ((__u16) 0xF028)
2445     +#define SFM_PERIOD ((__u16) 0xF029)
2446    
2447     /*
2448     * Mapping mechanism to use when one of the seven reserved characters is
2449     diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
2450     index 15261ba464c5..c0c253005b76 100644
2451     --- a/fs/cifs/cifsfs.c
2452     +++ b/fs/cifs/cifsfs.c
2453     @@ -87,6 +87,7 @@ extern mempool_t *cifs_req_poolp;
2454     extern mempool_t *cifs_mid_poolp;
2455    
2456     struct workqueue_struct *cifsiod_wq;
2457     +struct workqueue_struct *cifsoplockd_wq;
2458     __u32 cifs_lock_secret;
2459    
2460     /*
2461     @@ -1283,9 +1284,16 @@ init_cifs(void)
2462     goto out_clean_proc;
2463     }
2464    
2465     + cifsoplockd_wq = alloc_workqueue("cifsoplockd",
2466     + WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
2467     + if (!cifsoplockd_wq) {
2468     + rc = -ENOMEM;
2469     + goto out_destroy_cifsiod_wq;
2470     + }
2471     +
2472     rc = cifs_fscache_register();
2473     if (rc)
2474     - goto out_destroy_wq;
2475     + goto out_destroy_cifsoplockd_wq;
2476    
2477     rc = cifs_init_inodecache();
2478     if (rc)
2479     @@ -1333,7 +1341,9 @@ init_cifs(void)
2480     cifs_destroy_inodecache();
2481     out_unreg_fscache:
2482     cifs_fscache_unregister();
2483     -out_destroy_wq:
2484     +out_destroy_cifsoplockd_wq:
2485     + destroy_workqueue(cifsoplockd_wq);
2486     +out_destroy_cifsiod_wq:
2487     destroy_workqueue(cifsiod_wq);
2488     out_clean_proc:
2489     cifs_proc_clean();
2490     @@ -1356,6 +1366,7 @@ exit_cifs(void)
2491     cifs_destroy_mids();
2492     cifs_destroy_inodecache();
2493     cifs_fscache_unregister();
2494     + destroy_workqueue(cifsoplockd_wq);
2495     destroy_workqueue(cifsiod_wq);
2496     cifs_proc_clean();
2497     }
2498     diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
2499     index b3830f7ab260..48ef401c3c61 100644
2500     --- a/fs/cifs/cifsglob.h
2501     +++ b/fs/cifs/cifsglob.h
2502     @@ -1651,6 +1651,7 @@ void cifs_oplock_break(struct work_struct *work);
2503    
2504     extern const struct slow_work_ops cifs_oplock_break_ops;
2505     extern struct workqueue_struct *cifsiod_wq;
2506     +extern struct workqueue_struct *cifsoplockd_wq;
2507     extern __u32 cifs_lock_secret;
2508    
2509     extern mempool_t *cifs_mid_poolp;
2510     diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
2511     index 586fdac05ec2..1f91c9dadd5b 100644
2512     --- a/fs/cifs/cifssmb.c
2513     +++ b/fs/cifs/cifssmb.c
2514     @@ -717,6 +717,9 @@ CIFSSMBEcho(struct TCP_Server_Info *server)
2515     if (rc)
2516     return rc;
2517    
2518     + if (server->capabilities & CAP_UNICODE)
2519     + smb->hdr.Flags2 |= SMBFLG2_UNICODE;
2520     +
2521     /* set up echo request */
2522     smb->hdr.Tid = 0xffff;
2523     smb->hdr.WordCount = 1;
2524     diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
2525     index b8015de88e8c..1a545695f547 100644
2526     --- a/fs/cifs/connect.c
2527     +++ b/fs/cifs/connect.c
2528     @@ -2839,16 +2839,14 @@ match_prepath(struct super_block *sb, struct cifs_mnt_data *mnt_data)
2529     {
2530     struct cifs_sb_info *old = CIFS_SB(sb);
2531     struct cifs_sb_info *new = mnt_data->cifs_sb;
2532     + bool old_set = old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH;
2533     + bool new_set = new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH;
2534    
2535     - if (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) {
2536     - if (!(new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH))
2537     - return 0;
2538     - /* The prepath should be null terminated strings */
2539     - if (strcmp(new->prepath, old->prepath))
2540     - return 0;
2541     -
2542     + if (old_set && new_set && !strcmp(new->prepath, old->prepath))
2543     return 1;
2544     - }
2545     + else if (!old_set && !new_set)
2546     + return 1;
2547     +
2548     return 0;
2549     }
2550    
2551     diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
2552     index 001528781b6b..bdba9e7a9438 100644
2553     --- a/fs/cifs/ioctl.c
2554     +++ b/fs/cifs/ioctl.c
2555     @@ -264,10 +264,14 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
2556     rc = -EOPNOTSUPP;
2557     break;
2558     case CIFS_IOC_GET_MNT_INFO:
2559     + if (pSMBFile == NULL)
2560     + break;
2561     tcon = tlink_tcon(pSMBFile->tlink);
2562     rc = smb_mnt_get_fsinfo(xid, tcon, (void __user *)arg);
2563     break;
2564     case CIFS_ENUMERATE_SNAPSHOTS:
2565     + if (pSMBFile == NULL)
2566     + break;
2567     if (arg == 0) {
2568     rc = -EINVAL;
2569     goto cifs_ioc_exit;
2570     diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
2571     index c6729156f9a0..5419afea0a36 100644
2572     --- a/fs/cifs/misc.c
2573     +++ b/fs/cifs/misc.c
2574     @@ -492,7 +492,7 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
2575     CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
2576     &pCifsInode->flags);
2577    
2578     - queue_work(cifsiod_wq,
2579     + queue_work(cifsoplockd_wq,
2580     &netfile->oplock_break);
2581     netfile->oplock_break_cancelled = false;
2582    
2583     diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
2584     index 97307808ae42..967dfe656ced 100644
2585     --- a/fs/cifs/smb2misc.c
2586     +++ b/fs/cifs/smb2misc.c
2587     @@ -494,7 +494,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
2588     else
2589     cfile->oplock_break_cancelled = true;
2590    
2591     - queue_work(cifsiod_wq, &cfile->oplock_break);
2592     + queue_work(cifsoplockd_wq, &cfile->oplock_break);
2593     kfree(lw);
2594     return true;
2595     }
2596     @@ -638,7 +638,8 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
2597     CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
2598     &cinode->flags);
2599     spin_unlock(&cfile->file_info_lock);
2600     - queue_work(cifsiod_wq, &cfile->oplock_break);
2601     + queue_work(cifsoplockd_wq,
2602     + &cfile->oplock_break);
2603    
2604     spin_unlock(&tcon->open_file_lock);
2605     spin_unlock(&cifs_tcp_ses_lock);
2606     diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
2607     index 007abf7195af..36334fe3266c 100644
2608     --- a/fs/cifs/smb2ops.c
2609     +++ b/fs/cifs/smb2ops.c
2610     @@ -924,6 +924,7 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
2611     }
2612     if (snapshot_in.snapshot_array_size < sizeof(struct smb_snapshot_array)) {
2613     rc = -ERANGE;
2614     + kfree(retbuf);
2615     return rc;
2616     }
2617    
2618     diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
2619     index 802185386851..7c1c6c39d582 100644
2620     --- a/fs/cifs/smb2pdu.c
2621     +++ b/fs/cifs/smb2pdu.c
2622     @@ -569,8 +569,12 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
2623     }
2624    
2625     if (rsplen != sizeof(struct validate_negotiate_info_rsp)) {
2626     - cifs_dbg(VFS, "invalid size of protocol negotiate response\n");
2627     - return -EIO;
2628     + cifs_dbg(VFS, "invalid protocol negotiate response size: %d\n",
2629     + rsplen);
2630     +
2631     + /* relax check since Mac returns max bufsize allowed on ioctl */
2632     + if (rsplen > CIFSMaxBufSize)
2633     + return -EIO;
2634     }
2635    
2636     /* check validate negotiate info response matches what we got earlier */
2637     @@ -1670,8 +1674,12 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
2638     * than one credit. Windows typically sets this smaller, but for some
2639     * ioctls it may be useful to allow server to send more. No point
2640     * limiting what the server can send as long as fits in one credit
2641     + * Unfortunately - we can not handle more than CIFS_MAX_MSG_SIZE
2642     + * (by default, note that it can be overridden to make max larger)
2643     + * in responses (except for read responses which can be bigger.
2644     + * We may want to bump this limit up
2645     */
2646     - req->MaxOutputResponse = cpu_to_le32(0xFF00); /* < 64K uses 1 credit */
2647     + req->MaxOutputResponse = cpu_to_le32(CIFSMaxBufSize);
2648    
2649     if (is_fsctl)
2650     req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
2651     diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
2652     index abc18847b98d..bb4e209bd809 100644
2653     --- a/fs/crypto/policy.c
2654     +++ b/fs/crypto/policy.c
2655     @@ -161,27 +161,61 @@ int fscrypt_get_policy(struct inode *inode, struct fscrypt_policy *policy)
2656     }
2657     EXPORT_SYMBOL(fscrypt_get_policy);
2658    
2659     +/**
2660     + * fscrypt_has_permitted_context() - is a file's encryption policy permitted
2661     + * within its directory?
2662     + *
2663     + * @parent: inode for parent directory
2664     + * @child: inode for file being looked up, opened, or linked into @parent
2665     + *
2666     + * Filesystems must call this before permitting access to an inode in a
2667     + * situation where the parent directory is encrypted (either before allowing
2668     + * ->lookup() to succeed, or for a regular file before allowing it to be opened)
2669     + * and before any operation that involves linking an inode into an encrypted
2670     + * directory, including link, rename, and cross rename. It enforces the
2671     + * constraint that within a given encrypted directory tree, all files use the
2672     + * same encryption policy. The pre-access check is needed to detect potentially
2673     + * malicious offline violations of this constraint, while the link and rename
2674     + * checks are needed to prevent online violations of this constraint.
2675     + *
2676     + * Return: 1 if permitted, 0 if forbidden. If forbidden, the caller must fail
2677     + * the filesystem operation with EPERM.
2678     + */
2679     int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
2680     {
2681     - struct fscrypt_info *parent_ci, *child_ci;
2682     + const struct fscrypt_operations *cops = parent->i_sb->s_cop;
2683     + const struct fscrypt_info *parent_ci, *child_ci;
2684     + struct fscrypt_context parent_ctx, child_ctx;
2685     int res;
2686    
2687     - if ((parent == NULL) || (child == NULL)) {
2688     - printk(KERN_ERR "parent %p child %p\n", parent, child);
2689     - BUG_ON(1);
2690     - }
2691     -
2692     /* No restrictions on file types which are never encrypted */
2693     if (!S_ISREG(child->i_mode) && !S_ISDIR(child->i_mode) &&
2694     !S_ISLNK(child->i_mode))
2695     return 1;
2696    
2697     - /* no restrictions if the parent directory is not encrypted */
2698     - if (!parent->i_sb->s_cop->is_encrypted(parent))
2699     + /* No restrictions if the parent directory is unencrypted */
2700     + if (!cops->is_encrypted(parent))
2701     return 1;
2702     - /* if the child directory is not encrypted, this is always a problem */
2703     - if (!parent->i_sb->s_cop->is_encrypted(child))
2704     +
2705     + /* Encrypted directories must not contain unencrypted files */
2706     + if (!cops->is_encrypted(child))
2707     return 0;
2708     +
2709     + /*
2710     + * Both parent and child are encrypted, so verify they use the same
2711     + * encryption policy. Compare the fscrypt_info structs if the keys are
2712     + * available, otherwise retrieve and compare the fscrypt_contexts.
2713     + *
2714     + * Note that the fscrypt_context retrieval will be required frequently
2715     + * when accessing an encrypted directory tree without the key.
2716     + * Performance-wise this is not a big deal because we already don't
2717     + * really optimize for file access without the key (to the extent that
2718     + * such access is even possible), given that any attempted access
2719     + * already causes a fscrypt_context retrieval and keyring search.
2720     + *
2721     + * In any case, if an unexpected error occurs, fall back to "forbidden".
2722     + */
2723     +
2724     res = fscrypt_get_encryption_info(parent);
2725     if (res)
2726     return 0;
2727     @@ -190,17 +224,32 @@ int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
2728     return 0;
2729     parent_ci = parent->i_crypt_info;
2730     child_ci = child->i_crypt_info;
2731     - if (!parent_ci && !child_ci)
2732     - return 1;
2733     - if (!parent_ci || !child_ci)
2734     +
2735     + if (parent_ci && child_ci) {
2736     + return memcmp(parent_ci->ci_master_key, child_ci->ci_master_key,
2737     + FS_KEY_DESCRIPTOR_SIZE) == 0 &&
2738     + (parent_ci->ci_data_mode == child_ci->ci_data_mode) &&
2739     + (parent_ci->ci_filename_mode ==
2740     + child_ci->ci_filename_mode) &&
2741     + (parent_ci->ci_flags == child_ci->ci_flags);
2742     + }
2743     +
2744     + res = cops->get_context(parent, &parent_ctx, sizeof(parent_ctx));
2745     + if (res != sizeof(parent_ctx))
2746     return 0;
2747    
2748     - return (memcmp(parent_ci->ci_master_key,
2749     - child_ci->ci_master_key,
2750     - FS_KEY_DESCRIPTOR_SIZE) == 0 &&
2751     - (parent_ci->ci_data_mode == child_ci->ci_data_mode) &&
2752     - (parent_ci->ci_filename_mode == child_ci->ci_filename_mode) &&
2753     - (parent_ci->ci_flags == child_ci->ci_flags));
2754     + res = cops->get_context(child, &child_ctx, sizeof(child_ctx));
2755     + if (res != sizeof(child_ctx))
2756     + return 0;
2757     +
2758     + return memcmp(parent_ctx.master_key_descriptor,
2759     + child_ctx.master_key_descriptor,
2760     + FS_KEY_DESCRIPTOR_SIZE) == 0 &&
2761     + (parent_ctx.contents_encryption_mode ==
2762     + child_ctx.contents_encryption_mode) &&
2763     + (parent_ctx.filenames_encryption_mode ==
2764     + child_ctx.filenames_encryption_mode) &&
2765     + (parent_ctx.flags == child_ctx.flags);
2766     }
2767     EXPORT_SYMBOL(fscrypt_has_permitted_context);
2768    
2769     diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
2770     index c78fce404654..01329688fb9e 100644
2771     --- a/fs/ext4/inode.c
2772     +++ b/fs/ext4/inode.c
2773     @@ -5686,6 +5686,11 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
2774     file_update_time(vma->vm_file);
2775    
2776     down_read(&EXT4_I(inode)->i_mmap_sem);
2777     +
2778     + ret = ext4_convert_inline_data(inode);
2779     + if (ret)
2780     + goto out_ret;
2781     +
2782     /* Delalloc case is easy... */
2783     if (test_opt(inode->i_sb, DELALLOC) &&
2784     !ext4_should_journal_data(inode) &&
2785     diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
2786     index ef3b4eb54cf2..08ecdeebd6f7 100644
2787     --- a/fs/orangefs/inode.c
2788     +++ b/fs/orangefs/inode.c
2789     @@ -223,8 +223,7 @@ int orangefs_setattr(struct dentry *dentry, struct iattr *iattr)
2790     if (ret)
2791     goto out;
2792    
2793     - if ((iattr->ia_valid & ATTR_SIZE) &&
2794     - iattr->ia_size != i_size_read(inode)) {
2795     + if (iattr->ia_valid & ATTR_SIZE) {
2796     ret = orangefs_setattr_size(inode, iattr);
2797     if (ret)
2798     goto out;
2799     diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c
2800     index a290ff6ec756..7c315938e9c2 100644
2801     --- a/fs/orangefs/namei.c
2802     +++ b/fs/orangefs/namei.c
2803     @@ -193,8 +193,6 @@ static struct dentry *orangefs_lookup(struct inode *dir, struct dentry *dentry,
2804     goto out;
2805     }
2806    
2807     - ORANGEFS_I(inode)->getattr_time = jiffies - 1;
2808     -
2809     gossip_debug(GOSSIP_NAME_DEBUG,
2810     "%s:%s:%d "
2811     "Found good inode [%lu] with count [%d]\n",
2812     diff --git a/fs/orangefs/xattr.c b/fs/orangefs/xattr.c
2813     index 74a81b1daaac..237c9c04dc3b 100644
2814     --- a/fs/orangefs/xattr.c
2815     +++ b/fs/orangefs/xattr.c
2816     @@ -76,11 +76,8 @@ ssize_t orangefs_inode_getxattr(struct inode *inode, const char *name,
2817     if (S_ISLNK(inode->i_mode))
2818     return -EOPNOTSUPP;
2819    
2820     - if (strlen(name) >= ORANGEFS_MAX_XATTR_NAMELEN) {
2821     - gossip_err("Invalid key length (%d)\n",
2822     - (int)strlen(name));
2823     + if (strlen(name) > ORANGEFS_MAX_XATTR_NAMELEN)
2824     return -EINVAL;
2825     - }
2826    
2827     fsuid = from_kuid(&init_user_ns, current_fsuid());
2828     fsgid = from_kgid(&init_user_ns, current_fsgid());
2829     @@ -172,6 +169,9 @@ static int orangefs_inode_removexattr(struct inode *inode, const char *name,
2830     struct orangefs_kernel_op_s *new_op = NULL;
2831     int ret = -ENOMEM;
2832    
2833     + if (strlen(name) > ORANGEFS_MAX_XATTR_NAMELEN)
2834     + return -EINVAL;
2835     +
2836     down_write(&orangefs_inode->xattr_sem);
2837     new_op = op_alloc(ORANGEFS_VFS_OP_REMOVEXATTR);
2838     if (!new_op)
2839     @@ -231,23 +231,13 @@ int orangefs_inode_setxattr(struct inode *inode, const char *name,
2840     "%s: name %s, buffer_size %zd\n",
2841     __func__, name, size);
2842    
2843     - if (size >= ORANGEFS_MAX_XATTR_VALUELEN ||
2844     - flags < 0) {
2845     - gossip_err("orangefs_inode_setxattr: bogus values of size(%d), flags(%d)\n",
2846     - (int)size,
2847     - flags);
2848     + if (size > ORANGEFS_MAX_XATTR_VALUELEN)
2849     + return -EINVAL;
2850     + if (strlen(name) > ORANGEFS_MAX_XATTR_NAMELEN)
2851     return -EINVAL;
2852     - }
2853    
2854     internal_flag = convert_to_internal_xattr_flags(flags);
2855    
2856     - if (strlen(name) >= ORANGEFS_MAX_XATTR_NAMELEN) {
2857     - gossip_err
2858     - ("orangefs_inode_setxattr: bogus key size (%d)\n",
2859     - (int)(strlen(name)));
2860     - return -EINVAL;
2861     - }
2862     -
2863     /* This is equivalent to a removexattr */
2864     if (size == 0 && value == NULL) {
2865     gossip_debug(GOSSIP_XATTR_DEBUG,
2866     @@ -358,7 +348,7 @@ ssize_t orangefs_listxattr(struct dentry *dentry, char *buffer, size_t size)
2867    
2868     returned_count = new_op->downcall.resp.listxattr.returned_count;
2869     if (returned_count < 0 ||
2870     - returned_count >= ORANGEFS_MAX_XATTR_LISTLEN) {
2871     + returned_count > ORANGEFS_MAX_XATTR_LISTLEN) {
2872     gossip_err("%s: impossible value for returned_count:%d:\n",
2873     __func__,
2874     returned_count);
2875     diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
2876     index 14984d902a99..43033a3d66d5 100644
2877     --- a/fs/pstore/platform.c
2878     +++ b/fs/pstore/platform.c
2879     @@ -704,6 +704,7 @@ int pstore_register(struct pstore_info *psi)
2880     if (psi->flags & PSTORE_FLAGS_PMSG)
2881     pstore_register_pmsg();
2882    
2883     + /* Start watching for new records, if desired. */
2884     if (pstore_update_ms >= 0) {
2885     pstore_timer.expires = jiffies +
2886     msecs_to_jiffies(pstore_update_ms);
2887     @@ -726,6 +727,11 @@ EXPORT_SYMBOL_GPL(pstore_register);
2888    
2889     void pstore_unregister(struct pstore_info *psi)
2890     {
2891     + /* Stop timer and make sure all work has finished. */
2892     + pstore_update_ms = -1;
2893     + del_timer_sync(&pstore_timer);
2894     + flush_work(&pstore_work);
2895     +
2896     if (psi->flags & PSTORE_FLAGS_PMSG)
2897     pstore_unregister_pmsg();
2898     if (psi->flags & PSTORE_FLAGS_FTRACE)
2899     @@ -825,7 +831,9 @@ static void pstore_timefunc(unsigned long dummy)
2900     schedule_work(&pstore_work);
2901     }
2902    
2903     - mod_timer(&pstore_timer, jiffies + msecs_to_jiffies(pstore_update_ms));
2904     + if (pstore_update_ms >= 0)
2905     + mod_timer(&pstore_timer,
2906     + jiffies + msecs_to_jiffies(pstore_update_ms));
2907     }
2908    
2909     module_param(backend, charp, 0444);
2910     diff --git a/fs/xattr.c b/fs/xattr.c
2911     index 2d13b4e62fae..ed8c374570ed 100644
2912     --- a/fs/xattr.c
2913     +++ b/fs/xattr.c
2914     @@ -530,7 +530,7 @@ getxattr(struct dentry *d, const char __user *name, void __user *value,
2915     size = XATTR_SIZE_MAX;
2916     kvalue = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
2917     if (!kvalue) {
2918     - kvalue = vmalloc(size);
2919     + kvalue = vzalloc(size);
2920     if (!kvalue)
2921     return -ENOMEM;
2922     }
2923     diff --git a/kernel/padata.c b/kernel/padata.c
2924     index b4a3c0ae649b..e4a8f8d9b31a 100644
2925     --- a/kernel/padata.c
2926     +++ b/kernel/padata.c
2927     @@ -358,7 +358,7 @@ static int padata_setup_cpumasks(struct parallel_data *pd,
2928    
2929     cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
2930     if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
2931     - free_cpumask_var(pd->cpumask.cbcpu);
2932     + free_cpumask_var(pd->cpumask.pcpu);
2933     return -ENOMEM;
2934     }
2935    
2936     diff --git a/mm/page_alloc.c b/mm/page_alloc.c
2937     index e5b159b88e39..5b06fb385dd7 100644
2938     --- a/mm/page_alloc.c
2939     +++ b/mm/page_alloc.c
2940     @@ -3125,6 +3125,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
2941     enum compact_priority prio, enum compact_result *compact_result)
2942     {
2943     struct page *page;
2944     + unsigned int noreclaim_flag = current->flags & PF_MEMALLOC;
2945    
2946     if (!order)
2947     return NULL;
2948     @@ -3132,7 +3133,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
2949     current->flags |= PF_MEMALLOC;
2950     *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
2951     prio);
2952     - current->flags &= ~PF_MEMALLOC;
2953     + current->flags = (current->flags & ~PF_MEMALLOC) | noreclaim_flag;
2954    
2955     if (*compact_result <= COMPACT_INACTIVE)
2956     return NULL;
2957     diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
2958     index 48f9471e7c85..c88a6007e643 100644
2959     --- a/net/bluetooth/hci_sock.c
2960     +++ b/net/bluetooth/hci_sock.c
2961     @@ -1680,7 +1680,8 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
2962     if (msg->msg_flags & MSG_OOB)
2963     return -EOPNOTSUPP;
2964    
2965     - if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
2966     + if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
2967     + MSG_CMSG_COMPAT))
2968     return -EINVAL;
2969    
2970     if (len < 4 || len > HCI_MAX_FRAME_SIZE)
2971     diff --git a/net/core/datagram.c b/net/core/datagram.c
2972     index 963732e775df..58dfa23d12ca 100644
2973     --- a/net/core/datagram.c
2974     +++ b/net/core/datagram.c
2975     @@ -740,7 +740,7 @@ int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
2976    
2977     if (msg_data_left(msg) < chunk) {
2978     if (__skb_checksum_complete(skb))
2979     - goto csum_error;
2980     + return -EINVAL;
2981     if (skb_copy_datagram_msg(skb, hlen, msg, chunk))
2982     goto fault;
2983     } else {
2984     @@ -748,15 +748,16 @@ int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
2985     if (skb_copy_and_csum_datagram(skb, hlen, &msg->msg_iter,
2986     chunk, &csum))
2987     goto fault;
2988     - if (csum_fold(csum))
2989     - goto csum_error;
2990     +
2991     + if (csum_fold(csum)) {
2992     + iov_iter_revert(&msg->msg_iter, chunk);
2993     + return -EINVAL;
2994     + }
2995     +
2996     if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
2997     netdev_rx_csum_fault(skb->dev);
2998     }
2999     return 0;
3000     -csum_error:
3001     - iov_iter_revert(&msg->msg_iter, chunk);
3002     - return -EINVAL;
3003     fault:
3004     return -EFAULT;
3005     }
3006     diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
3007     index c5a6e0b12452..78bd632f144d 100644
3008     --- a/tools/perf/util/auxtrace.c
3009     +++ b/tools/perf/util/auxtrace.c
3010     @@ -1826,7 +1826,7 @@ static int addr_filter__resolve_kernel_syms(struct addr_filter *filt)
3011     filt->addr = start;
3012     if (filt->range && !filt->size && !filt->sym_to) {
3013     filt->size = size;
3014     - no_size = !!size;
3015     + no_size = !size;
3016     }
3017     }
3018    
3019     @@ -1840,7 +1840,7 @@ static int addr_filter__resolve_kernel_syms(struct addr_filter *filt)
3020     if (err)
3021     return err;
3022     filt->size = start + size - filt->addr;
3023     - no_size = !!size;
3024     + no_size = !size;
3025     }
3026    
3027     /* The very last symbol in kallsyms does not imply a particular size */
3028     diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c
3029     index 4af47079cf04..e717fed80219 100644
3030     --- a/tools/testing/selftests/x86/ldt_gdt.c
3031     +++ b/tools/testing/selftests/x86/ldt_gdt.c
3032     @@ -403,6 +403,51 @@ static void *threadproc(void *ctx)
3033     }
3034     }
3035    
3036     +#ifdef __i386__
3037     +
3038     +#ifndef SA_RESTORE
3039     +#define SA_RESTORER 0x04000000
3040     +#endif
3041     +
3042     +/*
3043     + * The UAPI header calls this 'struct sigaction', which conflicts with
3044     + * glibc. Sigh.
3045     + */
3046     +struct fake_ksigaction {
3047     + void *handler; /* the real type is nasty */
3048     + unsigned long sa_flags;
3049     + void (*sa_restorer)(void);
3050     + unsigned char sigset[8];
3051     +};
3052     +
3053     +static void fix_sa_restorer(int sig)
3054     +{
3055     + struct fake_ksigaction ksa;
3056     +
3057     + if (syscall(SYS_rt_sigaction, sig, NULL, &ksa, 8) == 0) {
3058     + /*
3059     + * glibc has a nasty bug: it sometimes writes garbage to
3060     + * sa_restorer. This interacts quite badly with anything
3061     + * that fiddles with SS because it can trigger legacy
3062     + * stack switching. Patch it up. See:
3063     + *
3064     + * https://sourceware.org/bugzilla/show_bug.cgi?id=21269
3065     + */
3066     + if (!(ksa.sa_flags & SA_RESTORER) && ksa.sa_restorer) {
3067     + ksa.sa_restorer = NULL;
3068     + if (syscall(SYS_rt_sigaction, sig, &ksa, NULL,
3069     + sizeof(ksa.sigset)) != 0)
3070     + err(1, "rt_sigaction");
3071     + }
3072     + }
3073     +}
3074     +#else
3075     +static void fix_sa_restorer(int sig)
3076     +{
3077     + /* 64-bit glibc works fine. */
3078     +}
3079     +#endif
3080     +
3081     static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
3082     int flags)
3083     {
3084     @@ -414,6 +459,7 @@ static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
3085     if (sigaction(sig, &sa, 0))
3086     err(1, "sigaction");
3087    
3088     + fix_sa_restorer(sig);
3089     }
3090    
3091     static jmp_buf jmpbuf;