Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.1/0118-4.1.19-all-fixes.patch



Revision 2775
Thu Apr 7 12:10:01 2016 UTC by niro
File size: 245375 bytes
-linux-4.1.19
1 niro 2775 diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
2     index 071fb18dc57c..07fad3d2fc56 100644
3     --- a/Documentation/networking/ip-sysctl.txt
4     +++ b/Documentation/networking/ip-sysctl.txt
5     @@ -1321,6 +1321,14 @@ accept_ra_from_local - BOOLEAN
6     disabled if accept_ra_from_local is disabled
7     on a specific interface.
8    
9     +accept_ra_min_hop_limit - INTEGER
10     + Minimum hop limit Information in Router Advertisement.
11     +
12     + Hop limit Information in Router Advertisement less than this
13     + variable shall be ignored.
14     +
15     + Default: 1
16     +
17     accept_ra_pinfo - BOOLEAN
18     Learn Prefix Information in Router Advertisement.
19    
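
The new accept_ra_min_hop_limit knob documented above lives under /proc/sys/net/ipv6/conf/<interface>/. A minimal userspace sketch of raising it, assuming an interface named eth0 and an illustrative threshold of 32 (neither value comes from this patch):

#include <stdio.h>

int main(void)
{
	/* Ignore Router Advertisements whose hop limit is below 32. */
	FILE *f = fopen("/proc/sys/net/ipv6/conf/eth0/accept_ra_min_hop_limit", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "32\n");
	return fclose(f) ? 1 : 0;
}
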
20     diff --git a/Makefile b/Makefile
21     index 001375cfd815..06107f683bbe 100644
22     --- a/Makefile
23     +++ b/Makefile
24     @@ -1,6 +1,6 @@
25     VERSION = 4
26     PATCHLEVEL = 1
27     -SUBLEVEL = 18
28     +SUBLEVEL = 19
29     EXTRAVERSION =
30     NAME = Series 4800
31    
32     diff --git a/arch/arm/common/icst.c b/arch/arm/common/icst.c
33     index 2dc6da70ae59..d7ed252708c5 100644
34     --- a/arch/arm/common/icst.c
35     +++ b/arch/arm/common/icst.c
36     @@ -16,7 +16,7 @@
37     */
38     #include <linux/module.h>
39     #include <linux/kernel.h>
40     -
41     +#include <asm/div64.h>
42     #include <asm/hardware/icst.h>
43    
44     /*
45     @@ -29,7 +29,11 @@ EXPORT_SYMBOL(icst525_s2div);
46    
47     unsigned long icst_hz(const struct icst_params *p, struct icst_vco vco)
48     {
49     - return p->ref * 2 * (vco.v + 8) / ((vco.r + 2) * p->s2div[vco.s]);
50     + u64 dividend = p->ref * 2 * (u64)(vco.v + 8);
51     + u32 divisor = (vco.r + 2) * p->s2div[vco.s];
52     +
53     + do_div(dividend, divisor);
54     + return (unsigned long)dividend;
55     }
56    
57     EXPORT_SYMBOL(icst_hz);
58     @@ -58,6 +62,7 @@ icst_hz_to_vco(const struct icst_params *p, unsigned long freq)
59    
60     if (f > p->vco_min && f <= p->vco_max)
61     break;
62     + i++;
63     } while (i < 8);
64    
65     if (i >= 8)
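
The icst_hz() change above exists because the intermediate product p->ref * 2 * (vco.v + 8) can exceed 32 bits; do_div() is used since plain 64-by-32 division helpers are not otherwise available on 32-bit ARM. A standalone sketch of the overflow, with illustrative parameter values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ref = 24000000, v = 150, r = 22, s2div = 1;
	uint32_t divisor = (r + 2) * s2div;
	/* 24000000 * 2 * 158 = 7584000000, which wraps modulo 2^32. */
	uint32_t narrow = ref * 2 * (v + 8);
	/* Widening one operand preserves the full intermediate value. */
	uint64_t wide = (uint64_t)ref * 2 * (v + 8);

	printf("32-bit: %u\n", narrow / divisor);
	printf("64-bit: %llu\n", (unsigned long long)(wide / divisor));
	return 0;
}
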
66     diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
67     index 6499d93ae68d..47bc45a67e9b 100644
68     --- a/arch/mips/include/asm/syscall.h
69     +++ b/arch/mips/include/asm/syscall.h
70     @@ -101,10 +101,8 @@ static inline void syscall_get_arguments(struct task_struct *task,
71     /* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */
72     if ((config_enabled(CONFIG_32BIT) ||
73     test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
74     - (regs->regs[2] == __NR_syscall)) {
75     + (regs->regs[2] == __NR_syscall))
76     i++;
77     - n++;
78     - }
79    
80     while (n--)
81     ret |= mips_get_syscall_arg(args++, task, regs, i++);
82     diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
83     index a52db28ecc1e..4457cb605356 100644
84     --- a/arch/powerpc/include/asm/eeh.h
85     +++ b/arch/powerpc/include/asm/eeh.h
86     @@ -79,6 +79,7 @@ struct pci_dn;
87     #define EEH_PE_KEEP (1 << 8) /* Keep PE on hotplug */
88     #define EEH_PE_CFG_RESTRICTED (1 << 9) /* Block config on error */
89     #define EEH_PE_REMOVED (1 << 10) /* Removed permanently */
90     +#define EEH_PE_PRI_BUS (1 << 11) /* Cached primary bus */
91    
92     struct eeh_pe {
93     int type; /* PE type: PHB/Bus/Device */
94     @@ -336,19 +337,13 @@ static inline int eeh_check_failure(const volatile void __iomem *token)
95    
96     #define eeh_dev_check_failure(x) (0)
97    
98     -static inline void eeh_addr_cache_build(void) { }
99     -
100     -static inline void eeh_add_device_early(struct pci_dn *pdn) { }
101     -
102     -static inline void eeh_add_device_tree_early(struct pci_dn *pdn) { }
103     -
104     -static inline void eeh_add_device_late(struct pci_dev *dev) { }
105     -
106     -static inline void eeh_add_device_tree_late(struct pci_bus *bus) { }
107     -
108     -static inline void eeh_add_sysfs_files(struct pci_bus *bus) { }
109     -
110     -static inline void eeh_remove_device(struct pci_dev *dev) { }
111     +#define eeh_addr_cache_build()
112     +#define eeh_add_device_early(pdn)
113     +#define eeh_add_device_tree_early(pdn)
114     +#define eeh_add_device_late(pdev)
115     +#define eeh_add_device_tree_late(pbus)
116     +#define eeh_add_sysfs_files(pbus)
117     +#define eeh_remove_device(pdev)
118    
119     #define EEH_POSSIBLE_ERROR(val, type) (0)
120     #define EEH_IO_ERROR_VALUE(size) (-1UL)
121     diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
122     index 24768ff3cb73..90cc67904dc6 100644
123     --- a/arch/powerpc/kernel/eeh_driver.c
124     +++ b/arch/powerpc/kernel/eeh_driver.c
125     @@ -561,6 +561,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
126     */
127     eeh_pe_state_mark(pe, EEH_PE_KEEP);
128     if (bus) {
129     + eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
130     pci_lock_rescan_remove();
131     pcibios_remove_pci_devices(bus);
132     pci_unlock_rescan_remove();
133     @@ -792,6 +793,7 @@ perm_error:
134     * the their PCI config any more.
135     */
136     if (frozen_bus) {
137     + eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
138     eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
139    
140     pci_lock_rescan_remove();
141     @@ -875,6 +877,7 @@ static void eeh_handle_special_event(void)
142     continue;
143    
144     /* Notify all devices to be down */
145     + eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
146     bus = eeh_pe_bus_get(phb_pe);
147     eeh_pe_dev_traverse(pe,
148     eeh_report_failure, NULL);
149     diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
150     index 22f6d954ef89..c3e0420b8a42 100644
151     --- a/arch/powerpc/kernel/eeh_pe.c
152     +++ b/arch/powerpc/kernel/eeh_pe.c
153     @@ -906,7 +906,7 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
154     bus = pe->phb->bus;
155     } else if (pe->type & EEH_PE_BUS ||
156     pe->type & EEH_PE_DEVICE) {
157     - if (pe->bus) {
158     + if (pe->state & EEH_PE_PRI_BUS) {
159     bus = pe->bus;
160     goto out;
161     }
162     diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
163     index ce738ab3d5a9..abb396876b9a 100644
164     --- a/arch/powerpc/platforms/powernv/eeh-powernv.c
165     +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
166     @@ -455,9 +455,12 @@ static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
167     * PCI devices of the PE are expected to be removed prior
168     * to PE reset.
169     */
170     - if (!edev->pe->bus)
171     + if (!(edev->pe->state & EEH_PE_PRI_BUS)) {
172     edev->pe->bus = pci_find_bus(hose->global_number,
173     pdn->busno);
174     + if (edev->pe->bus)
175     + edev->pe->state |= EEH_PE_PRI_BUS;
176     + }
177    
178     /*
179     * Enable EEH explicitly so that we will do EEH check
180     diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
181     index 181c53bac3a7..62855ac37ab7 100644
182     --- a/arch/x86/mm/fault.c
183     +++ b/arch/x86/mm/fault.c
184     @@ -285,6 +285,9 @@ static noinline int vmalloc_fault(unsigned long address)
185     if (!pmd_k)
186     return -1;
187    
188     + if (pmd_huge(*pmd_k))
189     + return 0;
190     +
191     pte_k = pte_offset_kernel(pmd_k, address);
192     if (!pte_present(*pte_k))
193     return -1;
194     @@ -356,8 +359,6 @@ void vmalloc_sync_all(void)
195     * 64-bit:
196     *
197     * Handle a fault on the vmalloc area
198     - *
199     - * This assumes no large pages in there.
200     */
201     static noinline int vmalloc_fault(unsigned long address)
202     {
203     @@ -399,17 +400,23 @@ static noinline int vmalloc_fault(unsigned long address)
204     if (pud_none(*pud_ref))
205     return -1;
206    
207     - if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
208     + if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref))
209     BUG();
210    
211     + if (pud_huge(*pud))
212     + return 0;
213     +
214     pmd = pmd_offset(pud, address);
215     pmd_ref = pmd_offset(pud_ref, address);
216     if (pmd_none(*pmd_ref))
217     return -1;
218    
219     - if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
220     + if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref))
221     BUG();
222    
223     + if (pmd_huge(*pmd))
224     + return 0;
225     +
226     pte_ref = pte_offset_kernel(pmd_ref, address);
227     if (!pte_present(*pte_ref))
228     return -1;
229     diff --git a/block/bio.c b/block/bio.c
230     index 4441522ca339..cbce3e2208f4 100644
231     --- a/block/bio.c
232     +++ b/block/bio.c
233     @@ -1122,9 +1122,12 @@ int bio_uncopy_user(struct bio *bio)
234     if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
235     /*
236     * if we're in a workqueue, the request is orphaned, so
237     - * don't copy into a random user address space, just free.
238     + * don't copy into a random user address space, just free
239     + * and return -EINTR so user space doesn't expect any data.
240     */
241     - if (current->mm && bio_data_dir(bio) == READ)
242     + if (!current->mm)
243     + ret = -EINTR;
244     + else if (bio_data_dir(bio) == READ)
245     ret = bio_copy_to_iter(bio, bmd->iter);
246     if (bmd->is_our_pages)
247     bio_free_pages(bio);
248     diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
249     index 5bc42f9b23f0..c0f03562a145 100644
250     --- a/crypto/algif_skcipher.c
251     +++ b/crypto/algif_skcipher.c
252     @@ -31,6 +31,11 @@ struct skcipher_sg_list {
253     struct scatterlist sg[0];
254     };
255    
256     +struct skcipher_tfm {
257     + struct crypto_ablkcipher *skcipher;
258     + bool has_key;
259     +};
260     +
261     struct skcipher_ctx {
262     struct list_head tsgl;
263     struct af_alg_sgl rsgl;
264     @@ -750,19 +755,139 @@ static struct proto_ops algif_skcipher_ops = {
265     .poll = skcipher_poll,
266     };
267    
268     +static int skcipher_check_key(struct socket *sock)
269     +{
270     + int err = 0;
271     + struct sock *psk;
272     + struct alg_sock *pask;
273     + struct skcipher_tfm *tfm;
274     + struct sock *sk = sock->sk;
275     + struct alg_sock *ask = alg_sk(sk);
276     +
277     + lock_sock(sk);
278     + if (ask->refcnt)
279     + goto unlock_child;
280     +
281     + psk = ask->parent;
282     + pask = alg_sk(ask->parent);
283     + tfm = pask->private;
284     +
285     + err = -ENOKEY;
286     + lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
287     + if (!tfm->has_key)
288     + goto unlock;
289     +
290     + if (!pask->refcnt++)
291     + sock_hold(psk);
292     +
293     + ask->refcnt = 1;
294     + sock_put(psk);
295     +
296     + err = 0;
297     +
298     +unlock:
299     + release_sock(psk);
300     +unlock_child:
301     + release_sock(sk);
302     +
303     + return err;
304     +}
305     +
306     +static int skcipher_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
307     + size_t size)
308     +{
309     + int err;
310     +
311     + err = skcipher_check_key(sock);
312     + if (err)
313     + return err;
314     +
315     + return skcipher_sendmsg(sock, msg, size);
316     +}
317     +
318     +static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page,
319     + int offset, size_t size, int flags)
320     +{
321     + int err;
322     +
323     + err = skcipher_check_key(sock);
324     + if (err)
325     + return err;
326     +
327     + return skcipher_sendpage(sock, page, offset, size, flags);
328     +}
329     +
330     +static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
331     + size_t ignored, int flags)
332     +{
333     + int err;
334     +
335     + err = skcipher_check_key(sock);
336     + if (err)
337     + return err;
338     +
339     + return skcipher_recvmsg(sock, msg, ignored, flags);
340     +}
341     +
342     +static struct proto_ops algif_skcipher_ops_nokey = {
343     + .family = PF_ALG,
344     +
345     + .connect = sock_no_connect,
346     + .socketpair = sock_no_socketpair,
347     + .getname = sock_no_getname,
348     + .ioctl = sock_no_ioctl,
349     + .listen = sock_no_listen,
350     + .shutdown = sock_no_shutdown,
351     + .getsockopt = sock_no_getsockopt,
352     + .mmap = sock_no_mmap,
353     + .bind = sock_no_bind,
354     + .accept = sock_no_accept,
355     + .setsockopt = sock_no_setsockopt,
356     +
357     + .release = af_alg_release,
358     + .sendmsg = skcipher_sendmsg_nokey,
359     + .sendpage = skcipher_sendpage_nokey,
360     + .recvmsg = skcipher_recvmsg_nokey,
361     + .poll = skcipher_poll,
362     +};
363     +
364     static void *skcipher_bind(const char *name, u32 type, u32 mask)
365     {
366     - return crypto_alloc_ablkcipher(name, type, mask);
367     + struct skcipher_tfm *tfm;
368     + struct crypto_ablkcipher *skcipher;
369     +
370     + tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
371     + if (!tfm)
372     + return ERR_PTR(-ENOMEM);
373     +
374     + skcipher = crypto_alloc_ablkcipher(name, type, mask);
375     + if (IS_ERR(skcipher)) {
376     + kfree(tfm);
377     + return ERR_CAST(skcipher);
378     + }
379     +
380     + tfm->skcipher = skcipher;
381     +
382     + return tfm;
383     }
384    
385     static void skcipher_release(void *private)
386     {
387     - crypto_free_ablkcipher(private);
388     + struct skcipher_tfm *tfm = private;
389     +
390     + crypto_free_ablkcipher(tfm->skcipher);
391     + kfree(tfm);
392     }
393    
394     static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
395     {
396     - return crypto_ablkcipher_setkey(private, key, keylen);
397     + struct skcipher_tfm *tfm = private;
398     + int err;
399     +
400     + err = crypto_ablkcipher_setkey(tfm->skcipher, key, keylen);
401     + tfm->has_key = !err;
402     +
403     + return err;
404     }
405    
406     static void skcipher_wait(struct sock *sk)
407     @@ -790,24 +915,26 @@ static void skcipher_sock_destruct(struct sock *sk)
408     af_alg_release_parent(sk);
409     }
410    
411     -static int skcipher_accept_parent(void *private, struct sock *sk)
412     +static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
413     {
414     struct skcipher_ctx *ctx;
415     struct alg_sock *ask = alg_sk(sk);
416     - unsigned int len = sizeof(*ctx) + crypto_ablkcipher_reqsize(private);
417     + struct skcipher_tfm *tfm = private;
418     + struct crypto_ablkcipher *skcipher = tfm->skcipher;
419     + unsigned int len = sizeof(*ctx) + crypto_ablkcipher_reqsize(skcipher);
420    
421     ctx = sock_kmalloc(sk, len, GFP_KERNEL);
422     if (!ctx)
423     return -ENOMEM;
424    
425     - ctx->iv = sock_kmalloc(sk, crypto_ablkcipher_ivsize(private),
426     + ctx->iv = sock_kmalloc(sk, crypto_ablkcipher_ivsize(skcipher),
427     GFP_KERNEL);
428     if (!ctx->iv) {
429     sock_kfree_s(sk, ctx, len);
430     return -ENOMEM;
431     }
432    
433     - memset(ctx->iv, 0, crypto_ablkcipher_ivsize(private));
434     + memset(ctx->iv, 0, crypto_ablkcipher_ivsize(skcipher));
435    
436     INIT_LIST_HEAD(&ctx->tsgl);
437     ctx->len = len;
438     @@ -820,7 +947,7 @@ static int skcipher_accept_parent(void *private, struct sock *sk)
439    
440     ask->private = ctx;
441    
442     - ablkcipher_request_set_tfm(&ctx->req, private);
443     + ablkcipher_request_set_tfm(&ctx->req, skcipher);
444     ablkcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
445     af_alg_complete, &ctx->completion);
446    
447     @@ -829,12 +956,24 @@ static int skcipher_accept_parent(void *private, struct sock *sk)
448     return 0;
449     }
450    
451     +static int skcipher_accept_parent(void *private, struct sock *sk)
452     +{
453     + struct skcipher_tfm *tfm = private;
454     +
455     + if (!tfm->has_key)
456     + return -ENOKEY;
457     +
458     + return skcipher_accept_parent_nokey(private, sk);
459     +}
460     +
461     static const struct af_alg_type algif_type_skcipher = {
462     .bind = skcipher_bind,
463     .release = skcipher_release,
464     .setkey = skcipher_setkey,
465     .accept = skcipher_accept_parent,
466     + .accept_nokey = skcipher_accept_parent_nokey,
467     .ops = &algif_skcipher_ops,
468     + .ops_nokey = &algif_skcipher_ops_nokey,
469     .name = "skcipher",
470     .owner = THIS_MODULE
471     };
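
Taken together, the algif_skcipher hunks above gate the data path on a key having been set: bind() and accept() still work, but the _nokey entry points return -ENOKEY until setkey is done on the parent socket. A hedged userspace sketch (the algorithm name is illustrative, and the exact failure point depends on the rest of this patch series):

#include <linux/if_alg.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_alg sa = { .salg_family = AF_ALG };
	char buf[16];
	int tfm, op;

	strcpy((char *)sa.salg_type, "skcipher");
	strcpy((char *)sa.salg_name, "cbc(aes)");

	tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfm, (struct sockaddr *)&sa, sizeof(sa));

	op = accept(tfm, NULL, 0);	/* no key set on the parent yet */
	if (read(op, buf, sizeof(buf)) < 0)
		perror("read");		/* expected: ENOKEY until setkey */

	close(op);
	close(tfm);
	return 0;
}
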
472     diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
473     index edf2e3ea1740..6a050e12fcdf 100644
474     --- a/crypto/crypto_user.c
475     +++ b/crypto/crypto_user.c
476     @@ -499,6 +499,7 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
477     if (link->dump == NULL)
478     return -EINVAL;
479    
480     + down_read(&crypto_alg_sem);
481     list_for_each_entry(alg, &crypto_alg_list, cra_list)
482     dump_alloc += CRYPTO_REPORT_MAXSIZE;
483    
484     @@ -508,8 +509,11 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
485     .done = link->done,
486     .min_dump_alloc = dump_alloc,
487     };
488     - return netlink_dump_start(crypto_nlsk, skb, nlh, &c);
489     + err = netlink_dump_start(crypto_nlsk, skb, nlh, &c);
490     }
491     + up_read(&crypto_alg_sem);
492     +
493     + return err;
494     }
495    
496     err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, CRYPTOCFGA_MAX,
497     diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
498     index e6ea912aee31..666fd8a1500a 100644
499     --- a/drivers/ata/ahci.c
500     +++ b/drivers/ata/ahci.c
501     @@ -262,6 +262,26 @@ static const struct pci_device_id ahci_pci_tbl[] = {
502     { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
503     { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
504     { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
505     + { PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */
506     + { PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */
507     + { PCI_VDEVICE(INTEL, 0x19b2), board_ahci }, /* DNV AHCI */
508     + { PCI_VDEVICE(INTEL, 0x19b3), board_ahci }, /* DNV AHCI */
509     + { PCI_VDEVICE(INTEL, 0x19b4), board_ahci }, /* DNV AHCI */
510     + { PCI_VDEVICE(INTEL, 0x19b5), board_ahci }, /* DNV AHCI */
511     + { PCI_VDEVICE(INTEL, 0x19b6), board_ahci }, /* DNV AHCI */
512     + { PCI_VDEVICE(INTEL, 0x19b7), board_ahci }, /* DNV AHCI */
513     + { PCI_VDEVICE(INTEL, 0x19bE), board_ahci }, /* DNV AHCI */
514     + { PCI_VDEVICE(INTEL, 0x19bF), board_ahci }, /* DNV AHCI */
515     + { PCI_VDEVICE(INTEL, 0x19c0), board_ahci }, /* DNV AHCI */
516     + { PCI_VDEVICE(INTEL, 0x19c1), board_ahci }, /* DNV AHCI */
517     + { PCI_VDEVICE(INTEL, 0x19c2), board_ahci }, /* DNV AHCI */
518     + { PCI_VDEVICE(INTEL, 0x19c3), board_ahci }, /* DNV AHCI */
519     + { PCI_VDEVICE(INTEL, 0x19c4), board_ahci }, /* DNV AHCI */
520     + { PCI_VDEVICE(INTEL, 0x19c5), board_ahci }, /* DNV AHCI */
521     + { PCI_VDEVICE(INTEL, 0x19c6), board_ahci }, /* DNV AHCI */
522     + { PCI_VDEVICE(INTEL, 0x19c7), board_ahci }, /* DNV AHCI */
523     + { PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */
524     + { PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */
525     { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
526     { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
527     { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
528     diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
529     index 287c4ba0219f..49840264dd57 100644
530     --- a/drivers/ata/libahci.c
531     +++ b/drivers/ata/libahci.c
532     @@ -495,8 +495,8 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
533     }
534     }
535    
536     - /* fabricate port_map from cap.nr_ports */
537     - if (!port_map) {
538     + /* fabricate port_map from cap.nr_ports for < AHCI 1.3 */
539     + if (!port_map && vers < 0x10300) {
540     port_map = (1 << ahci_nr_ports(cap)) - 1;
541     dev_warn(dev, "forcing PORTS_IMPL to 0x%x\n", port_map);
542    
543     diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
544     index cdf6215a9a22..7dbba387d12a 100644
545     --- a/drivers/ata/libata-sff.c
546     +++ b/drivers/ata/libata-sff.c
547     @@ -997,12 +997,9 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
548     static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
549     {
550     struct ata_port *ap = qc->ap;
551     - unsigned long flags;
552    
553     if (ap->ops->error_handler) {
554     if (in_wq) {
555     - spin_lock_irqsave(ap->lock, flags);
556     -
557     /* EH might have kicked in while host lock is
558     * released.
559     */
560     @@ -1014,8 +1011,6 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
561     } else
562     ata_port_freeze(ap);
563     }
564     -
565     - spin_unlock_irqrestore(ap->lock, flags);
566     } else {
567     if (likely(!(qc->err_mask & AC_ERR_HSM)))
568     ata_qc_complete(qc);
569     @@ -1024,10 +1019,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
570     }
571     } else {
572     if (in_wq) {
573     - spin_lock_irqsave(ap->lock, flags);
574     ata_sff_irq_on(ap);
575     ata_qc_complete(qc);
576     - spin_unlock_irqrestore(ap->lock, flags);
577     } else
578     ata_qc_complete(qc);
579     }
580     @@ -1048,9 +1041,10 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
581     {
582     struct ata_link *link = qc->dev->link;
583     struct ata_eh_info *ehi = &link->eh_info;
584     - unsigned long flags = 0;
585     int poll_next;
586    
587     + lockdep_assert_held(ap->lock);
588     +
589     WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
590    
591     /* Make sure ata_sff_qc_issue() does not throw things
592     @@ -1112,14 +1106,6 @@ fsm_start:
593     }
594     }
595    
596     - /* Send the CDB (atapi) or the first data block (ata pio out).
597     - * During the state transition, interrupt handler shouldn't
598     - * be invoked before the data transfer is complete and
599     - * hsm_task_state is changed. Hence, the following locking.
600     - */
601     - if (in_wq)
602     - spin_lock_irqsave(ap->lock, flags);
603     -
604     if (qc->tf.protocol == ATA_PROT_PIO) {
605     /* PIO data out protocol.
606     * send first data block.
607     @@ -1135,9 +1121,6 @@ fsm_start:
608     /* send CDB */
609     atapi_send_cdb(ap, qc);
610    
611     - if (in_wq)
612     - spin_unlock_irqrestore(ap->lock, flags);
613     -
614     /* if polling, ata_sff_pio_task() handles the rest.
615     * otherwise, interrupt handler takes over from here.
616     */
617     @@ -1361,12 +1344,14 @@ static void ata_sff_pio_task(struct work_struct *work)
618     u8 status;
619     int poll_next;
620    
621     + spin_lock_irq(ap->lock);
622     +
623     BUG_ON(ap->sff_pio_task_link == NULL);
624     /* qc can be NULL if timeout occurred */
625     qc = ata_qc_from_tag(ap, link->active_tag);
626     if (!qc) {
627     ap->sff_pio_task_link = NULL;
628     - return;
629     + goto out_unlock;
630     }
631    
632     fsm_start:
633     @@ -1381,11 +1366,14 @@ fsm_start:
634     */
635     status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
636     if (status & ATA_BUSY) {
637     + spin_unlock_irq(ap->lock);
638     ata_msleep(ap, 2);
639     + spin_lock_irq(ap->lock);
640     +
641     status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
642     if (status & ATA_BUSY) {
643     ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
644     - return;
645     + goto out_unlock;
646     }
647     }
648    
649     @@ -1402,6 +1390,8 @@ fsm_start:
650     */
651     if (poll_next)
652     goto fsm_start;
653     +out_unlock:
654     + spin_unlock_irq(ap->lock);
655     }
656    
657     /**
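
The libata-sff rework above moves ap->lock acquisition out to ata_sff_pio_task() and replaces the conditional in_wq locking with the convention that every caller of ata_sff_hsm_move() already holds the lock, enforced by lockdep_assert_held(). A minimal kernel-style sketch of that pattern (not code from this patch):

#include <linux/lockdep.h>
#include <linux/spinlock.h>

static void hsm_step(spinlock_t *lock)
{
	/* State the locking contract instead of conditionally
	 * re-taking the lock inside the helper. */
	lockdep_assert_held(lock);
	/* ... state-machine work under the lock ... */
}

static void pio_task(spinlock_t *lock)
{
	spin_lock_irq(lock);
	hsm_step(lock);
	spin_unlock_irq(lock);
}
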
658     diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
659     index 0f9a9dc06a83..fb16d812c8f5 100644
660     --- a/drivers/crypto/atmel-aes.c
661     +++ b/drivers/crypto/atmel-aes.c
662     @@ -260,7 +260,11 @@ static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_ctx *ctx)
663    
664     static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
665     {
666     - clk_prepare_enable(dd->iclk);
667     + int err;
668     +
669     + err = clk_prepare_enable(dd->iclk);
670     + if (err)
671     + return err;
672    
673     if (!(dd->flags & AES_FLAGS_INIT)) {
674     atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
675     @@ -1320,7 +1324,6 @@ static int atmel_aes_probe(struct platform_device *pdev)
676     struct crypto_platform_data *pdata;
677     struct device *dev = &pdev->dev;
678     struct resource *aes_res;
679     - unsigned long aes_phys_size;
680     int err;
681    
682     pdata = pdev->dev.platform_data;
683     @@ -1337,7 +1340,7 @@ static int atmel_aes_probe(struct platform_device *pdev)
684     goto aes_dd_err;
685     }
686    
687     - aes_dd = kzalloc(sizeof(struct atmel_aes_dev), GFP_KERNEL);
688     + aes_dd = devm_kzalloc(&pdev->dev, sizeof(*aes_dd), GFP_KERNEL);
689     if (aes_dd == NULL) {
690     dev_err(dev, "unable to alloc data struct.\n");
691     err = -ENOMEM;
692     @@ -1368,36 +1371,35 @@ static int atmel_aes_probe(struct platform_device *pdev)
693     goto res_err;
694     }
695     aes_dd->phys_base = aes_res->start;
696     - aes_phys_size = resource_size(aes_res);
697    
698     /* Get the IRQ */
699     aes_dd->irq = platform_get_irq(pdev, 0);
700     if (aes_dd->irq < 0) {
701     dev_err(dev, "no IRQ resource info\n");
702     err = aes_dd->irq;
703     - goto aes_irq_err;
704     + goto res_err;
705     }
706    
707     - err = request_irq(aes_dd->irq, atmel_aes_irq, IRQF_SHARED, "atmel-aes",
708     - aes_dd);
709     + err = devm_request_irq(&pdev->dev, aes_dd->irq, atmel_aes_irq,
710     + IRQF_SHARED, "atmel-aes", aes_dd);
711     if (err) {
712     dev_err(dev, "unable to request aes irq.\n");
713     - goto aes_irq_err;
714     + goto res_err;
715     }
716    
717     /* Initializing the clock */
718     - aes_dd->iclk = clk_get(&pdev->dev, "aes_clk");
719     + aes_dd->iclk = devm_clk_get(&pdev->dev, "aes_clk");
720     if (IS_ERR(aes_dd->iclk)) {
721     dev_err(dev, "clock initialization failed.\n");
722     err = PTR_ERR(aes_dd->iclk);
723     - goto clk_err;
724     + goto res_err;
725     }
726    
727     - aes_dd->io_base = ioremap(aes_dd->phys_base, aes_phys_size);
728     + aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
729     if (!aes_dd->io_base) {
730     dev_err(dev, "can't ioremap\n");
731     err = -ENOMEM;
732     - goto aes_io_err;
733     + goto res_err;
734     }
735    
736     atmel_aes_hw_version_init(aes_dd);
737     @@ -1434,17 +1436,9 @@ err_algs:
738     err_aes_dma:
739     atmel_aes_buff_cleanup(aes_dd);
740     err_aes_buff:
741     - iounmap(aes_dd->io_base);
742     -aes_io_err:
743     - clk_put(aes_dd->iclk);
744     -clk_err:
745     - free_irq(aes_dd->irq, aes_dd);
746     -aes_irq_err:
747     res_err:
748     tasklet_kill(&aes_dd->done_task);
749     tasklet_kill(&aes_dd->queue_task);
750     - kfree(aes_dd);
751     - aes_dd = NULL;
752     aes_dd_err:
753     dev_err(dev, "initialization failed.\n");
754    
755     @@ -1469,16 +1463,6 @@ static int atmel_aes_remove(struct platform_device *pdev)
756    
757     atmel_aes_dma_cleanup(aes_dd);
758    
759     - iounmap(aes_dd->io_base);
760     -
761     - clk_put(aes_dd->iclk);
762     -
763     - if (aes_dd->irq > 0)
764     - free_irq(aes_dd->irq, aes_dd);
765     -
766     - kfree(aes_dd);
767     - aes_dd = NULL;
768     -
769     return 0;
770     }
771    
772     diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
773     index 5b35433c5399..a71c97c03c39 100644
774     --- a/drivers/crypto/atmel-sha.c
775     +++ b/drivers/crypto/atmel-sha.c
776     @@ -783,7 +783,7 @@ static void atmel_sha_finish_req(struct ahash_request *req, int err)
777     dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
778     SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY);
779    
780     - clk_disable_unprepare(dd->iclk);
781     + clk_disable(dd->iclk);
782    
783     if (req->base.complete)
784     req->base.complete(&req->base, err);
785     @@ -794,7 +794,11 @@ static void atmel_sha_finish_req(struct ahash_request *req, int err)
786    
787     static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
788     {
789     - clk_prepare_enable(dd->iclk);
790     + int err;
791     +
792     + err = clk_enable(dd->iclk);
793     + if (err)
794     + return err;
795    
796     if (!(SHA_FLAGS_INIT & dd->flags)) {
797     atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST);
798     @@ -819,7 +823,7 @@ static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
799     dev_info(dd->dev,
800     "version: 0x%x\n", dd->hw_version);
801    
802     - clk_disable_unprepare(dd->iclk);
803     + clk_disable(dd->iclk);
804     }
805    
806     static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
807     @@ -1345,11 +1349,9 @@ static int atmel_sha_probe(struct platform_device *pdev)
808     struct crypto_platform_data *pdata;
809     struct device *dev = &pdev->dev;
810     struct resource *sha_res;
811     - unsigned long sha_phys_size;
812     int err;
813    
814     - sha_dd = devm_kzalloc(&pdev->dev, sizeof(struct atmel_sha_dev),
815     - GFP_KERNEL);
816     + sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL);
817     if (sha_dd == NULL) {
818     dev_err(dev, "unable to alloc data struct.\n");
819     err = -ENOMEM;
820     @@ -1378,7 +1380,6 @@ static int atmel_sha_probe(struct platform_device *pdev)
821     goto res_err;
822     }
823     sha_dd->phys_base = sha_res->start;
824     - sha_phys_size = resource_size(sha_res);
825    
826     /* Get the IRQ */
827     sha_dd->irq = platform_get_irq(pdev, 0);
828     @@ -1388,28 +1389,32 @@ static int atmel_sha_probe(struct platform_device *pdev)
829     goto res_err;
830     }
831    
832     - err = request_irq(sha_dd->irq, atmel_sha_irq, IRQF_SHARED, "atmel-sha",
833     - sha_dd);
834     + err = devm_request_irq(&pdev->dev, sha_dd->irq, atmel_sha_irq,
835     + IRQF_SHARED, "atmel-sha", sha_dd);
836     if (err) {
837     dev_err(dev, "unable to request sha irq.\n");
838     goto res_err;
839     }
840    
841     /* Initializing the clock */
842     - sha_dd->iclk = clk_get(&pdev->dev, "sha_clk");
843     + sha_dd->iclk = devm_clk_get(&pdev->dev, "sha_clk");
844     if (IS_ERR(sha_dd->iclk)) {
845     dev_err(dev, "clock initialization failed.\n");
846     err = PTR_ERR(sha_dd->iclk);
847     - goto clk_err;
848     + goto res_err;
849     }
850    
851     - sha_dd->io_base = ioremap(sha_dd->phys_base, sha_phys_size);
852     + sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
853     if (!sha_dd->io_base) {
854     dev_err(dev, "can't ioremap\n");
855     err = -ENOMEM;
856     - goto sha_io_err;
857     + goto res_err;
858     }
859    
860     + err = clk_prepare(sha_dd->iclk);
861     + if (err)
862     + goto res_err;
863     +
864     atmel_sha_hw_version_init(sha_dd);
865    
866     atmel_sha_get_cap(sha_dd);
867     @@ -1421,12 +1426,12 @@ static int atmel_sha_probe(struct platform_device *pdev)
868     if (IS_ERR(pdata)) {
869     dev_err(&pdev->dev, "platform data not available\n");
870     err = PTR_ERR(pdata);
871     - goto err_pdata;
872     + goto iclk_unprepare;
873     }
874     }
875     if (!pdata->dma_slave) {
876     err = -ENXIO;
877     - goto err_pdata;
878     + goto iclk_unprepare;
879     }
880     err = atmel_sha_dma_init(sha_dd, pdata);
881     if (err)
882     @@ -1457,12 +1462,8 @@ err_algs:
883     if (sha_dd->caps.has_dma)
884     atmel_sha_dma_cleanup(sha_dd);
885     err_sha_dma:
886     -err_pdata:
887     - iounmap(sha_dd->io_base);
888     -sha_io_err:
889     - clk_put(sha_dd->iclk);
890     -clk_err:
891     - free_irq(sha_dd->irq, sha_dd);
892     +iclk_unprepare:
893     + clk_unprepare(sha_dd->iclk);
894     res_err:
895     tasklet_kill(&sha_dd->done_task);
896     sha_dd_err:
897     @@ -1489,6 +1490,8 @@ static int atmel_sha_remove(struct platform_device *pdev)
898     if (sha_dd->caps.has_dma)
899     atmel_sha_dma_cleanup(sha_dd);
900    
901     + clk_unprepare(sha_dd->iclk);
902     +
903     iounmap(sha_dd->io_base);
904    
905     clk_put(sha_dd->iclk);
906     diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
907     index ca2999709eb4..2c7a628d0375 100644
908     --- a/drivers/crypto/atmel-tdes.c
909     +++ b/drivers/crypto/atmel-tdes.c
910     @@ -218,7 +218,11 @@ static struct atmel_tdes_dev *atmel_tdes_find_dev(struct atmel_tdes_ctx *ctx)
911    
912     static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd)
913     {
914     - clk_prepare_enable(dd->iclk);
915     + int err;
916     +
917     + err = clk_prepare_enable(dd->iclk);
918     + if (err)
919     + return err;
920    
921     if (!(dd->flags & TDES_FLAGS_INIT)) {
922     atmel_tdes_write(dd, TDES_CR, TDES_CR_SWRST);
923     @@ -1355,7 +1359,6 @@ static int atmel_tdes_probe(struct platform_device *pdev)
924     struct crypto_platform_data *pdata;
925     struct device *dev = &pdev->dev;
926     struct resource *tdes_res;
927     - unsigned long tdes_phys_size;
928     int err;
929    
930     tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL);
931     @@ -1389,7 +1392,6 @@ static int atmel_tdes_probe(struct platform_device *pdev)
932     goto res_err;
933     }
934     tdes_dd->phys_base = tdes_res->start;
935     - tdes_phys_size = resource_size(tdes_res);
936    
937     /* Get the IRQ */
938     tdes_dd->irq = platform_get_irq(pdev, 0);
939     @@ -1399,26 +1401,26 @@ static int atmel_tdes_probe(struct platform_device *pdev)
940     goto res_err;
941     }
942    
943     - err = request_irq(tdes_dd->irq, atmel_tdes_irq, IRQF_SHARED,
944     - "atmel-tdes", tdes_dd);
945     + err = devm_request_irq(&pdev->dev, tdes_dd->irq, atmel_tdes_irq,
946     + IRQF_SHARED, "atmel-tdes", tdes_dd);
947     if (err) {
948     dev_err(dev, "unable to request tdes irq.\n");
949     - goto tdes_irq_err;
950     + goto res_err;
951     }
952    
953     /* Initializing the clock */
954     - tdes_dd->iclk = clk_get(&pdev->dev, "tdes_clk");
955     + tdes_dd->iclk = devm_clk_get(&pdev->dev, "tdes_clk");
956     if (IS_ERR(tdes_dd->iclk)) {
957     dev_err(dev, "clock initialization failed.\n");
958     err = PTR_ERR(tdes_dd->iclk);
959     - goto clk_err;
960     + goto res_err;
961     }
962    
963     - tdes_dd->io_base = ioremap(tdes_dd->phys_base, tdes_phys_size);
964     + tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
965     if (!tdes_dd->io_base) {
966     dev_err(dev, "can't ioremap\n");
967     err = -ENOMEM;
968     - goto tdes_io_err;
969     + goto res_err;
970     }
971    
972     atmel_tdes_hw_version_init(tdes_dd);
973     @@ -1474,12 +1476,6 @@ err_tdes_dma:
974     err_pdata:
975     atmel_tdes_buff_cleanup(tdes_dd);
976     err_tdes_buff:
977     - iounmap(tdes_dd->io_base);
978     -tdes_io_err:
979     - clk_put(tdes_dd->iclk);
980     -clk_err:
981     - free_irq(tdes_dd->irq, tdes_dd);
982     -tdes_irq_err:
983     res_err:
984     tasklet_kill(&tdes_dd->done_task);
985     tasklet_kill(&tdes_dd->queue_task);
986     @@ -1510,13 +1506,6 @@ static int atmel_tdes_remove(struct platform_device *pdev)
987    
988     atmel_tdes_buff_cleanup(tdes_dd);
989    
990     - iounmap(tdes_dd->io_base);
991     -
992     - clk_put(tdes_dd->iclk);
993     -
994     - if (tdes_dd->irq >= 0)
995     - free_irq(tdes_dd->irq, tdes_dd);
996     -
997     return 0;
998     }
999    
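
The three Atmel driver conversions above share one shape: kzalloc/request_irq/clk_get/ioremap become devm_* managed allocations, so the error ladders and most of remove() disappear. A condensed probe() sketch of the pattern (all names are illustrative):

#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static irqreturn_t demo_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int demo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;
	struct clk *clk;
	int irq;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	clk = devm_clk_get(&pdev->dev, "iclk");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* Released automatically on probe failure or device removal. */
	return devm_request_irq(&pdev->dev, irq, demo_irq, IRQF_SHARED,
				dev_name(&pdev->dev), pdev);
}
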
1000     diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
1001     index 303d937d63c7..ebffc744cb1b 100644
1002     --- a/drivers/dma/dw/core.c
1003     +++ b/drivers/dma/dw/core.c
1004     @@ -156,7 +156,6 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
1005    
1006     /* Enable interrupts */
1007     channel_set_bit(dw, MASK.XFER, dwc->mask);
1008     - channel_set_bit(dw, MASK.BLOCK, dwc->mask);
1009     channel_set_bit(dw, MASK.ERROR, dwc->mask);
1010    
1011     dwc->initialized = true;
1012     @@ -588,6 +587,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
1013    
1014     spin_unlock_irqrestore(&dwc->lock, flags);
1015     }
1016     +
1017     + /* Re-enable interrupts */
1018     + channel_set_bit(dw, MASK.BLOCK, dwc->mask);
1019     }
1020    
1021     /* ------------------------------------------------------------------------- */
1022     @@ -618,11 +620,8 @@ static void dw_dma_tasklet(unsigned long data)
1023     dwc_scan_descriptors(dw, dwc);
1024     }
1025    
1026     - /*
1027     - * Re-enable interrupts.
1028     - */
1029     + /* Re-enable interrupts */
1030     channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
1031     - channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
1032     channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
1033     }
1034    
1035     @@ -1256,6 +1255,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
1036     int dw_dma_cyclic_start(struct dma_chan *chan)
1037     {
1038     struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1039     + struct dw_dma *dw = to_dw_dma(chan->device);
1040     unsigned long flags;
1041    
1042     if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
1043     @@ -1264,7 +1264,12 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
1044     }
1045    
1046     spin_lock_irqsave(&dwc->lock, flags);
1047     +
1048     + /* Enable interrupts to perform cyclic transfer */
1049     + channel_set_bit(dw, MASK.BLOCK, dwc->mask);
1050     +
1051     dwc_dostart(dwc, dwc->cdesc->desc[0]);
1052     +
1053     spin_unlock_irqrestore(&dwc->lock, flags);
1054    
1055     return 0;
1056     diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
1057     index d2cd8d5b27a1..82f8e20cca74 100644
1058     --- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
1059     +++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
1060     @@ -207,7 +207,12 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
1061     gpio = *data++;
1062    
1063     /* pull up/down */
1064     - action = *data++;
1065     + action = *data++ & 1;
1066     +
1067     + if (gpio >= ARRAY_SIZE(gtable)) {
1068     + DRM_DEBUG_KMS("unknown gpio %u\n", gpio);
1069     + goto out;
1070     + }
1071    
1072     function = gtable[gpio].function_reg;
1073     pad = gtable[gpio].pad_reg;
1074     @@ -226,6 +231,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
1075     vlv_gpio_nc_write(dev_priv, pad, val);
1076     mutex_unlock(&dev_priv->dpio_lock);
1077    
1078     +out:
1079     return data;
1080     }
1081    
1082     diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
1083     index 7354a4cda59d..3aefaa058f0c 100644
1084     --- a/drivers/gpu/drm/qxl/qxl_ioctl.c
1085     +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
1086     @@ -168,7 +168,8 @@ static int qxl_process_single_command(struct qxl_device *qdev,
1087     cmd->command_size))
1088     return -EFAULT;
1089    
1090     - reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL);
1091     + reloc_info = kmalloc_array(cmd->relocs_num,
1092     + sizeof(struct qxl_reloc_info), GFP_KERNEL);
1093     if (!reloc_info)
1094     return -ENOMEM;
1095    
1096     diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
1097     index c507896aca45..197b157b73d0 100644
1098     --- a/drivers/gpu/drm/radeon/radeon_sa.c
1099     +++ b/drivers/gpu/drm/radeon/radeon_sa.c
1100     @@ -349,8 +349,13 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
1101     /* see if we can skip over some allocations */
1102     } while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
1103    
1104     + for (i = 0; i < RADEON_NUM_RINGS; ++i)
1105     + radeon_fence_ref(fences[i]);
1106     +
1107     spin_unlock(&sa_manager->wq.lock);
1108     r = radeon_fence_wait_any(rdev, fences, false);
1109     + for (i = 0; i < RADEON_NUM_RINGS; ++i)
1110     + radeon_fence_unref(&fences[i]);
1111     spin_lock(&sa_manager->wq.lock);
1112     /* if we have nothing to wait for block */
1113     if (r == -ENOENT) {
1114     diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
1115     index edafd3c2b170..f5c0590bbf73 100644
1116     --- a/drivers/gpu/drm/radeon/radeon_ttm.c
1117     +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
1118     @@ -758,7 +758,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
1119     0, PAGE_SIZE,
1120     PCI_DMA_BIDIRECTIONAL);
1121     if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
1122     - while (--i) {
1123     + while (i--) {
1124     pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
1125     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
1126     gtt->ttm.dma_address[i] = 0;
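
The one-character radeon_ttm change above fixes a classic unwind off-by-one: after a mapping failure at index i, `while (--i)` never unmaps entry 0 (and would loop on negative indices if entry 0 itself failed), while `while (i--)` visits exactly the entries 0..i-1 that were mapped. A standalone demonstration:

#include <stdio.h>

int main(void)
{
	int failed_at = 3;	/* mapping of entry 3 failed; 0..2 succeeded */
	int i;

	i = failed_at;
	while (--i)
		printf("buggy unwind: %d\n", i);	/* 2, 1 - entry 0 leaks */

	i = failed_at;
	while (i--)
		printf("fixed unwind: %d\n", i);	/* 2, 1, 0 */
	return 0;
}
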
1127     diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
1128     index c32a934f7693..353e2ab090ee 100644
1129     --- a/drivers/infiniband/ulp/isert/ib_isert.c
1130     +++ b/drivers/infiniband/ulp/isert/ib_isert.c
1131     @@ -1349,7 +1349,7 @@ sequence_cmd:
1132     if (!rc && dump_payload == false && unsol_data)
1133     iscsit_set_unsoliticed_dataout(cmd);
1134     else if (dump_payload && imm_data)
1135     - target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
1136     + target_put_sess_cmd(&cmd->se_cmd);
1137    
1138     return 0;
1139     }
1140     @@ -1774,7 +1774,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
1141     cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
1142     struct se_cmd *se_cmd = &cmd->se_cmd;
1143    
1144     - target_put_sess_cmd(se_cmd->se_sess, se_cmd);
1145     + target_put_sess_cmd(se_cmd);
1146     }
1147     }
1148    
1149     @@ -1947,7 +1947,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
1150     spin_unlock_bh(&cmd->istate_lock);
1151    
1152     if (ret) {
1153     - target_put_sess_cmd(se_cmd->se_sess, se_cmd);
1154     + target_put_sess_cmd(se_cmd);
1155     transport_send_check_condition_and_sense(se_cmd,
1156     se_cmd->pi_err, 0);
1157     } else {
1158     diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
1159     index 9b84b4c0a000..6fbc7bc824d2 100644
1160     --- a/drivers/infiniband/ulp/srpt/ib_srpt.c
1161     +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
1162     @@ -1334,7 +1334,7 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
1163    
1164     BUG_ON(ch->sess == NULL);
1165    
1166     - target_put_sess_cmd(ch->sess, &ioctx->cmd);
1167     + target_put_sess_cmd(&ioctx->cmd);
1168     goto out;
1169     }
1170    
1171     @@ -1365,11 +1365,11 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
1172     * not been received in time.
1173     */
1174     srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
1175     - target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
1176     + target_put_sess_cmd(&ioctx->cmd);
1177     break;
1178     case SRPT_STATE_MGMT_RSP_SENT:
1179     srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
1180     - target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
1181     + target_put_sess_cmd(&ioctx->cmd);
1182     break;
1183     default:
1184     WARN(1, "Unexpected command state (%d)", state);
1185     @@ -1679,7 +1679,7 @@ static int srpt_check_stop_free(struct se_cmd *cmd)
1186     struct srpt_send_ioctx *ioctx = container_of(cmd,
1187     struct srpt_send_ioctx, cmd);
1188    
1189     - return target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
1190     + return target_put_sess_cmd(&ioctx->cmd);
1191     }
1192    
1193     /**
1194     @@ -3074,7 +3074,7 @@ static void srpt_queue_response(struct se_cmd *cmd)
1195     ioctx->tag);
1196     srpt_unmap_sg_to_ib_sge(ch, ioctx);
1197     srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
1198     - target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
1199     + target_put_sess_cmd(&ioctx->cmd);
1200     }
1201     }
1202    
1203     diff --git a/drivers/input/mouse/vmmouse.c b/drivers/input/mouse/vmmouse.c
1204     index e272f06258ce..a3f0f5a47490 100644
1205     --- a/drivers/input/mouse/vmmouse.c
1206     +++ b/drivers/input/mouse/vmmouse.c
1207     @@ -458,8 +458,6 @@ int vmmouse_init(struct psmouse *psmouse)
1208     priv->abs_dev = abs_dev;
1209     psmouse->private = priv;
1210    
1211     - input_set_capability(rel_dev, EV_REL, REL_WHEEL);
1212     -
1213     /* Set up and register absolute device */
1214     snprintf(priv->phys, sizeof(priv->phys), "%s/input1",
1215     psmouse->ps2dev.serio->phys);
1216     @@ -475,10 +473,6 @@ int vmmouse_init(struct psmouse *psmouse)
1217     abs_dev->id.version = psmouse->model;
1218     abs_dev->dev.parent = &psmouse->ps2dev.serio->dev;
1219    
1220     - error = input_register_device(priv->abs_dev);
1221     - if (error)
1222     - goto init_fail;
1223     -
1224     /* Set absolute device capabilities */
1225     input_set_capability(abs_dev, EV_KEY, BTN_LEFT);
1226     input_set_capability(abs_dev, EV_KEY, BTN_RIGHT);
1227     @@ -488,6 +482,13 @@ int vmmouse_init(struct psmouse *psmouse)
1228     input_set_abs_params(abs_dev, ABS_X, 0, VMMOUSE_MAX_X, 0, 0);
1229     input_set_abs_params(abs_dev, ABS_Y, 0, VMMOUSE_MAX_Y, 0, 0);
1230    
1231     + error = input_register_device(priv->abs_dev);
1232     + if (error)
1233     + goto init_fail;
1234     +
1235     + /* Add wheel capability to the relative device */
1236     + input_set_capability(rel_dev, EV_REL, REL_WHEEL);
1237     +
1238     psmouse->protocol_handler = vmmouse_process_byte;
1239     psmouse->disconnect = vmmouse_disconnect;
1240     psmouse->reconnect = vmmouse_reconnect;
1241     diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
1242     index 9847613085e1..5a2ec39e1fd9 100644
1243     --- a/drivers/iommu/dmar.c
1244     +++ b/drivers/iommu/dmar.c
1245     @@ -1342,7 +1342,7 @@ void dmar_disable_qi(struct intel_iommu *iommu)
1246    
1247     raw_spin_lock_irqsave(&iommu->register_lock, flags);
1248    
1249     - sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
1250     + sts = readl(iommu->reg + DMAR_GSTS_REG);
1251     if (!(sts & DMA_GSTS_QIES))
1252     goto end;
1253    
1254     diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
1255     index 5709ae9c3e77..04b39be8f1f3 100644
1256     --- a/drivers/iommu/intel_irq_remapping.c
1257     +++ b/drivers/iommu/intel_irq_remapping.c
1258     @@ -544,7 +544,7 @@ static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
1259    
1260     raw_spin_lock_irqsave(&iommu->register_lock, flags);
1261    
1262     - sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
1263     + sts = readl(iommu->reg + DMAR_GSTS_REG);
1264     if (!(sts & DMA_GSTS_IRES))
1265     goto end;
1266    
1267     diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
1268     index 72ba774df7a7..bd744e31c434 100644
1269     --- a/drivers/net/bonding/bond_main.c
1270     +++ b/drivers/net/bonding/bond_main.c
1271     @@ -214,6 +214,8 @@ static void bond_uninit(struct net_device *bond_dev);
1272     static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
1273     struct rtnl_link_stats64 *stats);
1274     static void bond_slave_arr_handler(struct work_struct *work);
1275     +static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
1276     + int mod);
1277    
1278     /*---------------------------- General routines -----------------------------*/
1279    
1280     @@ -2397,7 +2399,7 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
1281     struct slave *slave)
1282     {
1283     struct arphdr *arp = (struct arphdr *)skb->data;
1284     - struct slave *curr_active_slave;
1285     + struct slave *curr_active_slave, *curr_arp_slave;
1286     unsigned char *arp_ptr;
1287     __be32 sip, tip;
1288     int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
1289     @@ -2444,26 +2446,41 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
1290     &sip, &tip);
1291    
1292     curr_active_slave = rcu_dereference(bond->curr_active_slave);
1293     + curr_arp_slave = rcu_dereference(bond->current_arp_slave);
1294    
1295     - /* Backup slaves won't see the ARP reply, but do come through
1296     - * here for each ARP probe (so we swap the sip/tip to validate
1297     - * the probe). In a "redundant switch, common router" type of
1298     - * configuration, the ARP probe will (hopefully) travel from
1299     - * the active, through one switch, the router, then the other
1300     - * switch before reaching the backup.
1301     + /* We 'trust' the received ARP enough to validate it if:
1302     + *
1303     + * (a) the slave receiving the ARP is active (which includes the
1304     + * current ARP slave, if any), or
1305     + *
1306     + * (b) the receiving slave isn't active, but there is a currently
1307     + * active slave and it received valid arp reply(s) after it became
1308     + * the currently active slave, or
1309     + *
1310     + * (c) there is an ARP slave that sent an ARP during the prior ARP
1311     + * interval, and we receive an ARP reply on any slave. We accept
1312     + * these because switch FDB update delays may deliver the ARP
1313     + * reply to a slave other than the sender of the ARP request.
1314     *
1315     - * We 'trust' the arp requests if there is an active slave and
1316     - * it received valid arp reply(s) after it became active. This
1317     - * is done to avoid endless looping when we can't reach the
1318     + * Note: for (b), backup slaves are receiving the broadcast ARP
1319     + * request, not a reply. This request passes from the sending
1320     + * slave through the L2 switch(es) to the receiving slave. Since
1321     + * this is checking the request, sip/tip are swapped for
1322     + * validation.
1323     + *
1324     + * This is done to avoid endless looping when we can't reach the
1325     * arp_ip_target and fool ourselves with our own arp requests.
1326     */
1327     -
1328     if (bond_is_active_slave(slave))
1329     bond_validate_arp(bond, slave, sip, tip);
1330     else if (curr_active_slave &&
1331     time_after(slave_last_rx(bond, curr_active_slave),
1332     curr_active_slave->last_link_up))
1333     bond_validate_arp(bond, slave, tip, sip);
1334     + else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
1335     + bond_time_in_interval(bond,
1336     + dev_trans_start(curr_arp_slave->dev), 1))
1337     + bond_validate_arp(bond, slave, sip, tip);
1338    
1339     out_unlock:
1340     if (arp != (struct arphdr *)skb->data)
1341     diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
1342     index 0d8af5bb5907..d5415205779f 100644
1343     --- a/drivers/net/ethernet/broadcom/tg3.c
1344     +++ b/drivers/net/ethernet/broadcom/tg3.c
1345     @@ -7833,6 +7833,14 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
1346     return ret;
1347     }
1348    
1349     +static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
1350     +{
1351     + /* Check if we will never have enough descriptors,
1352     + * as gso_segs can be more than current ring size
1353     + */
1354     + return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
1355     +}
1356     +
1357     static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
1358    
1359     /* Use GSO to workaround all TSO packets that meet HW bug conditions
1360     @@ -7936,14 +7944,19 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
1361     * vlan encapsulated.
1362     */
1363     if (skb->protocol == htons(ETH_P_8021Q) ||
1364     - skb->protocol == htons(ETH_P_8021AD))
1365     - return tg3_tso_bug(tp, tnapi, txq, skb);
1366     + skb->protocol == htons(ETH_P_8021AD)) {
1367     + if (tg3_tso_bug_gso_check(tnapi, skb))
1368     + return tg3_tso_bug(tp, tnapi, txq, skb);
1369     + goto drop;
1370     + }
1371    
1372     if (!skb_is_gso_v6(skb)) {
1373     if (unlikely((ETH_HLEN + hdr_len) > 80) &&
1374     - tg3_flag(tp, TSO_BUG))
1375     - return tg3_tso_bug(tp, tnapi, txq, skb);
1376     -
1377     + tg3_flag(tp, TSO_BUG)) {
1378     + if (tg3_tso_bug_gso_check(tnapi, skb))
1379     + return tg3_tso_bug(tp, tnapi, txq, skb);
1380     + goto drop;
1381     + }
1382     ip_csum = iph->check;
1383     ip_tot_len = iph->tot_len;
1384     iph->check = 0;
1385     @@ -8075,7 +8088,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
1386     if (would_hit_hwbug) {
1387     tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
1388    
1389     - if (mss) {
1390     + if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
1391     /* If it's a TSO packet, do GSO instead of
1392     * allocating and copying to a large linear SKB
1393     */
1394     diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
1395     index 8a083d73efdb..dae2ebb53af7 100644
1396     --- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
1397     +++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
1398     @@ -236,6 +236,24 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
1399     .enable = mlx4_en_phc_enable,
1400     };
1401    
1402     +#define MLX4_EN_WRAP_AROUND_SEC 10ULL
1403     +
1404     +/* This function calculates the max shift that enables the user range
1405     + * of MLX4_EN_WRAP_AROUND_SEC values in the cycles register.
1406     + */
1407     +static u32 freq_to_shift(u16 freq)
1408     +{
1409     + u32 freq_khz = freq * 1000;
1410     + u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
1411     + u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
1412     + max_val_cycles : roundup_pow_of_two(max_val_cycles) - 1;
1413     + /* calculate max possible multiplier in order to fit in 64bit */
1414     + u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);
1415     +
1416     + /* This comes from the reverse of clocksource_khz2mult */
1417     + return ilog2(div_u64(max_mul * freq_khz, 1000000));
1418     +}
1419     +
1420     void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
1421     {
1422     struct mlx4_dev *dev = mdev->dev;
1423     @@ -247,12 +265,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
1424     memset(&mdev->cycles, 0, sizeof(mdev->cycles));
1425     mdev->cycles.read = mlx4_en_read_clock;
1426     mdev->cycles.mask = CLOCKSOURCE_MASK(48);
1427     - /* Using shift to make calculation more accurate. Since current HW
1428     - * clock frequency is 427 MHz, and cycles are given using a 48 bits
1429     - * register, the biggest shift when calculating using u64, is 14
1430     - * (max_cycles * multiplier < 2^64)
1431     - */
1432     - mdev->cycles.shift = 14;
1433     + mdev->cycles.shift = freq_to_shift(dev->caps.hca_core_clock);
1434     mdev->cycles.mult =
1435     clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
1436     mdev->nominal_c_mult = mdev->cycles.mult;
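
freq_to_shift() above picks the largest clocksource shift that still lets MLX4_EN_WRAP_AROUND_SEC (10 s) worth of cycles be multiplied without overflowing 64 bits. A userspace sketch reproducing the arithmetic for a hypothetical 427 MHz clock (the frequency is illustrative, and the exact power-of-two corner case is omitted):

#include <stdint.h>
#include <stdio.h>

static uint64_t roundup_pow_of_two(uint64_t x)
{
	uint64_t p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

static unsigned int ilog2_u64(uint64_t x)
{
	unsigned int l = 0;

	while (x >>= 1)
		l++;
	return l;
}

int main(void)
{
	uint32_t freq_khz = 427 * 1000;
	uint64_t max_val_cycles = (uint64_t)freq_khz * 1000 * 10;
	uint64_t rounded = roundup_pow_of_two(max_val_cycles) - 1;
	uint64_t max_mul = UINT64_MAX / rounded;

	/* Prints 20 for this input. */
	printf("shift = %u\n", ilog2_u64(max_mul * freq_khz / 1000000));
	return 0;
}
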
1437     diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
1438     index a5a0b8420d26..e9189597000d 100644
1439     --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
1440     +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
1441     @@ -2330,8 +2330,6 @@ out:
1442     /* set offloads */
1443     priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
1444     NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
1445     - priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
1446     - priv->dev->features |= NETIF_F_GSO_UDP_TUNNEL;
1447     }
1448    
1449     static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
1450     @@ -2342,8 +2340,6 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
1451     /* unset offloads */
1452     priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
1453     NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
1454     - priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
1455     - priv->dev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
1456    
1457     ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
1458     VXLAN_STEER_BY_OUTER_MAC, 0);
1459     @@ -2940,6 +2936,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
1460     priv->rss_hash_fn = ETH_RSS_HASH_TOP;
1461     }
1462    
1463     + if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
1464     + dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
1465     + dev->features |= NETIF_F_GSO_UDP_TUNNEL;
1466     + }
1467     +
1468     mdev->pndev[port] = dev;
1469     mdev->upper[port] = NULL;
1470    
1471     diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
1472     index 0a56f010c846..760a8b387912 100644
1473     --- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
1474     +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
1475     @@ -223,11 +223,11 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
1476     stats->collisions = 0;
1477     stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP);
1478     stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
1479     - stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
1480     + stats->rx_over_errors = 0;
1481     stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
1482     stats->rx_frame_errors = 0;
1483     stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
1484     - stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
1485     + stats->rx_missed_errors = 0;
1486     stats->tx_aborted_errors = 0;
1487     stats->tx_carrier_errors = 0;
1488     stats->tx_fifo_errors = 0;
1489     diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
1490     index 73b6fc21ea00..4fedf7fa72c4 100644
1491     --- a/drivers/net/ethernet/rocker/rocker.c
1492     +++ b/drivers/net/ethernet/rocker/rocker.c
1493     @@ -3384,12 +3384,14 @@ static void rocker_port_fdb_learn_work(struct work_struct *work)
1494     info.addr = lw->addr;
1495     info.vid = lw->vid;
1496    
1497     + rtnl_lock();
1498     if (learned && removing)
1499     call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_DEL,
1500     lw->dev, &info.info);
1501     else if (learned && !removing)
1502     call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_ADD,
1503     lw->dev, &info.info);
1504     + rtnl_unlock();
1505    
1506     kfree(work);
1507     }
1508     diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
1509     index 00cb41e71312..c56cf0b86f2c 100644
1510     --- a/drivers/net/phy/dp83640.c
1511     +++ b/drivers/net/phy/dp83640.c
1512     @@ -833,6 +833,11 @@ static void decode_rxts(struct dp83640_private *dp83640,
1513     struct skb_shared_hwtstamps *shhwtstamps = NULL;
1514     struct sk_buff *skb;
1515     unsigned long flags;
1516     + u8 overflow;
1517     +
1518     + overflow = (phy_rxts->ns_hi >> 14) & 0x3;
1519     + if (overflow)
1520     + pr_debug("rx timestamp queue overflow, count %d\n", overflow);
1521    
1522     spin_lock_irqsave(&dp83640->rx_lock, flags);
1523    
1524     @@ -875,6 +880,7 @@ static void decode_txts(struct dp83640_private *dp83640,
1525     struct skb_shared_hwtstamps shhwtstamps;
1526     struct sk_buff *skb;
1527     u64 ns;
1528     + u8 overflow;
1529    
1530     /* We must already have the skb that triggered this. */
1531    
1532     @@ -884,6 +890,17 @@ static void decode_txts(struct dp83640_private *dp83640,
1533     pr_debug("have timestamp but tx_queue empty\n");
1534     return;
1535     }
1536     +
1537     + overflow = (phy_txts->ns_hi >> 14) & 0x3;
1538     + if (overflow) {
1539     + pr_debug("tx timestamp queue overflow, count %d\n", overflow);
1540     + while (skb) {
1541     + skb_complete_tx_timestamp(skb, NULL);
1542     + skb = skb_dequeue(&dp83640->tx_queue);
1543     + }
1544     + return;
1545     + }
1546     +
1547     ns = phy2txts(phy_txts);
1548     memset(&shhwtstamps, 0, sizeof(shhwtstamps));
1549     shhwtstamps.hwtstamp = ns_to_ktime(ns);
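
The dp83640 hunks above start honoring the overflow counter the PHY reports in the top two bits of ns_hi: a non-zero count means timestamps were dropped, so the tx path now completes all queued skbs with a NULL timestamp rather than pairing them with the wrong event. A small sketch of the field extraction, assuming the layout implied by the driver's existing phy2txts() helper (14 bits of upper nanoseconds plus the 2-bit overflow count); the struct and sample values are illustrative only.

	#include <stdint.h>
	#include <stdio.h>

	struct phy_ts {
		uint16_t ns_lo;
		uint16_t ns_hi; /* bits 13:0 = ns[29:16], bits 15:14 = overflow count */
	};

	static uint8_t ts_overflow(const struct phy_ts *ts)
	{
		return (ts->ns_hi >> 14) & 0x3; /* same mask as the hunks above */
	}

	static uint32_t ts_ns(const struct phy_ts *ts)
	{
		return ((uint32_t)(ts->ns_hi & 0x3fff) << 16) | ts->ns_lo;
	}

	int main(void)
	{
		struct phy_ts ts = { .ns_lo = 0x1234, .ns_hi = (2 << 14) | 0x0abc };

		printf("overflow=%u ns=%u\n", ts_overflow(&ts), ts_ns(&ts));
		return 0;
	}
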
1550     diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
1551     index 9c8fabed4444..d1c4bc1c4df0 100644
1552     --- a/drivers/net/ppp/pppoe.c
1553     +++ b/drivers/net/ppp/pppoe.c
1554     @@ -395,6 +395,8 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
1555    
1556     if (!__pppoe_xmit(sk_pppox(relay_po), skb))
1557     goto abort_put;
1558     +
1559     + sock_put(sk_pppox(relay_po));
1560     } else {
1561     if (sock_queue_rcv_skb(sk, skb))
1562     goto abort_kfree;
1563     diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
1564     index 0bacabfa486e..b35199cc8f34 100644
1565     --- a/drivers/net/ppp/pptp.c
1566     +++ b/drivers/net/ppp/pptp.c
1567     @@ -131,24 +131,27 @@ static int lookup_chan_dst(u16 call_id, __be32 d_addr)
1568     return i < MAX_CALLID;
1569     }
1570    
1571     -static int add_chan(struct pppox_sock *sock)
1572     +static int add_chan(struct pppox_sock *sock,
1573     + struct pptp_addr *sa)
1574     {
1575     static int call_id;
1576    
1577     spin_lock(&chan_lock);
1578     - if (!sock->proto.pptp.src_addr.call_id) {
1579     + if (!sa->call_id) {
1580     call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1);
1581     if (call_id == MAX_CALLID) {
1582     call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1);
1583     if (call_id == MAX_CALLID)
1584     goto out_err;
1585     }
1586     - sock->proto.pptp.src_addr.call_id = call_id;
1587     - } else if (test_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap))
1588     + sa->call_id = call_id;
1589     + } else if (test_bit(sa->call_id, callid_bitmap)) {
1590     goto out_err;
1591     + }
1592    
1593     - set_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
1594     - rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], sock);
1595     + sock->proto.pptp.src_addr = *sa;
1596     + set_bit(sa->call_id, callid_bitmap);
1597     + rcu_assign_pointer(callid_sock[sa->call_id], sock);
1598     spin_unlock(&chan_lock);
1599    
1600     return 0;
1601     @@ -417,7 +420,6 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
1602     struct sock *sk = sock->sk;
1603     struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
1604     struct pppox_sock *po = pppox_sk(sk);
1605     - struct pptp_opt *opt = &po->proto.pptp;
1606     int error = 0;
1607    
1608     if (sockaddr_len < sizeof(struct sockaddr_pppox))
1609     @@ -425,10 +427,22 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
1610    
1611     lock_sock(sk);
1612    
1613     - opt->src_addr = sp->sa_addr.pptp;
1614     - if (add_chan(po))
1615     + if (sk->sk_state & PPPOX_DEAD) {
1616     + error = -EALREADY;
1617     + goto out;
1618     + }
1619     +
1620     + if (sk->sk_state & PPPOX_BOUND) {
1621     error = -EBUSY;
1622     + goto out;
1623     + }
1624     +
1625     + if (add_chan(po, &sp->sa_addr.pptp))
1626     + error = -EBUSY;
1627     + else
1628     + sk->sk_state |= PPPOX_BOUND;
1629    
1630     +out:
1631     release_sock(sk);
1632     return error;
1633     }
1634     @@ -499,7 +513,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
1635     }
1636    
1637     opt->dst_addr = sp->sa_addr.pptp;
1638     - sk->sk_state = PPPOX_CONNECTED;
1639     + sk->sk_state |= PPPOX_CONNECTED;
1640    
1641     end:
1642     release_sock(sk);
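
The real fix in the pptp hunks above is a bind race: add_chan() now takes the requested address explicitly and commits it to sock->proto.pptp.src_addr only under chan_lock, while pptp_bind() rejects a second bind via the PPPOX_BOUND/PPPOX_DEAD state bits. The call-ID allocation itself is unchanged; here is a standalone sketch of that two-pass bitmap search, with find_next_zero_bit() open-coded and all locking elided.

	#include <stdint.h>
	#include <stdio.h>

	#define MAX_CALLID 65535

	static uint8_t callid_bitmap[MAX_CALLID / 8 + 1];

	static int test_bit(unsigned int n)  { return callid_bitmap[n / 8] >> (n % 8) & 1; }
	static void set_bit_(unsigned int n) { callid_bitmap[n / 8] |= 1 << (n % 8); }

	/* stand-in for find_next_zero_bit(bitmap, size, offset) */
	static unsigned int next_zero(unsigned int size, unsigned int off)
	{
		while (off < size && test_bit(off))
			off++;
		return off; /* == size when no free bit was found */
	}

	/* Two-pass allocation as in add_chan(): continue from the last id
	 * handed out, wrap back to 1 before giving up (0 means "pick one
	 * for me" in the caller, so it is never allocated). */
	static int alloc_call_id(void)
	{
		static unsigned int call_id;

		call_id = next_zero(MAX_CALLID, call_id + 1);
		if (call_id == MAX_CALLID) {
			call_id = next_zero(MAX_CALLID, 1);
			if (call_id == MAX_CALLID)
				return -1;
		}
		set_bit_(call_id);
		return call_id;
	}

	int main(void)
	{
		for (int i = 0; i < 3; i++)
			printf("allocated call id %d\n", alloc_call_id());
		return 0;
	}
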
1643     diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
1644     index 71190dc1eacf..cffb25280a3b 100644
1645     --- a/drivers/net/usb/qmi_wwan.c
1646     +++ b/drivers/net/usb/qmi_wwan.c
1647     @@ -542,6 +542,7 @@ static const struct usb_device_id products[] = {
1648    
1649     /* 3. Combined interface devices matching on interface number */
1650     {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
1651     + {QMI_FIXED_INTF(0x05c6, 0x6001, 3)}, /* 4G LTE usb-modem U901 */
1652     {QMI_FIXED_INTF(0x05c6, 0x7000, 0)},
1653     {QMI_FIXED_INTF(0x05c6, 0x7001, 1)},
1654     {QMI_FIXED_INTF(0x05c6, 0x7002, 1)},
1655     diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
1656     index 0bf82a20a0fb..48d21e0edd56 100644
1657     --- a/drivers/pci/pcie/aer/aerdrv.c
1658     +++ b/drivers/pci/pcie/aer/aerdrv.c
1659     @@ -262,7 +262,6 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
1660     rpc->rpd = dev;
1661     INIT_WORK(&rpc->dpc_handler, aer_isr);
1662     mutex_init(&rpc->rpc_mutex);
1663     - init_waitqueue_head(&rpc->wait_release);
1664    
1665     /* Use PCIe bus function to store rpc into PCIe device */
1666     set_service_data(dev, rpc);
1667     @@ -285,8 +284,7 @@ static void aer_remove(struct pcie_device *dev)
1668     if (rpc->isr)
1669     free_irq(dev->irq, dev);
1670    
1671     - wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx);
1672     -
1673     + flush_work(&rpc->dpc_handler);
1674     aer_disable_rootport(rpc);
1675     kfree(rpc);
1676     set_service_data(dev, NULL);
1677     diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
1678     index 84420b7c9456..945c939a86c5 100644
1679     --- a/drivers/pci/pcie/aer/aerdrv.h
1680     +++ b/drivers/pci/pcie/aer/aerdrv.h
1681     @@ -72,7 +72,6 @@ struct aer_rpc {
1682     * recovery on the same
1683     * root port hierarchy
1684     */
1685     - wait_queue_head_t wait_release;
1686     };
1687    
1688     struct aer_broadcast_data {
1689     diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
1690     index 5653ea94547f..b60a325234c5 100644
1691     --- a/drivers/pci/pcie/aer/aerdrv_core.c
1692     +++ b/drivers/pci/pcie/aer/aerdrv_core.c
1693     @@ -784,8 +784,6 @@ void aer_isr(struct work_struct *work)
1694     while (get_e_source(rpc, &e_src))
1695     aer_isr_one_error(p_device, &e_src);
1696     mutex_unlock(&rpc->rpc_mutex);
1697     -
1698     - wake_up(&rpc->wait_release);
1699     }
1700    
1701     /**
1702     diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
1703     index 63bc12d7a73e..153e0a27c7ee 100644
1704     --- a/drivers/phy/phy-core.c
1705     +++ b/drivers/phy/phy-core.c
1706     @@ -275,20 +275,21 @@ EXPORT_SYMBOL_GPL(phy_exit);
1707    
1708     int phy_power_on(struct phy *phy)
1709     {
1710     - int ret;
1711     + int ret = 0;
1712    
1713     if (!phy)
1714     - return 0;
1715     + goto out;
1716    
1717     if (phy->pwr) {
1718     ret = regulator_enable(phy->pwr);
1719     if (ret)
1720     - return ret;
1721     + goto out;
1722     }
1723    
1724     ret = phy_pm_runtime_get_sync(phy);
1725     if (ret < 0 && ret != -ENOTSUPP)
1726     - return ret;
1727     + goto err_pm_sync;
1728     +
1729     ret = 0; /* Override possible ret == -ENOTSUPP */
1730    
1731     mutex_lock(&phy->mutex);
1732     @@ -296,19 +297,20 @@ int phy_power_on(struct phy *phy)
1733     ret = phy->ops->power_on(phy);
1734     if (ret < 0) {
1735     dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
1736     - goto out;
1737     + goto err_pwr_on;
1738     }
1739     }
1740     ++phy->power_count;
1741     mutex_unlock(&phy->mutex);
1742     return 0;
1743    
1744     -out:
1745     +err_pwr_on:
1746     mutex_unlock(&phy->mutex);
1747     phy_pm_runtime_put_sync(phy);
1748     +err_pm_sync:
1749     if (phy->pwr)
1750     regulator_disable(phy->pwr);
1751     -
1752     +out:
1753     return ret;
1754     }
1755     EXPORT_SYMBOL_GPL(phy_power_on);
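
The phy_power_on() rewrite above replaces early returns with the usual goto-unwind ladder, so the regulator enabled at the top is also disabled when phy_pm_runtime_get_sync() or the power_on op fails (previously those paths leaked the enabled regulator). A minimal sketch of that shape, with hypothetical acquire()/release() stand-ins for the regulator, runtime-PM and op steps.

	#include <stdio.h>

	static int acquire(const char *what, int fail)
	{
		if (fail) {
			printf("acquire %s: failed\n", what);
			return -1;
		}
		printf("acquire %s\n", what);
		return 0;
	}

	static void release(const char *what) { printf("release %s\n", what); }

	/* one label per acquired resource, unwound in reverse order */
	static int power_on(int fail_pm, int fail_op)
	{
		int ret;

		ret = acquire("regulator", 0);
		if (ret)
			goto out;
		ret = acquire("runtime pm", fail_pm);
		if (ret)
			goto err_regulator;
		ret = acquire("phy op", fail_op);
		if (ret)
			goto err_pm;
		return 0;

	err_pm:
		release("runtime pm");
	err_regulator:
		release("regulator");
	out:
		return ret;
	}

	int main(void)
	{
		power_on(0, 1); /* fails at the last step, unwinds both resources */
		return 0;
	}
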
1756     diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
1757     index 6285f46f3ddb..fb9e30ed8018 100644
1758     --- a/drivers/phy/phy-twl4030-usb.c
1759     +++ b/drivers/phy/phy-twl4030-usb.c
1760     @@ -719,6 +719,7 @@ static int twl4030_usb_probe(struct platform_device *pdev)
1761     pm_runtime_use_autosuspend(&pdev->dev);
1762     pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
1763     pm_runtime_enable(&pdev->dev);
1764     + pm_runtime_get_sync(&pdev->dev);
1765    
1766     /* Our job is to use irqs and status from the power module
1767     * to keep the transceiver disabled when nothing's connected.
1768     @@ -754,6 +755,7 @@ static int twl4030_usb_remove(struct platform_device *pdev)
1769     struct twl4030_usb *twl = platform_get_drvdata(pdev);
1770     int val;
1771    
1772     + usb_remove_phy(&twl->phy);
1773     pm_runtime_get_sync(twl->dev);
1774     cancel_delayed_work(&twl->id_workaround_work);
1775     device_remove_file(twl->dev, &dev_attr_vbus);
1776     @@ -761,6 +763,13 @@ static int twl4030_usb_remove(struct platform_device *pdev)
1777     /* set transceiver mode to power on defaults */
1778     twl4030_usb_set_mode(twl, -1);
1779    
1780     + /* idle ulpi before powering off */
1781     + if (cable_present(twl->linkstat))
1782     + pm_runtime_put_noidle(twl->dev);
1783     + pm_runtime_mark_last_busy(twl->dev);
1784     + pm_runtime_put_sync_suspend(twl->dev);
1785     + pm_runtime_disable(twl->dev);
1786     +
1787     /* autogate 60MHz ULPI clock,
1788     * clear dpll clock request for i2c access,
1789     * disable 32KHz
1790     @@ -775,11 +784,6 @@ static int twl4030_usb_remove(struct platform_device *pdev)
1791     /* disable complete OTG block */
1792     twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB);
1793    
1794     - if (cable_present(twl->linkstat))
1795     - pm_runtime_put_noidle(twl->dev);
1796     - pm_runtime_mark_last_busy(twl->dev);
1797     - pm_runtime_put(twl->dev);
1798     -
1799     return 0;
1800     }
1801    
1802     diff --git a/drivers/platform/x86/intel_scu_ipcutil.c b/drivers/platform/x86/intel_scu_ipcutil.c
1803     index 02bc5a6343c3..aa454241489c 100644
1804     --- a/drivers/platform/x86/intel_scu_ipcutil.c
1805     +++ b/drivers/platform/x86/intel_scu_ipcutil.c
1806     @@ -49,7 +49,7 @@ struct scu_ipc_data {
1807    
1808     static int scu_reg_access(u32 cmd, struct scu_ipc_data *data)
1809     {
1810     - int count = data->count;
1811     + unsigned int count = data->count;
1812    
1813     if (count == 0 || count == 3 || count > 4)
1814     return -EINVAL;
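
The one-line type change above matters because data->count arrives from userspace through an ioctl: with a signed int, a negative count slips past the count > 4 bound; as unsigned int the same bit pattern becomes a huge value and is rejected up front. A demonstration of just the comparison.

	#include <stdio.h>

	/* the validation from scu_reg_access(), with both count types */
	static int valid_signed(int count)
	{
		return !(count == 0 || count == 3 || count > 4);
	}

	static int valid_unsigned(unsigned int count)
	{
		return !(count == 0 || count == 3 || count > 4);
	}

	int main(void)
	{
		int evil = -1; /* e.g. a hostile data->count */

		/* -1 > 4 is false for signed int, so the bad value passes... */
		printf("signed:   %s\n", valid_signed(evil) ? "accepted" : "rejected");
		/* ...but converts to 0xffffffff as unsigned and is caught */
		printf("unsigned: %s\n", valid_unsigned(evil) ? "accepted" : "rejected");
		return 0;
	}
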
1815     diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
1816     index a2597e683e79..6a64e86e8ccd 100644
1817     --- a/drivers/s390/block/dasd_alias.c
1818     +++ b/drivers/s390/block/dasd_alias.c
1819     @@ -264,8 +264,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
1820     spin_unlock_irqrestore(&lcu->lock, flags);
1821     cancel_work_sync(&lcu->suc_data.worker);
1822     spin_lock_irqsave(&lcu->lock, flags);
1823     - if (device == lcu->suc_data.device)
1824     + if (device == lcu->suc_data.device) {
1825     + dasd_put_device(device);
1826     lcu->suc_data.device = NULL;
1827     + }
1828     }
1829     was_pending = 0;
1830     if (device == lcu->ruac_data.device) {
1831     @@ -273,8 +275,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
1832     was_pending = 1;
1833     cancel_delayed_work_sync(&lcu->ruac_data.dwork);
1834     spin_lock_irqsave(&lcu->lock, flags);
1835     - if (device == lcu->ruac_data.device)
1836     + if (device == lcu->ruac_data.device) {
1837     + dasd_put_device(device);
1838     lcu->ruac_data.device = NULL;
1839     + }
1840     }
1841     private->lcu = NULL;
1842     spin_unlock_irqrestore(&lcu->lock, flags);
1843     @@ -549,8 +553,10 @@ static void lcu_update_work(struct work_struct *work)
1844     if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
1845     DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
1846     " alias data in lcu (rc = %d), retry later", rc);
1847     - schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
1848     + if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ))
1849     + dasd_put_device(device);
1850     } else {
1851     + dasd_put_device(device);
1852     lcu->ruac_data.device = NULL;
1853     lcu->flags &= ~UPDATE_PENDING;
1854     }
1855     @@ -593,8 +599,10 @@ static int _schedule_lcu_update(struct alias_lcu *lcu,
1856     */
1857     if (!usedev)
1858     return -EINVAL;
1859     + dasd_get_device(usedev);
1860     lcu->ruac_data.device = usedev;
1861     - schedule_delayed_work(&lcu->ruac_data.dwork, 0);
1862     + if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0))
1863     + dasd_put_device(usedev);
1864     return 0;
1865     }
1866    
1867     @@ -722,7 +730,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu,
1868     ASCEBC((char *) &cqr->magic, 4);
1869     ccw = cqr->cpaddr;
1870     ccw->cmd_code = DASD_ECKD_CCW_RSCK;
1871     - ccw->flags = 0 ;
1872     + ccw->flags = CCW_FLAG_SLI;
1873     ccw->count = 16;
1874     ccw->cda = (__u32)(addr_t) cqr->data;
1875     ((char *)cqr->data)[0] = reason;
1876     @@ -926,6 +934,7 @@ static void summary_unit_check_handling_work(struct work_struct *work)
1877     /* 3. read new alias configuration */
1878     _schedule_lcu_update(lcu, device);
1879     lcu->suc_data.device = NULL;
1880     + dasd_put_device(device);
1881     spin_unlock_irqrestore(&lcu->lock, flags);
1882     }
1883    
1884     @@ -985,6 +994,8 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
1885     }
1886     lcu->suc_data.reason = reason;
1887     lcu->suc_data.device = device;
1888     + dasd_get_device(device);
1889     spin_unlock(&lcu->lock);
1890     - schedule_work(&lcu->suc_data.worker);
1891     + if (!schedule_work(&lcu->suc_data.worker))
1892     + dasd_put_device(device);
1893     };
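
The dasd_alias hunks above all apply one ownership rule: take a reference with dasd_get_device() before handing the device to a worker, and drop that reference immediately when schedule_work()/schedule_delayed_work() returns false because the work was already pending (either way the worker ends up owning exactly one reference, which it puts when it finishes or re-arms). A toy userspace sketch of the rule, with a stubbed scheduler standing in for the workqueue API.

	#include <stdbool.h>
	#include <stdio.h>

	struct device { int refcount; };

	static void get_device(struct device *d) { d->refcount++; }
	static void put_device(struct device *d)
	{
		if (--d->refcount == 0)
			printf("device freed\n");
	}

	/* stand-in for schedule_work(): false when already pending */
	static bool schedule_work_stub(bool already_pending)
	{
		return !already_pending;
	}

	/* pin before queuing; balance the pin if queuing was a no-op */
	static void kick_worker(struct device *d, bool already_pending)
	{
		get_device(d);
		if (!schedule_work_stub(already_pending))
			put_device(d); /* the pending work already owns a ref */
	}

	int main(void)
	{
		struct device dev = { .refcount = 1 };

		kick_worker(&dev, false); /* queued: worker now holds a ref */
		kick_worker(&dev, true);  /* no-op: ref taken and dropped */
		put_device(&dev);         /* caller's own reference */
		printf("refcount now %d (the worker still holds one)\n",
		       dev.refcount);
		return 0;
	}
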
1894     diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
1895     index b46ace3d4bf0..dd0c133aa312 100644
1896     --- a/drivers/scsi/device_handler/scsi_dh_rdac.c
1897     +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
1898     @@ -568,7 +568,7 @@ static int mode_select_handle_sense(struct scsi_device *sdev,
1899     /*
1900     * Command Lock contention
1901     */
1902     - err = SCSI_DH_RETRY;
1903     + err = SCSI_DH_IMM_RETRY;
1904     break;
1905     default:
1906     break;
1907     @@ -618,6 +618,8 @@ retry:
1908     err = mode_select_handle_sense(sdev, h->sense);
1909     if (err == SCSI_DH_RETRY && retry_cnt--)
1910     goto retry;
1911     + if (err == SCSI_DH_IMM_RETRY)
1912     + goto retry;
1913     }
1914     if (err == SCSI_DH_OK) {
1915     h->state = RDAC_STATE_ACTIVE;
1916     diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
1917     index e9ae6b924c70..8b011aef12bd 100644
1918     --- a/drivers/scsi/qla2xxx/qla_dbg.c
1919     +++ b/drivers/scsi/qla2xxx/qla_dbg.c
1920     @@ -67,10 +67,10 @@
1921     * | | | 0xd031-0xd0ff |
1922     * | | | 0xd101-0xd1fe |
1923     * | | | 0xd214-0xd2fe |
1924     - * | Target Mode | 0xe079 | |
1925     - * | Target Mode Management | 0xf080 | 0xf002 |
1926     + * | Target Mode | 0xe080 | |
1927     + * | Target Mode Management | 0xf096 | 0xf002 |
1928     * | | | 0xf046-0xf049 |
1929     - * | Target Mode Task Management | 0x1000b | |
1930     + * | Target Mode Task Management | 0x1000d | |
1931     * ----------------------------------------------------------------------
1932     */
1933    
1934     diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
1935     index e86201d3b8c6..90d926ca1200 100644
1936     --- a/drivers/scsi/qla2xxx/qla_def.h
1937     +++ b/drivers/scsi/qla2xxx/qla_def.h
1938     @@ -274,6 +274,7 @@
1939     #define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/
1940    
1941     struct req_que;
1942     +struct qla_tgt_sess;
1943    
1944     /*
1945     * (sd.h is not exported, hence local inclusion)
1946     @@ -2026,6 +2027,7 @@ typedef struct fc_port {
1947     uint16_t port_id;
1948    
1949     unsigned long retry_delay_timestamp;
1950     + struct qla_tgt_sess *tgt_session;
1951     } fc_port_t;
1952    
1953     #include "qla_mr.h"
1954     @@ -3579,6 +3581,16 @@ typedef struct scsi_qla_host {
1955     uint16_t fcoe_fcf_idx;
1956     uint8_t fcoe_vn_port_mac[6];
1957    
1958     + /* list of commands waiting on workqueue */
1959     + struct list_head qla_cmd_list;
1960     + struct list_head qla_sess_op_cmd_list;
1961     + spinlock_t cmd_list_lock;
1962     +
1963     + /* Counter to detect races between ELS and RSCN events */
1964     + atomic_t generation_tick;
1965     + /* Time when global fcport update has been scheduled */
1966     + int total_fcport_update_gen;
1967     +
1968     uint32_t vp_abort_cnt;
1969    
1970     struct fc_vport *fc_vport; /* holds fc_vport * for each vport */
1971     diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
1972     index 998498e2341b..60f9651f2643 100644
1973     --- a/drivers/scsi/qla2xxx/qla_init.c
1974     +++ b/drivers/scsi/qla2xxx/qla_init.c
1975     @@ -115,6 +115,8 @@ qla2x00_async_iocb_timeout(void *data)
1976     QLA_LOGIO_LOGIN_RETRIED : 0;
1977     qla2x00_post_async_login_done_work(fcport->vha, fcport,
1978     lio->u.logio.data);
1979     + } else if (sp->type == SRB_LOGOUT_CMD) {
1980     + qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT);
1981     }
1982     }
1983    
1984     @@ -497,7 +499,10 @@ void
1985     qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
1986     uint16_t *data)
1987     {
1988     - qla2x00_mark_device_lost(vha, fcport, 1, 0);
1989     + /* Don't re-login in target mode */
1990     + if (!fcport->tgt_session)
1991     + qla2x00_mark_device_lost(vha, fcport, 1, 0);
1992     + qlt_logo_completion_handler(fcport, data[0]);
1993     return;
1994     }
1995    
1996     @@ -2189,7 +2194,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
1997     /* Clear outstanding commands array. */
1998     for (que = 0; que < ha->max_req_queues; que++) {
1999     req = ha->req_q_map[que];
2000     - if (!req)
2001     + if (!req || !test_bit(que, ha->req_qid_map))
2002     continue;
2003     req->out_ptr = (void *)(req->ring + req->length);
2004     *req->out_ptr = 0;
2005     @@ -2206,7 +2211,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
2006    
2007     for (que = 0; que < ha->max_rsp_queues; que++) {
2008     rsp = ha->rsp_q_map[que];
2009     - if (!rsp)
2010     + if (!rsp || !test_bit(que, ha->rsp_qid_map))
2011     continue;
2012     rsp->in_ptr = (void *)(rsp->ring + rsp->length);
2013     *rsp->in_ptr = 0;
2014     @@ -2922,24 +2927,14 @@ qla2x00_rport_del(void *data)
2015     {
2016     fc_port_t *fcport = data;
2017     struct fc_rport *rport;
2018     - scsi_qla_host_t *vha = fcport->vha;
2019     unsigned long flags;
2020     - unsigned long vha_flags;
2021    
2022     spin_lock_irqsave(fcport->vha->host->host_lock, flags);
2023     rport = fcport->drport ? fcport->drport: fcport->rport;
2024     fcport->drport = NULL;
2025     spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
2026     - if (rport) {
2027     + if (rport)
2028     fc_remote_port_delete(rport);
2029     - /*
2030     - * Release the target mode FC NEXUS in qla_target.c code
2031     - * if target mod is enabled.
2032     - */
2033     - spin_lock_irqsave(&vha->hw->hardware_lock, vha_flags);
2034     - qlt_fc_port_deleted(vha, fcport);
2035     - spin_unlock_irqrestore(&vha->hw->hardware_lock, vha_flags);
2036     - }
2037     }
2038    
2039     /**
2040     @@ -3379,6 +3374,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
2041     LIST_HEAD(new_fcports);
2042     struct qla_hw_data *ha = vha->hw;
2043     struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2044     + int discovery_gen;
2045    
2046     /* If FL port exists, then SNS is present */
2047     if (IS_FWI2_CAPABLE(ha))
2048     @@ -3449,6 +3445,14 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
2049     fcport->scan_state = QLA_FCPORT_SCAN;
2050     }
2051    
2052     + /* Mark the time right before querying FW for connected ports.
2053     + * This process is long and asynchronous, and by the time it's done,
2054     + * the collected information might not be accurate anymore. E.g. a
2055     + * disconnected port might have re-connected and a brand new
2056     + * session has been created. In this case the session's generation
2057     + * will be newer than discovery_gen. */
2058     + qlt_do_generation_tick(vha, &discovery_gen);
2059     +
2060     rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
2061     if (rval != QLA_SUCCESS)
2062     break;
2063     @@ -3500,7 +3504,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
2064     atomic_read(&fcport->state),
2065     fcport->flags, fcport->fc4_type,
2066     fcport->scan_state);
2067     - qlt_fc_port_deleted(vha, fcport);
2068     + qlt_fc_port_deleted(vha, fcport,
2069     + discovery_gen);
2070     }
2071     }
2072     }
2073     @@ -4277,6 +4282,14 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
2074     atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
2075     spin_unlock_irqrestore(&ha->vport_slock, flags);
2076     qla2x00_rport_del(fcport);
2077     +
2078     + /*
2079     + * Release the target mode FC NEXUS in
2080     + * qla_target.c, if target mod is enabled.
2081     + */
2082     + qlt_fc_port_deleted(vha, fcport,
2083     + base_vha->total_fcport_update_gen);
2084     +
2085     spin_lock_irqsave(&ha->vport_slock, flags);
2086     }
2087     }
2088     @@ -4944,7 +4957,7 @@ qla25xx_init_queues(struct qla_hw_data *ha)
2089    
2090     for (i = 1; i < ha->max_rsp_queues; i++) {
2091     rsp = ha->rsp_q_map[i];
2092     - if (rsp) {
2093     + if (rsp && test_bit(i, ha->rsp_qid_map)) {
2094     rsp->options &= ~BIT_0;
2095     ret = qla25xx_init_rsp_que(base_vha, rsp);
2096     if (ret != QLA_SUCCESS)
2097     @@ -4959,8 +4972,8 @@ qla25xx_init_queues(struct qla_hw_data *ha)
2098     }
2099     for (i = 1; i < ha->max_req_queues; i++) {
2100     req = ha->req_q_map[i];
2101     - if (req) {
2102     - /* Clear outstanding commands array. */
2103     + if (req && test_bit(i, ha->req_qid_map)) {
2104     + /* Clear outstanding commands array. */
2105     req->options &= ~BIT_0;
2106     ret = qla25xx_init_req_que(base_vha, req);
2107     if (ret != QLA_SUCCESS)
2108     diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
2109     index a1ab25fca874..dc96f31a8831 100644
2110     --- a/drivers/scsi/qla2xxx/qla_iocb.c
2111     +++ b/drivers/scsi/qla2xxx/qla_iocb.c
2112     @@ -1943,6 +1943,9 @@ qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2113     logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2114     logio->control_flags =
2115     cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
2116     + if (!sp->fcport->tgt_session ||
2117     + !sp->fcport->tgt_session->keep_nport_handle)
2118     + logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
2119     logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2120     logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2121     logio->port_id[1] = sp->fcport->d_id.b.area;
2122     diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
2123     index 6dc14cd782b2..1f3991ba7580 100644
2124     --- a/drivers/scsi/qla2xxx/qla_isr.c
2125     +++ b/drivers/scsi/qla2xxx/qla_isr.c
2126     @@ -2992,9 +2992,9 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
2127     "MSI-X: Failed to enable support "
2128     "-- %d/%d\n Retry with %d vectors.\n",
2129     ha->msix_count, ret, ret);
2130     + ha->msix_count = ret;
2131     + ha->max_rsp_queues = ha->msix_count - 1;
2132     }
2133     - ha->msix_count = ret;
2134     - ha->max_rsp_queues = ha->msix_count - 1;
2135     ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
2136     ha->msix_count, GFP_KERNEL);
2137     if (!ha->msix_entries) {
2138     diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
2139     index cc94192511cf..63abed122adf 100644
2140     --- a/drivers/scsi/qla2xxx/qla_mid.c
2141     +++ b/drivers/scsi/qla2xxx/qla_mid.c
2142     @@ -601,7 +601,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
2143     /* Delete request queues */
2144     for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
2145     req = ha->req_q_map[cnt];
2146     - if (req) {
2147     + if (req && test_bit(cnt, ha->req_qid_map)) {
2148     ret = qla25xx_delete_req_que(vha, req);
2149     if (ret != QLA_SUCCESS) {
2150     ql_log(ql_log_warn, vha, 0x00ea,
2151     @@ -615,7 +615,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
2152     /* Delete response queues */
2153     for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
2154     rsp = ha->rsp_q_map[cnt];
2155     - if (rsp) {
2156     + if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
2157     ret = qla25xx_delete_rsp_que(vha, rsp);
2158     if (ret != QLA_SUCCESS) {
2159     ql_log(ql_log_warn, vha, 0x00eb,
2160     diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
2161     index 7462dd70b150..d00725574577 100644
2162     --- a/drivers/scsi/qla2xxx/qla_os.c
2163     +++ b/drivers/scsi/qla2xxx/qla_os.c
2164     @@ -398,6 +398,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
2165     int cnt;
2166    
2167     for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
2168     + if (!test_bit(cnt, ha->req_qid_map))
2169     + continue;
2170     +
2171     req = ha->req_q_map[cnt];
2172     qla2x00_free_req_que(ha, req);
2173     }
2174     @@ -405,6 +408,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
2175     ha->req_q_map = NULL;
2176    
2177     for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
2178     + if (!test_bit(cnt, ha->rsp_qid_map))
2179     + continue;
2180     +
2181     rsp = ha->rsp_q_map[cnt];
2182     qla2x00_free_rsp_que(ha, rsp);
2183     }
2184     @@ -3229,11 +3235,14 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
2185     spin_lock_irqsave(vha->host->host_lock, flags);
2186     fcport->drport = rport;
2187     spin_unlock_irqrestore(vha->host->host_lock, flags);
2188     + qlt_do_generation_tick(vha, &base_vha->total_fcport_update_gen);
2189     set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
2190     qla2xxx_wake_dpc(base_vha);
2191     } else {
2192     + int now;
2193     fc_remote_port_delete(rport);
2194     - qlt_fc_port_deleted(vha, fcport);
2195     + qlt_do_generation_tick(vha, &now);
2196     + qlt_fc_port_deleted(vha, fcport, now);
2197     }
2198     }
2199    
2200     @@ -3763,8 +3772,11 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
2201     INIT_LIST_HEAD(&vha->vp_fcports);
2202     INIT_LIST_HEAD(&vha->work_list);
2203     INIT_LIST_HEAD(&vha->list);
2204     + INIT_LIST_HEAD(&vha->qla_cmd_list);
2205     + INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list);
2206    
2207     spin_lock_init(&vha->work_lock);
2208     + spin_lock_init(&vha->cmd_list_lock);
2209    
2210     sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
2211     ql_dbg(ql_dbg_init, vha, 0x0041,
2212     diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
2213     index 496a733d0ca3..df6193b48177 100644
2214     --- a/drivers/scsi/qla2xxx/qla_target.c
2215     +++ b/drivers/scsi/qla2xxx/qla_target.c
2216     @@ -114,6 +114,10 @@ static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
2217     struct atio_from_isp *atio, uint16_t status, int qfull);
2218     static void qlt_disable_vha(struct scsi_qla_host *vha);
2219     static void qlt_clear_tgt_db(struct qla_tgt *tgt);
2220     +static void qlt_send_notify_ack(struct scsi_qla_host *vha,
2221     + struct imm_ntfy_from_isp *ntfy,
2222     + uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
2223     + uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
2224     /*
2225     * Global Variables
2226     */
2227     @@ -123,6 +127,16 @@ static struct workqueue_struct *qla_tgt_wq;
2228     static DEFINE_MUTEX(qla_tgt_mutex);
2229     static LIST_HEAD(qla_tgt_glist);
2230    
2231     +/* This API intentionally takes dest as a parameter, rather than returning
2232     + * an int value, to keep the caller from forgetting to issue wmb() after the store */
2233     +void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
2234     +{
2235     + scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
2236     + *dest = atomic_inc_return(&base_vha->generation_tick);
2237     + /* memory barrier */
2238     + wmb();
2239     +}
2240     +
2241     /* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
2242     static struct qla_tgt_sess *qlt_find_sess_by_port_name(
2243     struct qla_tgt *tgt,
2244     @@ -382,14 +396,73 @@ static void qlt_free_session_done(struct work_struct *work)
2245     struct qla_tgt *tgt = sess->tgt;
2246     struct scsi_qla_host *vha = sess->vha;
2247     struct qla_hw_data *ha = vha->hw;
2248     + unsigned long flags;
2249     + bool logout_started = false;
2250     + fc_port_t fcport;
2251     +
2252     + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
2253     + "%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
2254     + " s_id %02x:%02x:%02x logout %d keep %d plogi %d\n",
2255     + __func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
2256     + sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
2257     + sess->logout_on_delete, sess->keep_nport_handle,
2258     + sess->plogi_ack_needed);
2259    
2260     BUG_ON(!tgt);
2261     +
2262     + if (sess->logout_on_delete) {
2263     + int rc;
2264     +
2265     + memset(&fcport, 0, sizeof(fcport));
2266     + fcport.loop_id = sess->loop_id;
2267     + fcport.d_id = sess->s_id;
2268     + memcpy(fcport.port_name, sess->port_name, WWN_SIZE);
2269     + fcport.vha = vha;
2270     + fcport.tgt_session = sess;
2271     +
2272     + rc = qla2x00_post_async_logout_work(vha, &fcport, NULL);
2273     + if (rc != QLA_SUCCESS)
2274     + ql_log(ql_log_warn, vha, 0xf085,
2275     + "Schedule logo failed sess %p rc %d\n",
2276     + sess, rc);
2277     + else
2278     + logout_started = true;
2279     + }
2280     +
2281     /*
2282     * Release the target session for FC Nexus from fabric module code.
2283     */
2284     if (sess->se_sess != NULL)
2285     ha->tgt.tgt_ops->free_session(sess);
2286    
2287     + if (logout_started) {
2288     + bool traced = false;
2289     +
2290     + while (!ACCESS_ONCE(sess->logout_completed)) {
2291     + if (!traced) {
2292     + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
2293     + "%s: waiting for sess %p logout\n",
2294     + __func__, sess);
2295     + traced = true;
2296     + }
2297     + msleep(100);
2298     + }
2299     +
2300     + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf087,
2301     + "%s: sess %p logout completed\n",
2302     + __func__, sess);
2303     + }
2304     +
2305     + spin_lock_irqsave(&ha->hardware_lock, flags);
2306     +
2307     + if (sess->plogi_ack_needed)
2308     + qlt_send_notify_ack(vha, &sess->tm_iocb,
2309     + 0, 0, 0, 0, 0, 0);
2310     +
2311     + list_del(&sess->sess_list_entry);
2312     +
2313     + spin_unlock_irqrestore(&ha->hardware_lock, flags);
2314     +
2315     ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
2316     "Unregistration of sess %p finished\n", sess);
2317    
2318     @@ -410,9 +483,9 @@ void qlt_unreg_sess(struct qla_tgt_sess *sess)
2319    
2320     vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
2321    
2322     - list_del(&sess->sess_list_entry);
2323     - if (sess->deleted)
2324     - list_del(&sess->del_list_entry);
2325     + if (!list_empty(&sess->del_list_entry))
2326     + list_del_init(&sess->del_list_entry);
2327     + sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
2328    
2329     INIT_WORK(&sess->free_work, qlt_free_session_done);
2330     schedule_work(&sess->free_work);
2331     @@ -490,27 +563,38 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
2332     struct qla_tgt *tgt = sess->tgt;
2333     uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;
2334    
2335     - if (sess->deleted)
2336     - return;
2337     + if (sess->deleted) {
2338     + /* Upgrade to unconditional deletion in case it was temporary */
2339     + if (immediate && sess->deleted == QLA_SESS_DELETION_PENDING)
2340     + list_del(&sess->del_list_entry);
2341     + else
2342     + return;
2343     + }
2344    
2345     ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
2346     "Scheduling sess %p for deletion\n", sess);
2347     - list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
2348     - sess->deleted = 1;
2349    
2350     - if (immediate)
2351     + if (immediate) {
2352     dev_loss_tmo = 0;
2353     + sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
2354     + list_add(&sess->del_list_entry, &tgt->del_sess_list);
2355     + } else {
2356     + sess->deleted = QLA_SESS_DELETION_PENDING;
2357     + list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
2358     + }
2359    
2360     sess->expires = jiffies + dev_loss_tmo * HZ;
2361    
2362     ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
2363     - "qla_target(%d): session for port %8phC (loop ID %d) scheduled for "
2364     - "deletion in %u secs (expires: %lu) immed: %d\n",
2365     - sess->vha->vp_idx, sess->port_name, sess->loop_id, dev_loss_tmo,
2366     - sess->expires, immediate);
2367     + "qla_target(%d): session for port %8phC (loop ID %d s_id %02x:%02x:%02x)"
2368     + " scheduled for deletion in %u secs (expires: %lu) immed: %d, logout: %d, gen: %#x\n",
2369     + sess->vha->vp_idx, sess->port_name, sess->loop_id,
2370     + sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
2371     + dev_loss_tmo, sess->expires, immediate, sess->logout_on_delete,
2372     + sess->generation);
2373    
2374     if (immediate)
2375     - schedule_delayed_work(&tgt->sess_del_work, 0);
2376     + mod_delayed_work(system_wq, &tgt->sess_del_work, 0);
2377     else
2378     schedule_delayed_work(&tgt->sess_del_work,
2379     sess->expires - jiffies);
2380     @@ -579,9 +663,9 @@ out_free_id_list:
2381     /* ha->hardware_lock supposed to be held on entry */
2382     static void qlt_undelete_sess(struct qla_tgt_sess *sess)
2383     {
2384     - BUG_ON(!sess->deleted);
2385     + BUG_ON(sess->deleted != QLA_SESS_DELETION_PENDING);
2386    
2387     - list_del(&sess->del_list_entry);
2388     + list_del_init(&sess->del_list_entry);
2389     sess->deleted = 0;
2390     }
2391    
2392     @@ -600,7 +684,9 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
2393     del_list_entry);
2394     elapsed = jiffies;
2395     if (time_after_eq(elapsed, sess->expires)) {
2396     - qlt_undelete_sess(sess);
2397     + /* No turning back */
2398     + list_del_init(&sess->del_list_entry);
2399     + sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
2400    
2401     ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
2402     "Timeout: sess %p about to be deleted\n",
2403     @@ -644,6 +730,13 @@ static struct qla_tgt_sess *qlt_create_sess(
2404     fcport->d_id.b.al_pa, fcport->d_id.b.area,
2405     fcport->loop_id);
2406    
2407     + /* Cannot undelete at this point */
2408     + if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
2409     + spin_unlock_irqrestore(&ha->hardware_lock,
2410     + flags);
2411     + return NULL;
2412     + }
2413     +
2414     if (sess->deleted)
2415     qlt_undelete_sess(sess);
2416    
2417     @@ -653,6 +746,9 @@ static struct qla_tgt_sess *qlt_create_sess(
2418    
2419     if (sess->local && !local)
2420     sess->local = 0;
2421     +
2422     + qlt_do_generation_tick(vha, &sess->generation);
2423     +
2424     spin_unlock_irqrestore(&ha->hardware_lock, flags);
2425    
2426     return sess;
2427     @@ -674,6 +770,14 @@ static struct qla_tgt_sess *qlt_create_sess(
2428     sess->s_id = fcport->d_id;
2429     sess->loop_id = fcport->loop_id;
2430     sess->local = local;
2431     + INIT_LIST_HEAD(&sess->del_list_entry);
2432     +
2433     + /* Under normal circumstances we want to log out from firmware when
2434     + * the session eventually ends, and release the corresponding nport handle.
2435     + * In exceptional cases (e.g. when a new PLOGI is waiting) the corresponding
2436     + * code will adjust these flags as necessary. */
2437     + sess->logout_on_delete = 1;
2438     + sess->keep_nport_handle = 0;
2439    
2440     ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
2441     "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
2442     @@ -706,6 +810,7 @@ static struct qla_tgt_sess *qlt_create_sess(
2443     spin_lock_irqsave(&ha->hardware_lock, flags);
2444     list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
2445     vha->vha_tgt.qla_tgt->sess_count++;
2446     + qlt_do_generation_tick(vha, &sess->generation);
2447     spin_unlock_irqrestore(&ha->hardware_lock, flags);
2448    
2449     ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
2450     @@ -719,7 +824,7 @@ static struct qla_tgt_sess *qlt_create_sess(
2451     }
2452    
2453     /*
2454     - * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port()
2455     + * Called from qla2x00_reg_remote_port()
2456     */
2457     void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
2458     {
2459     @@ -751,6 +856,10 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
2460     mutex_unlock(&vha->vha_tgt.tgt_mutex);
2461    
2462     spin_lock_irqsave(&ha->hardware_lock, flags);
2463     + } else if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
2464     + /* Point of no return */
2465     + spin_unlock_irqrestore(&ha->hardware_lock, flags);
2466     + return;
2467     } else {
2468     kref_get(&sess->se_sess->sess_kref);
2469    
2470     @@ -781,7 +890,12 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
2471     spin_unlock_irqrestore(&ha->hardware_lock, flags);
2472     }
2473    
2474     -void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
2475     +/*
2476     + * max_gen - specifies maximum session generation
2477     + * at which this deletion request is still valid
2478     + */
2479     +void
2480     +qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
2481     {
2482     struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
2483     struct qla_tgt_sess *sess;
2484     @@ -800,6 +914,15 @@ void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
2485     return;
2486     }
2487    
2488     + if (max_gen - sess->generation < 0) {
2489     + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
2490     + "Ignoring stale deletion request for se_sess %p / sess %p"
2491     + " for port %8phC, req_gen %d, sess_gen %d\n",
2492     + sess->se_sess, sess, sess->port_name, max_gen,
2493     + sess->generation);
2494     + return;
2495     + }
2496     +
2497     ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);
2498    
2499     sess->local = 1;
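
The max_gen test above ("max_gen - sess->generation < 0") compares generations by the sign of their difference, the same idiom as the kernel's time_after(), so the result stays correct when the atomic generation counter eventually wraps. A standalone sketch follows; the kernel can write the signed subtraction directly because it builds with wrapping signed arithmetic, while portable C should route the subtraction through unsigned as below.

	#include <limits.h>
	#include <stdio.h>

	/* wraparound-safe "is this deletion request older than the
	 * session?" test, as in qlt_fc_port_deleted() */
	static int stale(int max_gen, int sess_gen)
	{
		return (int)((unsigned int)max_gen - (unsigned int)sess_gen) < 0;
	}

	int main(void)
	{
		printf("%d\n", stale(101, 100)); /* 0: request still valid */
		printf("%d\n", stale(100, 101)); /* 1: session is newer, stale */
		/* counter wrapped between the request and the check */
		printf("%d\n", stale(INT_MAX, INT_MIN)); /* 1: stale */
		return 0;
	}
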
2500     @@ -1170,6 +1293,70 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
2501     FCP_TMF_CMPL, true);
2502     }
2503    
2504     +static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
2505     +{
2506     + struct qla_tgt_sess_op *op;
2507     + struct qla_tgt_cmd *cmd;
2508     +
2509     + spin_lock(&vha->cmd_list_lock);
2510     +
2511     + list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
2512     + if (tag == op->atio.u.isp24.exchange_addr) {
2513     + op->aborted = true;
2514     + spin_unlock(&vha->cmd_list_lock);
2515     + return 1;
2516     + }
2517     + }
2518     +
2519     + list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
2520     + if (tag == cmd->atio.u.isp24.exchange_addr) {
2521     + cmd->state = QLA_TGT_STATE_ABORTED;
2522     + spin_unlock(&vha->cmd_list_lock);
2523     + return 1;
2524     + }
2525     + }
2526     +
2527     + spin_unlock(&vha->cmd_list_lock);
2528     + return 0;
2529     +}
2530     +
2531     +/* drop cmds for the given lun
2532     + * XXX only looks for cmds on the port through which the lun reset was received
2533     + * XXX does not go through the lists of other ports (which may have cmds
2534     + * for the same lun)
2535     + */
2536     +static void abort_cmds_for_lun(struct scsi_qla_host *vha,
2537     + uint32_t lun, uint8_t *s_id)
2538     +{
2539     + struct qla_tgt_sess_op *op;
2540     + struct qla_tgt_cmd *cmd;
2541     + uint32_t key;
2542     +
2543     + key = sid_to_key(s_id);
2544     + spin_lock(&vha->cmd_list_lock);
2545     + list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
2546     + uint32_t op_key;
2547     + uint32_t op_lun;
2548     +
2549     + op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
2550     + op_lun = scsilun_to_int(
2551     + (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
2552     + if (op_key == key && op_lun == lun)
2553     + op->aborted = true;
2554     + }
2555     + list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
2556     + uint32_t cmd_key;
2557     + uint32_t cmd_lun;
2558     +
2559     + cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
2560     + cmd_lun = scsilun_to_int(
2561     + (struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
2562     + if (cmd_key == key && cmd_lun == lun)
2563     + cmd->state = QLA_TGT_STATE_ABORTED;
2564     + }
2565     + spin_unlock(&vha->cmd_list_lock);
2566     +}
2567     +
2568     /* ha->hardware_lock supposed to be held on entry */
2569     static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
2570     struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
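
abort_cmds_for_lun() above matches queued commands by the pair (initiator S_ID, LUN), folding the 3-byte S_ID into one integer with sid_to_key() so each list entry costs a single compare. A sketch of that matching rule with plain arrays standing in for the driver's two command lists; the big-endian byte packing in sid_to_key() below is an assumption based on its name and use, not taken from this patch.

	#include <stdint.h>
	#include <stdio.h>

	/* assumed shape of sid_to_key(): 3 S_ID bytes -> one u32 */
	static uint32_t sid_to_key(const uint8_t *s_id)
	{
		return (uint32_t)s_id[0] << 16 | (uint32_t)s_id[1] << 8 | s_id[2];
	}

	struct fake_cmd {
		uint8_t s_id[3];
		uint32_t lun;
		int aborted;
	};

	/* flag every command whose (initiator, lun) matches the reset */
	static void abort_for_lun(struct fake_cmd *cmds, int n,
				  const uint8_t *s_id, uint32_t lun)
	{
		uint32_t key = sid_to_key(s_id);

		for (int i = 0; i < n; i++)
			if (sid_to_key(cmds[i].s_id) == key && cmds[i].lun == lun)
				cmds[i].aborted = 1;
	}

	int main(void)
	{
		struct fake_cmd cmds[] = {
			{ { 0x01, 0x02, 0x03 }, 0, 0 },
			{ { 0x01, 0x02, 0x03 }, 1, 0 },
			{ { 0x04, 0x05, 0x06 }, 0, 0 },
		};
		uint8_t initiator[3] = { 0x01, 0x02, 0x03 };

		abort_for_lun(cmds, 3, initiator, 0);
		for (int i = 0; i < 3; i++)
			printf("cmd %d aborted=%d\n", i, cmds[i].aborted);
		return 0;
	}
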
2571     @@ -1194,8 +1381,19 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
2572     }
2573     spin_unlock(&se_sess->sess_cmd_lock);
2574    
2575     - if (!found_lun)
2576     - return -ENOENT;
2577     + /* cmd not in LIO lists, look in qla list */
2578     + if (!found_lun) {
2579     + if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
2580     + /* send TASK_ABORT response immediately */
2581     + qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_CMPL, false);
2582     + return 0;
2583     + } else {
2584     + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf081,
2585     + "unable to find cmd in driver or LIO for tag 0x%x\n",
2586     + abts->exchange_addr_to_abort);
2587     + return -ENOENT;
2588     + }
2589     + }
2590    
2591     ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
2592     "qla_target(%d): task abort (tag=%d)\n",
2593     @@ -1279,6 +1477,11 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
2594     return;
2595     }
2596    
2597     + if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
2598     + qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
2599     + return;
2600     + }
2601     +
2602     rc = __qlt_24xx_handle_abts(vha, abts, sess);
2603     if (rc != 0) {
2604     ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
2605     @@ -1721,21 +1924,6 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
2606     struct qla_hw_data *ha = vha->hw;
2607     struct se_cmd *se_cmd = &cmd->se_cmd;
2608    
2609     - if (unlikely(cmd->aborted)) {
2610     - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
2611     - "qla_target(%d): terminating exchange "
2612     - "for aborted cmd=%p (se_cmd=%p, tag=%d)", vha->vp_idx, cmd,
2613     - se_cmd, cmd->tag);
2614     -
2615     - cmd->state = QLA_TGT_STATE_ABORTED;
2616     - cmd->cmd_flags |= BIT_6;
2617     -
2618     - qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
2619     -
2620     - /* !! At this point cmd could be already freed !! */
2621     - return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
2622     - }
2623     -
2624     prm->cmd = cmd;
2625     prm->tgt = tgt;
2626     prm->rq_result = scsi_status;
2627     @@ -2298,6 +2486,19 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
2628     unsigned long flags = 0;
2629     int res;
2630    
2631     + spin_lock_irqsave(&ha->hardware_lock, flags);
2632     + if (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
2633     + cmd->state = QLA_TGT_STATE_PROCESSED;
2634     + if (cmd->sess->logout_completed)
2635     + /* no need to terminate. FW already freed exchange. */
2636     + qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
2637     + else
2638     + qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
2639     + spin_unlock_irqrestore(&ha->hardware_lock, flags);
2640     + return 0;
2641     + }
2642     + spin_unlock_irqrestore(&ha->hardware_lock, flags);
2643     +
2644     memset(&prm, 0, sizeof(prm));
2645     qlt_check_srr_debug(cmd, &xmit_type);
2646    
2647     @@ -2310,9 +2511,6 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
2648     res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
2649     &full_req_cnt);
2650     if (unlikely(res != 0)) {
2651     - if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
2652     - return 0;
2653     -
2654     return res;
2655     }
2656    
2657     @@ -2459,7 +2657,8 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
2658    
2659     spin_lock_irqsave(&ha->hardware_lock, flags);
2660    
2661     - if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) {
2662     + if (qla2x00_reset_active(vha) || (cmd->reset_count != ha->chip_reset) ||
2663     + (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)) {
2664     /*
2665     * Either a chip reset is active or this request was from
2666     * previous life, just abort the processing.
2667     @@ -2652,6 +2851,89 @@ out:
2668    
2669     /* If hardware_lock held on entry, might drop it, then reacquire */
2670     /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
2671     +static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
2672     + struct imm_ntfy_from_isp *ntfy)
2673     +{
2674     + struct nack_to_isp *nack;
2675     + struct qla_hw_data *ha = vha->hw;
2676     + request_t *pkt;
2677     + int ret = 0;
2678     +
2679     + ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
2680     + "Sending TERM ELS CTIO (ha=%p)\n", ha);
2681     +
2682     + pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
2683     + if (pkt == NULL) {
2684     + ql_dbg(ql_dbg_tgt, vha, 0xe080,
2685     + "qla_target(%d): %s failed: unable to allocate "
2686     + "request packet\n", vha->vp_idx, __func__);
2687     + return -ENOMEM;
2688     + }
2689     +
2690     + pkt->entry_type = NOTIFY_ACK_TYPE;
2691     + pkt->entry_count = 1;
2692     + pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
2693     +
2694     + nack = (struct nack_to_isp *)pkt;
2695     + nack->ox_id = ntfy->ox_id;
2696     +
2697     + nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
2698     + if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
2699     + nack->u.isp24.flags = ntfy->u.isp24.flags &
2700     + __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
2701     + }
2702     +
2703     + /* terminate */
2704     + nack->u.isp24.flags |=
2705     + __constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);
2706     +
2707     + nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
2708     + nack->u.isp24.status = ntfy->u.isp24.status;
2709     + nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
2710     + nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
2711     + nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
2712     + nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
2713     + nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
2714     + nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
2715     +
2716     + qla2x00_start_iocbs(vha, vha->req);
2717     + return ret;
2718     +}
2719     +
2720     +static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
2721     + struct imm_ntfy_from_isp *imm, int ha_locked)
2722     +{
2723     + unsigned long flags = 0;
2724     + int rc;
2725     +
2726     + if (qlt_issue_marker(vha, ha_locked) < 0)
2727     + return;
2728     +
2729     + if (ha_locked) {
2730     + rc = __qlt_send_term_imm_notif(vha, imm);
2731     +
2732     +#if 0 /* Todo */
2733     + if (rc == -ENOMEM)
2734     + qlt_alloc_qfull_cmd(vha, imm, 0, 0);
2735     +#endif
2736     + goto done;
2737     + }
2738     +
2739     + spin_lock_irqsave(&vha->hw->hardware_lock, flags);
2740     + rc = __qlt_send_term_imm_notif(vha, imm);
2741     +
2742     +#if 0 /* Todo */
2743     + if (rc == -ENOMEM)
2744     + qlt_alloc_qfull_cmd(vha, imm, 0, 0);
2745     +#endif
2746     +
2747     +done:
2748     + if (!ha_locked)
2749     + spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
2750     +}
2751     +
2752     +/* If hardware_lock held on entry, might drop it, then reacquire */
2753     +/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
2754     static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
2755     struct qla_tgt_cmd *cmd,
2756     struct atio_from_isp *atio)
2757     @@ -2794,6 +3076,24 @@ static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
2758    
2759     }
2760    
2761     +void qlt_abort_cmd(struct qla_tgt_cmd *cmd)
2762     +{
2763     + struct qla_tgt *tgt = cmd->tgt;
2764     + struct scsi_qla_host *vha = tgt->vha;
2765     + struct se_cmd *se_cmd = &cmd->se_cmd;
2766     +
2767     + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
2768     + "qla_target(%d): terminating exchange for aborted cmd=%p "
2769     + "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
2770     + cmd->tag);
2771     +
2772     + cmd->state = QLA_TGT_STATE_ABORTED;
2773     + cmd->cmd_flags |= BIT_6;
2774     +
2775     + qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
2776     +}
2777     +EXPORT_SYMBOL(qlt_abort_cmd);
2778     +
2779     void qlt_free_cmd(struct qla_tgt_cmd *cmd)
2780     {
2781     struct qla_tgt_sess *sess = cmd->sess;
2782     @@ -3265,6 +3565,13 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
2783     if (tgt->tgt_stop)
2784     goto out_term;
2785    
2786     + if (cmd->state == QLA_TGT_STATE_ABORTED) {
2787     + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
2788     + "cmd with tag %u is aborted\n",
2789     + cmd->atio.u.isp24.exchange_addr);
2790     + goto out_term;
2791     + }
2792     +
2793     cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
2794     cmd->tag = atio->u.isp24.exchange_addr;
2795     cmd->unpacked_lun = scsilun_to_int(
2796     @@ -3318,6 +3625,12 @@ out_term:
2797     static void qlt_do_work(struct work_struct *work)
2798     {
2799     struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
2800     + scsi_qla_host_t *vha = cmd->vha;
2801     + unsigned long flags;
2802     +
2803     + spin_lock_irqsave(&vha->cmd_list_lock, flags);
2804     + list_del(&cmd->cmd_list);
2805     + spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
2806    
2807     __qlt_do_work(cmd);
2808     }
2809     @@ -3369,14 +3682,25 @@ static void qlt_create_sess_from_atio(struct work_struct *work)
2810     unsigned long flags;
2811     uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
2812    
2813     + spin_lock_irqsave(&vha->cmd_list_lock, flags);
2814     + list_del(&op->cmd_list);
2815     + spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
2816     +
2817     + if (op->aborted) {
2818     + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf083,
2819     + "sess_op with tag %u is aborted\n",
2820     + op->atio.u.isp24.exchange_addr);
2821     + goto out_term;
2822     + }
2823     +
2824     ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
2825     - "qla_target(%d): Unable to find wwn login"
2826     - " (s_id %x:%x:%x), trying to create it manually\n",
2827     - vha->vp_idx, s_id[0], s_id[1], s_id[2]);
2828     + "qla_target(%d): Unable to find wwn login"
2829     + " (s_id %x:%x:%x), trying to create it manually\n",
2830     + vha->vp_idx, s_id[0], s_id[1], s_id[2]);
2831    
2832     if (op->atio.u.raw.entry_count > 1) {
2833     ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
2834     - "Dropping multy entry atio %p\n", &op->atio);
2835     + "Dropping multy entry atio %p\n", &op->atio);
2836     goto out_term;
2837     }
2838    
2839     @@ -3441,10 +3765,25 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
2840    
2841     memcpy(&op->atio, atio, sizeof(*atio));
2842     op->vha = vha;
2843     +
2844     + spin_lock(&vha->cmd_list_lock);
2845     + list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list);
2846     + spin_unlock(&vha->cmd_list_lock);
2847     +
2848     INIT_WORK(&op->work, qlt_create_sess_from_atio);
2849     queue_work(qla_tgt_wq, &op->work);
2850     return 0;
2851     }
2852     +
2853     + /* Another WWN used to have our s_id. Our PLOGI scheduled its
2854     + * session deletion, but it's still in the sess_del_work wq */
2855     + if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
2856     + ql_dbg(ql_dbg_io, vha, 0x3061,
2857     + "New command while old session %p is being deleted\n",
2858     + sess);
2859     + return -EFAULT;
2860     + }
2861     +
2862     /*
2863     * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
2864     */
2865     @@ -3460,6 +3799,11 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
2866    
2867     cmd->cmd_in_wq = 1;
2868     cmd->cmd_flags |= BIT_0;
2869     +
2870     + spin_lock(&vha->cmd_list_lock);
2871     + list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
2872     + spin_unlock(&vha->cmd_list_lock);
2873     +
2874     INIT_WORK(&cmd->work, qlt_do_work);
2875     queue_work(qla_tgt_wq, &cmd->work);
2876     return 0;
2877     @@ -3473,6 +3817,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
2878     struct scsi_qla_host *vha = sess->vha;
2879     struct qla_hw_data *ha = vha->hw;
2880     struct qla_tgt_mgmt_cmd *mcmd;
2881     + struct atio_from_isp *a = (struct atio_from_isp *)iocb;
2882     int res;
2883     uint8_t tmr_func;
2884    
2885     @@ -3513,6 +3858,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
2886     ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
2887     "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
2888     tmr_func = TMR_LUN_RESET;
2889     + abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
2890     break;
2891    
2892     case QLA_TGT_CLEAR_TS:
2893     @@ -3601,6 +3947,9 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
2894     sizeof(struct atio_from_isp));
2895     }
2896    
2897     + if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)
2898     + return -EFAULT;
2899     +
2900     return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
2901     }
2902    
2903     @@ -3666,22 +4015,280 @@ static int qlt_abort_task(struct scsi_qla_host *vha,
2904     return __qlt_abort_task(vha, iocb, sess);
2905     }
2906    
2907     +void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
2908     +{
2909     + if (fcport->tgt_session) {
2910     + if (rc != MBS_COMMAND_COMPLETE) {
2911     + ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
2912     + "%s: se_sess %p / sess %p from"
2913     + " port %8phC loop_id %#04x s_id %02x:%02x:%02x"
2914     + " LOGO failed: %#x\n",
2915     + __func__,
2916     + fcport->tgt_session->se_sess,
2917     + fcport->tgt_session,
2918     + fcport->port_name, fcport->loop_id,
2919     + fcport->d_id.b.domain, fcport->d_id.b.area,
2920     + fcport->d_id.b.al_pa, rc);
2921     + }
2922     +
2923     + fcport->tgt_session->logout_completed = 1;
2924     + }
2925     +}
2926     +
2927     +static void qlt_swap_imm_ntfy_iocb(struct imm_ntfy_from_isp *a,
2928     + struct imm_ntfy_from_isp *b)
2929     +{
2930     + struct imm_ntfy_from_isp tmp;
2931     + memcpy(&tmp, a, sizeof(struct imm_ntfy_from_isp));
2932     + memcpy(a, b, sizeof(struct imm_ntfy_from_isp));
2933     + memcpy(b, &tmp, sizeof(struct imm_ntfy_from_isp));
2934     +}
2935     +
2936     +/*
2937     + * ha->hardware_lock is supposed to be held on entry (to protect tgt->sess_list).
2938     + *
2939     + * Schedules sessions with matching port_id/loop_id but a different wwn for
2940     + * deletion. Returns the existing session with matching wwn if present,
2941     + * NULL otherwise.
2942     + */
2943     +static struct qla_tgt_sess *
2944     +qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn,
2945     + port_id_t port_id, uint16_t loop_id)
2946     +{
2947     + struct qla_tgt_sess *sess = NULL, *other_sess;
2948     + uint64_t other_wwn;
2949     +
2950     + list_for_each_entry(other_sess, &tgt->sess_list, sess_list_entry) {
2951     +
2952     + other_wwn = wwn_to_u64(other_sess->port_name);
2953     +
2954     + if (wwn == other_wwn) {
2955     + WARN_ON(sess);
2956     + sess = other_sess;
2957     + continue;
2958     + }
2959     +
2960     + /* find other sess with nport_id collision */
2961     + if (port_id.b24 == other_sess->s_id.b24) {
2962     + if (loop_id != other_sess->loop_id) {
2963     + ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000c,
2964     + "Invalidating sess %p loop_id %d wwn %llx.\n",
2965     + other_sess, other_sess->loop_id, other_wwn);
2966     +
2967     + /*
2968     + * logout_on_delete is set by default, but another
2969     + * session that has the same s_id/loop_id combo
2970     + * might have cleared it when requested this session
2971     + * deletion, so don't touch it
2972     + */
2973     + qlt_schedule_sess_for_deletion(other_sess, true);
2974     + } else {
2975     + /*
2976     + * Another wwn used to have our s_id/loop_id
2977     + * combo - kill the session, but don't log out
2978     + */
2979     + sess->logout_on_delete = 0;
2980     + qlt_schedule_sess_for_deletion(other_sess,
2981     + true);
2982     + }
2983     + continue;
2984     + }
2985     +
2986     + /* find other sess with nport handle collision */
2987     + if (loop_id == other_sess->loop_id) {
2988     + ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000d,
2989     + "Invalidating sess %p loop_id %d wwn %llx.\n",
2990     + other_sess, other_sess->loop_id, other_wwn);
2991     +
2992     + /* Same loop_id but different s_id
2993     + * Ok to kill and logout */
2994     + qlt_schedule_sess_for_deletion(other_sess, true);
2995     + }
2996     + }
2997     +
2998     + return sess;
2999     +}
3000     +
3001     +/* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
3002     +static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
3003     +{
3004     + struct qla_tgt_sess_op *op;
3005     + struct qla_tgt_cmd *cmd;
3006     + uint32_t key;
3007     + int count = 0;
3008     +
3009     + key = (((u32)s_id->b.domain << 16) |
3010     + ((u32)s_id->b.area << 8) |
3011     + ((u32)s_id->b.al_pa));
3012     +
3013     + spin_lock(&vha->cmd_list_lock);
3014     + list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
3015     + uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
3016     + if (op_key == key) {
3017     + op->aborted = true;
3018     + count++;
3019     + }
3020     + }
3021     + list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
3022     + uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
3023     + if (cmd_key == key) {
3024     + cmd->state = QLA_TGT_STATE_ABORTED;
3025     + count++;
3026     + }
3027     + }
3028     + spin_unlock(&vha->cmd_list_lock);
3029     +
3030     + return count;
3031     +}
3032     +
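
For reference, a minimal userspace sketch of the mark-then-check handoff this hunk introduces: abort_cmds_for_s_id() flags queued commands under cmd_list_lock, and the worker (__qlt_do_work() earlier in this patch) re-checks the flag before touching the command. The toy_cmd type and the single-threaded loop below are illustrative only; the driver does this across a workqueue with real locking.

#include <stdbool.h>
#include <stdio.h>

struct toy_cmd {
	unsigned int key;   /* stands in for the packed S_ID */
	bool aborted;       /* stands in for QLA_TGT_STATE_ABORTED */
};

/* Mark every queued command from the given S_ID key as aborted. */
static int abort_cmds_for_key(struct toy_cmd *cmds, int n, unsigned int key)
{
	int count = 0;

	for (int i = 0; i < n; i++) {
		if (cmds[i].key == key) {
			cmds[i].aborted = true;
			count++;
		}
	}
	return count;
}

/* The worker observes the mark and terminates instead of processing. */
static void do_work(const struct toy_cmd *cmd)
{
	if (cmd->aborted) {
		printf("cmd key 0x%06x: terminated, no work done\n", cmd->key);
		return;
	}
	printf("cmd key 0x%06x: processed\n", cmd->key);
}

int main(void)
{
	struct toy_cmd cmds[] = {
		{ 0x010203, false }, { 0x010204, false }, { 0x010203, false },
	};

	abort_cmds_for_key(cmds, 3, 0x010203); /* new PLOGI from 01:02:03 */
	for (int i = 0; i < 3; i++)
		do_work(&cmds[i]);
	return 0;
}
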
3033     /*
3034     * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
3035     */
3036     static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
3037     struct imm_ntfy_from_isp *iocb)
3038     {
3039     + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
3040     + struct qla_hw_data *ha = vha->hw;
3041     + struct qla_tgt_sess *sess = NULL;
3042     + uint64_t wwn;
3043     + port_id_t port_id;
3044     + uint16_t loop_id;
3045     + uint16_t wd3_lo;
3046     int res = 0;
3047    
3048     + wwn = wwn_to_u64(iocb->u.isp24.port_name);
3049     +
3050     + port_id.b.domain = iocb->u.isp24.port_id[2];
3051     + port_id.b.area = iocb->u.isp24.port_id[1];
3052     + port_id.b.al_pa = iocb->u.isp24.port_id[0];
3053     + port_id.b.rsvd_1 = 0;
3054     +
3055     + loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
3056     +
3057     ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
3058     "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n",
3059     vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode);
3060    
3061     + /* res = 1 means ack at the end of the thread.
3062     + * res = 0 means ack async/later.
3063     + */
3064     switch (iocb->u.isp24.status_subcode) {
3065     case ELS_PLOGI:
3066     - case ELS_FLOGI:
3067     +
3068     + /* Mark all stale commands in qla_tgt_wq for deletion */
3069     + abort_cmds_for_s_id(vha, &port_id);
3070     +
3071     + if (wwn)
3072     + sess = qlt_find_sess_invalidate_other(tgt, wwn,
3073     + port_id, loop_id);
3074     +
3075     + if (!sess || IS_SW_RESV_ADDR(sess->s_id)) {
3076     + res = 1;
3077     + break;
3078     + }
3079     +
3080     + if (sess->plogi_ack_needed) {
3081     + /*
3082     + * Initiator sent another PLOGI before last PLOGI could
3083     + * finish. Swap plogi iocbs and terminate old one
3084     + * without acking, new one will get acked when session
3085     + * deletion completes.
3086     + */
3087     + ql_log(ql_log_warn, sess->vha, 0xf094,
3088     + "sess %p received double plogi.\n", sess);
3089     +
3090     + qlt_swap_imm_ntfy_iocb(iocb, &sess->tm_iocb);
3091     +
3092     + qlt_send_term_imm_notif(vha, iocb, 1);
3093     +
3094     + res = 0;
3095     + break;
3096     + }
3097     +
3098     + res = 0;
3099     +
3100     + /*
3101     + * Save immediate Notif IOCB for Ack when sess is done
3102     + * and being deleted.
3103     + */
3104     + memcpy(&sess->tm_iocb, iocb, sizeof(sess->tm_iocb));
3105     + sess->plogi_ack_needed = 1;
3106     +
3107     + /*
3108     + * Under normal circumstances we want to release nport handle
3109     + * during LOGO process to avoid nport handle leaks inside FW.
3110     + * The exception is when LOGO is done while another PLOGI with
3111     + * the same nport handle is waiting as might be the case here.
3112     + * Note: there is always a possibility of a race where session
3113     + * deletion has already started for other reasons (e.g. ACL
3114     + * removal) and now PLOGI arrives:
3115     + * 1. if PLOGI arrived in FW after nport handle has been freed,
3116     + * FW must have assigned this PLOGI a new/same handle and we
3117     + * can proceed ACK'ing it as usual when session deletion
3118     + * completes.
3119     + * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
3120     + * bit reached it, the handle has now been released. We'll
3121     + * get an error when we ACK this PLOGI. Nothing will be sent
3122     + * back to the initiator. The initiator should eventually retry
3123     + * PLOGI and the situation will correct itself.
3124     + */
3125     + sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
3126     + (sess->s_id.b24 == port_id.b24));
3127     + qlt_schedule_sess_for_deletion(sess, true);
3128     + break;
3129     +
3130     case ELS_PRLI:
3131     + wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
3132     +
3133     + if (wwn)
3134     + sess = qlt_find_sess_invalidate_other(tgt, wwn, port_id,
3135     + loop_id);
3136     +
3137     + if (sess != NULL) {
3138     + if (sess->deleted) {
3139     + /*
3140     + * Impatient initiator sent PRLI before the last
3141     + * PLOGI could finish. Force it to retry
3142     + * while the last one finishes.
3143     + */
3144     + ql_log(ql_log_warn, sess->vha, 0xf095,
3145     + "sess %p PRLI received, before plogi ack.\n",
3146     + sess);
3147     + qlt_send_term_imm_notif(vha, iocb, 1);
3148     + res = 0;
3149     + break;
3150     + }
3151     +
3152     + /*
3153     + * This shouldn't happen under normal circumstances,
3154     + * since we have deleted the old session during PLOGI
3155     + */
3156     + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
3157     + "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
3158     + sess->loop_id, sess, iocb->u.isp24.nport_handle);
3159     +
3160     + sess->local = 0;
3161     + sess->loop_id = loop_id;
3162     + sess->s_id = port_id;
3163     +
3164     + if (wd3_lo & BIT_7)
3165     + sess->conf_compl_supported = 1;
3166     +
3167     + }
3168     + res = 1; /* send notify ack */
3169     +
3170     + /* Make session global (not used in fabric mode) */
3171     + if (ha->current_topology != ISP_CFG_F) {
3172     + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3173     + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3174     + qla2xxx_wake_dpc(vha);
3175     + } else {
3176     + /* todo: else - create sess here. */
3177     + res = 1; /* send notify ack */
3178     + }
3179     +
3180     + break;
3181     +
3182     case ELS_LOGO:
3183     case ELS_PRLO:
3184     res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
3185     @@ -3699,6 +4306,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
3186     break;
3187     }
3188    
3189     + case ELS_FLOGI: /* should never happen */
3190     default:
3191     ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
3192     "qla_target(%d): Unsupported ELS command %x "
3193     @@ -5016,6 +5624,11 @@ static void qlt_abort_work(struct qla_tgt *tgt,
3194     if (!sess)
3195     goto out_term;
3196     } else {
3197     + if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
3198     + sess = NULL;
3199     + goto out_term;
3200     + }
3201     +
3202     kref_get(&sess->se_sess->sess_kref);
3203     }
3204    
3205     @@ -5070,6 +5683,11 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
3206     if (!sess)
3207     goto out_term;
3208     } else {
3209     + if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
3210     + sess = NULL;
3211     + goto out_term;
3212     + }
3213     +
3214     kref_get(&sess->se_sess->sess_kref);
3215     }
3216    
3217     diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
3218     index 332086776dfe..d30c60a1d522 100644
3219     --- a/drivers/scsi/qla2xxx/qla_target.h
3220     +++ b/drivers/scsi/qla2xxx/qla_target.h
3221     @@ -167,7 +167,24 @@ struct imm_ntfy_from_isp {
3222     uint32_t srr_rel_offs;
3223     uint16_t srr_ui;
3224     uint16_t srr_ox_id;
3225     - uint8_t reserved_4[19];
3226     + union {
3227     + struct {
3228     + uint8_t node_name[8];
3229     + } plogi; /* PLOGI/ADISC/PDISC */
3230     + struct {
3231     + /* PRLI word 3 bit 0-15 */
3232     + uint16_t wd3_lo;
3233     + uint8_t resv0[6];
3234     + } prli;
3235     + struct {
3236     + uint8_t port_id[3];
3237     + uint8_t resv1;
3238     + uint16_t nport_handle;
3239     + uint16_t resv2;
3240     + } req_els;
3241     + } u;
3242     + uint8_t port_name[8];
3243     + uint8_t resv3[3];
3244     uint8_t vp_index;
3245     uint32_t reserved_5;
3246     uint8_t port_id[3];
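
The new union overlays exactly the 19 reserved bytes it replaces: each arm is 8 bytes, followed by the 8-byte port_name and the 3-byte resv3. A standalone model of the layout (packing is assumed to match the driver's; this is not the real struct) checks the arithmetic:

#include <stdint.h>
#include <stdio.h>

union els_payload {
	struct { uint8_t node_name[8]; } plogi;             /* PLOGI/ADISC/PDISC */
	struct { uint16_t wd3_lo; uint8_t resv0[6]; } prli; /* PRLI word 3 */
	struct { uint8_t port_id[3]; uint8_t resv1;
		 uint16_t nport_handle; uint16_t resv2; } req_els;
};

int main(void)
{
	_Static_assert(sizeof(union els_payload) == 8, "each arm must be 8 bytes");
	printf("union (8) + port_name (8) + resv3 (3) = %zu bytes\n",
	       sizeof(union els_payload) + 8 + 3); /* 19 == old reserved_4[19] */
	return 0;
}
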
3247     @@ -234,6 +251,7 @@ struct nack_to_isp {
3248     uint8_t reserved[2];
3249     uint16_t ox_id;
3250     } __packed;
3251     +#define NOTIFY_ACK_FLAGS_TERMINATE BIT_3
3252     #define NOTIFY_ACK_SRR_FLAGS_ACCEPT 0
3253     #define NOTIFY_ACK_SRR_FLAGS_REJECT 1
3254    
3255     @@ -790,13 +808,6 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
3256     #define FC_TM_REJECT 4
3257     #define FC_TM_FAILED 5
3258    
3259     -/*
3260     - * Error code of qlt_pre_xmit_response() meaning that cmd's exchange was
3261     - * terminated, so no more actions is needed and success should be returned
3262     - * to target.
3263     - */
3264     -#define QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED 0x1717
3265     -
3266     #if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
3267     #define pci_dma_lo32(a) (a & 0xffffffff)
3268     #define pci_dma_hi32(a) ((((a) >> 16)>>16) & 0xffffffff)
3269     @@ -874,6 +885,15 @@ struct qla_tgt_sess_op {
3270     struct scsi_qla_host *vha;
3271     struct atio_from_isp atio;
3272     struct work_struct work;
3273     + struct list_head cmd_list;
3274     + bool aborted;
3275     +};
3276     +
3277     +enum qla_sess_deletion {
3278     + QLA_SESS_DELETION_NONE = 0,
3279     + QLA_SESS_DELETION_PENDING = 1, /* hopefully we can get rid of
3280     + * this one */
3281     + QLA_SESS_DELETION_IN_PROGRESS = 2,
3282     };
3283    
3284     /*
3285     @@ -884,8 +904,15 @@ struct qla_tgt_sess {
3286     port_id_t s_id;
3287    
3288     unsigned int conf_compl_supported:1;
3289     - unsigned int deleted:1;
3290     + unsigned int deleted:2;
3291     unsigned int local:1;
3292     + unsigned int logout_on_delete:1;
3293     + unsigned int plogi_ack_needed:1;
3294     + unsigned int keep_nport_handle:1;
3295     +
3296     + unsigned char logout_completed;
3297     +
3298     + int generation;
3299    
3300     struct se_session *se_sess;
3301     struct scsi_qla_host *vha;
3302     @@ -897,6 +924,10 @@ struct qla_tgt_sess {
3303    
3304     uint8_t port_name[WWN_SIZE];
3305     struct work_struct free_work;
3306     +
3307     + union {
3308     + struct imm_ntfy_from_isp tm_iocb;
3309     + };
3310     };
3311    
3312     struct qla_tgt_cmd {
3313     @@ -912,7 +943,6 @@ struct qla_tgt_cmd {
3314     unsigned int conf_compl_supported:1;
3315     unsigned int sg_mapped:1;
3316     unsigned int free_sg:1;
3317     - unsigned int aborted:1; /* Needed in case of SRR */
3318     unsigned int write_data_transferred:1;
3319     unsigned int ctx_dsd_alloced:1;
3320     unsigned int q_full:1;
3321     @@ -1027,6 +1057,10 @@ struct qla_tgt_srr_ctio {
3322     struct qla_tgt_cmd *cmd;
3323     };
3324    
3325     +/* Check for Switch reserved address */
3326     +#define IS_SW_RESV_ADDR(_s_id) \
3327     + ((_s_id.b.domain == 0xff) && (_s_id.b.area == 0xfc))
3328     +
3329     #define QLA_TGT_XMIT_DATA 1
3330     #define QLA_TGT_XMIT_STATUS 2
3331     #define QLA_TGT_XMIT_ALL (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA)
3332     @@ -1044,7 +1078,7 @@ extern int qlt_lport_register(void *, u64, u64, u64,
3333     extern void qlt_lport_deregister(struct scsi_qla_host *);
3334     extern void qlt_unreg_sess(struct qla_tgt_sess *);
3335     extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
3336     -extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *);
3337     +extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *, int);
3338     extern int __init qlt_init(void);
3339     extern void qlt_exit(void);
3340     extern void qlt_update_vp_map(struct scsi_qla_host *, int);
3341     @@ -1074,12 +1108,23 @@ static inline void qla_reverse_ini_mode(struct scsi_qla_host *ha)
3342     ha->host->active_mode |= MODE_INITIATOR;
3343     }
3344    
3345     +static inline uint32_t sid_to_key(const uint8_t *s_id)
3346     +{
3347     + uint32_t key;
3348     +
3349     + key = (((unsigned long)s_id[0] << 16) |
3350     + ((unsigned long)s_id[1] << 8) |
3351     + (unsigned long)s_id[2]);
3352     + return key;
3353     +}
3354     +
3355     /*
3356     * Exported symbols from qla_target.c LLD logic used by qla2xxx code..
3357     */
3358     extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
3359     extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
3360     extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
3361     +extern void qlt_abort_cmd(struct qla_tgt_cmd *);
3362     extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
3363     extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
3364     extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
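
sid_to_key() above packs the three S_ID bytes (domain, area, al_pa) into one 24-bit integer, so both the command-list scans and the btree lookups in tcm_qla2xxx.c can compare a single key. A standalone check of the packing (uint32_t stands in for the kernel's unsigned long; the sample S_ID is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Userspace analogue of sid_to_key(): pack the 3-byte FC S_ID
 * (domain, area, al_pa) into a single 24-bit integer key. */
static uint32_t sid_to_key(const uint8_t *s_id)
{
	return ((uint32_t)s_id[0] << 16) |
	       ((uint32_t)s_id[1] << 8) |
	        (uint32_t)s_id[2];
}

int main(void)
{
	uint8_t s_id[3] = { 0x01, 0x02, 0x03 }; /* domain, area, al_pa */

	/* 0x01 << 16 | 0x02 << 8 | 0x03 == 0x010203 */
	printf("key = 0x%06x\n", sid_to_key(s_id));
	return 0;
}
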
3365     @@ -1110,5 +1155,7 @@ extern void qlt_stop_phase2(struct qla_tgt *);
3366     extern irqreturn_t qla83xx_msix_atio_q(int, void *);
3367     extern void qlt_83xx_iospace_config(struct qla_hw_data *);
3368     extern int qlt_free_qfull_cmds(struct scsi_qla_host *);
3369     +extern void qlt_logo_completion_handler(fc_port_t *, int);
3370     +extern void qlt_do_generation_tick(struct scsi_qla_host *, int *);
3371    
3372     #endif /* __QLA_TARGET_H */
3373     diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
3374     index 962cb89fe0ae..af806fdb0dbc 100644
3375     --- a/drivers/scsi/qla2xxx/qla_tmpl.c
3376     +++ b/drivers/scsi/qla2xxx/qla_tmpl.c
3377     @@ -395,6 +395,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
3378     if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) {
3379     for (i = 0; i < vha->hw->max_req_queues; i++) {
3380     struct req_que *req = vha->hw->req_q_map[i];
3381     +
3382     + if (!test_bit(i, vha->hw->req_qid_map))
3383     + continue;
3384     +
3385     if (req || !buf) {
3386     length = req ?
3387     req->length : REQUEST_ENTRY_CNT_24XX;
3388     @@ -408,6 +412,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
3389     } else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) {
3390     for (i = 0; i < vha->hw->max_rsp_queues; i++) {
3391     struct rsp_que *rsp = vha->hw->rsp_q_map[i];
3392     +
3393     + if (!test_bit(i, vha->hw->rsp_qid_map))
3394     + continue;
3395     +
3396     if (rsp || !buf) {
3397     length = rsp ?
3398     rsp->length : RESPONSE_ENTRY_CNT_MQ;
3399     @@ -634,6 +642,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
3400     if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) {
3401     for (i = 0; i < vha->hw->max_req_queues; i++) {
3402     struct req_que *req = vha->hw->req_q_map[i];
3403     +
3404     + if (!test_bit(i, vha->hw->req_qid_map))
3405     + continue;
3406     +
3407     if (req || !buf) {
3408     qla27xx_insert16(i, buf, len);
3409     qla27xx_insert16(1, buf, len);
3410     @@ -645,6 +657,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
3411     } else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) {
3412     for (i = 0; i < vha->hw->max_rsp_queues; i++) {
3413     struct rsp_que *rsp = vha->hw->rsp_q_map[i];
3414     +
3415     + if (!test_bit(i, vha->hw->rsp_qid_map))
3416     + continue;
3417     +
3418     if (rsp || !buf) {
3419     qla27xx_insert16(i, buf, len);
3420     qla27xx_insert16(1, buf, len);
3421     diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
3422     index 5c9e680aa375..fdad875ca777 100644
3423     --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
3424     +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
3425     @@ -429,7 +429,7 @@ static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
3426     cmd->cmd_flags |= BIT_14;
3427     }
3428    
3429     - return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
3430     + return target_put_sess_cmd(se_cmd);
3431     }
3432    
3433     /* tcm_qla2xxx_release_cmd - Callback from TCM Core to release underlying
3434     @@ -669,7 +669,6 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
3435     cmd->cmd_flags |= BIT_4;
3436     cmd->bufflen = se_cmd->data_length;
3437     cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
3438     - cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
3439    
3440     cmd->sg_cnt = se_cmd->t_data_nents;
3441     cmd->sg = se_cmd->t_data_sg;
3442     @@ -699,7 +698,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
3443     cmd->sg_cnt = 0;
3444     cmd->offset = 0;
3445     cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
3446     - cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
3447     if (cmd->cmd_flags & BIT_5) {
3448     pr_crit("Bit_5 already set for cmd = %p.\n", cmd);
3449     dump_stack();
3450     @@ -764,14 +762,7 @@ static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
3451     {
3452     struct qla_tgt_cmd *cmd = container_of(se_cmd,
3453     struct qla_tgt_cmd, se_cmd);
3454     - struct scsi_qla_host *vha = cmd->vha;
3455     - struct qla_hw_data *ha = vha->hw;
3456     -
3457     - if (!cmd->sg_mapped)
3458     - return;
3459     -
3460     - pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
3461     - cmd->sg_mapped = 0;
3462     + qlt_abort_cmd(cmd);
3463     }
3464    
3465     static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
3466     @@ -1323,9 +1314,7 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
3467     return NULL;
3468     }
3469    
3470     - key = (((unsigned long)s_id[0] << 16) |
3471     - ((unsigned long)s_id[1] << 8) |
3472     - (unsigned long)s_id[2]);
3473     + key = sid_to_key(s_id);
3474     pr_debug("find_sess_by_s_id: 0x%06x\n", key);
3475    
3476     se_nacl = btree_lookup32(&lport->lport_fcport_map, key);
3477     @@ -1360,9 +1349,7 @@ static void tcm_qla2xxx_set_sess_by_s_id(
3478     void *slot;
3479     int rc;
3480    
3481     - key = (((unsigned long)s_id[0] << 16) |
3482     - ((unsigned long)s_id[1] << 8) |
3483     - (unsigned long)s_id[2]);
3484     + key = sid_to_key(s_id);
3485     pr_debug("set_sess_by_s_id: %06x\n", key);
3486    
3487     slot = btree_lookup32(&lport->lport_fcport_map, key);
3488     @@ -1718,6 +1705,10 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
3489     }
3490    
3491     sess->conf_compl_supported = conf_compl_supported;
3492     +
3493     + /* Reset logout parameters to default */
3494     + sess->logout_on_delete = 1;
3495     + sess->keep_nport_handle = 0;
3496     }
3497    
3498     /*
3499     diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
3500     index 64ed88a67e6e..ac418e73536d 100644
3501     --- a/drivers/scsi/scsi_devinfo.c
3502     +++ b/drivers/scsi/scsi_devinfo.c
3503     @@ -205,6 +205,7 @@ static struct {
3504     {"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC},
3505     {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
3506     {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN},
3507     + {"Marvell", "Console", NULL, BLIST_SKIP_VPD_PAGES},
3508     {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
3509     {"MATSHITA", "DMC-LC5", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
3510     {"MATSHITA", "DMC-LC40", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
3511     diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
3512     index 330bbe831066..2e58279fab60 100644
3513     --- a/drivers/target/iscsi/iscsi_target.c
3514     +++ b/drivers/target/iscsi/iscsi_target.c
3515     @@ -712,7 +712,7 @@ static int iscsit_add_reject_from_cmd(
3516     */
3517     if (cmd->se_cmd.se_tfo != NULL) {
3518     pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
3519     - target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
3520     + target_put_sess_cmd(&cmd->se_cmd);
3521     }
3522     return -1;
3523     }
3524     @@ -998,7 +998,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
3525     hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
3526     conn->cid);
3527    
3528     - target_get_sess_cmd(conn->sess->se_sess, &cmd->se_cmd, true);
3529     + target_get_sess_cmd(&cmd->se_cmd, true);
3530    
3531     cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
3532     scsilun_to_int(&hdr->lun));
3533     @@ -1064,7 +1064,7 @@ int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
3534     if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
3535     return -1;
3536     else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
3537     - target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
3538     + target_put_sess_cmd(&cmd->se_cmd);
3539     return 0;
3540     }
3541     }
3542     @@ -1080,7 +1080,7 @@ int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
3543     if (!cmd->sense_reason)
3544     return 0;
3545    
3546     - target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
3547     + target_put_sess_cmd(&cmd->se_cmd);
3548     return 0;
3549     }
3550    
3551     @@ -1111,7 +1111,6 @@ static int
3552     iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
3553     bool dump_payload)
3554     {
3555     - struct iscsi_conn *conn = cmd->conn;
3556     int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
3557     /*
3558     * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
3559     @@ -1138,7 +1137,7 @@ after_immediate_data:
3560    
3561     rc = iscsit_dump_data_payload(cmd->conn,
3562     cmd->first_burst_len, 1);
3563     - target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
3564     + target_put_sess_cmd(&cmd->se_cmd);
3565     return rc;
3566     } else if (cmd->unsolicited_data)
3567     iscsit_set_unsoliticed_dataout(cmd);
3568     @@ -1807,7 +1806,7 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
3569     conn->sess->se_sess, 0, DMA_NONE,
3570     TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
3571    
3572     - target_get_sess_cmd(conn->sess->se_sess, &cmd->se_cmd, true);
3573     + target_get_sess_cmd(&cmd->se_cmd, true);
3574     sess_ref = true;
3575    
3576     switch (function) {
3577     @@ -1949,7 +1948,7 @@ attach:
3578     */
3579     if (sess_ref) {
3580     pr_debug("Handle TMR, using sess_ref=true check\n");
3581     - target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
3582     + target_put_sess_cmd(&cmd->se_cmd);
3583     }
3584    
3585     iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
3586     diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
3587     index 5a8add721741..83bb55b94434 100644
3588     --- a/drivers/target/iscsi/iscsi_target_configfs.c
3589     +++ b/drivers/target/iscsi/iscsi_target_configfs.c
3590     @@ -1981,7 +1981,7 @@ static void lio_set_default_node_attributes(struct se_node_acl *se_acl)
3591    
3592     static int lio_check_stop_free(struct se_cmd *se_cmd)
3593     {
3594     - return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
3595     + return target_put_sess_cmd(se_cmd);
3596     }
3597    
3598     static void lio_release_cmd(struct se_cmd *se_cmd)
3599     diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
3600     index b18edda3e8af..231e2e0e5894 100644
3601     --- a/drivers/target/iscsi/iscsi_target_util.c
3602     +++ b/drivers/target/iscsi/iscsi_target_util.c
3603     @@ -746,7 +746,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
3604     rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
3605     if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
3606     __iscsit_free_cmd(cmd, true, shutdown);
3607     - target_put_sess_cmd(se_cmd->se_sess, se_cmd);
3608     + target_put_sess_cmd(se_cmd);
3609     }
3610     break;
3611     case ISCSI_OP_REJECT:
3612     @@ -762,7 +762,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
3613     rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
3614     if (!rc && shutdown && se_cmd->se_sess) {
3615     __iscsit_free_cmd(cmd, true, shutdown);
3616     - target_put_sess_cmd(se_cmd->se_sess, se_cmd);
3617     + target_put_sess_cmd(se_cmd);
3618     }
3619     break;
3620     }
3621     diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
3622     index 315ec3458eeb..adb8016955c4 100644
3623     --- a/drivers/target/target_core_tmr.c
3624     +++ b/drivers/target/target_core_tmr.c
3625     @@ -71,7 +71,7 @@ void core_tmr_release_req(struct se_tmr_req *tmr)
3626    
3627     if (dev) {
3628     spin_lock_irqsave(&dev->se_tmr_lock, flags);
3629     - list_del(&tmr->tmr_list);
3630     + list_del_init(&tmr->tmr_list);
3631     spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
3632     }
3633    
3634     @@ -153,7 +153,7 @@ void core_tmr_abort_task(
3635     cancel_work_sync(&se_cmd->work);
3636     transport_wait_for_tasks(se_cmd);
3637    
3638     - target_put_sess_cmd(se_sess, se_cmd);
3639     + target_put_sess_cmd(se_cmd);
3640     transport_cmd_finish_abort(se_cmd, true);
3641    
3642     printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
3643     @@ -175,9 +175,11 @@ static void core_tmr_drain_tmr_list(
3644     struct list_head *preempt_and_abort_list)
3645     {
3646     LIST_HEAD(drain_tmr_list);
3647     + struct se_session *sess;
3648     struct se_tmr_req *tmr_p, *tmr_pp;
3649     struct se_cmd *cmd;
3650     unsigned long flags;
3651     + bool rc;
3652     /*
3653     * Release all pending and outgoing TMRs aside from the received
3654     * LUN_RESET tmr.
3655     @@ -203,17 +205,31 @@ static void core_tmr_drain_tmr_list(
3656     if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
3657     continue;
3658    
3659     + sess = cmd->se_sess;
3660     + if (WARN_ON_ONCE(!sess))
3661     + continue;
3662     +
3663     + spin_lock(&sess->sess_cmd_lock);
3664     spin_lock(&cmd->t_state_lock);
3665     if (!(cmd->transport_state & CMD_T_ACTIVE)) {
3666     spin_unlock(&cmd->t_state_lock);
3667     + spin_unlock(&sess->sess_cmd_lock);
3668     continue;
3669     }
3670     if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
3671     spin_unlock(&cmd->t_state_lock);
3672     + spin_unlock(&sess->sess_cmd_lock);
3673     continue;
3674     }
3675     + cmd->transport_state |= CMD_T_ABORTED;
3676     spin_unlock(&cmd->t_state_lock);
3677    
3678     + rc = kref_get_unless_zero(&cmd->cmd_kref);
3679     + spin_unlock(&sess->sess_cmd_lock);
3680     + if (!rc) {
3681     + printk("LUN_RESET TMR: non-zero kref_get_unless_zero\n");
3682     + continue;
3683     + }
3684     list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
3685     }
3686     spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
3687     @@ -227,7 +243,11 @@ static void core_tmr_drain_tmr_list(
3688     (preempt_and_abort_list) ? "Preempt" : "", tmr_p,
3689     tmr_p->function, tmr_p->response, cmd->t_state);
3690    
3691     + cancel_work_sync(&cmd->work);
3692     + transport_wait_for_tasks(cmd);
3693     +
3694     transport_cmd_finish_abort(cmd, 1);
3695     + target_put_sess_cmd(cmd);
3696     }
3697     }
3698    
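
The drain path above only moves a TMR onto its private list after kref_get_unless_zero() succeeds, so a command whose last reference is already being dropped is skipped rather than resurrected. A userspace analogue of that primitive using C11 atomics (illustrative, not the kernel's implementation):

#include <stdbool.h>
#include <stdatomic.h>
#include <stdio.h>

/* Only take a new reference if the object has not already started
 * dying (i.e. its refcount has not reached zero). */
static bool ref_get_unless_zero(atomic_int *ref)
{
	int old = atomic_load(ref);

	while (old != 0)
		if (atomic_compare_exchange_weak(ref, &old, old + 1))
			return true;
	return false;
}

int main(void)
{
	atomic_int live = 1, dying = 0;

	printf("live:  %s\n", ref_get_unless_zero(&live)  ? "got ref" : "skipped");
	printf("dying: %s\n", ref_get_unless_zero(&dying) ? "got ref" : "skipped");
	return 0;
}
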
3699     diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
3700     index 675f2d9d1f14..3881504b40d8 100644
3701     --- a/drivers/target/target_core_transport.c
3702     +++ b/drivers/target/target_core_transport.c
3703     @@ -1419,7 +1419,7 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
3704     * for fabrics using TARGET_SCF_ACK_KREF that expect a second
3705     * kref_put() to happen during fabric packet acknowledgement.
3706     */
3707     - ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
3708     + ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
3709     if (ret)
3710     return ret;
3711     /*
3712     @@ -1433,7 +1433,7 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
3713     rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
3714     if (rc) {
3715     transport_send_check_condition_and_sense(se_cmd, rc, 0);
3716     - target_put_sess_cmd(se_sess, se_cmd);
3717     + target_put_sess_cmd(se_cmd);
3718     return 0;
3719     }
3720    
3721     @@ -1584,7 +1584,7 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
3722     se_cmd->se_tmr_req->ref_task_tag = tag;
3723    
3724     /* See target_submit_cmd for commentary */
3725     - ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
3726     + ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
3727     if (ret) {
3728     core_tmr_release_req(se_cmd->se_tmr_req);
3729     return ret;
3730     @@ -2227,7 +2227,7 @@ static int transport_release_cmd(struct se_cmd *cmd)
3731     * If this cmd has been setup with target_get_sess_cmd(), drop
3732     * the kref and call ->release_cmd() in kref callback.
3733     */
3734     - return target_put_sess_cmd(cmd->se_sess, cmd);
3735     + return target_put_sess_cmd(cmd);
3736     }
3737    
3738     /**
3739     @@ -2471,13 +2471,12 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
3740     EXPORT_SYMBOL(transport_generic_free_cmd);
3741    
3742     /* target_get_sess_cmd - Add command to active ->sess_cmd_list
3743     - * @se_sess: session to reference
3744     * @se_cmd: command descriptor to add
3745     * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd()
3746     */
3747     -int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
3748     - bool ack_kref)
3749     +int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
3750     {
3751     + struct se_session *se_sess = se_cmd->se_sess;
3752     unsigned long flags;
3753     int ret = 0;
3754    
3755     @@ -2499,7 +2498,7 @@ out:
3756     spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
3757    
3758     if (ret && ack_kref)
3759     - target_put_sess_cmd(se_sess, se_cmd);
3760     + target_put_sess_cmd(se_cmd);
3761    
3762     return ret;
3763     }
3764     @@ -2528,11 +2527,12 @@ static void target_release_cmd_kref(struct kref *kref)
3765     }
3766    
3767     /* target_put_sess_cmd - Check for active I/O shutdown via kref_put
3768     - * @se_sess: session to reference
3769     * @se_cmd: command descriptor to drop
3770     */
3771     -int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
3772     +int target_put_sess_cmd(struct se_cmd *se_cmd)
3773     {
3774     + struct se_session *se_sess = se_cmd->se_sess;
3775     +
3776     if (!se_sess) {
3777     se_cmd->se_tfo->release_cmd(se_cmd);
3778     return 1;
3779     @@ -3025,8 +3025,17 @@ static void target_tmr_work(struct work_struct *work)
3780     struct se_cmd *cmd = container_of(work, struct se_cmd, work);
3781     struct se_device *dev = cmd->se_dev;
3782     struct se_tmr_req *tmr = cmd->se_tmr_req;
3783     + unsigned long flags;
3784     int ret;
3785    
3786     + spin_lock_irqsave(&cmd->t_state_lock, flags);
3787     + if (cmd->transport_state & CMD_T_ABORTED) {
3788     + tmr->response = TMR_FUNCTION_REJECTED;
3789     + spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3790     + goto check_stop;
3791     + }
3792     + spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3793     +
3794     switch (tmr->function) {
3795     case TMR_ABORT_TASK:
3796     core_tmr_abort_task(dev, tmr, cmd->se_sess);
3797     @@ -3054,9 +3063,17 @@ static void target_tmr_work(struct work_struct *work)
3798     break;
3799     }
3800    
3801     + spin_lock_irqsave(&cmd->t_state_lock, flags);
3802     + if (cmd->transport_state & CMD_T_ABORTED) {
3803     + spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3804     + goto check_stop;
3805     + }
3806     cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
3807     + spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3808     +
3809     cmd->se_tfo->queue_tm_rsp(cmd);
3810    
3811     +check_stop:
3812     transport_cmd_check_stop_to_fabric(cmd);
3813     }
3814    
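
A toy model of the two CMD_T_ABORTED checks added to target_tmr_work(): one before the handler runs and one before queue_tm_rsp(), so an abort that races in mid-handler suppresses the response. Locking is elided here; the plain flag reads stand for the t_state_lock'ed checks in the real code:

#include <stdbool.h>
#include <stdio.h>

static void tmr_work(bool *aborted, bool abort_midway)
{
	if (*aborted)
		goto check_stop;        /* first check, before the handler */

	/* ... TMR handler would run here ... */
	if (abort_midway)
		*aborted = true;        /* abort races in meanwhile */

	if (*aborted)
		goto check_stop;        /* second check, before the response */

	printf("queue_tm_rsp()\n");
check_stop:
	printf("transport_cmd_check_stop_to_fabric()\n");
}

int main(void)
{
	bool aborted = false;

	tmr_work(&aborted, true);       /* response suppressed, only check_stop runs */
	return 0;
}
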
3815     diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
3816     index 4d5e8409769c..254c183a5efe 100644
3817     --- a/drivers/tty/pty.c
3818     +++ b/drivers/tty/pty.c
3819     @@ -672,7 +672,14 @@ static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
3820     /* this is called once with whichever end is closed last */
3821     static void pty_unix98_shutdown(struct tty_struct *tty)
3822     {
3823     - devpts_kill_index(tty->driver_data, tty->index);
3824     + struct inode *ptmx_inode;
3825     +
3826     + if (tty->driver->subtype == PTY_TYPE_MASTER)
3827     + ptmx_inode = tty->driver_data;
3828     + else
3829     + ptmx_inode = tty->link->driver_data;
3830     + devpts_kill_index(ptmx_inode, tty->index);
3831     + devpts_del_ref(ptmx_inode);
3832     }
3833    
3834     static const struct tty_operations ptm_unix98_ops = {
3835     @@ -764,6 +771,18 @@ static int ptmx_open(struct inode *inode, struct file *filp)
3836     set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
3837     tty->driver_data = inode;
3838    
3839     + /*
3840     + * In the case where all references to the ptmx inode are dropped and we
3841     + * still have /dev/tty opened pointing to the master/slave pair (ptmx
3842     + * is closed/released before /dev/tty), we must make sure that the inode
3843     + * is still valid when we call the final pty_unix98_shutdown, so we
3844     + * hold an additional reference to the ptmx inode. For the same /dev/tty
3845     + * last-close case, we also need to make sure the super_block isn't
3846     + * destroyed (devpts instance unmounted) before /dev/tty is closed and
3847     + * devpts_kill_index is called on its release.
3848     + */
3849     + devpts_add_ref(inode);
3850     +
3851     tty_add_file(tty, filp);
3852    
3853     slave_inode = devpts_pty_new(inode,
3854     diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
3855     index 2fd163b75665..b82b2a0f82a3 100644
3856     --- a/drivers/tty/serial/8250/8250_pci.c
3857     +++ b/drivers/tty/serial/8250/8250_pci.c
3858     @@ -2002,6 +2002,7 @@ pci_wch_ch38x_setup(struct serial_private *priv,
3859     #define PCIE_VENDOR_ID_WCH 0x1c00
3860     #define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250
3861     #define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470
3862     +#define PCIE_DEVICE_ID_WCH_CH382_2S 0x3253
3863    
3864     #define PCI_DEVICE_ID_EXAR_XR17V4358 0x4358
3865     #define PCI_DEVICE_ID_EXAR_XR17V8358 0x8358
3866     @@ -2729,6 +2730,14 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
3867     .subdevice = PCI_ANY_ID,
3868     .setup = pci_wch_ch353_setup,
3869     },
3870     + /* WCH CH382 2S card (16850 clone) */
3871     + {
3872     + .vendor = PCIE_VENDOR_ID_WCH,
3873     + .device = PCIE_DEVICE_ID_WCH_CH382_2S,
3874     + .subvendor = PCI_ANY_ID,
3875     + .subdevice = PCI_ANY_ID,
3876     + .setup = pci_wch_ch38x_setup,
3877     + },
3878     /* WCH CH382 2S1P card (16850 clone) */
3879     {
3880     .vendor = PCIE_VENDOR_ID_WCH,
3881     @@ -3049,6 +3058,7 @@ enum pci_board_num_t {
3882     pbn_fintek_4,
3883     pbn_fintek_8,
3884     pbn_fintek_12,
3885     + pbn_wch382_2,
3886     pbn_wch384_4,
3887     pbn_pericom_PI7C9X7951,
3888     pbn_pericom_PI7C9X7952,
3889     @@ -3879,6 +3889,13 @@ static struct pciserial_board pci_boards[] = {
3890     .base_baud = 115200,
3891     .first_offset = 0x40,
3892     },
3893     + [pbn_wch382_2] = {
3894     + .flags = FL_BASE0,
3895     + .num_ports = 2,
3896     + .base_baud = 115200,
3897     + .uart_offset = 8,
3898     + .first_offset = 0xC0,
3899     + },
3900     [pbn_wch384_4] = {
3901     .flags = FL_BASE0,
3902     .num_ports = 4,
3903     @@ -5691,6 +5708,10 @@ static struct pci_device_id serial_pci_tbl[] = {
3904     PCI_ANY_ID, PCI_ANY_ID,
3905     0, 0, pbn_b0_bt_2_115200 },
3906    
3907     + { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH382_2S,
3908     + PCI_ANY_ID, PCI_ANY_ID,
3909     + 0, 0, pbn_wch382_2 },
3910     +
3911     { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S,
3912     PCI_ANY_ID, PCI_ANY_ID,
3913     0, 0, pbn_wch384_4 },
3914     diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
3915     index 7f49172ccd86..0a88693cd8ca 100644
3916     --- a/drivers/tty/serial/omap-serial.c
3917     +++ b/drivers/tty/serial/omap-serial.c
3918     @@ -1368,7 +1368,7 @@ static inline void serial_omap_add_console_port(struct uart_omap_port *up)
3919    
3920     /* Enable or disable the rs485 support */
3921     static int
3922     -serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
3923     +serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
3924     {
3925     struct uart_omap_port *up = to_uart_omap_port(port);
3926     unsigned int mode;
3927     @@ -1381,8 +1381,12 @@ serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
3928     up->ier = 0;
3929     serial_out(up, UART_IER, 0);
3930    
3931     + /* Clamp the delays to [0, 100ms] */
3932     + rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
3933     + rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U);
3934     +
3935     /* store new config */
3936     - port->rs485 = *rs485conf;
3937     + port->rs485 = *rs485;
3938    
3939     /*
3940     * Just as a precaution, only allow rs485
3941     diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
3942     index ea32b386797f..636435b41293 100644
3943     --- a/drivers/vhost/scsi.c
3944     +++ b/drivers/vhost/scsi.c
3945     @@ -607,7 +607,7 @@ static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd)
3946    
3947     static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
3948     {
3949     - return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
3950     + return target_put_sess_cmd(se_cmd);
3951     }
3952    
3953     static void
3954     diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
3955     index 723470850b94..30bc9fa763bd 100644
3956     --- a/fs/btrfs/backref.c
3957     +++ b/fs/btrfs/backref.c
3958     @@ -1369,7 +1369,8 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
3959     read_extent_buffer(eb, dest + bytes_left,
3960     name_off, name_len);
3961     if (eb != eb_in) {
3962     - btrfs_tree_read_unlock_blocking(eb);
3963     + if (!path->skip_locking)
3964     + btrfs_tree_read_unlock_blocking(eb);
3965     free_extent_buffer(eb);
3966     }
3967     ret = btrfs_find_item(fs_root, path, parent, 0,
3968     @@ -1389,9 +1390,10 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
3969     eb = path->nodes[0];
3970     /* make sure we can use eb after releasing the path */
3971     if (eb != eb_in) {
3972     - atomic_inc(&eb->refs);
3973     - btrfs_tree_read_lock(eb);
3974     - btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
3975     + if (!path->skip_locking)
3976     + btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
3977     + path->nodes[0] = NULL;
3978     + path->locks[0] = 0;
3979     }
3980     btrfs_release_path(path);
3981     iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
3982     diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
3983     index a2ae42720a6a..bc2d048a9eb9 100644
3984     --- a/fs/btrfs/delayed-inode.c
3985     +++ b/fs/btrfs/delayed-inode.c
3986     @@ -1690,7 +1690,7 @@ int btrfs_should_delete_dir_index(struct list_head *del_list,
3987     *
3988     */
3989     int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
3990     - struct list_head *ins_list)
3991     + struct list_head *ins_list, bool *emitted)
3992     {
3993     struct btrfs_dir_item *di;
3994     struct btrfs_delayed_item *curr, *next;
3995     @@ -1734,6 +1734,7 @@ int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
3996    
3997     if (over)
3998     return 1;
3999     + *emitted = true;
4000     }
4001     return 0;
4002     }
4003     diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
4004     index f70119f25421..0167853c84ae 100644
4005     --- a/fs/btrfs/delayed-inode.h
4006     +++ b/fs/btrfs/delayed-inode.h
4007     @@ -144,7 +144,7 @@ void btrfs_put_delayed_items(struct list_head *ins_list,
4008     int btrfs_should_delete_dir_index(struct list_head *del_list,
4009     u64 index);
4010     int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
4011     - struct list_head *ins_list);
4012     + struct list_head *ins_list, bool *emitted);
4013    
4014     /* for init */
4015     int __init btrfs_delayed_inode_init(void);
4016     diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
4017     index df4e0462976e..b114a0539d3d 100644
4018     --- a/fs/btrfs/inode.c
4019     +++ b/fs/btrfs/inode.c
4020     @@ -5666,6 +5666,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
4021     char *name_ptr;
4022     int name_len;
4023     int is_curr = 0; /* ctx->pos points to the current index? */
4024     + bool emitted;
4025    
4026     /* FIXME, use a real flag for deciding about the key type */
4027     if (root->fs_info->tree_root == root)
4028     @@ -5694,6 +5695,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
4029     if (ret < 0)
4030     goto err;
4031    
4032     + emitted = false;
4033     while (1) {
4034     leaf = path->nodes[0];
4035     slot = path->slots[0];
4036     @@ -5773,6 +5775,7 @@ skip:
4037    
4038     if (over)
4039     goto nopos;
4040     + emitted = true;
4041     di_len = btrfs_dir_name_len(leaf, di) +
4042     btrfs_dir_data_len(leaf, di) + sizeof(*di);
4043     di_cur += di_len;
4044     @@ -5785,11 +5788,20 @@ next:
4045     if (key_type == BTRFS_DIR_INDEX_KEY) {
4046     if (is_curr)
4047     ctx->pos++;
4048     - ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
4049     + ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list, &emitted);
4050     if (ret)
4051     goto nopos;
4052     }
4053    
4054     + /*
4055     + * If we haven't emitted any dir entry, we must not touch ctx->pos as
4056     + * it was set to the termination value in the previous call. We assume
4057     + * that "." and ".." were emitted if we reach this point and set the
4058     + * termination value as well for an empty directory.
4059     + */
4060     + if (ctx->pos > 2 && !emitted)
4061     + goto nopos;
4062     +
4063     /* Reached end of directory/root. Bump pos past the last item. */
4064     ctx->pos++;
4065    
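
A toy model of the guard added above: once ctx->pos already holds a termination value (anything past the "." and ".." entries) and a pass emits nothing, bumping pos again would hand the next getdents call a fresh-looking offset and the listing would never terminate. readdir_pass() below is illustrative, not a btrfs function:

#include <stdbool.h>
#include <stdio.h>

/* The '2' threshold stands for the "." and ".." entries. */
static long long readdir_pass(long long pos, bool emitted)
{
	if (pos > 2 && !emitted)
		return pos;     /* keep the termination value untouched */
	return pos + 1;         /* normal end-of-directory bump */
}

int main(void)
{
	long long pos = 3;      /* already past ".", "..", and one entry */

	pos = readdir_pass(pos, false);
	printf("pos stays at %lld, directory stream terminates\n", pos);
	return 0;
}
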
4066     diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
4067     index afa09fce8151..e682b36a210f 100644
4068     --- a/fs/cifs/cifsencrypt.c
4069     +++ b/fs/cifs/cifsencrypt.c
4070     @@ -714,7 +714,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
4071    
4072     ses->auth_key.response = kmalloc(baselen + tilen, GFP_KERNEL);
4073     if (!ses->auth_key.response) {
4074     - rc = ENOMEM;
4075     + rc = -ENOMEM;
4076     ses->auth_key.len = 0;
4077     goto setup_ntlmv2_rsp_ret;
4078     }
4079     diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
4080     index add566303c68..91360444adf5 100644
4081     --- a/fs/devpts/inode.c
4082     +++ b/fs/devpts/inode.c
4083     @@ -569,6 +569,26 @@ void devpts_kill_index(struct inode *ptmx_inode, int idx)
4084     mutex_unlock(&allocated_ptys_lock);
4085     }
4086    
4087     +/*
4088     + * pty code needs to hold extra references in case of last /dev/tty close
4089     + */
4090     +
4091     +void devpts_add_ref(struct inode *ptmx_inode)
4092     +{
4093     + struct super_block *sb = pts_sb_from_inode(ptmx_inode);
4094     +
4095     + atomic_inc(&sb->s_active);
4096     + ihold(ptmx_inode);
4097     +}
4098     +
4099     +void devpts_del_ref(struct inode *ptmx_inode)
4100     +{
4101     + struct super_block *sb = pts_sb_from_inode(ptmx_inode);
4102     +
4103     + iput(ptmx_inode);
4104     + deactivate_super(sb);
4105     +}
4106     +
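
A minimal refcount analogue of the devpts_add_ref()/devpts_del_ref() pairing: ptmx_open() takes one extra reference on the ptmx inode and its superblock, and pty_unix98_shutdown() drops it, so neither object can be torn down while /dev/tty still reaches the pair. Plain counters stand in for ihold()/iput() and s_active:

#include <stdio.h>

struct obj { const char *name; int refs; };

static void get(struct obj *o) { o->refs++; }
static void put(struct obj *o)
{
	if (--o->refs == 0)
		printf("%s destroyed\n", o->name);
}

int main(void)
{
	struct obj inode = { "ptmx inode", 1 }, sb = { "devpts sb", 1 };

	get(&inode); get(&sb);  /* devpts_add_ref() at ptmx_open() */
	put(&inode); put(&sb);  /* normal last close of ptmx: both stay alive */
	put(&inode); put(&sb);  /* devpts_del_ref() at shutdown: now destroyed */
	return 0;
}
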
4107     /**
4108     * devpts_pty_new -- create a new inode in /dev/pts/
4109     * @ptmx_inode: inode of the master
4110     diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
4111     index 966c614822cc..2b3a53a51582 100644
4112     --- a/fs/ext4/inode.c
4113     +++ b/fs/ext4/inode.c
4114     @@ -3133,29 +3133,29 @@ static ssize_t ext4_ext_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
4115     * case, we allocate an io_end structure to hook to the iocb.
4116     */
4117     iocb->private = NULL;
4118     - ext4_inode_aio_set(inode, NULL);
4119     - if (!is_sync_kiocb(iocb)) {
4120     - io_end = ext4_init_io_end(inode, GFP_NOFS);
4121     - if (!io_end) {
4122     - ret = -ENOMEM;
4123     - goto retake_lock;
4124     - }
4125     - /*
4126     - * Grab reference for DIO. Will be dropped in ext4_end_io_dio()
4127     - */
4128     - iocb->private = ext4_get_io_end(io_end);
4129     - /*
4130     - * we save the io structure for current async direct
4131     - * IO, so that later ext4_map_blocks() could flag the
4132     - * io structure whether there is a unwritten extents
4133     - * needs to be converted when IO is completed.
4134     - */
4135     - ext4_inode_aio_set(inode, io_end);
4136     - }
4137     -
4138     if (overwrite) {
4139     get_block_func = ext4_get_block_write_nolock;
4140     } else {
4141     + ext4_inode_aio_set(inode, NULL);
4142     + if (!is_sync_kiocb(iocb)) {
4143     + io_end = ext4_init_io_end(inode, GFP_NOFS);
4144     + if (!io_end) {
4145     + ret = -ENOMEM;
4146     + goto retake_lock;
4147     + }
4148     + /*
4149     + * Grab reference for DIO. Will be dropped in
4150     + * ext4_end_io_dio()
4151     + */
4152     + iocb->private = ext4_get_io_end(io_end);
4153     + /*
4154     + * we save the io structure for current async direct
4155     + * io structure whether there are unwritten extents that
4156     + * need to be converted when IO is completed.
4157     + * needs to be converted when IO is completed.
4158     + */
4159     + ext4_inode_aio_set(inode, io_end);
4160     + }
4161     get_block_func = ext4_get_block_write;
4162     dio_flags = DIO_LOCKING;
4163     }
4164     diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
4165     index 370420bfae8d..7da8ac1047f8 100644
4166     --- a/fs/ext4/move_extent.c
4167     +++ b/fs/ext4/move_extent.c
4168     @@ -268,11 +268,12 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
4169     ext4_lblk_t orig_blk_offset, donor_blk_offset;
4170     unsigned long blocksize = orig_inode->i_sb->s_blocksize;
4171     unsigned int tmp_data_size, data_size, replaced_size;
4172     - int err2, jblocks, retries = 0;
4173     + int i, err2, jblocks, retries = 0;
4174     int replaced_count = 0;
4175     int from = data_offset_in_page << orig_inode->i_blkbits;
4176     int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
4177     struct super_block *sb = orig_inode->i_sb;
4178     + struct buffer_head *bh = NULL;
4179    
4180     /*
4181     * It needs twice the amount of ordinary journal buffers because
4182     @@ -383,8 +384,16 @@ data_copy:
4183     }
4184     /* Perform all necessary steps similar write_begin()/write_end()
4185     * but keeping in mind that i_size will not change */
4186     - *err = __block_write_begin(pagep[0], from, replaced_size,
4187     - ext4_get_block);
4188     + if (!page_has_buffers(pagep[0]))
4189     + create_empty_buffers(pagep[0], 1 << orig_inode->i_blkbits, 0);
4190     + bh = page_buffers(pagep[0]);
4191     + for (i = 0; i < data_offset_in_page; i++)
4192     + bh = bh->b_this_page;
4193     + for (i = 0; i < block_len_in_page; i++) {
4194     + *err = ext4_get_block(orig_inode, orig_blk_offset + i, bh, 0);
4195     + if (*err < 0)
4196     + break;
4197     + }
4198     if (!*err)
4199     *err = block_commit_write(pagep[0], from, from + replaced_size);
4200    
4201     diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
4202     index cf0c472047e3..0e783b9f7007 100644
4203     --- a/fs/ext4/resize.c
4204     +++ b/fs/ext4/resize.c
4205     @@ -198,7 +198,7 @@ static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size)
4206     if (flex_gd == NULL)
4207     goto out3;
4208    
4209     - if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_flex_group_data))
4210     + if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_group_data))
4211     goto out2;
4212     flex_gd->count = flexbg_size;
4213    
4214     diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h
4215     index 0419485891f2..0f1c6f315cdc 100644
4216     --- a/include/asm-generic/cputime_nsecs.h
4217     +++ b/include/asm-generic/cputime_nsecs.h
4218     @@ -75,7 +75,7 @@ typedef u64 __nocast cputime64_t;
4219     */
4220     static inline cputime_t timespec_to_cputime(const struct timespec *val)
4221     {
4222     - u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
4223     + u64 ret = (u64)val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
4224     return (__force cputime_t) ret;
4225     }
4226     static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
4227     @@ -91,7 +91,8 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
4228     */
4229     static inline cputime_t timeval_to_cputime(const struct timeval *val)
4230     {
4231     - u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
4232     + u64 ret = (u64)val->tv_sec * NSEC_PER_SEC +
4233     + val->tv_usec * NSEC_PER_USEC;
4234     return (__force cputime_t) ret;
4235     }
4236     static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
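
The (u64) casts matter because on 32-bit targets tv_sec is a 32-bit long, so without them the multiply is evaluated in 32-bit arithmetic and wraps for any value beyond about two seconds. A standalone demonstration (the 32-bit truncation is forced explicitly so it reproduces on any host):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000L

int main(void)
{
	long tv_sec = 5; /* long is 32-bit on the affected targets */

	/* Simulate the pre-fix 32-bit multiply: 5 * 1e9 does not fit. */
	uint64_t wrong = (uint32_t)tv_sec * (uint32_t)NSEC_PER_SEC;
	/* The fixed form: widen before multiplying. */
	uint64_t right = (uint64_t)tv_sec * NSEC_PER_SEC;

	printf("truncated: %llu\n", (unsigned long long)wrong); /* 705032704 */
	printf("correct:   %llu\n", (unsigned long long)right); /* 5000000000 */
	return 0;
}
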
4237     diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
4238     new file mode 100644
4239     index 000000000000..8d9c7e7a6432
4240     --- /dev/null
4241     +++ b/include/linux/cgroup-defs.h
4242     @@ -0,0 +1,470 @@
4243     +/*
4244     + * linux/cgroup-defs.h - basic definitions for cgroup
4245     + *
4246     + * This file provides basic types and interfaces. Include this file directly
4247     + * only if necessary to avoid cyclic dependencies.
4248     + */
4249     +#ifndef _LINUX_CGROUP_DEFS_H
4250     +#define _LINUX_CGROUP_DEFS_H
4251     +
4252     +#include <linux/limits.h>
4253     +#include <linux/list.h>
4254     +#include <linux/idr.h>
4255     +#include <linux/wait.h>
4256     +#include <linux/mutex.h>
4257     +#include <linux/rcupdate.h>
4258     +#include <linux/percpu-refcount.h>
4259     +#include <linux/workqueue.h>
4260     +
4261     +#ifdef CONFIG_CGROUPS
4262     +
4263     +struct cgroup;
4264     +struct cgroup_root;
4265     +struct cgroup_subsys;
4266     +struct cgroup_taskset;
4267     +struct kernfs_node;
4268     +struct kernfs_ops;
4269     +struct kernfs_open_file;
4270     +
4271     +#define MAX_CGROUP_TYPE_NAMELEN 32
4272     +#define MAX_CGROUP_ROOT_NAMELEN 64
4273     +#define MAX_CFTYPE_NAME 64
4274     +
4275     +/* define the enumeration of all cgroup subsystems */
4276     +#define SUBSYS(_x) _x ## _cgrp_id,
4277     +enum cgroup_subsys_id {
4278     +#include <linux/cgroup_subsys.h>
4279     + CGROUP_SUBSYS_COUNT,
4280     +};
4281     +#undef SUBSYS
4282     +
4283     +/* bits in struct cgroup_subsys_state flags field */
4284     +enum {
4285     + CSS_NO_REF = (1 << 0), /* no reference counting for this css */
4286     + CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */
4287     + CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */
4288     +};
4289     +
4290     +/* bits in struct cgroup flags field */
4291     +enum {
4292     + /* Control Group requires release notifications to userspace */
4293     + CGRP_NOTIFY_ON_RELEASE,
4294     + /*
4295     + * Clone the parent's configuration when creating a new child
4296     + * cpuset cgroup. For historical reasons, this option can be
4297     + * specified at mount time and thus is implemented here.
4298     + */
4299     + CGRP_CPUSET_CLONE_CHILDREN,
4300     +};
4301     +
4302     +/* cgroup_root->flags */
4303     +enum {
4304     + CGRP_ROOT_SANE_BEHAVIOR = (1 << 0), /* __DEVEL__sane_behavior specified */
4305     + CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no named prefix */
4306     + CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */
4307     +};
4308     +
4309     +/* cftype->flags */
4310     +enum {
4311     + CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cgrp */
4312     + CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cgrp */
4313     + CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */
4314     +
4315     + /* internal flags, do not use outside cgroup core proper */
4316     + __CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */
4317     + __CFTYPE_NOT_ON_DFL = (1 << 17), /* not on default hierarchy */
4318     +};
4319     +
4320     +/*
4321     + * Per-subsystem/per-cgroup state maintained by the system. This is the
4322     + * fundamental structural building block that controllers deal with.
4323     + *
4324     + * Fields marked with "PI:" are public and immutable and may be accessed
4325     + * directly without synchronization.
4326     + */
4327     +struct cgroup_subsys_state {
4328     + /* PI: the cgroup that this css is attached to */
4329     + struct cgroup *cgroup;
4330     +
4331     + /* PI: the cgroup subsystem that this css is attached to */
4332     + struct cgroup_subsys *ss;
4333     +
4334     + /* reference count - access via css_[try]get() and css_put() */
4335     + struct percpu_ref refcnt;
4336     +
4337     + /* PI: the parent css */
4338     + struct cgroup_subsys_state *parent;
4339     +
4340     + /* siblings list anchored at the parent's ->children */
4341     + struct list_head sibling;
4342     + struct list_head children;
4343     +
4344     + /*
4345     + * PI: Subsys-unique ID. 0 is unused and root is always 1. The
4346     + * matching css can be looked up using css_from_id().
4347     + */
4348     + int id;
4349     +
4350     + unsigned int flags;
4351     +
4352     + /*
4353     + * Monotonically increasing unique serial number which defines a
4354     + * uniform order among all csses. It's guaranteed that all
4355     + * ->children lists are in ascending order of ->serial_nr, which is
4356     + * used to allow interrupting and resuming iterations.
4357     + */
4358     + u64 serial_nr;
4359     +
4360     + /*
4361     + * Incremented by online self and children. Used to guarantee that
4362     + * parents are not offlined before their children.
4363     + */
4364     + atomic_t online_cnt;
4365     +
4366     + /* percpu_ref killing and RCU release */
4367     + struct rcu_head rcu_head;
4368     + struct work_struct destroy_work;
4369     +};
4370     +
4371     +/*
4372     + * A css_set is a structure holding pointers to a set of
4373     + * cgroup_subsys_state objects. This saves space in the task struct
4374     + * object and speeds up fork()/exit(), since a single inc/dec and a
4375     + * list_add()/del() can bump the reference count on the entire cgroup
4376     + * set for a task.
4377     + */
4378     +struct css_set {
4379     + /* Reference count */
4380     + atomic_t refcount;
4381     +
4382     + /*
4383     + * List running through all cgroup groups in the same hash
4384     + * slot. Protected by css_set_lock
4385     + */
4386     + struct hlist_node hlist;
4387     +
4388     + /*
4389     + * Lists running through all tasks using this cgroup group.
4390     + * mg_tasks lists tasks which belong to this cset but are in the
4391     + * process of being migrated out or in. Protected by
4392     + * css_set_rwsem, but, during migration, once tasks are moved to
4393     + * mg_tasks, it can be read safely while holding cgroup_mutex.
4394     + */
4395     + struct list_head tasks;
4396     + struct list_head mg_tasks;
4397     +
4398     + /*
4399     + * List of cgrp_cset_links pointing at cgroups referenced from this
4400     + * css_set. Protected by css_set_lock.
4401     + */
4402     + struct list_head cgrp_links;
4403     +
4404     + /* the default cgroup associated with this css_set */
4405     + struct cgroup *dfl_cgrp;
4406     +
4407     + /*
4408     + * Set of subsystem states, one for each subsystem. This array is
4409     + * immutable after creation apart from the init_css_set during
4410     + * subsystem registration (at boot time).
4411     + */
4412     + struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
4413     +
4414     + /*
4415     + * List of csets participating in the on-going migration either as
4416     + * source or destination. Protected by cgroup_mutex.
4417     + */
4418     + struct list_head mg_preload_node;
4419     + struct list_head mg_node;
4420     +
4421     + /*
4422     + * If this cset is acting as the source of migration the following
4423     + * two fields are set. mg_src_cgrp is the source cgroup of the
4424     + * on-going migration and mg_dst_cset is the destination cset the
4425     + * target tasks on this cset should be migrated to. Protected by
4426     + * cgroup_mutex.
4427     + */
4428     + struct cgroup *mg_src_cgrp;
4429     + struct css_set *mg_dst_cset;
4430     +
4431     + /*
4432     + * On the default hierarchy, ->subsys[ssid] may point to a css
4433     + * attached to an ancestor instead of the cgroup this css_set is
4434     + * associated with. The following node is anchored at
4435     + * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to
4436     + * iterate through all css's attached to a given cgroup.
4437     + */
4438     + struct list_head e_cset_node[CGROUP_SUBSYS_COUNT];
4439     +
4440     + /* For RCU-protected deletion */
4441     + struct rcu_head rcu_head;
4442     +};
4443     +
4444     +struct cgroup {
4445     + /* self css with NULL ->ss, points back to this cgroup */
4446     + struct cgroup_subsys_state self;
4447     +
4448     + unsigned long flags; /* "unsigned long" so bitops work */
4449     +
4450     + /*
4451     + * idr allocated in-hierarchy ID.
4452     + *
4453     + * ID 0 is not used, the ID of the root cgroup is always 1, and a
4454     + * new cgroup will be assigned with a smallest available ID.
4455     + * new cgroup will be assigned the smallest available ID.
4456     + * Allocating/Removing ID must be protected by cgroup_mutex.
4457     + */
4458     + int id;
4459     +
4460     + /*
4461     + * If this cgroup contains any tasks, it contributes one to
4462     + * populated_cnt. All children with non-zero populated_cnt of
4463     + * their own contribute one. The count is zero iff there's no task
4464     + * in this cgroup or its subtree.
4465     + */
4466     + int populated_cnt;
4467     +
4468     + struct kernfs_node *kn; /* cgroup kernfs entry */
4469     + struct kernfs_node *populated_kn; /* kn for "cgroup.subtree_populated" */
4470     +
4471     + /*
4472     + * The bitmask of subsystems enabled on the child cgroups.
4473     + * ->subtree_control is the one configured through
4474     + * "cgroup.subtree_control" while ->child_subsys_mask is the
4475     + * effective one which may have more subsystems enabled.
4476     + * Controller knobs are made available iff it's enabled in
4477     + * ->subtree_control.
4478     + */
4479     + unsigned int subtree_control;
4480     + unsigned int child_subsys_mask;
4481     +
4482     + /* Private pointers for each registered subsystem */
4483     + struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];
4484     +
4485     + struct cgroup_root *root;
4486     +
4487     + /*
4488     + * List of cgrp_cset_links pointing at css_sets with tasks in this
4489     + * cgroup. Protected by css_set_lock.
4490     + */
4491     + struct list_head cset_links;
4492     +
4493     + /*
4494     + * On the default hierarchy, a css_set for a cgroup with some
4495     + * subsys disabled will point to css's which are associated with
4496     + * the closest ancestor which has the subsys enabled. The
4497     + * following lists all css_sets which point to this cgroup's css
4498     + * for the given subsystem.
4499     + */
4500     + struct list_head e_csets[CGROUP_SUBSYS_COUNT];
4501     +
4502     + /*
4503     + * list of pidlists, up to two for each namespace (one for procs, one
4504     + * for tasks); created on demand.
4505     + */
4506     + struct list_head pidlists;
4507     + struct mutex pidlist_mutex;
4508     +
4509     + /* used to wait for offlining of csses */
4510     + wait_queue_head_t offline_waitq;
4511     +
4512     + /* used to schedule release agent */
4513     + struct work_struct release_agent_work;
4514     +};
4515     +
4516     +/*
4517     + * A cgroup_root represents the root of a cgroup hierarchy, and may be
4518     + * associated with a kernfs_root to form an active hierarchy. This is
4519     + * internal to cgroup core. Don't access directly from controllers.
4520     + */
4521     +struct cgroup_root {
4522     + struct kernfs_root *kf_root;
4523     +
4524     + /* The bitmask of subsystems attached to this hierarchy */
4525     + unsigned int subsys_mask;
4526     +
4527     + /* Unique id for this hierarchy. */
4528     + int hierarchy_id;
4529     +
4530     + /* The root cgroup. Root is destroyed on its release. */
4531     + struct cgroup cgrp;
4532     +
4533     + /* Number of cgroups in the hierarchy, used only for /proc/cgroups */
4534     + atomic_t nr_cgrps;
4535     +
4536     + /* A list running through the active hierarchies */
4537     + struct list_head root_list;
4538     +
4539     + /* Hierarchy-specific flags */
4540     + unsigned int flags;
4541     +
4542     + /* IDs for cgroups in this hierarchy */
4543     + struct idr cgroup_idr;
4544     +
4545     + /* The path to use for release notifications. */
4546     + char release_agent_path[PATH_MAX];
4547     +
4548     + /* The name for this hierarchy - may be empty */
4549     + char name[MAX_CGROUP_ROOT_NAMELEN];
4550     +};
4551     +
4552     +/*
4553     + * struct cftype: handler definitions for cgroup control files
4554     + *
4555     + * When reading/writing to a file:
4556     + * - the cgroup to use is file->f_path.dentry->d_parent->d_fsdata
4557     + * - the 'cftype' of the file is file->f_path.dentry->d_fsdata
4558     + */
4559     +struct cftype {
4560     + /*
4561     + * By convention, the name should begin with the name of the
4562     + * subsystem, followed by a period. Zero length string indicates
4563     + * end of cftype array.
4564     + */
4565     + char name[MAX_CFTYPE_NAME];
4566     + int private;
4567     + /*
4568     + * If not 0, file mode is set to this value, otherwise it will
4569     + * be figured out automatically
4570     + */
4571     + umode_t mode;
4572     +
4573     + /*
4574     + * The maximum length of string, excluding trailing nul, that can
4575     + * be passed to write. If < PAGE_SIZE-1, PAGE_SIZE-1 is assumed.
4576     + */
4577     + size_t max_write_len;
4578     +
4579     + /* CFTYPE_* flags */
4580     + unsigned int flags;
4581     +
4582     + /*
4583     + * Fields used for internal bookkeeping. Initialized automatically
4584     + * during registration.
4585     + */
4586     + struct cgroup_subsys *ss; /* NULL for cgroup core files */
4587     + struct list_head node; /* anchored at ss->cfts */
4588     + struct kernfs_ops *kf_ops;
4589     +
4590     + /*
4591     + * read_u64() is a shortcut for the common case of returning a
4592     + * single integer. Use it in place of read()
4593     + */
4594     + u64 (*read_u64)(struct cgroup_subsys_state *css, struct cftype *cft);
4595     + /*
4596     + * read_s64() is a signed version of read_u64()
4597     + */
4598     + s64 (*read_s64)(struct cgroup_subsys_state *css, struct cftype *cft);
4599     +
4600     + /* generic seq_file read interface */
4601     + int (*seq_show)(struct seq_file *sf, void *v);
4602     +
4603     + /* optional ops, implement all or none */
4604     + void *(*seq_start)(struct seq_file *sf, loff_t *ppos);
4605     + void *(*seq_next)(struct seq_file *sf, void *v, loff_t *ppos);
4606     + void (*seq_stop)(struct seq_file *sf, void *v);
4607     +
4608     + /*
4609     + * write_u64() is a shortcut for the common case of accepting
4610     + * a single integer (as parsed by simple_strtoull) from
4611     + * userspace. Use in place of write(); return 0 or error.
4612     + */
4613     + int (*write_u64)(struct cgroup_subsys_state *css, struct cftype *cft,
4614     + u64 val);
4615     + /*
4616     + * write_s64() is a signed version of write_u64()
4617     + */
4618     + int (*write_s64)(struct cgroup_subsys_state *css, struct cftype *cft,
4619     + s64 val);
4620     +
4621     + /*
4622     + * write() is the generic write callback which maps directly to
4623     + * kernfs write operation and overrides all other operations.
4624     + * Maximum write size is determined by ->max_write_len. Use
4625     + * of_css/cft() to access the associated css and cft.
4626     + */
4627     + ssize_t (*write)(struct kernfs_open_file *of,
4628     + char *buf, size_t nbytes, loff_t off);
4629     +
4630     +#ifdef CONFIG_DEBUG_LOCK_ALLOC
4631     + struct lock_class_key lockdep_key;
4632     +#endif
4633     +};
4634     +
4635     +/*
4636     + * Control Group subsystem type.
4637     + * See Documentation/cgroups/cgroups.txt for details
4638     + */
4639     +struct cgroup_subsys {
4640     + struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
4641     + int (*css_online)(struct cgroup_subsys_state *css);
4642     + void (*css_offline)(struct cgroup_subsys_state *css);
4643     + void (*css_released)(struct cgroup_subsys_state *css);
4644     + void (*css_free)(struct cgroup_subsys_state *css);
4645     + void (*css_reset)(struct cgroup_subsys_state *css);
4646     + void (*css_e_css_changed)(struct cgroup_subsys_state *css);
4647     +
4648     + int (*can_attach)(struct cgroup_subsys_state *css,
4649     + struct cgroup_taskset *tset);
4650     + void (*cancel_attach)(struct cgroup_subsys_state *css,
4651     + struct cgroup_taskset *tset);
4652     + void (*attach)(struct cgroup_subsys_state *css,
4653     + struct cgroup_taskset *tset);
4654     + void (*fork)(struct task_struct *task);
4655     + void (*exit)(struct cgroup_subsys_state *css,
4656     + struct cgroup_subsys_state *old_css,
4657     + struct task_struct *task);
4658     + void (*bind)(struct cgroup_subsys_state *root_css);
4659     +
4660     + int disabled;
4661     + int early_init;
4662     +
4663     + /*
4664     + * If %false, this subsystem is properly hierarchical -
4665     + * configuration, resource accounting and restriction on a parent
4666     + * cgroup cover those of its children. If %true, hierarchy support
4667     + * is broken in some ways - some subsystems ignore hierarchy
4668     + * completely while others are only implemented half-way.
4669     + *
4670     + * It's now disallowed to create nested cgroups if the subsystem is
4671     + * broken and cgroup core will emit a warning message in such
4672     + * cases. Eventually, all subsystems will be made properly
4673     + * hierarchical and this will go away.
4674     + */
4675     + bool broken_hierarchy;
4676     + bool warned_broken_hierarchy;
4677     +
4678     + /* the following two fields are initialized automatically during boot */
4679     + int id;
4680     + const char *name;
4681     +
4682     + /* link to parent, protected by cgroup_lock() */
4683     + struct cgroup_root *root;
4684     +
4685     + /* idr for css->id */
4686     + struct idr css_idr;
4687     +
4688     + /*
4689     + * List of cftypes. Each entry is the first entry of an array
4690     + * terminated by zero length name.
4691     + */
4692     + struct list_head cfts;
4693     +
4694     + /*
4695     + * Base cftypes which are automatically registered. The two can
4696     + * point to the same array.
4697     + */
4698     + struct cftype *dfl_cftypes; /* for the default hierarchy */
4699     + struct cftype *legacy_cftypes; /* for the legacy hierarchies */
4700     +
4701     + /*
4702     + * A subsystem may depend on other subsystems. When such a subsystem
4703     + * is enabled on a cgroup, the depended-upon subsystems are enabled
4704     + * together if available. Subsystems enabled due to dependency are
4705     + * not visible to userland until explicitly enabled. The following
4706     + * specifies the mask of subsystems that this one depends on.
4707     + */
4708     + unsigned int depends_on;
4709     +};
4710     +
4711     +#endif /* CONFIG_CGROUPS */
4712     +#endif /* _LINUX_CGROUP_DEFS_H */
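The SUBSYS() dance near the top of this new header is an x-macro: linux/cgroup_subsys.h is nothing but a list of SUBSYS(name) lines, and each include site redefines SUBSYS to expand the list differently (enum IDs here, extern struct declarations at the bottom of cgroup.h below). A self-contained sketch of the pattern, with a macro list standing in for the re-included header and hypothetical subsystem names:

    #include <stdio.h>

    /* stand-in for re-including linux/cgroup_subsys.h */
    #define SUBSYS_LIST(X) X(cpu) X(memory) X(blkio)

    /* expansion 1: generate the ID enum */
    #define MAKE_ID(_x) _x ## _cgrp_id,
    enum subsys_id {
            SUBSYS_LIST(MAKE_ID)
            SUBSYS_COUNT,
    };
    #undef MAKE_ID

    /* expansion 2: generate matching name strings */
    #define MAKE_NAME(_x) #_x,
    static const char *subsys_names[] = { SUBSYS_LIST(MAKE_NAME) };
    #undef MAKE_NAME

    int main(void)
    {
            for (int i = 0; i < SUBSYS_COUNT; i++)
                    printf("%d -> %s\n", i, subsys_names[i]);
            return 0;
    }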
4713     diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
4714     index b9cb94c3102a..96a2ecd5aa69 100644
4715     --- a/include/linux/cgroup.h
4716     +++ b/include/linux/cgroup.h
4717     @@ -11,23 +11,16 @@
4718     #include <linux/sched.h>
4719     #include <linux/cpumask.h>
4720     #include <linux/nodemask.h>
4721     -#include <linux/rcupdate.h>
4722     #include <linux/rculist.h>
4723     #include <linux/cgroupstats.h>
4724     #include <linux/rwsem.h>
4725     -#include <linux/idr.h>
4726     -#include <linux/workqueue.h>
4727     #include <linux/fs.h>
4728     -#include <linux/percpu-refcount.h>
4729     #include <linux/seq_file.h>
4730     #include <linux/kernfs.h>
4731     -#include <linux/wait.h>
4732    
4733     -#ifdef CONFIG_CGROUPS
4734     +#include <linux/cgroup-defs.h>
4735    
4736     -struct cgroup_root;
4737     -struct cgroup_subsys;
4738     -struct cgroup;
4739     +#ifdef CONFIG_CGROUPS
4740    
4741     extern int cgroup_init_early(void);
4742     extern int cgroup_init(void);
4743     @@ -40,66 +33,6 @@ extern int cgroupstats_build(struct cgroupstats *stats,
4744     extern int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
4745     struct pid *pid, struct task_struct *tsk);
4746    
4747     -/* define the enumeration of all cgroup subsystems */
4748     -#define SUBSYS(_x) _x ## _cgrp_id,
4749     -enum cgroup_subsys_id {
4750     -#include <linux/cgroup_subsys.h>
4751     - CGROUP_SUBSYS_COUNT,
4752     -};
4753     -#undef SUBSYS
4754     -
4755     -/*
4756     - * Per-subsystem/per-cgroup state maintained by the system. This is the
4757     - * fundamental structural building block that controllers deal with.
4758     - *
4759     - * Fields marked with "PI:" are public and immutable and may be accessed
4760     - * directly without synchronization.
4761     - */
4762     -struct cgroup_subsys_state {
4763     - /* PI: the cgroup that this css is attached to */
4764     - struct cgroup *cgroup;
4765     -
4766     - /* PI: the cgroup subsystem that this css is attached to */
4767     - struct cgroup_subsys *ss;
4768     -
4769     - /* reference count - access via css_[try]get() and css_put() */
4770     - struct percpu_ref refcnt;
4771     -
4772     - /* PI: the parent css */
4773     - struct cgroup_subsys_state *parent;
4774     -
4775     - /* siblings list anchored at the parent's ->children */
4776     - struct list_head sibling;
4777     - struct list_head children;
4778     -
4779     - /*
4780     - * PI: Subsys-unique ID. 0 is unused and root is always 1. The
4781     - * matching css can be looked up using css_from_id().
4782     - */
4783     - int id;
4784     -
4785     - unsigned int flags;
4786     -
4787     - /*
4788     - * Monotonically increasing unique serial number which defines a
4789     - * uniform order among all csses. It's guaranteed that all
4790     - * ->children lists are in the ascending order of ->serial_nr and
4791     - * ->children lists are in ascending order of ->serial_nr, which is
4792     - * used to allow interrupting and resuming iterations.
4793     - u64 serial_nr;
4794     -
4795     - /* percpu_ref killing and RCU release */
4796     - struct rcu_head rcu_head;
4797     - struct work_struct destroy_work;
4798     -};
4799     -
4800     -/* bits in struct cgroup_subsys_state flags field */
4801     -enum {
4802     - CSS_NO_REF = (1 << 0), /* no reference counting for this css */
4803     - CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */
4804     - CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */
4805     -};
4806     -
4807     /**
4808     * css_get - obtain a reference on the specified css
4809     * @css: target css
4810     @@ -185,307 +118,6 @@ static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
4811     percpu_ref_put_many(&css->refcnt, n);
4812     }
4813    
4814     -/* bits in struct cgroup flags field */
4815     -enum {
4816     - /* Control Group requires release notifications to userspace */
4817     - CGRP_NOTIFY_ON_RELEASE,
4818     - /*
4819     - * Clone the parent's configuration when creating a new child
4820     - * cpuset cgroup. For historical reasons, this option can be
4821     - * specified at mount time and thus is implemented here.
4822     - */
4823     - CGRP_CPUSET_CLONE_CHILDREN,
4824     -};
4825     -
4826     -struct cgroup {
4827     - /* self css with NULL ->ss, points back to this cgroup */
4828     - struct cgroup_subsys_state self;
4829     -
4830     - unsigned long flags; /* "unsigned long" so bitops work */
4831     -
4832     - /*
4833     - * idr allocated in-hierarchy ID.
4834     - *
4835     - * ID 0 is not used, the ID of the root cgroup is always 1, and a
4836     - * new cgroup will be assigned with a smallest available ID.
4837     - * new cgroup will be assigned the smallest available ID.
4838     - * Allocating/Removing ID must be protected by cgroup_mutex.
4839     - */
4840     - int id;
4841     -
4842     - /*
4843     - * If this cgroup contains any tasks, it contributes one to
4844     - * populated_cnt. All children with non-zero populated_cnt of
4845     - * their own contribute one. The count is zero iff there's no task
4846     - * in this cgroup or its subtree.
4847     - */
4848     - int populated_cnt;
4849     -
4850     - struct kernfs_node *kn; /* cgroup kernfs entry */
4851     - struct kernfs_node *populated_kn; /* kn for "cgroup.subtree_populated" */
4852     -
4853     - /*
4854     - * The bitmask of subsystems enabled on the child cgroups.
4855     - * ->subtree_control is the one configured through
4856     - * "cgroup.subtree_control" while ->child_subsys_mask is the
4857     - * effective one which may have more subsystems enabled.
4858     - * Controller knobs are made available iff it's enabled in
4859     - * ->subtree_control.
4860     - */
4861     - unsigned int subtree_control;
4862     - unsigned int child_subsys_mask;
4863     -
4864     - /* Private pointers for each registered subsystem */
4865     - struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];
4866     -
4867     - struct cgroup_root *root;
4868     -
4869     - /*
4870     - * List of cgrp_cset_links pointing at css_sets with tasks in this
4871     - * cgroup. Protected by css_set_lock.
4872     - */
4873     - struct list_head cset_links;
4874     -
4875     - /*
4876     - * On the default hierarchy, a css_set for a cgroup with some
4877     - * subsys disabled will point to css's which are associated with
4878     - * the closest ancestor which has the subsys enabled. The
4879     - * following lists all css_sets which point to this cgroup's css
4880     - * for the given subsystem.
4881     - */
4882     - struct list_head e_csets[CGROUP_SUBSYS_COUNT];
4883     -
4884     - /*
4885     - * list of pidlists, up to two for each namespace (one for procs, one
4886     - * for tasks); created on demand.
4887     - */
4888     - struct list_head pidlists;
4889     - struct mutex pidlist_mutex;
4890     -
4891     - /* used to wait for offlining of csses */
4892     - wait_queue_head_t offline_waitq;
4893     -
4894     - /* used to schedule release agent */
4895     - struct work_struct release_agent_work;
4896     -};
4897     -
4898     -#define MAX_CGROUP_ROOT_NAMELEN 64
4899     -
4900     -/* cgroup_root->flags */
4901     -enum {
4902     - CGRP_ROOT_SANE_BEHAVIOR = (1 << 0), /* __DEVEL__sane_behavior specified */
4903     - CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no named prefix */
4904     - CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */
4905     -};
4906     -
4907     -/*
4908     - * A cgroup_root represents the root of a cgroup hierarchy, and may be
4909     - * associated with a kernfs_root to form an active hierarchy. This is
4910     - * internal to cgroup core. Don't access directly from controllers.
4911     - */
4912     -struct cgroup_root {
4913     - struct kernfs_root *kf_root;
4914     -
4915     - /* The bitmask of subsystems attached to this hierarchy */
4916     - unsigned int subsys_mask;
4917     -
4918     - /* Unique id for this hierarchy. */
4919     - int hierarchy_id;
4920     -
4921     - /* The root cgroup. Root is destroyed on its release. */
4922     - struct cgroup cgrp;
4923     -
4924     - /* Number of cgroups in the hierarchy, used only for /proc/cgroups */
4925     - atomic_t nr_cgrps;
4926     -
4927     - /* A list running through the active hierarchies */
4928     - struct list_head root_list;
4929     -
4930     - /* Hierarchy-specific flags */
4931     - unsigned int flags;
4932     -
4933     - /* IDs for cgroups in this hierarchy */
4934     - struct idr cgroup_idr;
4935     -
4936     - /* The path to use for release notifications. */
4937     - char release_agent_path[PATH_MAX];
4938     -
4939     - /* The name for this hierarchy - may be empty */
4940     - char name[MAX_CGROUP_ROOT_NAMELEN];
4941     -};
4942     -
4943     -/*
4944     - * A css_set is a structure holding pointers to a set of
4945     - * cgroup_subsys_state objects. This saves space in the task struct
4946     - * object and speeds up fork()/exit(), since a single inc/dec and a
4947     - * list_add()/del() can bump the reference count on the entire cgroup
4948     - * set for a task.
4949     - */
4950     -
4951     -struct css_set {
4952     -
4953     - /* Reference count */
4954     - atomic_t refcount;
4955     -
4956     - /*
4957     - * List running through all cgroup groups in the same hash
4958     - * slot. Protected by css_set_lock
4959     - */
4960     - struct hlist_node hlist;
4961     -
4962     - /*
4963     - * Lists running through all tasks using this cgroup group.
4964     - * mg_tasks lists tasks which belong to this cset but are in the
4965     - * process of being migrated out or in. Protected by
4966     - * css_set_rwsem, but, during migration, once tasks are moved to
4967     - * mg_tasks, it can be read safely while holding cgroup_mutex.
4968     - */
4969     - struct list_head tasks;
4970     - struct list_head mg_tasks;
4971     -
4972     - /*
4973     - * List of cgrp_cset_links pointing at cgroups referenced from this
4974     - * css_set. Protected by css_set_lock.
4975     - */
4976     - struct list_head cgrp_links;
4977     -
4978     - /* the default cgroup associated with this css_set */
4979     - struct cgroup *dfl_cgrp;
4980     -
4981     - /*
4982     - * Set of subsystem states, one for each subsystem. This array is
4983     - * immutable after creation apart from the init_css_set during
4984     - * subsystem registration (at boot time).
4985     - */
4986     - struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
4987     -
4988     - /*
4989     - * List of csets participating in the on-going migration either as
4990     - * source or destination. Protected by cgroup_mutex.
4991     - */
4992     - struct list_head mg_preload_node;
4993     - struct list_head mg_node;
4994     -
4995     - /*
4996     - * If this cset is acting as the source of migration the following
4997     - * two fields are set. mg_src_cgrp is the source cgroup of the
4998     - * on-going migration and mg_dst_cset is the destination cset the
4999     - * target tasks on this cset should be migrated to. Protected by
5000     - * cgroup_mutex.
5001     - */
5002     - struct cgroup *mg_src_cgrp;
5003     - struct css_set *mg_dst_cset;
5004     -
5005     - /*
5006     - * On the default hierarchy, ->subsys[ssid] may point to a css
5007     - * attached to an ancestor instead of the cgroup this css_set is
5008     - * associated with. The following node is anchored at
5009     - * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to
5010     - * iterate through all css's attached to a given cgroup.
5011     - */
5012     - struct list_head e_cset_node[CGROUP_SUBSYS_COUNT];
5013     -
5014     - /* For RCU-protected deletion */
5015     - struct rcu_head rcu_head;
5016     -};
5017     -
5018     -/*
5019     - * struct cftype: handler definitions for cgroup control files
5020     - *
5021     - * When reading/writing to a file:
5022     - * - the cgroup to use is file->f_path.dentry->d_parent->d_fsdata
5023     - * - the 'cftype' of the file is file->f_path.dentry->d_fsdata
5024     - */
5025     -
5026     -/* cftype->flags */
5027     -enum {
5028     - CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cgrp */
5029     - CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cgrp */
5030     - CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */
5031     -
5032     - /* internal flags, do not use outside cgroup core proper */
5033     - __CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */
5034     - __CFTYPE_NOT_ON_DFL = (1 << 17), /* not on default hierarchy */
5035     -};
5036     -
5037     -#define MAX_CFTYPE_NAME 64
5038     -
5039     -struct cftype {
5040     - /*
5041     - * By convention, the name should begin with the name of the
5042     - * subsystem, followed by a period. Zero length string indicates
5043     - * end of cftype array.
5044     - */
5045     - char name[MAX_CFTYPE_NAME];
5046     - int private;
5047     - /*
5048     - * If not 0, file mode is set to this value, otherwise it will
5049     - * be figured out automatically
5050     - */
5051     - umode_t mode;
5052     -
5053     - /*
5054     - * The maximum length of string, excluding trailing nul, that can
5055     - * be passed to write. If < PAGE_SIZE-1, PAGE_SIZE-1 is assumed.
5056     - */
5057     - size_t max_write_len;
5058     -
5059     - /* CFTYPE_* flags */
5060     - unsigned int flags;
5061     -
5062     - /*
5063     - * Fields used for internal bookkeeping. Initialized automatically
5064     - * during registration.
5065     - */
5066     - struct cgroup_subsys *ss; /* NULL for cgroup core files */
5067     - struct list_head node; /* anchored at ss->cfts */
5068     - struct kernfs_ops *kf_ops;
5069     -
5070     - /*
5071     - * read_u64() is a shortcut for the common case of returning a
5072     - * single integer. Use it in place of read()
5073     - */
5074     - u64 (*read_u64)(struct cgroup_subsys_state *css, struct cftype *cft);
5075     - /*
5076     - * read_s64() is a signed version of read_u64()
5077     - */
5078     - s64 (*read_s64)(struct cgroup_subsys_state *css, struct cftype *cft);
5079     -
5080     - /* generic seq_file read interface */
5081     - int (*seq_show)(struct seq_file *sf, void *v);
5082     -
5083     - /* optional ops, implement all or none */
5084     - void *(*seq_start)(struct seq_file *sf, loff_t *ppos);
5085     - void *(*seq_next)(struct seq_file *sf, void *v, loff_t *ppos);
5086     - void (*seq_stop)(struct seq_file *sf, void *v);
5087     -
5088     - /*
5089     - * write_u64() is a shortcut for the common case of accepting
5090     - * a single integer (as parsed by simple_strtoull) from
5091     - * userspace. Use in place of write(); return 0 or error.
5092     - */
5093     - int (*write_u64)(struct cgroup_subsys_state *css, struct cftype *cft,
5094     - u64 val);
5095     - /*
5096     - * write_s64() is a signed version of write_u64()
5097     - */
5098     - int (*write_s64)(struct cgroup_subsys_state *css, struct cftype *cft,
5099     - s64 val);
5100     -
5101     - /*
5102     - * write() is the generic write callback which maps directly to
5103     - * kernfs write operation and overrides all other operations.
5104     - * Maximum write size is determined by ->max_write_len. Use
5105     - * of_css/cft() to access the associated css and cft.
5106     - */
5107     - ssize_t (*write)(struct kernfs_open_file *of,
5108     - char *buf, size_t nbytes, loff_t off);
5109     -
5110     -#ifdef CONFIG_DEBUG_LOCK_ALLOC
5111     - struct lock_class_key lockdep_key;
5112     -#endif
5113     -};
5114     -
5115     extern struct cgroup_root cgrp_dfl_root;
5116     extern struct css_set init_css_set;
5117    
5118     @@ -612,11 +244,6 @@ int cgroup_rm_cftypes(struct cftype *cfts);
5119    
5120     bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);
5121    
5122     -/*
5123     - * Control Group taskset, used to pass around set of tasks to cgroup_subsys
5124     - * methods.
5125     - */
5126     -struct cgroup_taskset;
5127     struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset);
5128     struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset);
5129    
5130     @@ -629,84 +256,6 @@ struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset);
5131     for ((task) = cgroup_taskset_first((tset)); (task); \
5132     (task) = cgroup_taskset_next((tset)))
5133    
5134     -/*
5135     - * Control Group subsystem type.
5136     - * See Documentation/cgroups/cgroups.txt for details
5137     - */
5138     -
5139     -struct cgroup_subsys {
5140     - struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
5141     - int (*css_online)(struct cgroup_subsys_state *css);
5142     - void (*css_offline)(struct cgroup_subsys_state *css);
5143     - void (*css_released)(struct cgroup_subsys_state *css);
5144     - void (*css_free)(struct cgroup_subsys_state *css);
5145     - void (*css_reset)(struct cgroup_subsys_state *css);
5146     - void (*css_e_css_changed)(struct cgroup_subsys_state *css);
5147     -
5148     - int (*can_attach)(struct cgroup_subsys_state *css,
5149     - struct cgroup_taskset *tset);
5150     - void (*cancel_attach)(struct cgroup_subsys_state *css,
5151     - struct cgroup_taskset *tset);
5152     - void (*attach)(struct cgroup_subsys_state *css,
5153     - struct cgroup_taskset *tset);
5154     - void (*fork)(struct task_struct *task);
5155     - void (*exit)(struct cgroup_subsys_state *css,
5156     - struct cgroup_subsys_state *old_css,
5157     - struct task_struct *task);
5158     - void (*bind)(struct cgroup_subsys_state *root_css);
5159     -
5160     - int disabled;
5161     - int early_init;
5162     -
5163     - /*
5164     - * If %false, this subsystem is properly hierarchical -
5165     - * configuration, resource accounting and restriction on a parent
5166     - * cgroup cover those of its children. If %true, hierarchy support
5167     - * is broken in some ways - some subsystems ignore hierarchy
5168     - * completely while others are only implemented half-way.
5169     - *
5170     - * It's now disallowed to create nested cgroups if the subsystem is
5171     - * broken and cgroup core will emit a warning message in such
5172     - * cases. Eventually, all subsystems will be made properly
5173     - * hierarchical and this will go away.
5174     - */
5175     - bool broken_hierarchy;
5176     - bool warned_broken_hierarchy;
5177     -
5178     - /* the following two fields are initialized automatically during boot */
5179     - int id;
5180     -#define MAX_CGROUP_TYPE_NAMELEN 32
5181     - const char *name;
5182     -
5183     - /* link to parent, protected by cgroup_lock() */
5184     - struct cgroup_root *root;
5185     -
5186     - /* idr for css->id */
5187     - struct idr css_idr;
5188     -
5189     - /*
5190     - * List of cftypes. Each entry is the first entry of an array
5191     - * terminated by zero length name.
5192     - */
5193     - struct list_head cfts;
5194     -
5195     - /*
5196     - * Base cftypes which are automatically registered. The two can
5197     - * point to the same array.
5198     - */
5199     - struct cftype *dfl_cftypes; /* for the default hierarchy */
5200     - struct cftype *legacy_cftypes; /* for the legacy hierarchies */
5201     -
5202     - /*
5203     - * A subsystem may depend on other subsystems. When such a subsystem
5204     - * is enabled on a cgroup, the depended-upon subsystems are enabled
5205     - * together if available. Subsystems enabled due to dependency are
5206     - * not visible to userland until explicitly enabled. The following
5207     - * specifies the mask of subsystems that this one depends on.
5208     - */
5209     - unsigned int depends_on;
5210     -};
5211     -
5212     #define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
5213     #include <linux/cgroup_subsys.h>
5214     #undef SUBSYS
5215     diff --git a/include/linux/compiler.h b/include/linux/compiler.h
5216     index 867722591be2..99728072e536 100644
5217     --- a/include/linux/compiler.h
5218     +++ b/include/linux/compiler.h
5219     @@ -142,7 +142,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
5220     */
5221     #define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
5222     #define __trace_if(cond) \
5223     - if (__builtin_constant_p((cond)) ? !!(cond) : \
5224     + if (__builtin_constant_p(!!(cond)) ? !!(cond) : \
5225     ({ \
5226     int ______r; \
5227     static struct ftrace_branch_data \
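For context on the compiler.h change: with CONFIG_PROFILE_ALL_BRANCHES, every if() in the kernel is rewritten through __trace_if(), and __builtin_constant_p() is used to skip instrumenting conditions the compiler can prove constant. Querying the normalized boolean !!(cond), the very value the ternary branches on, keeps the constancy test and the instrumented branch from disagreeing about the same expression. A small demonstration of the builtin (GCC/Clang; answers for non-constants depend on optimization level):

    #include <stdio.h>

    int main(void)
    {
            int x = 4;

            printf("%d\n", __builtin_constant_p(42));            /* 1: literal */
            printf("%d\n", __builtin_constant_p(x));             /* typically 0 at -O0 */
            printf("%d\n", __builtin_constant_p(sizeof(x) > 1)); /* 1: folds to a constant */
            return 0;
    }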
5228     diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h
5229     index 251a2090a554..e0ee0b3000b2 100644
5230     --- a/include/linux/devpts_fs.h
5231     +++ b/include/linux/devpts_fs.h
5232     @@ -19,6 +19,8 @@
5233    
5234     int devpts_new_index(struct inode *ptmx_inode);
5235     void devpts_kill_index(struct inode *ptmx_inode, int idx);
5236     +void devpts_add_ref(struct inode *ptmx_inode);
5237     +void devpts_del_ref(struct inode *ptmx_inode);
5238     /* mknod in devpts */
5239     struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
5240     void *priv);
5241     @@ -32,6 +34,8 @@ void devpts_pty_kill(struct inode *inode);
5242     /* Dummy stubs in the no-pty case */
5243     static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; }
5244     static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { }
5245     +static inline void devpts_add_ref(struct inode *ptmx_inode) { }
5246     +static inline void devpts_del_ref(struct inode *ptmx_inode) { }
5247     static inline struct inode *devpts_pty_new(struct inode *ptmx_inode,
5248     dev_t device, int index, void *priv)
5249     {
5250     diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
5251     index e4b464983322..01c25923675b 100644
5252     --- a/include/linux/ipv6.h
5253     +++ b/include/linux/ipv6.h
5254     @@ -29,6 +29,7 @@ struct ipv6_devconf {
5255     __s32 max_desync_factor;
5256     __s32 max_addresses;
5257     __s32 accept_ra_defrtr;
5258     + __s32 accept_ra_min_hop_limit;
5259     __s32 accept_ra_pinfo;
5260     #ifdef CONFIG_IPV6_ROUTER_PREF
5261     __s32 accept_ra_rtr_pref;
5262     diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
5263     index 1f17abe23725..6633b0cd3fb9 100644
5264     --- a/include/linux/skbuff.h
5265     +++ b/include/linux/skbuff.h
5266     @@ -203,6 +203,7 @@ struct sk_buff;
5267     #else
5268     #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
5269     #endif
5270     +extern int sysctl_max_skb_frags;
5271    
5272     typedef struct skb_frag_struct skb_frag_t;
5273    
5274     diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
5275     index a5f7f3ecafa3..a6e1bca88cc6 100644
5276     --- a/include/linux/tracepoint.h
5277     +++ b/include/linux/tracepoint.h
5278     @@ -14,8 +14,10 @@
5279     * See the file COPYING for more details.
5280     */
5281    
5282     +#include <linux/smp.h>
5283     #include <linux/errno.h>
5284     #include <linux/types.h>
5285     +#include <linux/cpumask.h>
5286     #include <linux/rcupdate.h>
5287     #include <linux/static_key.h>
5288    
5289     @@ -129,6 +131,9 @@ extern void syscall_unregfunc(void);
5290     void *it_func; \
5291     void *__data; \
5292     \
5293     + if (!cpu_online(raw_smp_processor_id())) \
5294     + return; \
5295     + \
5296     if (!(cond)) \
5297     return; \
5298     prercu; \
5299     diff --git a/include/net/af_unix.h b/include/net/af_unix.h
5300     index e830c3dff61a..7bb69c9c3c43 100644
5301     --- a/include/net/af_unix.h
5302     +++ b/include/net/af_unix.h
5303     @@ -6,8 +6,8 @@
5304     #include <linux/mutex.h>
5305     #include <net/sock.h>
5306    
5307     -void unix_inflight(struct file *fp);
5308     -void unix_notinflight(struct file *fp);
5309     +void unix_inflight(struct user_struct *user, struct file *fp);
5310     +void unix_notinflight(struct user_struct *user, struct file *fp);
5311     void unix_gc(void);
5312     void wait_for_unix_gc(void);
5313     struct sock *unix_get_socket(struct file *filp);
5314     diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
5315     index 5e192068e6cb..388dea4da083 100644
5316     --- a/include/net/ip6_route.h
5317     +++ b/include/net/ip6_route.h
5318     @@ -64,8 +64,16 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)
5319    
5320     void ip6_route_input(struct sk_buff *skb);
5321    
5322     -struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
5323     - struct flowi6 *fl6);
5324     +struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
5325     + struct flowi6 *fl6, int flags);
5326     +
5327     +static inline struct dst_entry *ip6_route_output(struct net *net,
5328     + const struct sock *sk,
5329     + struct flowi6 *fl6)
5330     +{
5331     + return ip6_route_output_flags(net, sk, fl6, 0);
5332     +}
5333     +
5334     struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
5335     int flags);
5336    
5337     diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
5338     index 54271ed0ed45..13f1a97f6b2b 100644
5339     --- a/include/net/ip_fib.h
5340     +++ b/include/net/ip_fib.h
5341     @@ -59,6 +59,7 @@ struct fib_nh_exception {
5342     struct rtable __rcu *fnhe_rth_input;
5343     struct rtable __rcu *fnhe_rth_output;
5344     unsigned long fnhe_stamp;
5345     + struct rcu_head rcu;
5346     };
5347    
5348     struct fnhe_hash_bucket {
5349     diff --git a/include/net/scm.h b/include/net/scm.h
5350     index 262532d111f5..59fa93c01d2a 100644
5351     --- a/include/net/scm.h
5352     +++ b/include/net/scm.h
5353     @@ -21,6 +21,7 @@ struct scm_creds {
5354     struct scm_fp_list {
5355     short count;
5356     short max;
5357     + struct user_struct *user;
5358     struct file *fp[SCM_MAX_FD];
5359     };
5360    
5361     diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
5362     index 0f4dc3768587..24c8d9d0d946 100644
5363     --- a/include/target/target_core_fabric.h
5364     +++ b/include/target/target_core_fabric.h
5365     @@ -155,8 +155,8 @@ bool transport_wait_for_tasks(struct se_cmd *);
5366     int transport_check_aborted_status(struct se_cmd *, int);
5367     int transport_send_check_condition_and_sense(struct se_cmd *,
5368     sense_reason_t, int);
5369     -int target_get_sess_cmd(struct se_session *, struct se_cmd *, bool);
5370     -int target_put_sess_cmd(struct se_session *, struct se_cmd *);
5371     +int target_get_sess_cmd(struct se_cmd *, bool);
5372     +int target_put_sess_cmd(struct se_cmd *);
5373     void target_sess_cmd_list_set_waiting(struct se_session *);
5374     void target_wait_for_sess_cmds(struct se_session *);
5375    
5376     diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
5377     index 5efa54ae567c..80f3b74446a1 100644
5378     --- a/include/uapi/linux/ipv6.h
5379     +++ b/include/uapi/linux/ipv6.h
5380     @@ -171,6 +171,8 @@ enum {
5381     DEVCONF_USE_OPTIMISTIC,
5382     DEVCONF_ACCEPT_RA_MTU,
5383     DEVCONF_STABLE_SECRET,
5384     + DEVCONF_USE_OIF_ADDRS_ONLY,
5385     + DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT,
5386     DEVCONF_MAX
5387     };
5388    
5389     diff --git a/ipc/msgutil.c b/ipc/msgutil.c
5390     index 2b491590ebab..71f448e5e927 100644
5391     --- a/ipc/msgutil.c
5392     +++ b/ipc/msgutil.c
5393     @@ -123,7 +123,7 @@ struct msg_msg *copy_msg(struct msg_msg *src, struct msg_msg *dst)
5394     size_t len = src->m_ts;
5395     size_t alen;
5396    
5397     - BUG_ON(dst == NULL);
5398     + WARN_ON(dst == NULL);
5399     if (src->m_ts > dst->m_ts)
5400     return ERR_PTR(-EINVAL);
5401    
5402     diff --git a/ipc/shm.c b/ipc/shm.c
5403     index 499a8bd22fad..bbe5f62f2b12 100644
5404     --- a/ipc/shm.c
5405     +++ b/ipc/shm.c
5406     @@ -155,9 +155,13 @@ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
5407     {
5408     struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);
5409    
5410     + /*
5411     + * Callers of shm_lock() must validate the status of the returned ipc
5412     + * object pointer (as returned by ipc_lock()), and error out as
5413     + * appropriate.
5414     + */
5415     if (IS_ERR(ipcp))
5416     - return (struct shmid_kernel *)ipcp;
5417     -
5418     + return (void *)ipcp;
5419     return container_of(ipcp, struct shmid_kernel, shm_perm);
5420     }
5421    
5422     @@ -183,19 +187,33 @@ static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
5423     }
5424    
5425    
5426     -/* This is called by fork, once for every shm attach. */
5427     -static void shm_open(struct vm_area_struct *vma)
5428     +static int __shm_open(struct vm_area_struct *vma)
5429     {
5430     struct file *file = vma->vm_file;
5431     struct shm_file_data *sfd = shm_file_data(file);
5432     struct shmid_kernel *shp;
5433    
5434     shp = shm_lock(sfd->ns, sfd->id);
5435     - BUG_ON(IS_ERR(shp));
5436     +
5437     + if (IS_ERR(shp))
5438     + return PTR_ERR(shp);
5439     +
5440     shp->shm_atim = get_seconds();
5441     shp->shm_lprid = task_tgid_vnr(current);
5442     shp->shm_nattch++;
5443     shm_unlock(shp);
5444     + return 0;
5445     +}
5446     +
5447     +/* This is called by fork, once for every shm attach. */
5448     +static void shm_open(struct vm_area_struct *vma)
5449     +{
5450     + int err = __shm_open(vma);
5451     + /*
5452     + * We raced in the idr lookup or with shm_destroy().
5453     + * Either way, the ID is busted.
5454     + */
5455     + WARN_ON_ONCE(err);
5456     }
5457    
5458     /*
5459     @@ -258,7 +276,14 @@ static void shm_close(struct vm_area_struct *vma)
5460     down_write(&shm_ids(ns).rwsem);
5461     /* remove from the list of attaches of the shm segment */
5462     shp = shm_lock(ns, sfd->id);
5463     - BUG_ON(IS_ERR(shp));
5464     +
5465     + /*
5466     + * We raced in the idr lookup or with shm_destroy().
5467     + * Either way, the ID is busted.
5468     + */
5469     + if (WARN_ON_ONCE(IS_ERR(shp)))
5470     + goto done; /* no-op */
5471     +
5472     shp->shm_lprid = task_tgid_vnr(current);
5473     shp->shm_dtim = get_seconds();
5474     shp->shm_nattch--;
5475     @@ -266,6 +291,7 @@ static void shm_close(struct vm_area_struct *vma)
5476     shm_destroy(ns, shp);
5477     else
5478     shm_unlock(shp);
5479     +done:
5480     up_write(&shm_ids(ns).rwsem);
5481     }
5482    
5483     @@ -387,17 +413,25 @@ static int shm_mmap(struct file *file, struct vm_area_struct *vma)
5484     struct shm_file_data *sfd = shm_file_data(file);
5485     int ret;
5486    
5487     + /*
5488     + * In case of remap_file_pages() emulation, the file can represent
5489     + * a removed IPC ID: propagate shm_lock() error to caller.
5490     + */
5491     + ret = __shm_open(vma);
5492     + if (ret)
5493     + return ret;
5494     +
5495     ret = sfd->file->f_op->mmap(sfd->file, vma);
5496     - if (ret != 0)
5497     + if (ret) {
5498     + shm_close(vma);
5499     return ret;
5500     + }
5501     sfd->vm_ops = vma->vm_ops;
5502     #ifdef CONFIG_MMU
5503     - BUG_ON(!sfd->vm_ops->fault);
5504     + WARN_ON(!sfd->vm_ops->fault);
5505     #endif
5506     vma->vm_ops = &shm_vm_ops;
5507     - shm_open(vma);
5508     -
5509     - return ret;
5510     + return 0;
5511     }
5512    
5513     static int shm_release(struct inode *ino, struct file *file)
5514     @@ -1192,7 +1226,6 @@ out_fput:
5515     out_nattch:
5516     down_write(&shm_ids(ns).rwsem);
5517     shp = shm_lock(ns, shmid);
5518     - BUG_ON(IS_ERR(shp));
5519     shp->shm_nattch--;
5520     if (shm_may_destroy(ns, shp))
5521     shm_destroy(ns, shp);
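All the ipc/shm.c hunks above retire BUG_ON(IS_ERR(shp)) in favor of propagating the failure: shm_lock() can legitimately return an error when racing with shm_destroy() or a stale idr lookup, so __shm_open() now returns PTR_ERR(), shm_mmap() forwards it to the caller, and the void vm_ops callbacks downgrade to WARN_ON_ONCE(). A self-contained sketch of the ERR_PTR encoding this relies on, with illustrative names throughout:

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            /* errors live in the top MAX_ERRNO values of the address space */
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    struct seg { int nattch; };
    static struct seg segs[4];

    /* toy lookup: out-of-range ids fail like a racing shm_destroy() */
    static struct seg *seg_lock(int id)
    {
            if (id < 0 || id >= 4)
                    return ERR_PTR(-EINVAL);
            return &segs[id];
    }

    static int seg_open(int id)
    {
            struct seg *s = seg_lock(id);

            if (IS_ERR(s))
                    return PTR_ERR(s);      /* propagate instead of BUG() */
            s->nattch++;
            return 0;
    }

    int main(void)
    {
            printf("%d %d\n", seg_open(1), seg_open(9));    /* 0 -22 */
            return 0;
    }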
5522     diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
5523     index 141d562064a7..6582410a71c7 100644
5524     --- a/kernel/bpf/verifier.c
5525     +++ b/kernel/bpf/verifier.c
5526     @@ -1944,7 +1944,7 @@ static void adjust_branches(struct bpf_prog *prog, int pos, int delta)
5527     /* adjust offset of jmps if necessary */
5528     if (i < pos && i + insn->off + 1 > pos)
5529     insn->off += delta;
5530     - else if (i > pos && i + insn->off + 1 < pos)
5531     + else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
5532     insn->off -= delta;
5533     }
5534     }
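For orientation on the verifier fix: adjust_branches() runs after delta extra instructions have been spliced in at position pos, so the loop index i already walks post-patch positions, and a jump at i targets i + off + 1. A forward jump from before the splice across it must grow by delta; a jump sitting after the spliced-in block whose target lands at or before the end of that block must shrink by delta, which is what the corrected pos + delta comparisons express (the old test compared against the stale pre-patch position and could misadjust backward jumps). A toy transcription of the rule onto a bare array of jump offsets (illustrative only, no instruction decoding):

    /* offs[i] is the relative offset of the jump at post-patch index i */
    static void adjust_branches_sketch(int *offs, int ninsns, int pos, int delta)
    {
            for (int i = 0; i < ninsns; i++) {
                    if (i < pos && i + offs[i] + 1 > pos)
                            offs[i] += delta;       /* forward jump across the splice */
                    else if (i > pos + delta && i + offs[i] + 1 <= pos + delta)
                            offs[i] -= delta;       /* backward jump over the splice */
            }
    }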
5535     diff --git a/kernel/cgroup.c b/kernel/cgroup.c
5536     index 4d65b66ae60d..359da3abb004 100644
5537     --- a/kernel/cgroup.c
5538     +++ b/kernel/cgroup.c
5539     @@ -4481,6 +4481,7 @@ static void init_and_link_css(struct cgroup_subsys_state *css,
5540     INIT_LIST_HEAD(&css->sibling);
5541     INIT_LIST_HEAD(&css->children);
5542     css->serial_nr = css_serial_nr_next++;
5543     + atomic_set(&css->online_cnt, 0);
5544    
5545     if (cgroup_parent(cgrp)) {
5546     css->parent = cgroup_css(cgroup_parent(cgrp), ss);
5547     @@ -4503,6 +4504,10 @@ static int online_css(struct cgroup_subsys_state *css)
5548     if (!ret) {
5549     css->flags |= CSS_ONLINE;
5550     rcu_assign_pointer(css->cgroup->subsys[ss->id], css);
5551     +
5552     + atomic_inc(&css->online_cnt);
5553     + if (css->parent)
5554     + atomic_inc(&css->parent->online_cnt);
5555     }
5556     return ret;
5557     }
5558     @@ -4740,10 +4745,15 @@ static void css_killed_work_fn(struct work_struct *work)
5559     container_of(work, struct cgroup_subsys_state, destroy_work);
5560    
5561     mutex_lock(&cgroup_mutex);
5562     - offline_css(css);
5563     - mutex_unlock(&cgroup_mutex);
5564    
5565     - css_put(css);
5566     + do {
5567     + offline_css(css);
5568     + css_put(css);
5569     + /* @css can't go away while we're holding cgroup_mutex */
5570     + css = css->parent;
5571     + } while (css && atomic_dec_and_test(&css->online_cnt));
5572     +
5573     + mutex_unlock(&cgroup_mutex);
5574     }
5575    
5576     /* css kill confirmation processing requires process context, bounce */
5577     @@ -4752,8 +4762,10 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
5578     struct cgroup_subsys_state *css =
5579     container_of(ref, struct cgroup_subsys_state, refcnt);
5580    
5581     - INIT_WORK(&css->destroy_work, css_killed_work_fn);
5582     - queue_work(cgroup_destroy_wq, &css->destroy_work);
5583     + if (atomic_dec_and_test(&css->online_cnt)) {
5584     + INIT_WORK(&css->destroy_work, css_killed_work_fn);
5585     + queue_work(cgroup_destroy_wq, &css->destroy_work);
5586     + }
5587     }
5588    
5589     /**
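The cgroup hunks above add ->online_cnt so that a parent css can never be offlined before its children: each css that comes online contributes one count to itself and one to its parent, css_killed_ref_fn() bounces to the workqueue only when its own count hits zero, and css_killed_work_fn() then walks up the parent chain, offlining each ancestor whose count it drops to zero. A compressed sketch of that walk-up pattern with simplified types (atomic_fetch_sub() returning 1 plays the role of atomic_dec_and_test()):

    #include <stdatomic.h>
    #include <stddef.h>

    struct node {
            struct node *parent;
            atomic_int online_cnt;  /* self (1) + number of online children */
    };

    static void offline_one(struct node *n) { (void)n; /* ->css_offline() etc. */ }

    static void offline_upwards(struct node *n)
    {
            do {
                    offline_one(n);
                    n = n->parent;  /* can't go away: the real code holds cgroup_mutex */
            } while (n && atomic_fetch_sub(&n->online_cnt, 1) == 1);
    }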
5590     diff --git a/kernel/workqueue.c b/kernel/workqueue.c
5591     index 5c01664c26e2..6d631161705c 100644
5592     --- a/kernel/workqueue.c
5593     +++ b/kernel/workqueue.c
5594     @@ -127,6 +127,11 @@ enum {
5595     *
5596     * PR: wq_pool_mutex protected for writes. Sched-RCU protected for reads.
5597     *
5598     + * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads.
5599     + *
5600     + * PWR: wq_pool_mutex and wq->mutex protected for writes. Either or
5601     + * sched-RCU for reads.
5602     + *
5603     * WQ: wq->mutex protected.
5604     *
5605     * WR: wq->mutex protected for writes. Sched-RCU protected for reads.
5606     @@ -247,8 +252,8 @@ struct workqueue_struct {
5607     int nr_drainers; /* WQ: drain in progress */
5608     int saved_max_active; /* WQ: saved pwq max_active */
5609    
5610     - struct workqueue_attrs *unbound_attrs; /* WQ: only for unbound wqs */
5611     - struct pool_workqueue *dfl_pwq; /* WQ: only for unbound wqs */
5612     + struct workqueue_attrs *unbound_attrs; /* PW: only for unbound wqs */
5613     + struct pool_workqueue *dfl_pwq; /* PW: only for unbound wqs */
5614    
5615     #ifdef CONFIG_SYSFS
5616     struct wq_device *wq_dev; /* I: for sysfs interface */
5617     @@ -268,7 +273,7 @@ struct workqueue_struct {
5618     /* hot fields used during command issue, aligned to cacheline */
5619     unsigned int flags ____cacheline_aligned; /* WQ: WQ_* flags */
5620     struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
5621     - struct pool_workqueue __rcu *numa_pwq_tbl[]; /* FR: unbound pwqs indexed by node */
5622     + struct pool_workqueue __rcu *numa_pwq_tbl[]; /* PWR: unbound pwqs indexed by node */
5623     };
5624    
5625     static struct kmem_cache *pwq_cache;
5626     @@ -347,6 +352,12 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
5627     lockdep_is_held(&wq->mutex), \
5628     "sched RCU or wq->mutex should be held")
5629    
5630     +#define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \
5631     + rcu_lockdep_assert(rcu_read_lock_sched_held() || \
5632     + lockdep_is_held(&wq->mutex) || \
5633     + lockdep_is_held(&wq_pool_mutex), \
5634     + "sched RCU, wq->mutex or wq_pool_mutex should be held")
5635     +
5636     #define for_each_cpu_worker_pool(pool, cpu) \
5637     for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
5638     (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
5639     @@ -551,7 +562,8 @@ static int worker_pool_assign_id(struct worker_pool *pool)
5640     * @wq: the target workqueue
5641     * @node: the node ID
5642     *
5643     - * This must be called either with pwq_lock held or sched RCU read locked.
5644     + * This must be called with any of wq_pool_mutex, wq->mutex or sched RCU
5645     + * read locked.
5646     * If the pwq needs to be used beyond the locking in effect, the caller is
5647     * responsible for guaranteeing that the pwq stays online.
5648     *
5649     @@ -560,7 +572,17 @@ static int worker_pool_assign_id(struct worker_pool *pool)
5650     static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
5651     int node)
5652     {
5653     - assert_rcu_or_wq_mutex(wq);
5654     + assert_rcu_or_wq_mutex_or_pool_mutex(wq);
5655     +
5656     + /*
5657     + * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a
5658     + * delayed item is pending. The plan is to keep CPU -> NODE
5659     + * mapping valid and stable across CPU on/offlines. Once that
5660     + * happens, this workaround can be removed.
5661     + */
5662     + if (unlikely(node == NUMA_NO_NODE))
5663     + return wq->dfl_pwq;
5664     +
5665     return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
5666     }
5667    
5668     @@ -1451,13 +1473,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
5669     timer_stats_timer_set_start_info(&dwork->timer);
5670    
5671     dwork->wq = wq;
5672     - /* timer isn't guaranteed to run in this cpu, record earlier */
5673     - if (cpu == WORK_CPU_UNBOUND)
5674     - cpu = raw_smp_processor_id();
5675     dwork->cpu = cpu;
5676     timer->expires = jiffies + delay;
5677    
5678     - add_timer_on(timer, cpu);
5679     + if (unlikely(cpu != WORK_CPU_UNBOUND))
5680     + add_timer_on(timer, cpu);
5681     + else
5682     + add_timer(timer);
5683     }
5684    
5685     /**
5686     @@ -3425,17 +3447,6 @@ static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
5687     return pwq;
5688     }
5689    
5690     -/* undo alloc_unbound_pwq(), used only in the error path */
5691     -static void free_unbound_pwq(struct pool_workqueue *pwq)
5692     -{
5693     - lockdep_assert_held(&wq_pool_mutex);
5694     -
5695     - if (pwq) {
5696     - put_unbound_pool(pwq->pool);
5697     - kmem_cache_free(pwq_cache, pwq);
5698     - }
5699     -}
5700     -
5701     /**
5702     * wq_calc_node_mask - calculate a wq_attrs' cpumask for the specified node
5703     * @attrs: the wq_attrs of interest
5704     @@ -3488,6 +3499,7 @@ static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
5705     {
5706     struct pool_workqueue *old_pwq;
5707    
5708     + lockdep_assert_held(&wq_pool_mutex);
5709     lockdep_assert_held(&wq->mutex);
5710    
5711     /* link_pwq() can handle duplicate calls */
5712     @@ -3498,42 +3510,48 @@ static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
5713     return old_pwq;
5714     }
5715    
5716     -/**
5717     - * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
5718     - * @wq: the target workqueue
5719     - * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
5720     - *
5721     - * Apply @attrs to an unbound workqueue @wq. Unless disabled, on NUMA
5722     - * machines, this function maps a separate pwq to each NUMA node with
5723     - * possibles CPUs in @attrs->cpumask so that work items are affine to the
5724     - * NUMA node it was issued on. Older pwqs are released as in-flight work
5725     - * items finish. Note that a work item which repeatedly requeues itself
5726     - * back-to-back will stay on its current pwq.
5727     - *
5728     - * Performs GFP_KERNEL allocations.
5729     - *
5730     - * Return: 0 on success and -errno on failure.
5731     - */
5732     -int apply_workqueue_attrs(struct workqueue_struct *wq,
5733     - const struct workqueue_attrs *attrs)
5734     +/* context to store the prepared attrs & pwqs before applying */
5735     +struct apply_wqattrs_ctx {
5736     + struct workqueue_struct *wq; /* target workqueue */
5737     + struct workqueue_attrs *attrs; /* attrs to apply */
5738     + struct pool_workqueue *dfl_pwq;
5739     + struct pool_workqueue *pwq_tbl[];
5740     +};
5741     +
5742     +/* free the resources after success or abort */
5743     +static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx)
5744     {
5745     + if (ctx) {
5746     + int node;
5747     +
5748     + for_each_node(node)
5749     + put_pwq_unlocked(ctx->pwq_tbl[node]);
5750     + put_pwq_unlocked(ctx->dfl_pwq);
5751     +
5752     + free_workqueue_attrs(ctx->attrs);
5753     +
5754     + kfree(ctx);
5755     + }
5756     +}
5757     +
5758     +/* allocate the attrs and pwqs for later installation */
5759     +static struct apply_wqattrs_ctx *
5760     +apply_wqattrs_prepare(struct workqueue_struct *wq,
5761     + const struct workqueue_attrs *attrs)
5762     +{
5763     + struct apply_wqattrs_ctx *ctx;
5764     struct workqueue_attrs *new_attrs, *tmp_attrs;
5765     - struct pool_workqueue **pwq_tbl, *dfl_pwq;
5766     - int node, ret;
5767     + int node;
5768    
5769     - /* only unbound workqueues can change attributes */
5770     - if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
5771     - return -EINVAL;
5772     + lockdep_assert_held(&wq_pool_mutex);
5773    
5774     - /* creating multiple pwqs breaks ordering guarantee */
5775     - if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
5776     - return -EINVAL;
5777     + ctx = kzalloc(sizeof(*ctx) + nr_node_ids * sizeof(ctx->pwq_tbl[0]),
5778     + GFP_KERNEL);
5779    
5780     - pwq_tbl = kzalloc(nr_node_ids * sizeof(pwq_tbl[0]), GFP_KERNEL);
5781     new_attrs = alloc_workqueue_attrs(GFP_KERNEL);
5782     tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL);
5783     - if (!pwq_tbl || !new_attrs || !tmp_attrs)
5784     - goto enomem;
5785     + if (!ctx || !new_attrs || !tmp_attrs)
5786     + goto out_free;
5787    
5788     /* make a copy of @attrs and sanitize it */
5789     copy_workqueue_attrs(new_attrs, attrs);
5790     @@ -3547,75 +3565,111 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
5791     copy_workqueue_attrs(tmp_attrs, new_attrs);
5792    
5793     /*
5794     - * CPUs should stay stable across pwq creations and installations.
5795     - * Pin CPUs, determine the target cpumask for each node and create
5796     - * pwqs accordingly.
5797     - */
5798     - get_online_cpus();
5799     -
5800     - mutex_lock(&wq_pool_mutex);
5801     -
5802     - /*
5803     * If something goes wrong during CPU up/down, we'll fall back to
5804     * the default pwq covering whole @attrs->cpumask. Always create
5805     * it even if we don't use it immediately.
5806     */
5807     - dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
5808     - if (!dfl_pwq)
5809     - goto enomem_pwq;
5810     + ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
5811     + if (!ctx->dfl_pwq)
5812     + goto out_free;
5813    
5814     for_each_node(node) {
5815     if (wq_calc_node_cpumask(attrs, node, -1, tmp_attrs->cpumask)) {
5816     - pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs);
5817     - if (!pwq_tbl[node])
5818     - goto enomem_pwq;
5819     + ctx->pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs);
5820     + if (!ctx->pwq_tbl[node])
5821     + goto out_free;
5822     } else {
5823     - dfl_pwq->refcnt++;
5824     - pwq_tbl[node] = dfl_pwq;
5825     + ctx->dfl_pwq->refcnt++;
5826     + ctx->pwq_tbl[node] = ctx->dfl_pwq;
5827     }
5828     }
5829    
5830     - mutex_unlock(&wq_pool_mutex);
5831     + ctx->attrs = new_attrs;
5832     + ctx->wq = wq;
5833     + free_workqueue_attrs(tmp_attrs);
5834     + return ctx;
5835     +
5836     +out_free:
5837     + free_workqueue_attrs(tmp_attrs);
5838     + free_workqueue_attrs(new_attrs);
5839     + apply_wqattrs_cleanup(ctx);
5840     + return NULL;
5841     +}
5842     +
5843     +/* set attrs and install prepared pwqs, @ctx points to old pwqs on return */
5844     +static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
5845     +{
5846     + int node;
5847    
5848     /* all pwqs have been created successfully, let's install'em */
5849     - mutex_lock(&wq->mutex);
5850     + mutex_lock(&ctx->wq->mutex);
5851    
5852     - copy_workqueue_attrs(wq->unbound_attrs, new_attrs);
5853     + copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);
5854    
5855     /* save the previous pwq and install the new one */
5856     for_each_node(node)
5857     - pwq_tbl[node] = numa_pwq_tbl_install(wq, node, pwq_tbl[node]);
5858     + ctx->pwq_tbl[node] = numa_pwq_tbl_install(ctx->wq, node,
5859     + ctx->pwq_tbl[node]);
5860    
5861     /* @dfl_pwq might not have been used, ensure it's linked */
5862     - link_pwq(dfl_pwq);
5863     - swap(wq->dfl_pwq, dfl_pwq);
5864     + link_pwq(ctx->dfl_pwq);
5865     + swap(ctx->wq->dfl_pwq, ctx->dfl_pwq);
5866    
5867     - mutex_unlock(&wq->mutex);
5868     + mutex_unlock(&ctx->wq->mutex);
5869     +}
5870    
5871     - /* put the old pwqs */
5872     - for_each_node(node)
5873     - put_pwq_unlocked(pwq_tbl[node]);
5874     - put_pwq_unlocked(dfl_pwq);
5875     +/**
5876     + * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
5877     + * @wq: the target workqueue
5878     + * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
5879     + *
5880     + * Apply @attrs to an unbound workqueue @wq. Unless disabled, on NUMA
5881     + * machines, this function maps a separate pwq to each NUMA node with
5882     + * possible CPUs in @attrs->cpumask so that work items are affine to the
5883     + * NUMA node they were issued on. Older pwqs are released as in-flight work
5884     + * items finish. Note that a work item which repeatedly requeues itself
5885     + * back-to-back will stay on its current pwq.
5886     + *
5887     + * Performs GFP_KERNEL allocations.
5888     + *
5889     + * Return: 0 on success and -errno on failure.
5890     + */
5891     +int apply_workqueue_attrs(struct workqueue_struct *wq,
5892     + const struct workqueue_attrs *attrs)
5893     +{
5894     + struct apply_wqattrs_ctx *ctx;
5895     + int ret = -ENOMEM;
5896    
5897     - put_online_cpus();
5898     - ret = 0;
5899     - /* fall through */
5900     -out_free:
5901     - free_workqueue_attrs(tmp_attrs);
5902     - free_workqueue_attrs(new_attrs);
5903     - kfree(pwq_tbl);
5904     - return ret;
5905     + /* only unbound workqueues can change attributes */
5906     + if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
5907     + return -EINVAL;
5908     +
5909     + /* creating multiple pwqs breaks ordering guarantee */
5910     + if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
5911     + return -EINVAL;
5912     +
5913     + /*
5914     + * CPUs should stay stable across pwq creations and installations.
5915     + * Pin CPUs, determine the target cpumask for each node and create
5916     + * pwqs accordingly.
5917     + */
5918     + get_online_cpus();
5919     + mutex_lock(&wq_pool_mutex);
5920     +
5921     + ctx = apply_wqattrs_prepare(wq, attrs);
5922     +
5923     + /* the ctx has been prepared successfully, let's commit it */
5924     + if (ctx) {
5925     + apply_wqattrs_commit(ctx);
5926     + ret = 0;
5927     + }
5928    
5929     -enomem_pwq:
5930     - free_unbound_pwq(dfl_pwq);
5931     - for_each_node(node)
5932     - if (pwq_tbl && pwq_tbl[node] != dfl_pwq)
5933     - free_unbound_pwq(pwq_tbl[node]);
5934     mutex_unlock(&wq_pool_mutex);
5935     put_online_cpus();
5936     -enomem:
5937     - ret = -ENOMEM;
5938     - goto out_free;
5939     +
5940     + apply_wqattrs_cleanup(ctx);
5941     +
5942     + return ret;
5943     }
5944    
5945     /**
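
The kernel/workqueue.c diff above bundles several fixes: unbound_pwq_by_node()
now falls back to wq->dfl_pwq when a CPU going offline leaves @node as
NUMA_NO_NODE, __queue_delayed_work() uses plain add_timer() for
WORK_CPU_UNBOUND instead of pinning the timer to the local CPU, and
apply_workqueue_attrs() is split into three stages: apply_wqattrs_prepare()
performs every step that can fail (all of the GFP_KERNEL allocations),
apply_wqattrs_commit() only swaps pointers under wq->mutex and therefore
cannot fail, and apply_wqattrs_cleanup() releases either the aborted context
or, after a successful commit, the old pwqs that were swapped out, which is
why free_unbound_pwq() can be deleted. A minimal userspace sketch of that
prepare/commit/cleanup shape follows; every name in it (apply_ctx, prepare,
commit, cleanup) is illustrative, not kernel API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct apply_ctx {
        char *new_cfg;                  /* resource staged by prepare */
};

static struct apply_ctx *prepare(const char *cfg)
{
        struct apply_ctx *ctx = calloc(1, sizeof(*ctx));

        if (!ctx)
                return NULL;
        ctx->new_cfg = strdup(cfg);     /* all failable work happens here */
        if (!ctx->new_cfg) {
                free(ctx);
                return NULL;
        }
        return ctx;
}

static void commit(struct apply_ctx *ctx, char **live_cfg)
{
        char *old = *live_cfg;          /* commit only swaps pointers ... */

        *live_cfg = ctx->new_cfg;
        ctx->new_cfg = old;             /* ... and cannot fail */
}

static void cleanup(struct apply_ctx *ctx)
{
        if (ctx) {                      /* safe on the error path, too */
                free(ctx->new_cfg);     /* after commit: the old config */
                free(ctx);
        }
}

int main(void)
{
        char *live = strdup("old");
        struct apply_ctx *ctx = prepare("new");

        if (ctx)
                commit(ctx, &live);
        cleanup(ctx);                   /* frees old or aborted config */
        printf("%s\n", live);
        free(live);
        return 0;
}
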
5946     diff --git a/lib/klist.c b/lib/klist.c
5947     index 89b485a2a58d..2a072bfaeace 100644
5948     --- a/lib/klist.c
5949     +++ b/lib/klist.c
5950     @@ -282,9 +282,9 @@ void klist_iter_init_node(struct klist *k, struct klist_iter *i,
5951     struct klist_node *n)
5952     {
5953     i->i_klist = k;
5954     - i->i_cur = n;
5955     - if (n)
5956     - kref_get(&n->n_ref);
5957     + i->i_cur = NULL;
5958     + if (n && kref_get_unless_zero(&n->n_ref))
5959     + i->i_cur = n;
5960     }
5961     EXPORT_SYMBOL_GPL(klist_iter_init_node);
5962    
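
The lib/klist.c change closes a use-after-free window: klist_iter_init_node()
used to take a reference unconditionally, even when the node's refcount had
already dropped to zero while the node was still on the list. With
kref_get_unless_zero(), i->i_cur is only published when the get actually
succeeds. A small C11 sketch of the get-unless-zero idiom;
ref_get_unless_zero() below mirrors kref_get_unless_zero() in spirit only.

#include <stdatomic.h>
#include <stdio.h>

static int ref_get_unless_zero(atomic_int *refs)
{
        int old = atomic_load(refs);

        while (old != 0) {
                /* increment only if the count is still what we saw */
                if (atomic_compare_exchange_weak(refs, &old, old + 1))
                        return 1;       /* reference taken */
        }
        return 0;                       /* object already dying */
}

int main(void)
{
        atomic_int live = 2, dying = 0;

        printf("live: %d\n", ref_get_unless_zero(&live));       /* 1 */
        printf("dying: %d\n", ref_get_unless_zero(&dying));     /* 0 */
        return 0;
}
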
5963     diff --git a/mm/mmap.c b/mm/mmap.c
5964     index b639fa2721d8..d30b8f8f02b1 100644
5965     --- a/mm/mmap.c
5966     +++ b/mm/mmap.c
5967     @@ -2654,12 +2654,29 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
5968     if (!vma || !(vma->vm_flags & VM_SHARED))
5969     goto out;
5970    
5971     - if (start < vma->vm_start || start + size > vma->vm_end)
5972     + if (start < vma->vm_start)
5973     goto out;
5974    
5975     - if (pgoff == linear_page_index(vma, start)) {
5976     - ret = 0;
5977     - goto out;
5978     + if (start + size > vma->vm_end) {
5979     + struct vm_area_struct *next;
5980     +
5981     + for (next = vma->vm_next; next; next = next->vm_next) {
5982     + /* hole between vmas ? */
5983     + if (next->vm_start != next->vm_prev->vm_end)
5984     + goto out;
5985     +
5986     + if (next->vm_file != vma->vm_file)
5987     + goto out;
5988     +
5989     + if (next->vm_flags != vma->vm_flags)
5990     + goto out;
5991     +
5992     + if (start + size <= next->vm_end)
5993     + break;
5994     + }
5995     +
5996     + if (!next)
5997     + goto out;
5998     }
5999    
6000     prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
6001     @@ -2669,9 +2686,16 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
6002     flags &= MAP_NONBLOCK;
6003     flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
6004     if (vma->vm_flags & VM_LOCKED) {
6005     + struct vm_area_struct *tmp;
6006     flags |= MAP_LOCKED;
6007     +
6008     /* drop PG_Mlocked flag for over-mapped range */
6009     - munlock_vma_pages_range(vma, start, start + size);
6010     + for (tmp = vma; tmp->vm_start >= start + size;
6011     + tmp = tmp->vm_next) {
6012     + munlock_vma_pages_range(tmp,
6013     + max(tmp->vm_start, start),
6014     + min(tmp->vm_end, start + size));
6015     + }
6016     }
6017    
6018     file = get_file(vma->vm_file);
6019     diff --git a/net/bridge/br.c b/net/bridge/br.c
6020     index 02c24cf63c34..c72e01cf09d0 100644
6021     --- a/net/bridge/br.c
6022     +++ b/net/bridge/br.c
6023     @@ -121,6 +121,7 @@ static struct notifier_block br_device_notifier = {
6024     .notifier_call = br_device_event
6025     };
6026    
6027     +/* called with RTNL */
6028     static int br_netdev_switch_event(struct notifier_block *unused,
6029     unsigned long event, void *ptr)
6030     {
6031     @@ -130,7 +131,6 @@ static int br_netdev_switch_event(struct notifier_block *unused,
6032     struct netdev_switch_notifier_fdb_info *fdb_info;
6033     int err = NOTIFY_DONE;
6034    
6035     - rtnl_lock();
6036     p = br_port_get_rtnl(dev);
6037     if (!p)
6038     goto out;
6039     @@ -155,7 +155,6 @@ static int br_netdev_switch_event(struct notifier_block *unused,
6040     }
6041    
6042     out:
6043     - rtnl_unlock();
6044     return err;
6045     }
6046    
6047     diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
6048     index 2c35c02a931e..3556791fdc6e 100644
6049     --- a/net/core/flow_dissector.c
6050     +++ b/net/core/flow_dissector.c
6051     @@ -113,7 +113,6 @@ ip:
6052     case htons(ETH_P_IPV6): {
6053     const struct ipv6hdr *iph;
6054     struct ipv6hdr _iph;
6055     - __be32 flow_label;
6056    
6057     ipv6:
6058     iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
6059     @@ -130,8 +129,9 @@ ipv6:
6060     flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
6061     flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
6062    
6063     - flow_label = ip6_flowlabel(iph);
6064     - if (flow_label) {
6065     + if (skb && ip6_flowlabel(iph)) {
6066     + __be32 flow_label = ip6_flowlabel(iph);
6067     +
6068     /* Awesome, IPv6 packet has a flow label so we can
6069     * use that to represent the ports without any
6070     * further dissection.
6071     @@ -233,6 +233,13 @@ ipv6:
6072     return false;
6073     proto = eth->h_proto;
6074     nhoff += sizeof(*eth);
6075     +
6076     + /* Cap headers that we access via pointers at the
6077     + * end of the Ethernet header as our maximum alignment
6078     + * at that point is only 2 bytes.
6079     + */
6080     + if (NET_IP_ALIGN)
6081     + hlen = nhoff;
6082     }
6083     goto again;
6084     }
6085     diff --git a/net/core/scm.c b/net/core/scm.c
6086     index 8a1741b14302..dce0acb929f1 100644
6087     --- a/net/core/scm.c
6088     +++ b/net/core/scm.c
6089     @@ -87,6 +87,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
6090     *fplp = fpl;
6091     fpl->count = 0;
6092     fpl->max = SCM_MAX_FD;
6093     + fpl->user = NULL;
6094     }
6095     fpp = &fpl->fp[fpl->count];
6096    
6097     @@ -107,6 +108,10 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
6098     *fpp++ = file;
6099     fpl->count++;
6100     }
6101     +
6102     + if (!fpl->user)
6103     + fpl->user = get_uid(current_user());
6104     +
6105     return num;
6106     }
6107    
6108     @@ -119,6 +124,7 @@ void __scm_destroy(struct scm_cookie *scm)
6109     scm->fp = NULL;
6110     for (i=fpl->count-1; i>=0; i--)
6111     fput(fpl->fp[i]);
6112     + free_uid(fpl->user);
6113     kfree(fpl);
6114     }
6115     }
6116     @@ -336,6 +342,7 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
6117     for (i = 0; i < fpl->count; i++)
6118     get_file(fpl->fp[i]);
6119     new_fpl->max = new_fpl->count;
6120     + new_fpl->user = get_uid(fpl->user);
6121     }
6122     return new_fpl;
6123     }
6124     diff --git a/net/core/skbuff.c b/net/core/skbuff.c
6125     index 2e5fcda16570..c9793c6c5005 100644
6126     --- a/net/core/skbuff.c
6127     +++ b/net/core/skbuff.c
6128     @@ -79,6 +79,8 @@
6129    
6130     struct kmem_cache *skbuff_head_cache __read_mostly;
6131     static struct kmem_cache *skbuff_fclone_cache __read_mostly;
6132     +int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
6133     +EXPORT_SYMBOL(sysctl_max_skb_frags);
6134    
6135     /**
6136     * skb_panic - private function for out-of-line support
6137     diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
6138     index 95b6139d710c..a6beb7b6ae55 100644
6139     --- a/net/core/sysctl_net_core.c
6140     +++ b/net/core/sysctl_net_core.c
6141     @@ -26,6 +26,7 @@ static int zero = 0;
6142     static int one = 1;
6143     static int min_sndbuf = SOCK_MIN_SNDBUF;
6144     static int min_rcvbuf = SOCK_MIN_RCVBUF;
6145     +static int max_skb_frags = MAX_SKB_FRAGS;
6146    
6147     static int net_msg_warn; /* Unused, but still a sysctl */
6148    
6149     @@ -392,6 +393,15 @@ static struct ctl_table net_core_table[] = {
6150     .mode = 0644,
6151     .proc_handler = proc_dointvec
6152     },
6153     + {
6154     + .procname = "max_skb_frags",
6155     + .data = &sysctl_max_skb_frags,
6156     + .maxlen = sizeof(int),
6157     + .mode = 0644,
6158     + .proc_handler = proc_dointvec_minmax,
6159     + .extra1 = &one,
6160     + .extra2 = &max_skb_frags,
6161     + },
6162     { }
6163     };
6164    
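
Taken together, the net/core/skbuff.c and net/core/sysctl_net_core.c hunks add
a runtime cap on skb fragments: sysctl_max_skb_frags defaults to the
compile-time MAX_SKB_FRAGS and is exposed as net.core.max_skb_frags, bounded
between 1 and MAX_SKB_FRAGS by proc_dointvec_minmax (the tcp.c hunks further
down switch the send paths over to the sysctl). proc_dointvec_minmax rejects
out-of-range writes rather than clamping them; a sketch of that semantics,
where 17 is only a stand-in for the build-time limit:

#include <errno.h>
#include <stdio.h>

#define MAX_SKB_FRAGS_DEMO 17           /* stand-in, not the real value */

static int demo_max_skb_frags = MAX_SKB_FRAGS_DEMO;

static int set_max_skb_frags(int val)
{
        if (val < 1 || val > MAX_SKB_FRAGS_DEMO)
                return -EINVAL;         /* mirrors the extra1/extra2 bounds */
        demo_max_skb_frags = val;
        return 0;
}

int main(void)
{
        printf("%d\n", set_max_skb_frags(8));   /* 0: accepted */
        printf("%d\n", set_max_skb_frags(64));  /* -EINVAL: over the cap */
        printf("limit is now %d\n", demo_max_skb_frags);
        return 0;
}
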
6165     diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
6166     index 419d23c53ec7..280d46f947ea 100644
6167     --- a/net/ipv4/devinet.c
6168     +++ b/net/ipv4/devinet.c
6169     @@ -1839,7 +1839,7 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
6170     if (err < 0)
6171     goto errout;
6172    
6173     - err = EINVAL;
6174     + err = -EINVAL;
6175     if (!tb[NETCONFA_IFINDEX])
6176     goto errout;
6177    
6178     diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
6179     index 6ddde89996f4..b6c7bdea4853 100644
6180     --- a/net/ipv4/ip_sockglue.c
6181     +++ b/net/ipv4/ip_sockglue.c
6182     @@ -249,6 +249,8 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
6183     switch (cmsg->cmsg_type) {
6184     case IP_RETOPTS:
6185     err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
6186     +
6187     + /* Our caller is responsible for freeing ipc->opt */
6188     err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg),
6189     err < 40 ? err : 40);
6190     if (err)
6191     diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
6192     index 05ff44b758df..f6ee0d561aab 100644
6193     --- a/net/ipv4/ping.c
6194     +++ b/net/ipv4/ping.c
6195     @@ -745,8 +745,10 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
6196    
6197     if (msg->msg_controllen) {
6198     err = ip_cmsg_send(sock_net(sk), msg, &ipc, false);
6199     - if (err)
6200     + if (unlikely(err)) {
6201     + kfree(ipc.opt);
6202     return err;
6203     + }
6204     if (ipc.opt)
6205     free = 1;
6206     }
6207     diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
6208     index 561cd4b8fc6e..c77aac75759d 100644
6209     --- a/net/ipv4/raw.c
6210     +++ b/net/ipv4/raw.c
6211     @@ -543,8 +543,10 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
6212    
6213     if (msg->msg_controllen) {
6214     err = ip_cmsg_send(sock_net(sk), msg, &ipc, false);
6215     - if (err)
6216     + if (unlikely(err)) {
6217     + kfree(ipc.opt);
6218     goto out;
6219     + }
6220     if (ipc.opt)
6221     free = 1;
6222     }
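
The ip_sockglue.c comment plus the ping and raw changes (udp_sendmsg further
down gets the identical fix) plug a leak: ip_cmsg_send() can allocate
ipc->opt while handling one cmsg and then fail on a later one, so the caller
must free the options even when an error comes back. A userspace sketch of
that caller-frees-on-error contract; parse_opts() merely stands in for
ip_cmsg_send().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_opts(const char *src, char **opt)
{
        *opt = strdup(src);             /* allocation succeeds ... */
        if (!*opt)
                return -1;
        if (strlen(src) > 8)            /* ... but a later check fails */
                return -1;
        return 0;
}

int main(void)
{
        char *opt = NULL;

        if (parse_opts("way-too-long-option", &opt)) {
                free(opt);              /* the fix: free even on error */
                opt = NULL;
        }
        printf("opt=%p\n", (void *)opt);
        return 0;
}
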
6223     diff --git a/net/ipv4/route.c b/net/ipv4/route.c
6224     index f45f2a12f37b..1d3cdb4d4ebc 100644
6225     --- a/net/ipv4/route.c
6226     +++ b/net/ipv4/route.c
6227     @@ -125,6 +125,7 @@ static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
6228     static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
6229     static int ip_rt_min_advmss __read_mostly = 256;
6230    
6231     +static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
6232     /*
6233     * Interface to generic destination cache.
6234     */
6235     @@ -753,7 +754,7 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
6236     struct fib_nh *nh = &FIB_RES_NH(res);
6237    
6238     update_or_create_fnhe(nh, fl4->daddr, new_gw,
6239     - 0, 0);
6240     + 0, jiffies + ip_rt_gc_timeout);
6241     }
6242     if (kill_route)
6243     rt->dst.obsolete = DST_OBSOLETE_KILL;
6244     @@ -1538,6 +1539,36 @@ static void ip_handle_martian_source(struct net_device *dev,
6245     #endif
6246     }
6247    
6248     +static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
6249     +{
6250     + struct fnhe_hash_bucket *hash;
6251     + struct fib_nh_exception *fnhe, __rcu **fnhe_p;
6252     + u32 hval = fnhe_hashfun(daddr);
6253     +
6254     + spin_lock_bh(&fnhe_lock);
6255     +
6256     + hash = rcu_dereference_protected(nh->nh_exceptions,
6257     + lockdep_is_held(&fnhe_lock));
6258     + hash += hval;
6259     +
6260     + fnhe_p = &hash->chain;
6261     + fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
6262     + while (fnhe) {
6263     + if (fnhe->fnhe_daddr == daddr) {
6264     + rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
6265     + fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
6266     + fnhe_flush_routes(fnhe);
6267     + kfree_rcu(fnhe, rcu);
6268     + break;
6269     + }
6270     + fnhe_p = &fnhe->fnhe_next;
6271     + fnhe = rcu_dereference_protected(fnhe->fnhe_next,
6272     + lockdep_is_held(&fnhe_lock));
6273     + }
6274     +
6275     + spin_unlock_bh(&fnhe_lock);
6276     +}
6277     +
6278     /* called in rcu_read_lock() section */
6279     static int __mkroute_input(struct sk_buff *skb,
6280     const struct fib_result *res,
6281     @@ -1592,11 +1623,20 @@ static int __mkroute_input(struct sk_buff *skb,
6282    
6283     fnhe = find_exception(&FIB_RES_NH(*res), daddr);
6284     if (do_cache) {
6285     - if (fnhe)
6286     + if (fnhe) {
6287     rth = rcu_dereference(fnhe->fnhe_rth_input);
6288     - else
6289     - rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
6290     + if (rth && rth->dst.expires &&
6291     + time_after(jiffies, rth->dst.expires)) {
6292     + ip_del_fnhe(&FIB_RES_NH(*res), daddr);
6293     + fnhe = NULL;
6294     + } else {
6295     + goto rt_cache;
6296     + }
6297     + }
6298     +
6299     + rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
6300    
6301     +rt_cache:
6302     if (rt_cache_valid(rth)) {
6303     skb_dst_set_noref(skb, &rth->dst);
6304     goto out;
6305     @@ -1945,19 +1985,29 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
6306     struct fib_nh *nh = &FIB_RES_NH(*res);
6307    
6308     fnhe = find_exception(nh, fl4->daddr);
6309     - if (fnhe)
6310     + if (fnhe) {
6311     prth = &fnhe->fnhe_rth_output;
6312     - else {
6313     - if (unlikely(fl4->flowi4_flags &
6314     - FLOWI_FLAG_KNOWN_NH &&
6315     - !(nh->nh_gw &&
6316     - nh->nh_scope == RT_SCOPE_LINK))) {
6317     - do_cache = false;
6318     - goto add;
6319     + rth = rcu_dereference(*prth);
6320     + if (rth && rth->dst.expires &&
6321     + time_after(jiffies, rth->dst.expires)) {
6322     + ip_del_fnhe(nh, fl4->daddr);
6323     + fnhe = NULL;
6324     + } else {
6325     + goto rt_cache;
6326     }
6327     - prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
6328     }
6329     +
6330     + if (unlikely(fl4->flowi4_flags &
6331     + FLOWI_FLAG_KNOWN_NH &&
6332     + !(nh->nh_gw &&
6333     + nh->nh_scope == RT_SCOPE_LINK))) {
6334     + do_cache = false;
6335     + goto add;
6336     + }
6337     + prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
6338     rth = rcu_dereference(*prth);
6339     +
6340     +rt_cache:
6341     if (rt_cache_valid(rth)) {
6342     dst_hold(&rth->dst);
6343     return rth;
6344     @@ -2504,7 +2554,6 @@ void ip_rt_multicast_event(struct in_device *in_dev)
6345     }
6346    
6347     #ifdef CONFIG_SYSCTL
6348     -static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
6349     static int ip_rt_gc_interval __read_mostly = 60 * HZ;
6350     static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
6351     static int ip_rt_gc_elasticity __read_mostly = 8;
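
The net/ipv4/route.c changes give redirect-learned exceptions a finite
lifetime (update_or_create_fnhe() now passes jiffies + ip_rt_gc_timeout
instead of 0, and the variable moves out of the CONFIG_SYSCTL block so it is
always available) and teach __mkroute_input() and __mkroute_output() to drop
an expired entry through the new ip_del_fnhe(). That helper walks the hash
chain with an indirect fnhe_p pointer so that unlinking the head and a middle
node share one code path. The same unlink idiom in plain userspace C, without
the RCU accessors and locking the kernel version needs:

#include <stdio.h>
#include <stdlib.h>

struct node {
        int key;
        struct node *next;
};

static void del_key(struct node **head, int key)
{
        struct node **pp = head;        /* points at the link to patch */

        while (*pp) {
                if ((*pp)->key == key) {
                        struct node *victim = *pp;

                        *pp = victim->next;     /* head and middle alike */
                        free(victim);
                        return;
                }
                pp = &(*pp)->next;
        }
}

int main(void)
{
        struct node *head = NULL;

        for (int k = 3; k >= 1; k--) {
                struct node *n = malloc(sizeof(*n));

                n->key = k;
                n->next = head;
                head = n;
        }
        del_key(&head, 2);
        for (struct node *n = head; n; n = n->next)
                printf("%d ", n->key);  /* 1 3 */
        printf("\n");
        while (head) {
                struct node *n = head;

                head = head->next;
                free(n);
        }
        return 0;
}
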
6352     diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
6353     index bb2ce74f6004..19d385a0f02d 100644
6354     --- a/net/ipv4/tcp.c
6355     +++ b/net/ipv4/tcp.c
6356     @@ -279,6 +279,7 @@
6357    
6358     #include <asm/uaccess.h>
6359     #include <asm/ioctls.h>
6360     +#include <asm/unaligned.h>
6361     #include <net/busy_poll.h>
6362    
6363     int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
6364     @@ -921,7 +922,7 @@ new_segment:
6365    
6366     i = skb_shinfo(skb)->nr_frags;
6367     can_coalesce = skb_can_coalesce(skb, i, page, offset);
6368     - if (!can_coalesce && i >= MAX_SKB_FRAGS) {
6369     + if (!can_coalesce && i >= sysctl_max_skb_frags) {
6370     tcp_mark_push(tp, skb);
6371     goto new_segment;
6372     }
6373     @@ -1187,7 +1188,7 @@ new_segment:
6374    
6375     if (!skb_can_coalesce(skb, i, pfrag->page,
6376     pfrag->offset)) {
6377     - if (i == MAX_SKB_FRAGS || !sg) {
6378     + if (i == sysctl_max_skb_frags || !sg) {
6379     tcp_mark_push(tp, skb);
6380     goto new_segment;
6381     }
6382     @@ -2603,6 +2604,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
6383     const struct inet_connection_sock *icsk = inet_csk(sk);
6384     u32 now = tcp_time_stamp;
6385     unsigned int start;
6386     + u64 rate64;
6387     u32 rate;
6388    
6389     memset(info, 0, sizeof(*info));
6390     @@ -2665,15 +2667,17 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
6391     info->tcpi_total_retrans = tp->total_retrans;
6392    
6393     rate = READ_ONCE(sk->sk_pacing_rate);
6394     - info->tcpi_pacing_rate = rate != ~0U ? rate : ~0ULL;
6395     + rate64 = rate != ~0U ? rate : ~0ULL;
6396     + put_unaligned(rate64, &info->tcpi_pacing_rate);
6397    
6398     rate = READ_ONCE(sk->sk_max_pacing_rate);
6399     - info->tcpi_max_pacing_rate = rate != ~0U ? rate : ~0ULL;
6400     + rate64 = rate != ~0U ? rate : ~0ULL;
6401     + put_unaligned(rate64, &info->tcpi_max_pacing_rate);
6402    
6403     do {
6404     start = u64_stats_fetch_begin_irq(&tp->syncp);
6405     - info->tcpi_bytes_acked = tp->bytes_acked;
6406     - info->tcpi_bytes_received = tp->bytes_received;
6407     + put_unaligned(tp->bytes_acked, &info->tcpi_bytes_acked);
6408     + put_unaligned(tp->bytes_received, &info->tcpi_bytes_received);
6409     } while (u64_stats_fetch_retry_irq(&tp->syncp, start));
6410     }
6411     EXPORT_SYMBOL_GPL(tcp_get_info);
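
The tcp_get_info() hunks exist because the 64-bit tcpi fields are not
guaranteed 8-byte alignment in every userspace view of struct tcp_info, so a
direct u64 store could fault on strict-alignment architectures. The fix
stages each value in a local u64 (rate64) and writes it with put_unaligned().
A sketch of the portable equivalent, where memcpy plays the role of the
kernel helper:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void put_unaligned_u64(uint64_t val, void *p)
{
        memcpy(p, &val, sizeof(val));   /* compiler emits safe accesses */
}

int main(void)
{
        unsigned char buf[12] = { 0 };

        /* buf + 4 is not 8-byte aligned; a plain uint64_t store there
         * could trap on strict-alignment CPUs. */
        put_unaligned_u64(0x1122334455667788ULL, buf + 4);
        for (int i = 0; i < 12; i++)
                printf("%02x ", buf[i]);
        printf("\n");
        return 0;
}
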
6412     diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
6413     index cd18c3d3251e..13b92d595138 100644
6414     --- a/net/ipv4/tcp_ipv4.c
6415     +++ b/net/ipv4/tcp_ipv4.c
6416     @@ -705,7 +705,8 @@ release_sk1:
6417     outside socket context is ugly, certainly. What can I do?
6418     */
6419    
6420     -static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
6421     +static void tcp_v4_send_ack(struct net *net,
6422     + struct sk_buff *skb, u32 seq, u32 ack,
6423     u32 win, u32 tsval, u32 tsecr, int oif,
6424     struct tcp_md5sig_key *key,
6425     int reply_flags, u8 tos)
6426     @@ -720,7 +721,6 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
6427     ];
6428     } rep;
6429     struct ip_reply_arg arg;
6430     - struct net *net = dev_net(skb_dst(skb)->dev);
6431    
6432     memset(&rep.th, 0, sizeof(struct tcphdr));
6433     memset(&arg, 0, sizeof(arg));
6434     @@ -782,7 +782,8 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
6435     struct inet_timewait_sock *tw = inet_twsk(sk);
6436     struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
6437    
6438     - tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
6439     + tcp_v4_send_ack(sock_net(sk), skb,
6440     + tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
6441     tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
6442     tcp_time_stamp + tcptw->tw_ts_offset,
6443     tcptw->tw_ts_recent,
6444     @@ -801,8 +802,10 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
6445     /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
6446     * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
6447     */
6448     - tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
6449     - tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
6450     + u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
6451     + tcp_sk(sk)->snd_nxt;
6452     +
6453     + tcp_v4_send_ack(sock_net(sk), skb, seq,
6454     tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
6455     tcp_time_stamp,
6456     req->ts_recent,
6457     diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
6458     index 1b8c5ba7d5f7..a390174b96de 100644
6459     --- a/net/ipv4/udp.c
6460     +++ b/net/ipv4/udp.c
6461     @@ -963,8 +963,10 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
6462     if (msg->msg_controllen) {
6463     err = ip_cmsg_send(sock_net(sk), msg, &ipc,
6464     sk->sk_family == AF_INET6);
6465     - if (err)
6466     + if (unlikely(err)) {
6467     + kfree(ipc.opt);
6468     return err;
6469     + }
6470     if (ipc.opt)
6471     free = 1;
6472     connected = 0;
6473     diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
6474     index f4795b0d6e6e..f555f4fc1d62 100644
6475     --- a/net/ipv6/addrconf.c
6476     +++ b/net/ipv6/addrconf.c
6477     @@ -195,6 +195,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
6478     .max_addresses = IPV6_MAX_ADDRESSES,
6479     .accept_ra_defrtr = 1,
6480     .accept_ra_from_local = 0,
6481     + .accept_ra_min_hop_limit= 1,
6482     .accept_ra_pinfo = 1,
6483     #ifdef CONFIG_IPV6_ROUTER_PREF
6484     .accept_ra_rtr_pref = 1,
6485     @@ -236,6 +237,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
6486     .max_addresses = IPV6_MAX_ADDRESSES,
6487     .accept_ra_defrtr = 1,
6488     .accept_ra_from_local = 0,
6489     + .accept_ra_min_hop_limit= 1,
6490     .accept_ra_pinfo = 1,
6491     #ifdef CONFIG_IPV6_ROUTER_PREF
6492     .accept_ra_rtr_pref = 1,
6493     @@ -567,7 +569,7 @@ static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
6494     if (err < 0)
6495     goto errout;
6496    
6497     - err = EINVAL;
6498     + err = -EINVAL;
6499     if (!tb[NETCONFA_IFINDEX])
6500     goto errout;
6501    
6502     @@ -3421,6 +3423,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
6503     {
6504     struct inet6_dev *idev = ifp->idev;
6505     struct net_device *dev = idev->dev;
6506     + bool notify = false;
6507    
6508     addrconf_join_solict(dev, &ifp->addr);
6509    
6510     @@ -3466,7 +3469,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
6511     /* Because optimistic nodes can use this address,
6512     * notify listeners. If DAD fails, RTM_DELADDR is sent.
6513     */
6514     - ipv6_ifa_notify(RTM_NEWADDR, ifp);
6515     + notify = true;
6516     }
6517     }
6518    
6519     @@ -3474,6 +3477,8 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
6520     out:
6521     spin_unlock(&ifp->lock);
6522     read_unlock_bh(&idev->lock);
6523     + if (notify)
6524     + ipv6_ifa_notify(RTM_NEWADDR, ifp);
6525     }
6526    
6527     static void addrconf_dad_start(struct inet6_ifaddr *ifp)
6528     @@ -4565,6 +4570,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
6529     array[DEVCONF_MAX_DESYNC_FACTOR] = cnf->max_desync_factor;
6530     array[DEVCONF_MAX_ADDRESSES] = cnf->max_addresses;
6531     array[DEVCONF_ACCEPT_RA_DEFRTR] = cnf->accept_ra_defrtr;
6532     + array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] = cnf->accept_ra_min_hop_limit;
6533     array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo;
6534     #ifdef CONFIG_IPV6_ROUTER_PREF
6535     array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref;
6536     @@ -5458,6 +5464,13 @@ static struct addrconf_sysctl_table
6537     .proc_handler = proc_dointvec,
6538     },
6539     {
6540     + .procname = "accept_ra_min_hop_limit",
6541     + .data = &ipv6_devconf.accept_ra_min_hop_limit,
6542     + .maxlen = sizeof(int),
6543     + .mode = 0644,
6544     + .proc_handler = proc_dointvec,
6545     + },
6546     + {
6547     .procname = "accept_ra_pinfo",
6548     .data = &ipv6_devconf.accept_ra_pinfo,
6549     .maxlen = sizeof(int),
6550     diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
6551     index 13ca4cf5616f..8e6cb3f14326 100644
6552     --- a/net/ipv6/datagram.c
6553     +++ b/net/ipv6/datagram.c
6554     @@ -162,6 +162,9 @@ ipv4_connected:
6555     fl6.fl6_dport = inet->inet_dport;
6556     fl6.fl6_sport = inet->inet_sport;
6557    
6558     + if (!fl6.flowi6_oif)
6559     + fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
6560     +
6561     if (!fl6.flowi6_oif && (addr_type&IPV6_ADDR_MULTICAST))
6562     fl6.flowi6_oif = np->mcast_oif;
6563    
6564     diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
6565     index d491125011c4..db939e4ac68a 100644
6566     --- a/net/ipv6/ip6_flowlabel.c
6567     +++ b/net/ipv6/ip6_flowlabel.c
6568     @@ -540,12 +540,13 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
6569     }
6570     spin_lock_bh(&ip6_sk_fl_lock);
6571     for (sflp = &np->ipv6_fl_list;
6572     - (sfl = rcu_dereference(*sflp)) != NULL;
6573     + (sfl = rcu_dereference_protected(*sflp,
6574     + lockdep_is_held(&ip6_sk_fl_lock))) != NULL;
6575     sflp = &sfl->next) {
6576     if (sfl->fl->label == freq.flr_label) {
6577     if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
6578     np->flow_label &= ~IPV6_FLOWLABEL_MASK;
6579     - *sflp = rcu_dereference(sfl->next);
6580     + *sflp = sfl->next;
6581     spin_unlock_bh(&ip6_sk_fl_lock);
6582     fl_release(sfl->fl);
6583     kfree_rcu(sfl, rcu);
6584     diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
6585     index f50228b0abe5..36b9ac48b8fb 100644
6586     --- a/net/ipv6/ip6_output.c
6587     +++ b/net/ipv6/ip6_output.c
6588     @@ -885,6 +885,7 @@ static int ip6_dst_lookup_tail(struct sock *sk,
6589     struct rt6_info *rt;
6590     #endif
6591     int err;
6592     + int flags = 0;
6593    
6594     /* The correct way to handle this would be to do
6595     * ip6_route_get_saddr, and then ip6_route_output; however,
6596     @@ -916,10 +917,13 @@ static int ip6_dst_lookup_tail(struct sock *sk,
6597     dst_release(*dst);
6598     *dst = NULL;
6599     }
6600     +
6601     + if (fl6->flowi6_oif)
6602     + flags |= RT6_LOOKUP_F_IFACE;
6603     }
6604    
6605     if (!*dst)
6606     - *dst = ip6_route_output(net, sk, fl6);
6607     + *dst = ip6_route_output_flags(net, sk, fl6, flags);
6608    
6609     err = (*dst)->error;
6610     if (err)
6611     diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
6612     index 96f153c0846b..abb0bdda759a 100644
6613     --- a/net/ipv6/ndisc.c
6614     +++ b/net/ipv6/ndisc.c
6615     @@ -1225,18 +1225,16 @@ static void ndisc_router_discovery(struct sk_buff *skb)
6616    
6617     if (rt)
6618     rt6_set_expires(rt, jiffies + (HZ * lifetime));
6619     - if (ra_msg->icmph.icmp6_hop_limit) {
6620     - /* Only set hop_limit on the interface if it is higher than
6621     - * the current hop_limit.
6622     - */
6623     - if (in6_dev->cnf.hop_limit < ra_msg->icmph.icmp6_hop_limit) {
6624     + if (in6_dev->cnf.accept_ra_min_hop_limit < 256 &&
6625     + ra_msg->icmph.icmp6_hop_limit) {
6626     + if (in6_dev->cnf.accept_ra_min_hop_limit <= ra_msg->icmph.icmp6_hop_limit) {
6627     in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
6628     + if (rt)
6629     + dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
6630     + ra_msg->icmph.icmp6_hop_limit);
6631     } else {
6632     - ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than current\n");
6633     + ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than minimum\n");
6634     }
6635     - if (rt)
6636     - dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
6637     - ra_msg->icmph.icmp6_hop_limit);
6638     }
6639    
6640     skip_defrtr:
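
The addrconf.c hunks wire up the new accept_ra_min_hop_limit sysctl (per-device
defaults, netconf export, and the sysctl table entry), and
ndisc_router_discovery() now compares the advertised Cur Hop Limit against
that minimum instead of only ever raising the interface value. Because the
ICMPv6 field is 8 bits wide, setting the sysctl to 256 or higher disables RA
hop-limit processing outright, which is what the accept_ra_min_hop_limit < 256
guard implements. A sketch of the acceptance rule (an advertised value of 0
means the router left the hop limit unspecified, per RFC 4861):

#include <stdio.h>

static int accept_ra_hop_limit(int min_hop_limit, unsigned int ra_hop_limit)
{
        if (min_hop_limit >= 256)
                return 0;       /* an 8-bit field can never qualify */
        if (!ra_hop_limit)
                return 0;       /* router did not specify a hop limit */
        return ra_hop_limit >= (unsigned int)min_hop_limit;
}

int main(void)
{
        printf("%d\n", accept_ra_hop_limit(1, 64));     /* 1: default accepts */
        printf("%d\n", accept_ra_hop_limit(32, 8));     /* 0: below minimum */
        printf("%d\n", accept_ra_hop_limit(256, 255));  /* 0: feature off */
        return 0;
}
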
6641     diff --git a/net/ipv6/route.c b/net/ipv6/route.c
6642     index f371fefa7fdc..fe70bd6a7516 100644
6643     --- a/net/ipv6/route.c
6644     +++ b/net/ipv6/route.c
6645     @@ -1030,11 +1030,9 @@ static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table
6646     return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
6647     }
6648    
6649     -struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
6650     - struct flowi6 *fl6)
6651     +struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
6652     + struct flowi6 *fl6, int flags)
6653     {
6654     - int flags = 0;
6655     -
6656     fl6->flowi6_iif = LOOPBACK_IFINDEX;
6657    
6658     if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
6659     @@ -1047,7 +1045,7 @@ struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
6660    
6661     return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
6662     }
6663     -EXPORT_SYMBOL(ip6_route_output);
6664     +EXPORT_SYMBOL_GPL(ip6_route_output_flags);
6665    
6666     struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
6667     {
6668     diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
6669     index 6daa52a18d40..123f6f9f854c 100644
6670     --- a/net/iucv/af_iucv.c
6671     +++ b/net/iucv/af_iucv.c
6672     @@ -709,6 +709,9 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
6673     if (!addr || addr->sa_family != AF_IUCV)
6674     return -EINVAL;
6675    
6676     + if (addr_len < sizeof(struct sockaddr_iucv))
6677     + return -EINVAL;
6678     +
6679     lock_sock(sk);
6680     if (sk->sk_state != IUCV_OPEN) {
6681     err = -EBADFD;
6682     diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
6683     index 9e13c2ff8789..fe92a08b3cd5 100644
6684     --- a/net/l2tp/l2tp_netlink.c
6685     +++ b/net/l2tp/l2tp_netlink.c
6686     @@ -124,8 +124,13 @@ static int l2tp_tunnel_notify(struct genl_family *family,
6687     ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq,
6688     NLM_F_ACK, tunnel, cmd);
6689    
6690     - if (ret >= 0)
6691     - return genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
6692     + if (ret >= 0) {
6693     + ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
6694     + /* We don't care if no one is listening */
6695     + if (ret == -ESRCH)
6696     + ret = 0;
6697     + return ret;
6698     + }
6699    
6700     nlmsg_free(msg);
6701    
6702     @@ -147,8 +152,13 @@ static int l2tp_session_notify(struct genl_family *family,
6703     ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq,
6704     NLM_F_ACK, session, cmd);
6705    
6706     - if (ret >= 0)
6707     - return genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
6708     + if (ret >= 0) {
6709     + ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
6710     + /* We don't care if no one is listening */
6711     + if (ret == -ESRCH)
6712     + ret = 0;
6713     + return ret;
6714     + }
6715    
6716     nlmsg_free(msg);
6717    
6718     diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
6719     index e13c3c3ea4ac..9d134ab3351f 100644
6720     --- a/net/sctp/protocol.c
6721     +++ b/net/sctp/protocol.c
6722     @@ -60,6 +60,8 @@
6723     #include <net/inet_common.h>
6724     #include <net/inet_ecn.h>
6725    
6726     +#define MAX_SCTP_PORT_HASH_ENTRIES (64 * 1024)
6727     +
6728     /* Global data structures. */
6729     struct sctp_globals sctp_globals __read_mostly;
6730    
6731     @@ -1332,6 +1334,8 @@ static __init int sctp_init(void)
6732     unsigned long limit;
6733     int max_share;
6734     int order;
6735     + int num_entries;
6736     + int max_entry_order;
6737    
6738     sock_skb_cb_check_size(sizeof(struct sctp_ulpevent));
6739    
6740     @@ -1384,14 +1388,24 @@ static __init int sctp_init(void)
6741    
6742     /* Size and allocate the association hash table.
6743     * The methodology is similar to that of the tcp hash tables.
6744     + * Though not identical. Start by getting a goal size
6745     */
6746     if (totalram_pages >= (128 * 1024))
6747     goal = totalram_pages >> (22 - PAGE_SHIFT);
6748     else
6749     goal = totalram_pages >> (24 - PAGE_SHIFT);
6750    
6751     - for (order = 0; (1UL << order) < goal; order++)
6752     - ;
6753     + /* Then compute the page order for said goal */
6754     + order = get_order(goal);
6755     +
6756     + /* Now compute the required page order for the maximum sized table we
6757     + * want to create
6758     + */
6759     + max_entry_order = get_order(MAX_SCTP_PORT_HASH_ENTRIES *
6760     + sizeof(struct sctp_bind_hashbucket));
6761     +
6762     + /* Limit the page order by that maximum hash table size */
6763     + order = min(order, max_entry_order);
6764    
6765     do {
6766     sctp_assoc_hashsize = (1UL << order) * PAGE_SIZE /
6767     @@ -1425,27 +1439,42 @@ static __init int sctp_init(void)
6768     INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain);
6769     }
6770    
6771     - /* Allocate and initialize the SCTP port hash table. */
6772     + /* Allocate and initialize the SCTP port hash table.
6773     + * Note that order is initialized to start at the max-sized
6774     + * table we want to support. If we can't get that many pages,
6775     + * reduce the order and try again
6776     + */
6777     do {
6778     - sctp_port_hashsize = (1UL << order) * PAGE_SIZE /
6779     - sizeof(struct sctp_bind_hashbucket);
6780     - if ((sctp_port_hashsize > (64 * 1024)) && order > 0)
6781     - continue;
6782     sctp_port_hashtable = (struct sctp_bind_hashbucket *)
6783     __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, order);
6784     } while (!sctp_port_hashtable && --order > 0);
6785     +
6786     if (!sctp_port_hashtable) {
6787     pr_err("Failed bind hash alloc\n");
6788     status = -ENOMEM;
6789     goto err_bhash_alloc;
6790     }
6791     +
6792     + /* Now compute the number of entries that will fit in the
6793     + * port hash space we allocated
6794     + */
6795     + num_entries = (1UL << order) * PAGE_SIZE /
6796     + sizeof(struct sctp_bind_hashbucket);
6797     +
6798     + * And finish by rounding it down to the next power of two.
6799     + * This wastes some memory of course, but it's needed because
6800     + * the hash function operates on the assumption
6801     + * that the number of entries is a power of two
6802     + */
6803     + sctp_port_hashsize = rounddown_pow_of_two(num_entries);
6804     +
6805     for (i = 0; i < sctp_port_hashsize; i++) {
6806     spin_lock_init(&sctp_port_hashtable[i].lock);
6807     INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain);
6808     }
6809    
6810     - pr_info("Hash tables configured (established %d bind %d)\n",
6811     - sctp_assoc_hashsize, sctp_port_hashsize);
6812     + pr_info("Hash tables configured (established %d bind %d/%d)\n",
6813     + sctp_assoc_hashsize, sctp_port_hashsize, num_entries);
6814    
6815     sctp_sysctl_register();
6816    
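
The sctp_init() rework sizes the port hash in three explicit steps: derive a
page order from the memory goal with get_order(), clamp that order to what
MAX_SCTP_PORT_HASH_ENTRIES requires before ever calling __get_free_pages(),
and finally round the number of entries that actually fit down to a power of
two, since the hash function assumes a power-of-two table. The same
arithmetic in userspace, assuming 4 KiB pages and a made-up bucket size;
get_order() and rounddown_pow_of_two() are reimplemented here rather than
taken from kernel headers.

#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned int get_order(unsigned long size)
{
        unsigned int order = 0;

        size = (size + PAGE_SIZE - 1) / PAGE_SIZE;      /* in pages */
        while ((1UL << order) < size)
                order++;
        return order;
}

static unsigned long rounddown_pow_of_two(unsigned long n)
{
        while (n & (n - 1))
                n &= n - 1;     /* clear low bits until one remains */
        return n;
}

int main(void)
{
        unsigned long bucket_sz = 24;           /* stand-in bucket size */
        unsigned long max_entries = 64 * 1024;
        unsigned int order = get_order(max_entries * bucket_sz);
        unsigned long fit = (1UL << order) * PAGE_SIZE / bucket_sz;

        printf("order %u fits %lu entries, usable %lu\n",
               order, fit, rounddown_pow_of_two(fit));
        return 0;
}
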
6817     diff --git a/net/sctp/socket.c b/net/sctp/socket.c
6818     index 76e6ec62cf92..3c5833058b03 100644
6819     --- a/net/sctp/socket.c
6820     +++ b/net/sctp/socket.c
6821     @@ -5555,6 +5555,7 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
6822     struct sctp_hmac_algo_param *hmacs;
6823     __u16 data_len = 0;
6824     u32 num_idents;
6825     + int i;
6826    
6827     if (!ep->auth_enable)
6828     return -EACCES;
6829     @@ -5572,8 +5573,12 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
6830     return -EFAULT;
6831     if (put_user(num_idents, &p->shmac_num_idents))
6832     return -EFAULT;
6833     - if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len))
6834     - return -EFAULT;
6835     + for (i = 0; i < num_idents; i++) {
6836     + __u16 hmacid = ntohs(hmacs->hmac_ids[i]);
6837     +
6838     + if (copy_to_user(&p->shmac_idents[i], &hmacid, sizeof(__u16)))
6839     + return -EFAULT;
6840     + }
6841     return 0;
6842     }
6843    
6844     @@ -6653,6 +6658,7 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
6845    
6846     if (cmsgs->srinfo->sinfo_flags &
6847     ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
6848     + SCTP_SACK_IMMEDIATELY |
6849     SCTP_ABORT | SCTP_EOF))
6850     return -EINVAL;
6851     break;
6852     @@ -6676,6 +6682,7 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
6853    
6854     if (cmsgs->sinfo->snd_flags &
6855     ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
6856     + SCTP_SACK_IMMEDIATELY |
6857     SCTP_ABORT | SCTP_EOF))
6858     return -EINVAL;
6859     break;
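
sctp_getsockopt_hmac_ident() used to copy the HMAC identifier array to
userspace verbatim, i.e. still in network byte order; the fix converts each
__u16 with ntohs() as it is copied out. A trivial sketch of the per-element
conversion, assuming only POSIX htons()/ntohs():

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* identifiers as stored internally: network (big-endian) order */
        uint16_t wire_ids[] = { htons(1), htons(3) };
        uint16_t out[2];

        for (unsigned int i = 0; i < 2; i++)
                out[i] = ntohs(wire_ids[i]);    /* convert before export */

        printf("%u %u\n", out[0], out[1]);      /* 1 3 on any endianness */
        return 0;
}
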
6860     diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
6861     index 055453d48668..a8dbe8001e46 100644
6862     --- a/net/switchdev/switchdev.c
6863     +++ b/net/switchdev/switchdev.c
6864     @@ -15,6 +15,7 @@
6865     #include <linux/mutex.h>
6866     #include <linux/notifier.h>
6867     #include <linux/netdevice.h>
6868     +#include <linux/rtnetlink.h>
6869     #include <net/ip_fib.h>
6870     #include <net/switchdev.h>
6871    
6872     @@ -64,7 +65,6 @@ int netdev_switch_port_stp_update(struct net_device *dev, u8 state)
6873     }
6874     EXPORT_SYMBOL_GPL(netdev_switch_port_stp_update);
6875    
6876     -static DEFINE_MUTEX(netdev_switch_mutex);
6877     static RAW_NOTIFIER_HEAD(netdev_switch_notif_chain);
6878    
6879     /**
6880     @@ -79,9 +79,9 @@ int register_netdev_switch_notifier(struct notifier_block *nb)
6881     {
6882     int err;
6883    
6884     - mutex_lock(&netdev_switch_mutex);
6885     + rtnl_lock();
6886     err = raw_notifier_chain_register(&netdev_switch_notif_chain, nb);
6887     - mutex_unlock(&netdev_switch_mutex);
6888     + rtnl_unlock();
6889     return err;
6890     }
6891     EXPORT_SYMBOL_GPL(register_netdev_switch_notifier);
6892     @@ -97,9 +97,9 @@ int unregister_netdev_switch_notifier(struct notifier_block *nb)
6893     {
6894     int err;
6895    
6896     - mutex_lock(&netdev_switch_mutex);
6897     + rtnl_lock();
6898     err = raw_notifier_chain_unregister(&netdev_switch_notif_chain, nb);
6899     - mutex_unlock(&netdev_switch_mutex);
6900     + rtnl_unlock();
6901     return err;
6902     }
6903     EXPORT_SYMBOL_GPL(unregister_netdev_switch_notifier);
6904     @@ -113,16 +113,17 @@ EXPORT_SYMBOL_GPL(unregister_netdev_switch_notifier);
6905     * Call all network notifier blocks. This should be called by driver
6906     * when it needs to propagate hardware event.
6907     * Return values are same as for atomic_notifier_call_chain().
6908     + * rtnl_lock must be held.
6909     */
6910     int call_netdev_switch_notifiers(unsigned long val, struct net_device *dev,
6911     struct netdev_switch_notifier_info *info)
6912     {
6913     int err;
6914    
6915     + ASSERT_RTNL();
6916     +
6917     info->dev = dev;
6918     - mutex_lock(&netdev_switch_mutex);
6919     err = raw_notifier_call_chain(&netdev_switch_notif_chain, val, info);
6920     - mutex_unlock(&netdev_switch_mutex);
6921     return err;
6922     }
6923     EXPORT_SYMBOL_GPL(call_netdev_switch_notifiers);
6924     diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
6925     index 1c147c869c2e..948f316019d7 100644
6926     --- a/net/tipc/subscr.c
6927     +++ b/net/tipc/subscr.c
6928     @@ -302,11 +302,10 @@ static void subscr_conn_msg_event(struct net *net, int conid,
6929     struct tipc_net *tn = net_generic(net, tipc_net_id);
6930    
6931     spin_lock_bh(&subscriber->lock);
6932     - subscr_subscribe(net, (struct tipc_subscr *)buf, subscriber, &sub);
6933     - if (sub)
6934     - tipc_nametbl_subscribe(sub);
6935     - else
6936     + if (subscr_subscribe(net, (struct tipc_subscr *)buf, subscriber, &sub))
6937     tipc_conn_terminate(tn->topsrv, subscriber->conid);
6938     + else
6939     + tipc_nametbl_subscribe(sub);
6940     spin_unlock_bh(&subscriber->lock);
6941     }
6942    
6943     diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
6944     index cb3a01a9ed38..535a642a1688 100644
6945     --- a/net/unix/af_unix.c
6946     +++ b/net/unix/af_unix.c
6947     @@ -1464,7 +1464,7 @@ static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
6948     UNIXCB(skb).fp = NULL;
6949    
6950     for (i = scm->fp->count-1; i >= 0; i--)
6951     - unix_notinflight(scm->fp->fp[i]);
6952     + unix_notinflight(scm->fp->user, scm->fp->fp[i]);
6953     }
6954    
6955     static void unix_destruct_scm(struct sk_buff *skb)
6956     @@ -1529,7 +1529,7 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
6957     return -ENOMEM;
6958    
6959     for (i = scm->fp->count - 1; i >= 0; i--)
6960     - unix_inflight(scm->fp->fp[i]);
6961     + unix_inflight(scm->fp->user, scm->fp->fp[i]);
6962     return max_level;
6963     }
6964    
6965     @@ -1714,7 +1714,12 @@ restart_locked:
6966     goto out_unlock;
6967     }
6968    
6969     - if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
6970     + /* other == sk && unix_peer(other) != sk if
6971     + * - unix_peer(sk) == NULL, destination address bound to sk
6972     + * - unix_peer(sk) == sk by time of get but disconnected before lock
6973     + */
6974     + if (other != sk &&
6975     + unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
6976     if (timeo) {
6977     timeo = unix_wait_for_peer(other, timeo);
6978    
6979     @@ -2131,6 +2136,7 @@ again:
6980    
6981     if (signal_pending(current)) {
6982     err = sock_intr_errno(timeo);
6983     + scm_destroy(&scm);
6984     goto out;
6985     }
6986    
6987     diff --git a/net/unix/diag.c b/net/unix/diag.c
6988     index c512f64d5287..4d9679701a6d 100644
6989     --- a/net/unix/diag.c
6990     +++ b/net/unix/diag.c
6991     @@ -220,7 +220,7 @@ done:
6992     return skb->len;
6993     }
6994    
6995     -static struct sock *unix_lookup_by_ino(int ino)
6996     +static struct sock *unix_lookup_by_ino(unsigned int ino)
6997     {
6998     int i;
6999     struct sock *sk;
7000     diff --git a/net/unix/garbage.c b/net/unix/garbage.c
7001     index 8fcdc2283af5..6a0d48525fcf 100644
7002     --- a/net/unix/garbage.c
7003     +++ b/net/unix/garbage.c
7004     @@ -116,7 +116,7 @@ struct sock *unix_get_socket(struct file *filp)
7005     * descriptor if it is for an AF_UNIX socket.
7006     */
7007    
7008     -void unix_inflight(struct file *fp)
7009     +void unix_inflight(struct user_struct *user, struct file *fp)
7010     {
7011     struct sock *s = unix_get_socket(fp);
7012    
7013     @@ -133,11 +133,11 @@ void unix_inflight(struct file *fp)
7014     }
7015     unix_tot_inflight++;
7016     }
7017     - fp->f_cred->user->unix_inflight++;
7018     + user->unix_inflight++;
7019     spin_unlock(&unix_gc_lock);
7020     }
7021    
7022     -void unix_notinflight(struct file *fp)
7023     +void unix_notinflight(struct user_struct *user, struct file *fp)
7024     {
7025     struct sock *s = unix_get_socket(fp);
7026    
7027     @@ -152,7 +152,7 @@ void unix_notinflight(struct file *fp)
7028     list_del_init(&u->link);
7029     unix_tot_inflight--;
7030     }
7031     - fp->f_cred->user->unix_inflight--;
7032     + user->unix_inflight--;
7033     spin_unlock(&unix_gc_lock);
7034     }
7035    
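
The net/core/scm.c hunks earlier in this patch and the af_unix/garbage.c
changes here belong to one fix: the sending user is pinned once in the
scm_fp_list (fpl->user, taken with get_uid() and dropped in __scm_destroy()),
and unix_inflight()/unix_notinflight() now charge that recorded user instead
of fp->f_cred->user, so the in-flight accounting stays balanced even if a
descriptor changes hands between attach and detach. A sketch of that
snapshot-based accounting; every name here is illustrative.

#include <stdio.h>

struct user {
        long unix_inflight;
};

struct fp_list {
        struct user *user;      /* pinned once, reused for uncharge */
        int count;
};

static void charge(struct fp_list *fpl)
{
        fpl->user->unix_inflight += fpl->count;
}

static void uncharge(struct fp_list *fpl)
{
        fpl->user->unix_inflight -= fpl->count;
}

int main(void)
{
        struct user sender = { 0 };
        struct fp_list fpl = { .user = &sender, .count = 3 };

        charge(&fpl);           /* attach: fds go in flight */
        uncharge(&fpl);         /* detach: the same user is credited */
        printf("%ld\n", sender.unix_inflight);  /* 0: balanced */
        return 0;
}
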
7036     diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
7037     index 582091498819..d6bc2b3af9ef 100644
7038     --- a/security/integrity/evm/evm_main.c
7039     +++ b/security/integrity/evm/evm_main.c
7040     @@ -23,6 +23,7 @@
7041     #include <linux/integrity.h>
7042     #include <linux/evm.h>
7043     #include <crypto/hash.h>
7044     +#include <crypto/algapi.h>
7045     #include "evm.h"
7046    
7047     int evm_initialized;
7048     @@ -148,7 +149,7 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
7049     xattr_value_len, calc.digest);
7050     if (rc)
7051     break;
7052     - rc = memcmp(xattr_data->digest, calc.digest,
7053     + rc = crypto_memneq(xattr_data->digest, calc.digest,
7054     sizeof(calc.digest));
7055     if (rc)
7056     rc = -EINVAL;
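
evm_verify_hmac() moves from memcmp() to crypto_memneq() because memcmp()
returns as soon as the first byte differs, which leaks how much of the digest
matched through timing. A constant-time comparison sketch in the spirit of
crypto_memneq(), not the kernel implementation:

#include <stddef.h>
#include <stdio.h>

static int ct_memneq(const void *a, const void *b, size_t n)
{
        const unsigned char *pa = a, *pb = b;
        unsigned char diff = 0;

        for (size_t i = 0; i < n; i++)
                diff |= pa[i] ^ pb[i];  /* no early exit on mismatch */
        return diff != 0;               /* nonzero iff buffers differ */
}

int main(void)
{
        printf("%d\n", ct_memneq("abcd", "abcd", 4));   /* 0: equal */
        printf("%d\n", ct_memneq("abcd", "abxd", 4));   /* 1: differ */
        return 0;
}
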
7057     diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
7058     index 75888dd38a7f..aa999e747c94 100644
7059     --- a/sound/core/pcm_native.c
7060     +++ b/sound/core/pcm_native.c
7061     @@ -74,6 +74,18 @@ static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream);
7062     static DEFINE_RWLOCK(snd_pcm_link_rwlock);
7063     static DECLARE_RWSEM(snd_pcm_link_rwsem);
7064    
7065     +/* A writer on an rwsem may block readers even while it is waiting in
7066     + * the queue, and this can lead to a deadlock when a code path takes
7067     + * the read sem twice (e.g. once in snd_pcm_action_nonatomic() and
7068     + * again in snd_pcm_stream_lock()). As a (suboptimal) workaround, let
7069     + * the writer spin until it gets the lock.
7070     + */
7071     +static inline void down_write_nonblock(struct rw_semaphore *lock)
7072     +{
7073     + while (!down_write_trylock(lock))
7074     + cond_resched();
7075     +}
7076     +
7077     /**
7078     * snd_pcm_stream_lock - Lock the PCM stream
7079     * @substream: PCM substream
7080     @@ -1816,7 +1828,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
7081     res = -ENOMEM;
7082     goto _nolock;
7083     }
7084     - down_write(&snd_pcm_link_rwsem);
7085     + down_write_nonblock(&snd_pcm_link_rwsem);
7086     write_lock_irq(&snd_pcm_link_rwlock);
7087     if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
7088     substream->runtime->status->state != substream1->runtime->status->state ||
7089     @@ -1863,7 +1875,7 @@ static int snd_pcm_unlink(struct snd_pcm_substream *substream)
7090     struct snd_pcm_substream *s;
7091     int res = 0;
7092    
7093     - down_write(&snd_pcm_link_rwsem);
7094     + down_write_nonblock(&snd_pcm_link_rwsem);
7095     write_lock_irq(&snd_pcm_link_rwlock);
7096     if (!snd_pcm_stream_linked(substream)) {
7097     res = -EALREADY;
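
The comment added at the top of pcm_native.c names the deadlock this guards
against: an rwsem writer parked in the wait queue blocks later readers, so a
code path that read-acquires the sem twice can wedge behind a queued writer.
down_write_nonblock() therefore polls with down_write_trylock() instead of
joining the queue. The same workaround sketched with POSIX rwlocks (compile
with -pthread):

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static void down_write_nonblock(pthread_rwlock_t *lock)
{
        /* never park in the writer queue: poll until the lock is free */
        while (pthread_rwlock_trywrlock(lock) != 0)
                sched_yield();
}

int main(void)
{
        pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

        down_write_nonblock(&lock);
        puts("write-locked");
        pthread_rwlock_unlock(&lock);
        return 0;
}
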
7098     diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
7099     index 801076687bb1..c850345c43b5 100644
7100     --- a/sound/core/seq/seq_memory.c
7101     +++ b/sound/core/seq/seq_memory.c
7102     @@ -383,15 +383,20 @@ int snd_seq_pool_init(struct snd_seq_pool *pool)
7103    
7104     if (snd_BUG_ON(!pool))
7105     return -EINVAL;
7106     - if (pool->ptr) /* should be atomic? */
7107     - return 0;
7108    
7109     - pool->ptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
7110     - if (!pool->ptr)
7111     + cellptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
7112     + if (!cellptr)
7113     return -ENOMEM;
7114    
7115     /* add new cells to the free cell list */
7116     spin_lock_irqsave(&pool->lock, flags);
7117     + if (pool->ptr) {
7118     + spin_unlock_irqrestore(&pool->lock, flags);
7119     + vfree(cellptr);
7120     + return 0;
7121     + }
7122     +
7123     + pool->ptr = cellptr;
7124     pool->free = NULL;
7125    
7126     for (cell = 0; cell < pool->size; cell++) {
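
snd_seq_pool_init() used to test pool->ptr outside any lock, so two racing
initializers could each see NULL and allocate; and vmalloc() may sleep, so it
cannot simply move inside the spinlock. The fix allocates first, then
re-checks pool->ptr under pool->lock and discards the duplicate buffer if
another caller won the race. A userspace sketch of allocate-then-recheck,
with a mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static void *pool_ptr;

static int pool_init(size_t size)
{
        void *buf = malloc(size);       /* may "sleep": do it unlocked */

        if (!buf)
                return -1;

        pthread_mutex_lock(&pool_lock);
        if (pool_ptr) {                 /* someone beat us to it */
                pthread_mutex_unlock(&pool_lock);
                free(buf);
                return 0;
        }
        pool_ptr = buf;
        pthread_mutex_unlock(&pool_lock);
        return 0;
}

int main(void)
{
        pool_init(64);
        pool_init(64);                  /* second call frees its buffer */
        printf("pool at %p\n", pool_ptr);
        free(pool_ptr);
        return 0;
}
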
7127     diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
7128     index 921fb2bd8fad..fe686ee41c6d 100644
7129     --- a/sound/core/seq/seq_ports.c
7130     +++ b/sound/core/seq/seq_ports.c
7131     @@ -535,19 +535,22 @@ static void delete_and_unsubscribe_port(struct snd_seq_client *client,
7132     bool is_src, bool ack)
7133     {
7134     struct snd_seq_port_subs_info *grp;
7135     + struct list_head *list;
7136     + bool empty;
7137    
7138     grp = is_src ? &port->c_src : &port->c_dest;
7139     + list = is_src ? &subs->src_list : &subs->dest_list;
7140     down_write(&grp->list_mutex);
7141     write_lock_irq(&grp->list_lock);
7142     - if (is_src)
7143     - list_del(&subs->src_list);
7144     - else
7145     - list_del(&subs->dest_list);
7146     + empty = list_empty(list);
7147     + if (!empty)
7148     + list_del_init(list);
7149     grp->exclusive = 0;
7150     write_unlock_irq(&grp->list_lock);
7151     up_write(&grp->list_mutex);
7152    
7153     - unsubscribe_port(client, port, grp, &subs->info, ack);
7154     + if (!empty)
7155     + unsubscribe_port(client, port, grp, &subs->info, ack);
7156     }
7157    
7158     /* connect two ports */
7159     diff --git a/sound/core/timer.c b/sound/core/timer.c
7160     index 00e8c5f4de17..bf48e71f73cd 100644
7161     --- a/sound/core/timer.c
7162     +++ b/sound/core/timer.c
7163     @@ -422,7 +422,7 @@ static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
7164     spin_lock_irqsave(&timer->lock, flags);
7165     list_for_each_entry(ts, &ti->slave_active_head, active_list)
7166     if (ts->ccallback)
7167     - ts->ccallback(ti, event + 100, &tstamp, resolution);
7168     + ts->ccallback(ts, event + 100, &tstamp, resolution);
7169     spin_unlock_irqrestore(&timer->lock, flags);
7170     }
7171    
7172     @@ -518,9 +518,13 @@ static int _snd_timer_stop(struct snd_timer_instance *timeri, int event)
7173     spin_unlock_irqrestore(&slave_active_lock, flags);
7174     return -EBUSY;
7175     }
7176     + if (timeri->timer)
7177     + spin_lock(&timeri->timer->lock);
7178     timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
7179     list_del_init(&timeri->ack_list);
7180     list_del_init(&timeri->active_list);
7181     + if (timeri->timer)
7182     + spin_unlock(&timeri->timer->lock);
7183     spin_unlock_irqrestore(&slave_active_lock, flags);
7184     goto __end;
7185     }
7186     @@ -1920,6 +1924,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
7187     {
7188     struct snd_timer_user *tu;
7189     long result = 0, unit;
7190     + int qhead;
7191     int err = 0;
7192    
7193     tu = file->private_data;
7194     @@ -1931,7 +1936,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
7195    
7196     if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
7197     err = -EAGAIN;
7198     - break;
7199     + goto _error;
7200     }
7201    
7202     set_current_state(TASK_INTERRUPTIBLE);
7203     @@ -1946,42 +1951,37 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
7204    
7205     if (tu->disconnected) {
7206     err = -ENODEV;
7207     - break;
7208     + goto _error;
7209     }
7210     if (signal_pending(current)) {
7211     err = -ERESTARTSYS;
7212     - break;
7213     + goto _error;
7214     }
7215     }
7216    
7217     + qhead = tu->qhead++;
7218     + tu->qhead %= tu->queue_size;
7219     spin_unlock_irq(&tu->qlock);
7220     - if (err < 0)
7221     - goto _error;
7222    
7223     if (tu->tread) {
7224     - if (copy_to_user(buffer, &tu->tqueue[tu->qhead++],
7225     - sizeof(struct snd_timer_tread))) {
7226     + if (copy_to_user(buffer, &tu->tqueue[qhead],
7227     + sizeof(struct snd_timer_tread)))
7228     err = -EFAULT;
7229     - goto _error;
7230     - }
7231     } else {
7232     - if (copy_to_user(buffer, &tu->queue[tu->qhead++],
7233     - sizeof(struct snd_timer_read))) {
7234     + if (copy_to_user(buffer, &tu->queue[qhead],
7235     + sizeof(struct snd_timer_read)))
7236     err = -EFAULT;
7237     - goto _error;
7238     - }
7239     }
7240    
7241     - tu->qhead %= tu->queue_size;
7242     -
7243     - result += unit;
7244     - buffer += unit;
7245     -
7246     spin_lock_irq(&tu->qlock);
7247     tu->qused--;
7248     + if (err < 0)
7249     + goto _error;
7250     + result += unit;
7251     + buffer += unit;
7252     }
7253     - spin_unlock_irq(&tu->qlock);
7254     _error:
7255     + spin_unlock_irq(&tu->qlock);
7256     return result > 0 ? result : err;
7257     }
7258    
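
Two fixes are folded into the timer.c diff: snd_timer_notify1() now hands the slave instance ts (not the master ti) to the slave's own ccallback, and snd_timer_user_read() claims a queue slot while tu->qlock is held, advancing tu->qhead before the lock is dropped for copy_to_user(), with every exit funneled through _error so the lock is released exactly once. The claim-then-copy shape, sketched in userspace (names illustrative):

    #include <pthread.h>

    struct ring {
            pthread_mutex_t lock;
            int head, used, size;   /* a writer must test used < size */
            long slots[64];
    };

    static long ring_read_one(struct ring *r)
    {
            int head;
            long v;

            pthread_mutex_lock(&r->lock);
            if (!r->used) {
                    pthread_mutex_unlock(&r->lock);
                    return -1;
            }
            head = r->head;                 /* claim the slot under the lock */
            r->head = (r->head + 1) % r->size;
            pthread_mutex_unlock(&r->lock);

            /* used still counts the claimed slot, so a writer that checks
             * used < size cannot overwrite it during this unlocked copy */
            v = r->slots[head];

            pthread_mutex_lock(&r->lock);
            r->used--;                      /* now the slot may be reused */
            pthread_mutex_unlock(&r->lock);
            return v;
    }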
7259     diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
7260     index 96592d5ba7bf..c5d5217a4180 100644
7261     --- a/sound/drivers/dummy.c
7262     +++ b/sound/drivers/dummy.c
7263     @@ -87,7 +87,7 @@ MODULE_PARM_DESC(pcm_substreams, "PCM substreams # (1-128) for dummy driver.");
7264     module_param(fake_buffer, bool, 0444);
7265     MODULE_PARM_DESC(fake_buffer, "Fake buffer allocations.");
7266     #ifdef CONFIG_HIGH_RES_TIMERS
7267     -module_param(hrtimer, bool, 0444);
7268     +module_param(hrtimer, bool, 0644);
7269     MODULE_PARM_DESC(hrtimer, "Use hrtimer as the timer source.");
7270     #endif
7271    
7272     @@ -109,6 +109,9 @@ struct dummy_timer_ops {
7273     snd_pcm_uframes_t (*pointer)(struct snd_pcm_substream *);
7274     };
7275    
7276     +#define get_dummy_ops(substream) \
7277     + (*(const struct dummy_timer_ops **)(substream)->runtime->private_data)
7278     +
7279     struct dummy_model {
7280     const char *name;
7281     int (*playback_constraints)(struct snd_pcm_runtime *runtime);
7282     @@ -137,7 +140,6 @@ struct snd_dummy {
7283     int iobox;
7284     struct snd_kcontrol *cd_volume_ctl;
7285     struct snd_kcontrol *cd_switch_ctl;
7286     - const struct dummy_timer_ops *timer_ops;
7287     };
7288    
7289     /*
7290     @@ -231,6 +233,8 @@ struct dummy_model *dummy_models[] = {
7291     */
7292    
7293     struct dummy_systimer_pcm {
7294     + /* ops must be the first item */
7295     + const struct dummy_timer_ops *timer_ops;
7296     spinlock_t lock;
7297     struct timer_list timer;
7298     unsigned long base_time;
7299     @@ -366,6 +370,8 @@ static struct dummy_timer_ops dummy_systimer_ops = {
7300     */
7301    
7302     struct dummy_hrtimer_pcm {
7303     + /* ops must be the first item */
7304     + const struct dummy_timer_ops *timer_ops;
7305     ktime_t base_time;
7306     ktime_t period_time;
7307     atomic_t running;
7308     @@ -492,31 +498,25 @@ static struct dummy_timer_ops dummy_hrtimer_ops = {
7309    
7310     static int dummy_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
7311     {
7312     - struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
7313     -
7314     switch (cmd) {
7315     case SNDRV_PCM_TRIGGER_START:
7316     case SNDRV_PCM_TRIGGER_RESUME:
7317     - return dummy->timer_ops->start(substream);
7318     + return get_dummy_ops(substream)->start(substream);
7319     case SNDRV_PCM_TRIGGER_STOP:
7320     case SNDRV_PCM_TRIGGER_SUSPEND:
7321     - return dummy->timer_ops->stop(substream);
7322     + return get_dummy_ops(substream)->stop(substream);
7323     }
7324     return -EINVAL;
7325     }
7326    
7327     static int dummy_pcm_prepare(struct snd_pcm_substream *substream)
7328     {
7329     - struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
7330     -
7331     - return dummy->timer_ops->prepare(substream);
7332     + return get_dummy_ops(substream)->prepare(substream);
7333     }
7334    
7335     static snd_pcm_uframes_t dummy_pcm_pointer(struct snd_pcm_substream *substream)
7336     {
7337     - struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
7338     -
7339     - return dummy->timer_ops->pointer(substream);
7340     + return get_dummy_ops(substream)->pointer(substream);
7341     }
7342    
7343     static struct snd_pcm_hardware dummy_pcm_hardware = {
7344     @@ -562,17 +562,19 @@ static int dummy_pcm_open(struct snd_pcm_substream *substream)
7345     struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
7346     struct dummy_model *model = dummy->model;
7347     struct snd_pcm_runtime *runtime = substream->runtime;
7348     + const struct dummy_timer_ops *ops;
7349     int err;
7350    
7351     - dummy->timer_ops = &dummy_systimer_ops;
7352     + ops = &dummy_systimer_ops;
7353     #ifdef CONFIG_HIGH_RES_TIMERS
7354     if (hrtimer)
7355     - dummy->timer_ops = &dummy_hrtimer_ops;
7356     + ops = &dummy_hrtimer_ops;
7357     #endif
7358    
7359     - err = dummy->timer_ops->create(substream);
7360     + err = ops->create(substream);
7361     if (err < 0)
7362     return err;
7363     + get_dummy_ops(substream) = ops;
7364    
7365     runtime->hw = dummy->pcm_hw;
7366     if (substream->pcm->device & 1) {
7367     @@ -594,7 +596,7 @@ static int dummy_pcm_open(struct snd_pcm_substream *substream)
7368     err = model->capture_constraints(substream->runtime);
7369     }
7370     if (err < 0) {
7371     - dummy->timer_ops->free(substream);
7372     + get_dummy_ops(substream)->free(substream);
7373     return err;
7374     }
7375     return 0;
7376     @@ -602,8 +604,7 @@ static int dummy_pcm_open(struct snd_pcm_substream *substream)
7377    
7378     static int dummy_pcm_close(struct snd_pcm_substream *substream)
7379     {
7380     - struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
7381     - dummy->timer_ops->free(substream);
7382     + get_dummy_ops(substream)->free(substream);
7383     return 0;
7384     }
7385    
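
The dummy-driver rework pairs the now-writable hrtimer parameter (mode 0644) with per-stream ops: the timer_ops pointer moves out of the card-wide struct snd_dummy into each stream's private data as the mandatory first member, so toggling the module parameter can no longer switch an already-open stream onto the other backend's stop/free callbacks. get_dummy_ops() relies on C's first-member rule (a pointer to a struct, suitably converted, points to its first member), which is also why it can sit on the left of an assignment in dummy_pcm_open(). A sketch of the pun with hypothetical types:

    struct ops { int (*start)(void *priv); };

    struct stream_sys { const struct ops *ops; int jiffies_rate; }; /* ops first */
    struct stream_hr  { const struct ops *ops; long long t0_ns; };  /* ops first */

    /* valid for either struct above: dereference priv as "the ops field" */
    #define priv_ops(priv)  (*(const struct ops **)(priv))

    /* usage: priv_ops(p) = &some_ops;   ...   priv_ops(p)->start(p); */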
7386     diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
7387     index 5bc7f2e2715c..194627c6c42b 100644
7388     --- a/sound/pci/hda/hda_generic.c
7389     +++ b/sound/pci/hda/hda_generic.c
7390     @@ -3998,9 +3998,9 @@ static void pin_power_callback(struct hda_codec *codec,
7391     struct hda_jack_callback *jack,
7392     bool on)
7393     {
7394     - if (jack && jack->tbl->nid)
7395     + if (jack && jack->nid)
7396     sync_power_state_change(codec,
7397     - set_pin_power_jack(codec, jack->tbl->nid, on));
7398     + set_pin_power_jack(codec, jack->nid, on));
7399     }
7400    
7401     /* callback only doing power up -- called at first */
7402     diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
7403     index 09920ba55ba1..69093ce34231 100644
7404     --- a/sound/pci/hda/hda_intel.c
7405     +++ b/sound/pci/hda/hda_intel.c
7406     @@ -1976,10 +1976,10 @@ static void azx_remove(struct pci_dev *pci)
7407     struct hda_intel *hda;
7408    
7409     if (card) {
7410     - /* flush the pending probing work */
7411     + /* cancel the pending probing work */
7412     chip = card->private_data;
7413     hda = container_of(chip, struct hda_intel, chip);
7414     - flush_work(&hda->probe_work);
7415     + cancel_work_sync(&hda->probe_work);
7416    
7417     snd_card_free(card);
7418     }
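
In azx_remove(), flush_work() would still let a queued-but-not-yet-started probe work item run to completion before returning; cancel_work_sync() instead dequeues it, which is what removal wants when the deferred probe must not touch a card that is about to be freed:

    /* Semantics relied on above (illustrative comment, not new code):
     *   flush_work(&w)        - waits until w has finished; a queued w
     *                           still gets executed first
     *   cancel_work_sync(&w)  - removes a queued w before it ever runs,
     *                           or waits for a running w to finish; in
     *                           either case w will not run afterwards */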
7419     diff --git a/sound/pci/hda/hda_jack.c b/sound/pci/hda/hda_jack.c
7420     index d7cfe7b8c32b..52cc36758dd4 100644
7421     --- a/sound/pci/hda/hda_jack.c
7422     +++ b/sound/pci/hda/hda_jack.c
7423     @@ -259,7 +259,7 @@ snd_hda_jack_detect_enable_callback(struct hda_codec *codec, hda_nid_t nid,
7424     if (!callback)
7425     return ERR_PTR(-ENOMEM);
7426     callback->func = func;
7427     - callback->tbl = jack;
7428     + callback->nid = jack->nid;
7429     callback->next = jack->callback;
7430     jack->callback = callback;
7431     }
7432     diff --git a/sound/pci/hda/hda_jack.h b/sound/pci/hda/hda_jack.h
7433     index b279e327a23b..a13c11c3ddbb 100644
7434     --- a/sound/pci/hda/hda_jack.h
7435     +++ b/sound/pci/hda/hda_jack.h
7436     @@ -21,7 +21,7 @@ struct hda_jack_callback;
7437     typedef void (*hda_jack_callback_fn) (struct hda_codec *, struct hda_jack_callback *);
7438    
7439     struct hda_jack_callback {
7440     - struct hda_jack_tbl *tbl;
7441     + hda_nid_t nid;
7442     hda_jack_callback_fn func;
7443     unsigned int private_data; /* arbitrary data */
7444     struct hda_jack_callback *next;
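
The jack-callback change threading through hda_generic.c above, hda_jack.[ch] here, and the codec patches below replaces the cached struct hda_jack_tbl pointer in struct hda_jack_callback with just the NID. Most hunks are mechanical jack->tbl->nid to jack->nid conversions; patch_ca0132.c below shows the other half of the pattern, resolving the NID back to a table entry with snd_hda_jack_tbl_get() and tolerating a NULL result. Caching the stable ID rather than the pointer is what survives a rebuilt jack table; sketched generically:

    struct entry { int id; unsigned int flags; };

    struct entry *table_lookup(int id);     /* hypothetical resolver */

    struct callback {
            int id;                         /* was: struct entry *entry */
            void (*func)(struct callback *cb);
    };

    static void on_event(struct callback *cb)
    {
            struct entry *e = table_lookup(cb->id); /* resolve at use time */
            if (e)                                  /* entry may have vanished */
                    e->flags |= 1;
    }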
7445     diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
7446     index 4a4e7b282e4f..0374bd5b61c8 100644
7447     --- a/sound/pci/hda/patch_ca0132.c
7448     +++ b/sound/pci/hda/patch_ca0132.c
7449     @@ -4401,13 +4401,16 @@ static void ca0132_process_dsp_response(struct hda_codec *codec,
7450     static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
7451     {
7452     struct ca0132_spec *spec = codec->spec;
7453     + struct hda_jack_tbl *tbl;
7454    
7455     /* Delay enabling the HP amp, to let the mic-detection
7456     * state machine run.
7457     */
7458     cancel_delayed_work_sync(&spec->unsol_hp_work);
7459     schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500));
7460     - cb->tbl->block_report = 1;
7461     + tbl = snd_hda_jack_tbl_get(codec, cb->nid);
7462     + if (tbl)
7463     + tbl->block_report = 1;
7464     }
7465    
7466     static void amic_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
7467     diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
7468     index d02eccd51f6e..51d519554744 100644
7469     --- a/sound/pci/hda/patch_hdmi.c
7470     +++ b/sound/pci/hda/patch_hdmi.c
7471     @@ -433,7 +433,8 @@ static int hdmi_eld_ctl_get(struct snd_kcontrol *kcontrol,
7472     eld = &per_pin->sink_eld;
7473    
7474     mutex_lock(&per_pin->lock);
7475     - if (eld->eld_size > ARRAY_SIZE(ucontrol->value.bytes.data)) {
7476     + if (eld->eld_size > ARRAY_SIZE(ucontrol->value.bytes.data) ||
7477     + eld->eld_size > ELD_MAX_SIZE) {
7478     mutex_unlock(&per_pin->lock);
7479     snd_BUG();
7480     return -EINVAL;
7481     @@ -1178,7 +1179,7 @@ static void check_presence_and_report(struct hda_codec *codec, hda_nid_t nid)
7482     static void jack_callback(struct hda_codec *codec,
7483     struct hda_jack_callback *jack)
7484     {
7485     - check_presence_and_report(codec, jack->tbl->nid);
7486     + check_presence_and_report(codec, jack->nid);
7487     }
7488    
7489     static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
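
Besides the jack->nid conversion, the first patch_hdmi.c hunk hardens hdmi_eld_ctl_get(): eld_size is now also bounded by ELD_MAX_SIZE before the ELD bytes are handed to the control, guarding against a corrupted size field that fits the control buffer but exceeds the ELD data itself:

    /* Bound being enforced above (illustrative):
     *   eld_size <= ARRAY_SIZE(ucontrol->value.bytes.data)   and
     *   eld_size <= ELD_MAX_SIZE
     * i.e. the copy length must fit both the destination control value
     * and the source ELD buffer, not just the destination. */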
7490     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
7491     index 8189f02f8446..df34c78a6ced 100644
7492     --- a/sound/pci/hda/patch_realtek.c
7493     +++ b/sound/pci/hda/patch_realtek.c
7494     @@ -277,7 +277,7 @@ static void alc_update_knob_master(struct hda_codec *codec,
7495     uctl = kzalloc(sizeof(*uctl), GFP_KERNEL);
7496     if (!uctl)
7497     return;
7498     - val = snd_hda_codec_read(codec, jack->tbl->nid, 0,
7499     + val = snd_hda_codec_read(codec, jack->nid, 0,
7500     AC_VERB_GET_VOLUME_KNOB_CONTROL, 0);
7501     val &= HDA_AMP_VOLMASK;
7502     uctl->value.integer.value[0] = val;
7503     @@ -1792,7 +1792,6 @@ enum {
7504     ALC882_FIXUP_NO_PRIMARY_HP,
7505     ALC887_FIXUP_ASUS_BASS,
7506     ALC887_FIXUP_BASS_CHMAP,
7507     - ALC882_FIXUP_DISABLE_AAMIX,
7508     };
7509    
7510     static void alc889_fixup_coef(struct hda_codec *codec,
7511     @@ -1954,8 +1953,6 @@ static void alc882_fixup_no_primary_hp(struct hda_codec *codec,
7512    
7513     static void alc_fixup_bass_chmap(struct hda_codec *codec,
7514     const struct hda_fixup *fix, int action);
7515     -static void alc_fixup_disable_aamix(struct hda_codec *codec,
7516     - const struct hda_fixup *fix, int action);
7517    
7518     static const struct hda_fixup alc882_fixups[] = {
7519     [ALC882_FIXUP_ABIT_AW9D_MAX] = {
7520     @@ -2193,10 +2190,6 @@ static const struct hda_fixup alc882_fixups[] = {
7521     .type = HDA_FIXUP_FUNC,
7522     .v.func = alc_fixup_bass_chmap,
7523     },
7524     - [ALC882_FIXUP_DISABLE_AAMIX] = {
7525     - .type = HDA_FIXUP_FUNC,
7526     - .v.func = alc_fixup_disable_aamix,
7527     - },
7528     };
7529    
7530     static const struct snd_pci_quirk alc882_fixup_tbl[] = {
7531     @@ -2235,6 +2228,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
7532     SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
7533     SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
7534     SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
7535     + SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
7536    
7537     /* All Apple entries are in codec SSIDs */
7538     SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
7539     @@ -2264,7 +2258,6 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
7540     SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
7541     SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
7542     SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
7543     - SND_PCI_QUIRK(0x1458, 0xa182, "Gigabyte Z170X-UD3", ALC882_FIXUP_DISABLE_AAMIX),
7544     SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
7545     SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
7546     SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
7547     diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
7548     index 8e7d4c087a7a..840178a26a6b 100644
7549     --- a/sound/pci/hda/patch_sigmatel.c
7550     +++ b/sound/pci/hda/patch_sigmatel.c
7551     @@ -493,9 +493,9 @@ static void jack_update_power(struct hda_codec *codec,
7552     if (!spec->num_pwrs)
7553     return;
7554    
7555     - if (jack && jack->tbl->nid) {
7556     - stac_toggle_power_map(codec, jack->tbl->nid,
7557     - snd_hda_jack_detect(codec, jack->tbl->nid),
7558     + if (jack && jack->nid) {
7559     + stac_toggle_power_map(codec, jack->nid,
7560     + snd_hda_jack_detect(codec, jack->nid),
7561     true);
7562     return;
7563     }
7564     diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
7565     index 2ee44abd56a6..6cbd03a5e53d 100644
7566     --- a/sound/soc/codecs/rt5645.c
7567     +++ b/sound/soc/codecs/rt5645.c
7568     @@ -487,7 +487,7 @@ static const struct snd_kcontrol_new rt5645_snd_controls[] = {
7569    
7570     /* IN1/IN2 Control */
7571     SOC_SINGLE_TLV("IN1 Boost", RT5645_IN1_CTRL1,
7572     - RT5645_BST_SFT1, 8, 0, bst_tlv),
7573     + RT5645_BST_SFT1, 12, 0, bst_tlv),
7574     SOC_SINGLE_TLV("IN2 Boost", RT5645_IN2_CTRL,
7575     RT5645_BST_SFT2, 8, 0, bst_tlv),
7576    
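
For the rt5645 control, the macro's parameter order makes the one-field change clear: the edit raises only the control's maximum from 8 to 12 boost steps, leaving register, shift, and TLV mapping untouched:

    /* SOC_SINGLE_TLV(xname, reg, shift, max, invert, tlv_array)
     *                              ^^^ the field edited above */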
7577     diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
7578     index 35fe58f4fa86..52fe7eb2dea1 100644
7579     --- a/sound/soc/soc-pcm.c
7580     +++ b/sound/soc/soc-pcm.c
7581     @@ -1661,7 +1661,8 @@ int dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream)
7582     (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) &&
7583     (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) &&
7584     (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED) &&
7585     - (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP))
7586     + (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) &&
7587     + (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND))
7588     continue;
7589    
7590     dev_dbg(be->dev, "ASoC: hw_free BE %s\n",
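
The dpcm_be_dai_hw_free() condition is a whitelist of back-end states in which freeing is allowed; adding SND_SOC_DPCM_STATE_SUSPEND means a suspended BE is no longer skipped by the continue, so its hw_free path actually runs:

    /* Shape of the gate above (illustrative):
     *   if (state != PARAMS && state != PREPARE && state != HW_FREE &&
     *       state != PAUSED && state != STOP   && state != SUSPEND)
     *           continue;       // leave this BE untouched
     */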
7591     diff --git a/sound/usb/midi.c b/sound/usb/midi.c
7592     index bec63e0d2605..f059326a4914 100644
7593     --- a/sound/usb/midi.c
7594     +++ b/sound/usb/midi.c
7595     @@ -2451,7 +2451,6 @@ int snd_usbmidi_create(struct snd_card *card,
7596     else
7597     err = snd_usbmidi_create_endpoints(umidi, endpoints);
7598     if (err < 0) {
7599     - snd_usbmidi_free(umidi);
7600     return err;
7601     }
7602
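The hunk above removes the snd_usbmidi_free(umidi) call from snd_usbmidi_create()'s error path: once the rawmidi interface has been registered, the object is freed again when that interface is torn down, so the explicit free here produced a double free (the now-redundant braces are left in place by the upstream patch). The single-owner rule being restored, sketched with hypothetical userspace names:

    #include <stdlib.h>

    struct midi { int *endpoints; };

    static int midi_setup(struct midi *m)
    {
            m->endpoints = malloc(16 * sizeof(*m->endpoints));
            if (!m->endpoints)
                    return -1;      /* report only: the caller owns m */
            return 0;
    }

    struct midi *midi_create(void)
    {
            struct midi *m = calloc(1, sizeof(*m));
            if (!m)
                    return NULL;
            if (midi_setup(m) < 0) {
                    free(m);        /* the one and only free on failure */
                    return NULL;
            }
            return m;               /* success: ownership passes to the caller */
    }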