Magellan Linux

Annotation of /trunk/kernel-alx-legacy/patches-4.9/0391-4.9.292-all-fixes.patch

Revision 3693
Mon Oct 24 14:08:07 2022 UTC by niro
File size: 114926 byte(s)
-linux-4.9.292
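
Note: the annotated content below is the patch itself. As a minimal usage sketch (not part of the patch; the source-tree path is an assumption), an a/ b/ prefixed diff such as this one is normally applied from the top of a 4.9.291 source tree with a -p1 strip level:

    cd linux-4.9.291                                     # assumed path to the 4.9.291 source tree
    patch -p1 --dry-run < 0391-4.9.292-all-fixes.patch   # check that every hunk applies cleanly
    patch -p1 < 0391-4.9.292-all-fixes.patch             # apply the 4.9.291 -> 4.9.292 fixes
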
1 niro 3693 diff --git a/Makefile b/Makefile
2     index fa41ff3c7cc38..6e941bc98dbd3 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 4
7     PATCHLEVEL = 9
8     -SUBLEVEL = 291
9     +SUBLEVEL = 292
10     EXTRAVERSION =
11     NAME = Roaring Lionus
12    
13     diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
14     index 4616452ce74de..e0f96be549f14 100644
15     --- a/arch/arm/boot/dts/bcm5301x.dtsi
16     +++ b/arch/arm/boot/dts/bcm5301x.dtsi
17     @@ -234,6 +234,8 @@
18    
19     gpio-controller;
20     #gpio-cells = <2>;
21     + interrupt-controller;
22     + #interrupt-cells = <2>;
23     };
24    
25     usb2: usb2@21000 {
26     diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
27     index 1e25cd80589e7..1cee2d5409566 100644
28     --- a/arch/arm/include/asm/tlb.h
29     +++ b/arch/arm/include/asm/tlb.h
30     @@ -278,6 +278,14 @@ tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr
31     tlb_add_flush(tlb, addr);
32     }
33    
34     +static inline void
35     +tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
36     + unsigned long size)
37     +{
38     + tlb_add_flush(tlb, address);
39     + tlb_add_flush(tlb, address + size - PMD_SIZE);
40     +}
41     +
42     #define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
43     #define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
44     #define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp)
45     diff --git a/arch/arm/mach-socfpga/core.h b/arch/arm/mach-socfpga/core.h
46     index 65e1817d8afe6..692a287a8712d 100644
47     --- a/arch/arm/mach-socfpga/core.h
48     +++ b/arch/arm/mach-socfpga/core.h
49     @@ -48,7 +48,7 @@ extern void __iomem *sdr_ctl_base_addr;
50     u32 socfpga_sdram_self_refresh(u32 sdr_base);
51     extern unsigned int socfpga_sdram_self_refresh_sz;
52    
53     -extern char secondary_trampoline, secondary_trampoline_end;
54     +extern char secondary_trampoline[], secondary_trampoline_end[];
55    
56     extern unsigned long socfpga_cpu1start_addr;
57    
58     diff --git a/arch/arm/mach-socfpga/platsmp.c b/arch/arm/mach-socfpga/platsmp.c
59     index 07945748b5714..1dfc9e7389a74 100644
60     --- a/arch/arm/mach-socfpga/platsmp.c
61     +++ b/arch/arm/mach-socfpga/platsmp.c
62     @@ -31,14 +31,14 @@
63    
64     static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
65     {
66     - int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
67     + int trampoline_size = secondary_trampoline_end - secondary_trampoline;
68    
69     if (socfpga_cpu1start_addr) {
70     /* This will put CPU #1 into reset. */
71     writel(RSTMGR_MPUMODRST_CPU1,
72     rst_manager_base_addr + SOCFPGA_RSTMGR_MODMPURST);
73    
74     - memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
75     + memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);
76    
77     writel(virt_to_phys(secondary_startup),
78     sys_manager_base_addr + (socfpga_cpu1start_addr & 0x000000ff));
79     @@ -56,12 +56,12 @@ static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
80    
81     static int socfpga_a10_boot_secondary(unsigned int cpu, struct task_struct *idle)
82     {
83     - int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
84     + int trampoline_size = secondary_trampoline_end - secondary_trampoline;
85    
86     if (socfpga_cpu1start_addr) {
87     writel(RSTMGR_MPUMODRST_CPU1, rst_manager_base_addr +
88     SOCFPGA_A10_RSTMGR_MODMPURST);
89     - memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
90     + memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);
91    
92     writel(virt_to_phys(secondary_startup),
93     sys_manager_base_addr + (socfpga_cpu1start_addr & 0x00000fff));
94     diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
95     index 77e541cf0e5d5..34f4a53595619 100644
96     --- a/arch/ia64/include/asm/tlb.h
97     +++ b/arch/ia64/include/asm/tlb.h
98     @@ -272,6 +272,16 @@ __tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long addre
99     tlb->end_addr = address + PAGE_SIZE;
100     }
101    
102     +static inline void
103     +tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
104     + unsigned long size)
105     +{
106     + if (tlb->start_addr > address)
107     + tlb->start_addr = address;
108     + if (tlb->end_addr < address + size)
109     + tlb->end_addr = address + size;
110     +}
111     +
112     #define tlb_migrate_finish(mm) platform_tlb_migrate_finish(mm)
113    
114     #define tlb_start_vma(tlb, vma) do { } while (0)
115     diff --git a/arch/parisc/install.sh b/arch/parisc/install.sh
116     index 6f68784fea25f..a8c49815f58c8 100644
117     --- a/arch/parisc/install.sh
118     +++ b/arch/parisc/install.sh
119     @@ -39,6 +39,7 @@ verify "$3"
120     if [ -n "${INSTALLKERNEL}" ]; then
121     if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
122     if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
123     + if [ -x /usr/sbin/${INSTALLKERNEL} ]; then exec /usr/sbin/${INSTALLKERNEL} "$@"; fi
124     fi
125    
126     # Default install
127     diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
128     index 15711de104035..d2681d5a3d5a0 100644
129     --- a/arch/s390/include/asm/tlb.h
130     +++ b/arch/s390/include/asm/tlb.h
131     @@ -116,6 +116,20 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb,
132     return tlb_remove_page(tlb, page);
133     }
134    
135     +static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
136     + unsigned long address, unsigned long size)
137     +{
138     + /*
139     + * the range might exceed the original range that was provided to
140     + * tlb_gather_mmu(), so we need to update it despite the fact it is
141     + * usually not updated.
142     + */
143     + if (tlb->start > address)
144     + tlb->start = address;
145     + if (tlb->end < address + size)
146     + tlb->end = address + size;
147     +}
148     +
149     /*
150     * pte_free_tlb frees a pte table and clears the CRSTE for the
151     * page table from the tlb.
152     diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
153     index 9939879f5f253..2f3b7802d8b87 100644
154     --- a/arch/s390/kernel/setup.c
155     +++ b/arch/s390/kernel/setup.c
156     @@ -693,9 +693,6 @@ static void __init setup_memory(void)
157     storage_key_init_range(reg->base, reg->base + reg->size);
158     }
159     psw_set_key(PAGE_DEFAULT_KEY);
160     -
161     - /* Only cosmetics */
162     - memblock_enforce_memory_limit(memblock_end_of_DRAM());
163     }
164    
165     /*
166     diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
167     index 025cdb1032f6f..9f6ab2cd10fc8 100644
168     --- a/arch/sh/include/asm/tlb.h
169     +++ b/arch/sh/include/asm/tlb.h
170     @@ -115,6 +115,16 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
171     return __tlb_remove_page(tlb, page);
172     }
173    
174     +static inline void
175     +tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
176     + unsigned long size)
177     +{
178     + if (tlb->start > address)
179     + tlb->start = address;
180     + if (tlb->end < address + size)
181     + tlb->end = address + size;
182     +}
183     +
184     static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
185     struct page *page)
186     {
187     diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
188     index 821ff0acfe17f..6fb47b17179ff 100644
189     --- a/arch/um/include/asm/tlb.h
190     +++ b/arch/um/include/asm/tlb.h
191     @@ -128,6 +128,18 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb,
192     return tlb_remove_page(tlb, page);
193     }
194    
195     +static inline void
196     +tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
197     + unsigned long size)
198     +{
199     + tlb->need_flush = 1;
200     +
201     + if (tlb->start > address)
202     + tlb->start = address;
203     + if (tlb->end < address + size)
204     + tlb->end = address + size;
205     +}
206     +
207     /**
208     * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
209     *
210     diff --git a/drivers/android/binder.c b/drivers/android/binder.c
211     index f78f7d06ad9fc..bf047f16fce9e 100644
212     --- a/drivers/android/binder.c
213     +++ b/drivers/android/binder.c
214     @@ -1506,7 +1506,7 @@ static void binder_transaction(struct binder_proc *proc,
215     t->from = thread;
216     else
217     t->from = NULL;
218     - t->sender_euid = proc->cred->euid;
219     + t->sender_euid = task_euid(proc->tsk);
220     t->to_proc = target_proc;
221     t->to_thread = target_thread;
222     t->code = tr->code;
223     diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
224     index 100b5a3621ef3..6d2e54209ae69 100644
225     --- a/drivers/ata/sata_fsl.c
226     +++ b/drivers/ata/sata_fsl.c
227     @@ -1406,6 +1406,14 @@ static int sata_fsl_init_controller(struct ata_host *host)
228     return 0;
229     }
230    
231     +static void sata_fsl_host_stop(struct ata_host *host)
232     +{
233     + struct sata_fsl_host_priv *host_priv = host->private_data;
234     +
235     + iounmap(host_priv->hcr_base);
236     + kfree(host_priv);
237     +}
238     +
239     /*
240     * scsi mid-layer and libata interface structures
241     */
242     @@ -1438,6 +1446,8 @@ static struct ata_port_operations sata_fsl_ops = {
243     .port_start = sata_fsl_port_start,
244     .port_stop = sata_fsl_port_stop,
245    
246     + .host_stop = sata_fsl_host_stop,
247     +
248     .pmp_attach = sata_fsl_pmp_attach,
249     .pmp_detach = sata_fsl_pmp_detach,
250     };
251     @@ -1492,9 +1502,9 @@ static int sata_fsl_probe(struct platform_device *ofdev)
252     host_priv->ssr_base = ssr_base;
253     host_priv->csr_base = csr_base;
254    
255     - irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
256     - if (!irq) {
257     - dev_err(&ofdev->dev, "invalid irq from platform\n");
258     + irq = platform_get_irq(ofdev, 0);
259     + if (irq < 0) {
260     + retval = irq;
261     goto error_exit_with_cleanup;
262     }
263     host_priv->irq = irq;
264     @@ -1571,10 +1581,6 @@ static int sata_fsl_remove(struct platform_device *ofdev)
265    
266     ata_host_detach(host);
267    
268     - irq_dispose_mapping(host_priv->irq);
269     - iounmap(host_priv->hcr_base);
270     - kfree(host_priv);
271     -
272     return 0;
273     }
274    
275     diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
276     index 6ee3e928ebf18..df58e7d793f52 100644
277     --- a/drivers/block/xen-blkfront.c
278     +++ b/drivers/block/xen-blkfront.c
279     @@ -78,6 +78,7 @@ enum blkif_state {
280     BLKIF_STATE_DISCONNECTED,
281     BLKIF_STATE_CONNECTED,
282     BLKIF_STATE_SUSPENDED,
283     + BLKIF_STATE_ERROR,
284     };
285    
286     struct grant {
287     @@ -87,6 +88,7 @@ struct grant {
288     };
289    
290     enum blk_req_status {
291     + REQ_PROCESSING,
292     REQ_WAITING,
293     REQ_DONE,
294     REQ_ERROR,
295     @@ -529,10 +531,10 @@ static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
296    
297     id = get_id_from_freelist(rinfo);
298     rinfo->shadow[id].request = req;
299     - rinfo->shadow[id].status = REQ_WAITING;
300     + rinfo->shadow[id].status = REQ_PROCESSING;
301     rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID;
302    
303     - (*ring_req)->u.rw.id = id;
304     + rinfo->shadow[id].req.u.rw.id = id;
305    
306     return id;
307     }
308     @@ -540,11 +542,12 @@ static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
309     static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo)
310     {
311     struct blkfront_info *info = rinfo->dev_info;
312     - struct blkif_request *ring_req;
313     + struct blkif_request *ring_req, *final_ring_req;
314     unsigned long id;
315    
316     /* Fill out a communications ring structure. */
317     - id = blkif_ring_get_request(rinfo, req, &ring_req);
318     + id = blkif_ring_get_request(rinfo, req, &final_ring_req);
319     + ring_req = &rinfo->shadow[id].req;
320    
321     ring_req->operation = BLKIF_OP_DISCARD;
322     ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
323     @@ -555,8 +558,9 @@ static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_inf
324     else
325     ring_req->u.discard.flag = 0;
326    
327     - /* Keep a private copy so we can reissue requests when recovering. */
328     - rinfo->shadow[id].req = *ring_req;
329     + /* Copy the request to the ring page. */
330     + *final_ring_req = *ring_req;
331     + rinfo->shadow[id].status = REQ_WAITING;
332    
333     return 0;
334     }
335     @@ -689,6 +693,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
336     {
337     struct blkfront_info *info = rinfo->dev_info;
338     struct blkif_request *ring_req, *extra_ring_req = NULL;
339     + struct blkif_request *final_ring_req, *final_extra_ring_req = NULL;
340     unsigned long id, extra_id = NO_ASSOCIATED_ID;
341     bool require_extra_req = false;
342     int i;
343     @@ -730,7 +735,8 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
344     }
345    
346     /* Fill out a communications ring structure. */
347     - id = blkif_ring_get_request(rinfo, req, &ring_req);
348     + id = blkif_ring_get_request(rinfo, req, &final_ring_req);
349     + ring_req = &rinfo->shadow[id].req;
350    
351     num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
352     num_grant = 0;
353     @@ -781,7 +787,9 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
354     ring_req->u.rw.nr_segments = num_grant;
355     if (unlikely(require_extra_req)) {
356     extra_id = blkif_ring_get_request(rinfo, req,
357     - &extra_ring_req);
358     + &final_extra_ring_req);
359     + extra_ring_req = &rinfo->shadow[extra_id].req;
360     +
361     /*
362     * Only the first request contains the scatter-gather
363     * list.
364     @@ -823,10 +831,13 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
365     if (setup.segments)
366     kunmap_atomic(setup.segments);
367    
368     - /* Keep a private copy so we can reissue requests when recovering. */
369     - rinfo->shadow[id].req = *ring_req;
370     - if (unlikely(require_extra_req))
371     - rinfo->shadow[extra_id].req = *extra_ring_req;
372     + /* Copy request(s) to the ring page. */
373     + *final_ring_req = *ring_req;
374     + rinfo->shadow[id].status = REQ_WAITING;
375     + if (unlikely(require_extra_req)) {
376     + *final_extra_ring_req = *extra_ring_req;
377     + rinfo->shadow[extra_id].status = REQ_WAITING;
378     + }
379    
380     if (max_grefs > 0)
381     gnttab_free_grant_references(setup.gref_head);
382     @@ -1396,8 +1407,8 @@ static enum blk_req_status blkif_rsp_to_req_status(int rsp)
383     static int blkif_get_final_status(enum blk_req_status s1,
384     enum blk_req_status s2)
385     {
386     - BUG_ON(s1 == REQ_WAITING);
387     - BUG_ON(s2 == REQ_WAITING);
388     + BUG_ON(s1 < REQ_DONE);
389     + BUG_ON(s2 < REQ_DONE);
390    
391     if (s1 == REQ_ERROR || s2 == REQ_ERROR)
392     return BLKIF_RSP_ERROR;
393     @@ -1430,7 +1441,7 @@ static bool blkif_completion(unsigned long *id,
394     s->status = blkif_rsp_to_req_status(bret->status);
395    
396     /* Wait the second response if not yet here. */
397     - if (s2->status == REQ_WAITING)
398     + if (s2->status < REQ_DONE)
399     return 0;
400    
401     bret->status = blkif_get_final_status(s->status,
402     @@ -1538,7 +1549,7 @@ static bool blkif_completion(unsigned long *id,
403     static irqreturn_t blkif_interrupt(int irq, void *dev_id)
404     {
405     struct request *req;
406     - struct blkif_response *bret;
407     + struct blkif_response bret;
408     RING_IDX i, rp;
409     unsigned long flags;
410     struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
411     @@ -1550,50 +1561,72 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
412    
413     spin_lock_irqsave(&rinfo->ring_lock, flags);
414     again:
415     - rp = rinfo->ring.sring->rsp_prod;
416     - rmb(); /* Ensure we see queued responses up to 'rp'. */
417     + rp = READ_ONCE(rinfo->ring.sring->rsp_prod);
418     + virt_rmb(); /* Ensure we see queued responses up to 'rp'. */
419     + if (RING_RESPONSE_PROD_OVERFLOW(&rinfo->ring, rp)) {
420     + pr_alert("%s: illegal number of responses %u\n",
421     + info->gd->disk_name, rp - rinfo->ring.rsp_cons);
422     + goto err;
423     + }
424    
425     for (i = rinfo->ring.rsp_cons; i != rp; i++) {
426     unsigned long id;
427     + unsigned int op;
428     +
429     + RING_COPY_RESPONSE(&rinfo->ring, i, &bret);
430     + id = bret.id;
431    
432     - bret = RING_GET_RESPONSE(&rinfo->ring, i);
433     - id = bret->id;
434     /*
435     * The backend has messed up and given us an id that we would
436     * never have given to it (we stamp it up to BLK_RING_SIZE -
437     * look in get_id_from_freelist.
438     */
439     if (id >= BLK_RING_SIZE(info)) {
440     - WARN(1, "%s: response to %s has incorrect id (%ld)\n",
441     - info->gd->disk_name, op_name(bret->operation), id);
442     - /* We can't safely get the 'struct request' as
443     - * the id is busted. */
444     - continue;
445     + pr_alert("%s: response has incorrect id (%ld)\n",
446     + info->gd->disk_name, id);
447     + goto err;
448     }
449     + if (rinfo->shadow[id].status != REQ_WAITING) {
450     + pr_alert("%s: response references no pending request\n",
451     + info->gd->disk_name);
452     + goto err;
453     + }
454     +
455     + rinfo->shadow[id].status = REQ_PROCESSING;
456     req = rinfo->shadow[id].request;
457    
458     - if (bret->operation != BLKIF_OP_DISCARD) {
459     + op = rinfo->shadow[id].req.operation;
460     + if (op == BLKIF_OP_INDIRECT)
461     + op = rinfo->shadow[id].req.u.indirect.indirect_op;
462     + if (bret.operation != op) {
463     + pr_alert("%s: response has wrong operation (%u instead of %u)\n",
464     + info->gd->disk_name, bret.operation, op);
465     + goto err;
466     + }
467     +
468     + if (bret.operation != BLKIF_OP_DISCARD) {
469     /*
470     * We may need to wait for an extra response if the
471     * I/O request is split in 2
472     */
473     - if (!blkif_completion(&id, rinfo, bret))
474     + if (!blkif_completion(&id, rinfo, &bret))
475     continue;
476     }
477    
478     if (add_id_to_freelist(rinfo, id)) {
479     WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
480     - info->gd->disk_name, op_name(bret->operation), id);
481     + info->gd->disk_name, op_name(bret.operation), id);
482     continue;
483     }
484    
485     - error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
486     - switch (bret->operation) {
487     + error = (bret.status == BLKIF_RSP_OKAY) ? 0 : -EIO;
488     + switch (bret.operation) {
489     case BLKIF_OP_DISCARD:
490     - if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
491     + if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
492     struct request_queue *rq = info->rq;
493     - printk(KERN_WARNING "blkfront: %s: %s op failed\n",
494     - info->gd->disk_name, op_name(bret->operation));
495     +
496     + pr_warn_ratelimited("blkfront: %s: %s op failed\n",
497     + info->gd->disk_name, op_name(bret.operation));
498     error = -EOPNOTSUPP;
499     info->feature_discard = 0;
500     info->feature_secdiscard = 0;
501     @@ -1604,15 +1637,15 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
502     break;
503     case BLKIF_OP_FLUSH_DISKCACHE:
504     case BLKIF_OP_WRITE_BARRIER:
505     - if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
506     - printk(KERN_WARNING "blkfront: %s: %s op failed\n",
507     - info->gd->disk_name, op_name(bret->operation));
508     + if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
509     + pr_warn_ratelimited("blkfront: %s: %s op failed\n",
510     + info->gd->disk_name, op_name(bret.operation));
511     error = -EOPNOTSUPP;
512     }
513     - if (unlikely(bret->status == BLKIF_RSP_ERROR &&
514     + if (unlikely(bret.status == BLKIF_RSP_ERROR &&
515     rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
516     - printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
517     - info->gd->disk_name, op_name(bret->operation));
518     + pr_warn_ratelimited("blkfront: %s: empty %s op failed\n",
519     + info->gd->disk_name, op_name(bret.operation));
520     error = -EOPNOTSUPP;
521     }
522     if (unlikely(error)) {
523     @@ -1625,9 +1658,10 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
524     /* fall through */
525     case BLKIF_OP_READ:
526     case BLKIF_OP_WRITE:
527     - if (unlikely(bret->status != BLKIF_RSP_OKAY))
528     - dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
529     - "request: %x\n", bret->status);
530     + if (unlikely(bret.status != BLKIF_RSP_OKAY))
531     + dev_dbg_ratelimited(&info->xbdev->dev,
532     + "Bad return from blkdev data request: %#x\n",
533     + bret.status);
534    
535     blk_mq_complete_request(req, error);
536     break;
537     @@ -1651,6 +1685,14 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
538     spin_unlock_irqrestore(&rinfo->ring_lock, flags);
539    
540     return IRQ_HANDLED;
541     +
542     + err:
543     + info->connected = BLKIF_STATE_ERROR;
544     +
545     + spin_unlock_irqrestore(&rinfo->ring_lock, flags);
546     +
547     + pr_alert("%s disabled for further use\n", info->gd->disk_name);
548     + return IRQ_HANDLED;
549     }
550    
551    
552     diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
553     index d53e805d392f9..64fc99cf54d5b 100644
554     --- a/drivers/gpu/drm/vc4/vc4_bo.c
555     +++ b/drivers/gpu/drm/vc4/vc4_bo.c
556     @@ -198,7 +198,7 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
557    
558     bo = kzalloc(sizeof(*bo), GFP_KERNEL);
559     if (!bo)
560     - return ERR_PTR(-ENOMEM);
561     + return NULL;
562    
563     mutex_lock(&vc4->bo_lock);
564     vc4->bo_stats.num_allocated++;
565     diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
566     index 005c79b5b3f01..a5a291b848b06 100644
567     --- a/drivers/net/ethernet/dec/tulip/de4x5.c
568     +++ b/drivers/net/ethernet/dec/tulip/de4x5.c
569     @@ -4704,6 +4704,10 @@ type3_infoblock(struct net_device *dev, u_char count, u_char *p)
570     lp->ibn = 3;
571     lp->active = *p++;
572     if (MOTO_SROM_BUG) lp->active = 0;
573     + /* if (MOTO_SROM_BUG) statement indicates lp->active could
574     + * be 8 (i.e. the size of array lp->phy) */
575     + if (WARN_ON(lp->active >= ARRAY_SIZE(lp->phy)))
576     + return -EINVAL;
577     lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1);
578     lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1);
579     lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2;
580     @@ -4995,19 +4999,23 @@ mii_get_phy(struct net_device *dev)
581     }
582     if ((j == limit) && (i < DE4X5_MAX_MII)) {
583     for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
584     - lp->phy[k].addr = i;
585     - lp->phy[k].id = id;
586     - lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */
587     - lp->phy[k].spd.mask = GENERIC_MASK; /* 100Mb/s technologies */
588     - lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */
589     - lp->mii_cnt++;
590     - lp->active++;
591     - printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
592     - j = de4x5_debug;
593     - de4x5_debug |= DEBUG_MII;
594     - de4x5_dbg_mii(dev, k);
595     - de4x5_debug = j;
596     - printk("\n");
597     + if (k < DE4X5_MAX_PHY) {
598     + lp->phy[k].addr = i;
599     + lp->phy[k].id = id;
600     + lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */
601     + lp->phy[k].spd.mask = GENERIC_MASK; /* 100Mb/s technologies */
602     + lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */
603     + lp->mii_cnt++;
604     + lp->active++;
605     + printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
606     + j = de4x5_debug;
607     + de4x5_debug |= DEBUG_MII;
608     + de4x5_dbg_mii(dev, k);
609     + de4x5_debug = j;
610     + printk("\n");
611     + } else {
612     + goto purgatory;
613     + }
614     }
615     }
616     purgatory:
617     diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
618     index 67accce1d33d0..e89a62c6f2301 100644
619     --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
620     +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
621     @@ -312,6 +312,10 @@ static void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
622     return;
623    
624     if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
625     + /* DSAF_MAX_PORT_NUM is 6, but DSAF_GE_NUM is 8.
626     + We need check to prevent array overflow */
627     + if (port >= DSAF_MAX_PORT_NUM)
628     + return;
629     reg_val_1 = 0x1 << port;
630     port_rst_off = dsaf_dev->mac_cb[port]->port_rst_off;
631     /* there is difference between V1 and V2 in register.*/
632     diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
633     index 7007d212f3e4e..9b041848b3895 100644
634     --- a/drivers/net/ethernet/natsemi/xtsonic.c
635     +++ b/drivers/net/ethernet/natsemi/xtsonic.c
636     @@ -128,7 +128,7 @@ static const struct net_device_ops xtsonic_netdev_ops = {
637     .ndo_set_mac_address = eth_mac_addr,
638     };
639    
640     -static int __init sonic_probe1(struct net_device *dev)
641     +static int sonic_probe1(struct net_device *dev)
642     {
643     static unsigned version_printed = 0;
644     unsigned int silicon_revision;
645     diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
646     index dce36e9e1879c..59b77bb891475 100644
647     --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
648     +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
649     @@ -1078,8 +1078,14 @@ static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter)
650     sds_mbx_size = sizeof(struct qlcnic_sds_mbx);
651     context_id = recv_ctx->context_id;
652     num_sds = adapter->drv_sds_rings - QLCNIC_MAX_SDS_RINGS;
653     - ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
654     - QLCNIC_CMD_ADD_RCV_RINGS);
655     + err = ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
656     + QLCNIC_CMD_ADD_RCV_RINGS);
657     + if (err) {
658     + dev_err(&adapter->pdev->dev,
659     + "Failed to alloc mbx args %d\n", err);
660     + return err;
661     + }
662     +
663     cmd.req.arg[1] = 0 | (num_sds << 8) | (context_id << 16);
664    
665     /* set up status rings, mbx 2-81 */
666     diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
667     index 3b6e908d31646..9ac1dbf0a993c 100644
668     --- a/drivers/net/vrf.c
669     +++ b/drivers/net/vrf.c
670     @@ -226,6 +226,7 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
671     /* strip the ethernet header added for pass through VRF device */
672     __skb_pull(skb, skb_network_offset(skb));
673    
674     + memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
675     ret = vrf_ip6_local_out(net, skb->sk, skb);
676     if (unlikely(net_xmit_eval(ret)))
677     dev->stats.tx_errors++;
678     @@ -332,6 +333,7 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
679     RT_SCOPE_LINK);
680     }
681    
682     + memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
683     ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
684     if (unlikely(net_xmit_eval(ret)))
685     vrf_dev->stats.tx_errors++;
686     diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
687     index 0971c09363cbf..0d2df76902384 100644
688     --- a/drivers/net/xen-netfront.c
689     +++ b/drivers/net/xen-netfront.c
690     @@ -120,21 +120,17 @@ struct netfront_queue {
691    
692     /*
693     * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
694     - * are linked from tx_skb_freelist through skb_entry.link.
695     - *
696     - * NB. Freelist index entries are always going to be less than
697     - * PAGE_OFFSET, whereas pointers to skbs will always be equal or
698     - * greater than PAGE_OFFSET: we use this property to distinguish
699     - * them.
700     + * are linked from tx_skb_freelist through tx_link.
701     */
702     - union skb_entry {
703     - struct sk_buff *skb;
704     - unsigned long link;
705     - } tx_skbs[NET_TX_RING_SIZE];
706     + struct sk_buff *tx_skbs[NET_TX_RING_SIZE];
707     + unsigned short tx_link[NET_TX_RING_SIZE];
708     +#define TX_LINK_NONE 0xffff
709     +#define TX_PENDING 0xfffe
710     grant_ref_t gref_tx_head;
711     grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
712     struct page *grant_tx_page[NET_TX_RING_SIZE];
713     unsigned tx_skb_freelist;
714     + unsigned int tx_pend_queue;
715    
716     spinlock_t rx_lock ____cacheline_aligned_in_smp;
717     struct xen_netif_rx_front_ring rx;
718     @@ -160,6 +156,9 @@ struct netfront_info {
719     struct netfront_stats __percpu *rx_stats;
720     struct netfront_stats __percpu *tx_stats;
721    
722     + /* Is device behaving sane? */
723     + bool broken;
724     +
725     atomic_t rx_gso_checksum_fixup;
726     };
727    
728     @@ -168,33 +167,25 @@ struct netfront_rx_info {
729     struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
730     };
731    
732     -static void skb_entry_set_link(union skb_entry *list, unsigned short id)
733     -{
734     - list->link = id;
735     -}
736     -
737     -static int skb_entry_is_link(const union skb_entry *list)
738     -{
739     - BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
740     - return (unsigned long)list->skb < PAGE_OFFSET;
741     -}
742     -
743     /*
744     * Access macros for acquiring freeing slots in tx_skbs[].
745     */
746    
747     -static void add_id_to_freelist(unsigned *head, union skb_entry *list,
748     - unsigned short id)
749     +static void add_id_to_list(unsigned *head, unsigned short *list,
750     + unsigned short id)
751     {
752     - skb_entry_set_link(&list[id], *head);
753     + list[id] = *head;
754     *head = id;
755     }
756    
757     -static unsigned short get_id_from_freelist(unsigned *head,
758     - union skb_entry *list)
759     +static unsigned short get_id_from_list(unsigned *head, unsigned short *list)
760     {
761     unsigned int id = *head;
762     - *head = list[id].link;
763     +
764     + if (id != TX_LINK_NONE) {
765     + *head = list[id];
766     + list[id] = TX_LINK_NONE;
767     + }
768     return id;
769     }
770    
771     @@ -352,7 +343,7 @@ static int xennet_open(struct net_device *dev)
772     unsigned int i = 0;
773     struct netfront_queue *queue = NULL;
774    
775     - if (!np->queues)
776     + if (!np->queues || np->broken)
777     return -ENODEV;
778    
779     for (i = 0; i < num_queues; ++i) {
780     @@ -380,27 +371,47 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
781     unsigned short id;
782     struct sk_buff *skb;
783     bool more_to_do;
784     + const struct device *dev = &queue->info->netdev->dev;
785    
786     BUG_ON(!netif_carrier_ok(queue->info->netdev));
787    
788     do {
789     prod = queue->tx.sring->rsp_prod;
790     + if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) {
791     + dev_alert(dev, "Illegal number of responses %u\n",
792     + prod - queue->tx.rsp_cons);
793     + goto err;
794     + }
795     rmb(); /* Ensure we see responses up to 'rp'. */
796    
797     for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
798     - struct xen_netif_tx_response *txrsp;
799     + struct xen_netif_tx_response txrsp;
800    
801     - txrsp = RING_GET_RESPONSE(&queue->tx, cons);
802     - if (txrsp->status == XEN_NETIF_RSP_NULL)
803     + RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
804     + if (txrsp.status == XEN_NETIF_RSP_NULL)
805     continue;
806    
807     - id = txrsp->id;
808     - skb = queue->tx_skbs[id].skb;
809     + id = txrsp.id;
810     + if (id >= RING_SIZE(&queue->tx)) {
811     + dev_alert(dev,
812     + "Response has incorrect id (%u)\n",
813     + id);
814     + goto err;
815     + }
816     + if (queue->tx_link[id] != TX_PENDING) {
817     + dev_alert(dev,
818     + "Response for inactive request\n");
819     + goto err;
820     + }
821     +
822     + queue->tx_link[id] = TX_LINK_NONE;
823     + skb = queue->tx_skbs[id];
824     + queue->tx_skbs[id] = NULL;
825     if (unlikely(gnttab_query_foreign_access(
826     queue->grant_tx_ref[id]) != 0)) {
827     - pr_alert("%s: warning -- grant still in use by backend domain\n",
828     - __func__);
829     - BUG();
830     + dev_alert(dev,
831     + "Grant still in use by backend domain\n");
832     + goto err;
833     }
834     gnttab_end_foreign_access_ref(
835     queue->grant_tx_ref[id], GNTMAP_readonly);
836     @@ -408,7 +419,7 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
837     &queue->gref_tx_head, queue->grant_tx_ref[id]);
838     queue->grant_tx_ref[id] = GRANT_INVALID_REF;
839     queue->grant_tx_page[id] = NULL;
840     - add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id);
841     + add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
842     dev_kfree_skb_irq(skb);
843     }
844    
845     @@ -418,13 +429,20 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
846     } while (more_to_do);
847    
848     xennet_maybe_wake_tx(queue);
849     +
850     + return;
851     +
852     + err:
853     + queue->info->broken = true;
854     + dev_alert(dev, "Disabled for further use\n");
855     }
856    
857     struct xennet_gnttab_make_txreq {
858     struct netfront_queue *queue;
859     struct sk_buff *skb;
860     struct page *page;
861     - struct xen_netif_tx_request *tx; /* Last request */
862     + struct xen_netif_tx_request *tx; /* Last request on ring page */
863     + struct xen_netif_tx_request tx_local; /* Last request local copy*/
864     unsigned int size;
865     };
866    
867     @@ -440,7 +458,7 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
868     struct netfront_queue *queue = info->queue;
869     struct sk_buff *skb = info->skb;
870    
871     - id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
872     + id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link);
873     tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
874     ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
875     WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
876     @@ -448,34 +466,37 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
877     gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
878     gfn, GNTMAP_readonly);
879    
880     - queue->tx_skbs[id].skb = skb;
881     + queue->tx_skbs[id] = skb;
882     queue->grant_tx_page[id] = page;
883     queue->grant_tx_ref[id] = ref;
884    
885     - tx->id = id;
886     - tx->gref = ref;
887     - tx->offset = offset;
888     - tx->size = len;
889     - tx->flags = 0;
890     + info->tx_local.id = id;
891     + info->tx_local.gref = ref;
892     + info->tx_local.offset = offset;
893     + info->tx_local.size = len;
894     + info->tx_local.flags = 0;
895     +
896     + *tx = info->tx_local;
897     +
898     + /*
899     + * Put the request in the pending queue, it will be set to be pending
900     + * when the producer index is about to be raised.
901     + */
902     + add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id);
903    
904     info->tx = tx;
905     - info->size += tx->size;
906     + info->size += info->tx_local.size;
907     }
908    
909     static struct xen_netif_tx_request *xennet_make_first_txreq(
910     - struct netfront_queue *queue, struct sk_buff *skb,
911     - struct page *page, unsigned int offset, unsigned int len)
912     + struct xennet_gnttab_make_txreq *info,
913     + unsigned int offset, unsigned int len)
914     {
915     - struct xennet_gnttab_make_txreq info = {
916     - .queue = queue,
917     - .skb = skb,
918     - .page = page,
919     - .size = 0,
920     - };
921     + info->size = 0;
922    
923     - gnttab_for_one_grant(page, offset, len, xennet_tx_setup_grant, &info);
924     + gnttab_for_one_grant(info->page, offset, len, xennet_tx_setup_grant, info);
925    
926     - return info.tx;
927     + return info->tx;
928     }
929    
930     static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
931     @@ -488,35 +509,27 @@ static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
932     xennet_tx_setup_grant(gfn, offset, len, data);
933     }
934    
935     -static struct xen_netif_tx_request *xennet_make_txreqs(
936     - struct netfront_queue *queue, struct xen_netif_tx_request *tx,
937     - struct sk_buff *skb, struct page *page,
938     +static void xennet_make_txreqs(
939     + struct xennet_gnttab_make_txreq *info,
940     + struct page *page,
941     unsigned int offset, unsigned int len)
942     {
943     - struct xennet_gnttab_make_txreq info = {
944     - .queue = queue,
945     - .skb = skb,
946     - .tx = tx,
947     - };
948     -
949     /* Skip unused frames from start of page */
950     page += offset >> PAGE_SHIFT;
951     offset &= ~PAGE_MASK;
952    
953     while (len) {
954     - info.page = page;
955     - info.size = 0;
956     + info->page = page;
957     + info->size = 0;
958    
959     gnttab_foreach_grant_in_range(page, offset, len,
960     xennet_make_one_txreq,
961     - &info);
962     + info);
963    
964     page++;
965     offset = 0;
966     - len -= info.size;
967     + len -= info->size;
968     }
969     -
970     - return info.tx;
971     }
972    
973     /*
974     @@ -563,13 +576,22 @@ static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
975     return queue_idx;
976     }
977    
978     +static void xennet_mark_tx_pending(struct netfront_queue *queue)
979     +{
980     + unsigned int i;
981     +
982     + while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) !=
983     + TX_LINK_NONE)
984     + queue->tx_link[i] = TX_PENDING;
985     +}
986     +
987     #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
988    
989     static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
990     {
991     struct netfront_info *np = netdev_priv(dev);
992     struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
993     - struct xen_netif_tx_request *tx, *first_tx;
994     + struct xen_netif_tx_request *first_tx;
995     unsigned int i;
996     int notify;
997     int slots;
998     @@ -578,6 +600,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
999     unsigned int len;
1000     unsigned long flags;
1001     struct netfront_queue *queue = NULL;
1002     + struct xennet_gnttab_make_txreq info = { };
1003     unsigned int num_queues = dev->real_num_tx_queues;
1004     u16 queue_index;
1005     struct sk_buff *nskb;
1006     @@ -585,6 +608,8 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
1007     /* Drop the packet if no queues are set up */
1008     if (num_queues < 1)
1009     goto drop;
1010     + if (unlikely(np->broken))
1011     + goto drop;
1012     /* Determine which queue to transmit this SKB on */
1013     queue_index = skb_get_queue_mapping(skb);
1014     queue = &np->queues[queue_index];
1015     @@ -635,21 +660,24 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
1016     }
1017    
1018     /* First request for the linear area. */
1019     - first_tx = tx = xennet_make_first_txreq(queue, skb,
1020     - page, offset, len);
1021     - offset += tx->size;
1022     + info.queue = queue;
1023     + info.skb = skb;
1024     + info.page = page;
1025     + first_tx = xennet_make_first_txreq(&info, offset, len);
1026     + offset += info.tx_local.size;
1027     if (offset == PAGE_SIZE) {
1028     page++;
1029     offset = 0;
1030     }
1031     - len -= tx->size;
1032     + len -= info.tx_local.size;
1033    
1034     if (skb->ip_summed == CHECKSUM_PARTIAL)
1035     /* local packet? */
1036     - tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
1037     + first_tx->flags |= XEN_NETTXF_csum_blank |
1038     + XEN_NETTXF_data_validated;
1039     else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1040     /* remote but checksummed. */
1041     - tx->flags |= XEN_NETTXF_data_validated;
1042     + first_tx->flags |= XEN_NETTXF_data_validated;
1043    
1044     /* Optional extra info after the first request. */
1045     if (skb_shinfo(skb)->gso_size) {
1046     @@ -658,7 +686,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
1047     gso = (struct xen_netif_extra_info *)
1048     RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
1049    
1050     - tx->flags |= XEN_NETTXF_extra_info;
1051     + first_tx->flags |= XEN_NETTXF_extra_info;
1052    
1053     gso->u.gso.size = skb_shinfo(skb)->gso_size;
1054     gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
1055     @@ -672,19 +700,21 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
1056     }
1057    
1058     /* Requests for the rest of the linear area. */
1059     - tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
1060     + xennet_make_txreqs(&info, page, offset, len);
1061    
1062     /* Requests for all the frags. */
1063     for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1064     skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1065     - tx = xennet_make_txreqs(queue, tx, skb,
1066     - skb_frag_page(frag), frag->page_offset,
1067     + xennet_make_txreqs(&info, skb_frag_page(frag),
1068     + frag->page_offset,
1069     skb_frag_size(frag));
1070     }
1071    
1072     /* First request has the packet length. */
1073     first_tx->size = skb->len;
1074    
1075     + xennet_mark_tx_pending(queue);
1076     +
1077     RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
1078     if (notify)
1079     notify_remote_via_irq(queue->tx_irq);
1080     @@ -742,7 +772,7 @@ static int xennet_get_extras(struct netfront_queue *queue,
1081     RING_IDX rp)
1082    
1083     {
1084     - struct xen_netif_extra_info *extra;
1085     + struct xen_netif_extra_info extra;
1086     struct device *dev = &queue->info->netdev->dev;
1087     RING_IDX cons = queue->rx.rsp_cons;
1088     int err = 0;
1089     @@ -758,24 +788,22 @@ static int xennet_get_extras(struct netfront_queue *queue,
1090     break;
1091     }
1092    
1093     - extra = (struct xen_netif_extra_info *)
1094     - RING_GET_RESPONSE(&queue->rx, ++cons);
1095     + RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);
1096    
1097     - if (unlikely(!extra->type ||
1098     - extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1099     + if (unlikely(!extra.type ||
1100     + extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1101     if (net_ratelimit())
1102     dev_warn(dev, "Invalid extra type: %d\n",
1103     - extra->type);
1104     + extra.type);
1105     err = -EINVAL;
1106     } else {
1107     - memcpy(&extras[extra->type - 1], extra,
1108     - sizeof(*extra));
1109     + extras[extra.type - 1] = extra;
1110     }
1111    
1112     skb = xennet_get_rx_skb(queue, cons);
1113     ref = xennet_get_rx_ref(queue, cons);
1114     xennet_move_rx_slot(queue, skb, ref);
1115     - } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
1116     + } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
1117    
1118     queue->rx.rsp_cons = cons;
1119     return err;
1120     @@ -785,7 +813,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
1121     struct netfront_rx_info *rinfo, RING_IDX rp,
1122     struct sk_buff_head *list)
1123     {
1124     - struct xen_netif_rx_response *rx = &rinfo->rx;
1125     + struct xen_netif_rx_response *rx = &rinfo->rx, rx_local;
1126     struct xen_netif_extra_info *extras = rinfo->extras;
1127     struct device *dev = &queue->info->netdev->dev;
1128     RING_IDX cons = queue->rx.rsp_cons;
1129     @@ -843,7 +871,8 @@ next:
1130     break;
1131     }
1132    
1133     - rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
1134     + RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local);
1135     + rx = &rx_local;
1136     skb = xennet_get_rx_skb(queue, cons + slots);
1137     ref = xennet_get_rx_ref(queue, cons + slots);
1138     slots++;
1139     @@ -898,10 +927,11 @@ static int xennet_fill_frags(struct netfront_queue *queue,
1140     struct sk_buff *nskb;
1141    
1142     while ((nskb = __skb_dequeue(list))) {
1143     - struct xen_netif_rx_response *rx =
1144     - RING_GET_RESPONSE(&queue->rx, ++cons);
1145     + struct xen_netif_rx_response rx;
1146     skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
1147    
1148     + RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);
1149     +
1150     if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
1151     unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
1152    
1153     @@ -916,7 +946,7 @@ static int xennet_fill_frags(struct netfront_queue *queue,
1154    
1155     skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1156     skb_frag_page(nfrag),
1157     - rx->offset, rx->status, PAGE_SIZE);
1158     + rx.offset, rx.status, PAGE_SIZE);
1159    
1160     skb_shinfo(nskb)->nr_frags = 0;
1161     kfree_skb(nskb);
1162     @@ -1009,12 +1039,19 @@ static int xennet_poll(struct napi_struct *napi, int budget)
1163     skb_queue_head_init(&tmpq);
1164    
1165     rp = queue->rx.sring->rsp_prod;
1166     + if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) {
1167     + dev_alert(&dev->dev, "Illegal number of responses %u\n",
1168     + rp - queue->rx.rsp_cons);
1169     + queue->info->broken = true;
1170     + spin_unlock(&queue->rx_lock);
1171     + return 0;
1172     + }
1173     rmb(); /* Ensure we see queued responses up to 'rp'. */
1174    
1175     i = queue->rx.rsp_cons;
1176     work_done = 0;
1177     while ((i != rp) && (work_done < budget)) {
1178     - memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
1179     + RING_COPY_RESPONSE(&queue->rx, i, rx);
1180     memset(extras, 0, sizeof(rinfo.extras));
1181    
1182     err = xennet_get_responses(queue, &rinfo, rp, &tmpq);
1183     @@ -1138,17 +1175,18 @@ static void xennet_release_tx_bufs(struct netfront_queue *queue)
1184    
1185     for (i = 0; i < NET_TX_RING_SIZE; i++) {
1186     /* Skip over entries which are actually freelist references */
1187     - if (skb_entry_is_link(&queue->tx_skbs[i]))
1188     + if (!queue->tx_skbs[i])
1189     continue;
1190    
1191     - skb = queue->tx_skbs[i].skb;
1192     + skb = queue->tx_skbs[i];
1193     + queue->tx_skbs[i] = NULL;
1194     get_page(queue->grant_tx_page[i]);
1195     gnttab_end_foreign_access(queue->grant_tx_ref[i],
1196     GNTMAP_readonly,
1197     (unsigned long)page_address(queue->grant_tx_page[i]));
1198     queue->grant_tx_page[i] = NULL;
1199     queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1200     - add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i);
1201     + add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
1202     dev_kfree_skb_irq(skb);
1203     }
1204     }
1205     @@ -1248,6 +1286,9 @@ static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1206     struct netfront_queue *queue = dev_id;
1207     unsigned long flags;
1208    
1209     + if (queue->info->broken)
1210     + return IRQ_HANDLED;
1211     +
1212     spin_lock_irqsave(&queue->tx_lock, flags);
1213     xennet_tx_buf_gc(queue);
1214     spin_unlock_irqrestore(&queue->tx_lock, flags);
1215     @@ -1260,6 +1301,9 @@ static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1216     struct netfront_queue *queue = dev_id;
1217     struct net_device *dev = queue->info->netdev;
1218    
1219     + if (queue->info->broken)
1220     + return IRQ_HANDLED;
1221     +
1222     if (likely(netif_carrier_ok(dev) &&
1223     RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
1224     napi_schedule(&queue->napi);
1225     @@ -1281,6 +1325,10 @@ static void xennet_poll_controller(struct net_device *dev)
1226     struct netfront_info *info = netdev_priv(dev);
1227     unsigned int num_queues = dev->real_num_tx_queues;
1228     unsigned int i;
1229     +
1230     + if (info->broken)
1231     + return;
1232     +
1233     for (i = 0; i < num_queues; ++i)
1234     xennet_interrupt(0, &info->queues[i]);
1235     }
1236     @@ -1649,13 +1697,15 @@ static int xennet_init_queue(struct netfront_queue *queue)
1237     snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
1238     devid, queue->id);
1239    
1240     - /* Initialise tx_skbs as a free chain containing every entry. */
1241     + /* Initialise tx_skb_freelist as a free chain containing every entry. */
1242     queue->tx_skb_freelist = 0;
1243     + queue->tx_pend_queue = TX_LINK_NONE;
1244     for (i = 0; i < NET_TX_RING_SIZE; i++) {
1245     - skb_entry_set_link(&queue->tx_skbs[i], i+1);
1246     + queue->tx_link[i] = i + 1;
1247     queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1248     queue->grant_tx_page[i] = NULL;
1249     }
1250     + queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;
1251    
1252     /* Clear out rx_skbs */
1253     for (i = 0; i < NET_RX_RING_SIZE; i++) {
1254     @@ -1865,6 +1915,9 @@ static int talk_to_netback(struct xenbus_device *dev,
1255     if (info->queues)
1256     xennet_destroy_queues(info);
1257    
1258     + /* For the case of a reconnect reset the "broken" indicator. */
1259     + info->broken = false;
1260     +
1261     err = xennet_create_queues(info, &num_queues);
1262     if (err < 0) {
1263     xenbus_dev_fatal(dev, err, "creating queues");
1264     diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
1265     index 9c929b5ce58e2..b19a51d12651d 100644
1266     --- a/drivers/platform/x86/thinkpad_acpi.c
1267     +++ b/drivers/platform/x86/thinkpad_acpi.c
1268     @@ -1169,15 +1169,6 @@ static int tpacpi_rfk_update_swstate(const struct tpacpi_rfk *tp_rfk)
1269     return status;
1270     }
1271    
1272     -/* Query FW and update rfkill sw state for all rfkill switches */
1273     -static void tpacpi_rfk_update_swstate_all(void)
1274     -{
1275     - unsigned int i;
1276     -
1277     - for (i = 0; i < TPACPI_RFK_SW_MAX; i++)
1278     - tpacpi_rfk_update_swstate(tpacpi_rfkill_switches[i]);
1279     -}
1280     -
1281     /*
1282     * Sync the HW-blocking state of all rfkill switches,
1283     * do notice it causes the rfkill core to schedule uevents
1284     @@ -3029,9 +3020,6 @@ static void tpacpi_send_radiosw_update(void)
1285     if (wlsw == TPACPI_RFK_RADIO_OFF)
1286     tpacpi_rfk_update_hwblock_state(true);
1287    
1288     - /* Sync sw blocking state */
1289     - tpacpi_rfk_update_swstate_all();
1290     -
1291     /* Sync hw blocking state last if it is hw-unblocked */
1292     if (wlsw == TPACPI_RFK_RADIO_ON)
1293     tpacpi_rfk_update_hwblock_state(false);
1294     diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
1295     index 58876b8a2e9f8..8063b97bf2e9b 100644
1296     --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
1297     +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
1298     @@ -2927,7 +2927,7 @@ _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
1299    
1300     shost_for_each_device(sdev, ioc->shost) {
1301     sas_device_priv_data = sdev->hostdata;
1302     - if (!sas_device_priv_data)
1303     + if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
1304     continue;
1305     if (sas_device_priv_data->sas_target->sas_address
1306     != sas_address)
1307     diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
1308     index aed17f958448d..acd8eb8c94cf7 100644
1309     --- a/drivers/scsi/scsi_transport_iscsi.c
1310     +++ b/drivers/scsi/scsi_transport_iscsi.c
1311     @@ -1898,12 +1898,12 @@ static void session_recovery_timedout(struct work_struct *work)
1312     }
1313     spin_unlock_irqrestore(&session->lock, flags);
1314    
1315     - if (session->transport->session_recovery_timedout)
1316     - session->transport->session_recovery_timedout(session);
1317     -
1318     ISCSI_DBG_TRANS_SESSION(session, "Unblocking SCSI target\n");
1319     scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);
1320     ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking SCSI target\n");
1321     +
1322     + if (session->transport->session_recovery_timedout)
1323     + session->transport->session_recovery_timedout(session);
1324     }
1325    
1326     static void __iscsi_unblock_session(struct work_struct *work)
1327     diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
1328     index 806e9b30b9dc8..aac9b38b8c25c 100644
1329     --- a/drivers/staging/android/ion/ion.c
1330     +++ b/drivers/staging/android/ion/ion.c
1331     @@ -489,6 +489,9 @@ static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
1332     void *vaddr;
1333    
1334     if (buffer->kmap_cnt) {
1335     + if (buffer->kmap_cnt == INT_MAX)
1336     + return ERR_PTR(-EOVERFLOW);
1337     +
1338     buffer->kmap_cnt++;
1339     return buffer->vaddr;
1340     }
1341     @@ -509,6 +512,9 @@ static void *ion_handle_kmap_get(struct ion_handle *handle)
1342     void *vaddr;
1343    
1344     if (handle->kmap_cnt) {
1345     + if (handle->kmap_cnt == INT_MAX)
1346     + return ERR_PTR(-EOVERFLOW);
1347     +
1348     handle->kmap_cnt++;
1349     return buffer->vaddr;
1350     }
1351     diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
1352     index 0d6fe039ac19f..234a46ff4484a 100644
1353     --- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
1354     +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
1355     @@ -2710,13 +2710,14 @@ static void _rtl92e_pci_disconnect(struct pci_dev *pdev)
1356     free_irq(dev->irq, dev);
1357     priv->irq = 0;
1358     }
1359     - free_rtllib(dev);
1360    
1361     if (dev->mem_start != 0) {
1362     iounmap((void __iomem *)dev->mem_start);
1363     release_mem_region(pci_resource_start(pdev, 1),
1364     pci_resource_len(pdev, 1));
1365     }
1366     +
1367     + free_rtllib(dev);
1368     } else {
1369     priv = rtllib_priv(dev);
1370     }
1371     diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
1372     index 4c2dc3a59eb59..5ef30ba3b73a4 100644
1373     --- a/drivers/thermal/thermal_core.c
1374     +++ b/drivers/thermal/thermal_core.c
1375     @@ -601,6 +601,8 @@ static void thermal_zone_device_init(struct thermal_zone_device *tz)
1376     {
1377     struct thermal_instance *pos;
1378     tz->temperature = THERMAL_TEMP_INVALID;
1379     + tz->prev_low_trip = -INT_MAX;
1380     + tz->prev_high_trip = INT_MAX;
1381     list_for_each_entry(pos, &tz->thermal_instances, tz_node)
1382     pos->initialized = false;
1383     }
1384     diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
1385     index 5e87e4866bcb7..858c7b4b197cb 100644
1386     --- a/drivers/tty/hvc/hvc_xen.c
1387     +++ b/drivers/tty/hvc/hvc_xen.c
1388     @@ -99,7 +99,11 @@ static int __write_console(struct xencons_info *xencons,
1389     cons = intf->out_cons;
1390     prod = intf->out_prod;
1391     mb(); /* update queue values before going on */
1392     - BUG_ON((prod - cons) > sizeof(intf->out));
1393     +
1394     + if ((prod - cons) > sizeof(intf->out)) {
1395     + pr_err_once("xencons: Illegal ring page indices");
1396     + return -EINVAL;
1397     + }
1398    
1399     while ((sent < len) && ((prod - cons) < sizeof(intf->out)))
1400     intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = data[sent++];
1401     @@ -127,7 +131,10 @@ static int domU_write_console(uint32_t vtermno, const char *data, int len)
1402     */
1403     while (len) {
1404     int sent = __write_console(cons, data, len);
1405     -
1406     +
1407     + if (sent < 0)
1408     + return sent;
1409     +
1410     data += sent;
1411     len -= sent;
1412    
1413     @@ -151,7 +158,11 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
1414     cons = intf->in_cons;
1415     prod = intf->in_prod;
1416     mb(); /* get pointers before reading ring */
1417     - BUG_ON((prod - cons) > sizeof(intf->in));
1418     +
1419     + if ((prod - cons) > sizeof(intf->in)) {
1420     + pr_err_once("xencons: Illegal ring page indices");
1421     + return -EINVAL;
1422     + }
1423    
1424     while (cons != prod && recv < len)
1425     buf[recv++] = intf->in[MASK_XENCONS_IDX(cons++, intf->in)];
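Both hvc_xen hunks replace a BUG_ON() with a recoverable check: if the producer index published by the backend is more than one ring size ahead of the consumer, the indices are corrupt and the driver returns -EINVAL instead of crashing the guest. A self-contained sketch of that check on free-running, power-of-two ring indices (names and ring size are illustrative):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 16u   /* stand-in for sizeof(intf->out) */

    /* Reject index pairs that claim more pending data than the ring can hold. */
    static int ring_check(uint32_t prod, uint32_t cons)
    {
            /* Unsigned subtraction handles wrap-around of the free-running indices. */
            if (prod - cons > RING_SIZE)
                    return -EINVAL;
            return 0;
    }

    int main(void)
    {
            printf("%d\n", ring_check(10, 2));              /* 0: 8 bytes pending */
            printf("%d\n", ring_check(100, 2));             /* -EINVAL: impossible state */
            printf("%d\n", ring_check(3, UINT32_MAX));      /* 0: 4 bytes pending across wrap */
            return 0;
    }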
1426     diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
1427     index 6e27dee3876a4..e91bdd7d4c054 100644
1428     --- a/drivers/tty/serial/amba-pl011.c
1429     +++ b/drivers/tty/serial/amba-pl011.c
1430     @@ -2702,6 +2702,7 @@ MODULE_DEVICE_TABLE(of, sbsa_uart_of_match);
1431    
1432     static const struct acpi_device_id sbsa_uart_acpi_match[] = {
1433     { "ARMH0011", 0 },
1434     + { "ARMHB000", 0 },
1435     {},
1436     };
1437     MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match);
1438     diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
1439     index 9e6d44df3fab7..c284e61ed4fcc 100644
1440     --- a/drivers/tty/serial/msm_serial.c
1441     +++ b/drivers/tty/serial/msm_serial.c
1442     @@ -611,6 +611,9 @@ static void msm_start_rx_dma(struct msm_port *msm_port)
1443     u32 val;
1444     int ret;
1445    
1446     + if (IS_ENABLED(CONFIG_CONSOLE_POLL))
1447     + return;
1448     +
1449     if (!dma->chan)
1450     return;
1451    
1452     diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
1453     index bbba2174d6e75..e97961dc3622d 100644
1454     --- a/drivers/tty/serial/serial_core.c
1455     +++ b/drivers/tty/serial/serial_core.c
1456     @@ -1522,6 +1522,7 @@ static void uart_tty_port_shutdown(struct tty_port *port)
1457     {
1458     struct uart_state *state = container_of(port, struct uart_state, port);
1459     struct uart_port *uport = uart_port_check(state);
1460     + char *buf;
1461    
1462     /*
1463     * At this point, we stop accepting input. To do this, we
1464     @@ -1543,8 +1544,18 @@ static void uart_tty_port_shutdown(struct tty_port *port)
1465     */
1466     tty_port_set_suspended(port, 0);
1467    
1468     - uart_change_pm(state, UART_PM_STATE_OFF);
1469     + /*
1470     + * Free the transmit buffer.
1471     + */
1472     + spin_lock_irq(&uport->lock);
1473     + buf = state->xmit.buf;
1474     + state->xmit.buf = NULL;
1475     + spin_unlock_irq(&uport->lock);
1476    
1477     + if (buf)
1478     + free_page((unsigned long)buf);
1479     +
1480     + uart_change_pm(state, UART_PM_STATE_OFF);
1481     }
1482    
1483     static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
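The serial_core hunk releases the transmit page during port shutdown with the usual detach-then-free idiom: clear the pointer while holding the port lock so no other context can still see it, then free the memory after the lock has been dropped. A userspace sketch of the same idiom, with a pthread mutex standing in for the uart spinlock (names are illustrative, not the tty layer):

    #include <pthread.h>
    #include <stdlib.h>

    struct xmit_state {
            pthread_mutex_t lock;
            char *buf;              /* transmit buffer, touched by other contexts under lock */
    };

    /*
     * Detach the buffer under the lock, free it afterwards: no other context
     * can observe a half-freed pointer, and the free itself happens outside
     * the critical section.
     */
    static void shutdown_xmit(struct xmit_state *s)
    {
            char *buf;

            pthread_mutex_lock(&s->lock);
            buf = s->buf;
            s->buf = NULL;
            pthread_mutex_unlock(&s->lock);

            free(buf);              /* free(NULL) is a no-op, mirroring the "if (buf)" check */
    }

    int main(void)
    {
            struct xmit_state s = { PTHREAD_MUTEX_INITIALIZER, malloc(4096) };

            shutdown_xmit(&s);
            shutdown_xmit(&s);      /* second call is safe: buf is already NULL */
            return 0;
    }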
1484     diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
1485     index 92057a3f346f4..0abcf8bbb73fe 100644
1486     --- a/drivers/usb/core/hub.c
1487     +++ b/drivers/usb/core/hub.c
1488     @@ -4460,8 +4460,6 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
1489     if (oldspeed == USB_SPEED_LOW)
1490     delay = HUB_LONG_RESET_TIME;
1491    
1492     - mutex_lock(hcd->address0_mutex);
1493     -
1494     /* Reset the device; full speed may morph to high speed */
1495     /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
1496     retval = hub_port_reset(hub, port1, udev, delay, false);
1497     @@ -4748,7 +4746,6 @@ fail:
1498     hub_port_disable(hub, port1, 0);
1499     update_devnum(udev, devnum); /* for disconnect processing */
1500     }
1501     - mutex_unlock(hcd->address0_mutex);
1502     return retval;
1503     }
1504    
1505     @@ -4838,6 +4835,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
1506     struct usb_port *port_dev = hub->ports[port1 - 1];
1507     struct usb_device *udev = port_dev->child;
1508     static int unreliable_port = -1;
1509     + bool retry_locked;
1510    
1511     /* Disconnect any existing devices under this port */
1512     if (udev) {
1513     @@ -4893,7 +4891,11 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
1514     unit_load = 100;
1515    
1516     status = 0;
1517     +
1518     for (i = 0; i < SET_CONFIG_TRIES; i++) {
1519     + usb_lock_port(port_dev);
1520     + mutex_lock(hcd->address0_mutex);
1521     + retry_locked = true;
1522    
1523     /* reallocate for each attempt, since references
1524     * to the previous one can escape in various ways
1525     @@ -4902,6 +4904,8 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
1526     if (!udev) {
1527     dev_err(&port_dev->dev,
1528     "couldn't allocate usb_device\n");
1529     + mutex_unlock(hcd->address0_mutex);
1530     + usb_unlock_port(port_dev);
1531     goto done;
1532     }
1533    
1534     @@ -4923,12 +4927,14 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
1535     }
1536    
1537     /* reset (non-USB 3.0 devices) and get descriptor */
1538     - usb_lock_port(port_dev);
1539     status = hub_port_init(hub, udev, port1, i);
1540     - usb_unlock_port(port_dev);
1541     if (status < 0)
1542     goto loop;
1543    
1544     + mutex_unlock(hcd->address0_mutex);
1545     + usb_unlock_port(port_dev);
1546     + retry_locked = false;
1547     +
1548     if (udev->quirks & USB_QUIRK_DELAY_INIT)
1549     msleep(2000);
1550    
1551     @@ -5021,6 +5027,10 @@ loop:
1552     usb_ep0_reinit(udev);
1553     release_devnum(udev);
1554     hub_free_dev(udev);
1555     + if (retry_locked) {
1556     + mutex_unlock(hcd->address0_mutex);
1557     + usb_unlock_port(port_dev);
1558     + }
1559     usb_put_dev(udev);
1560     if ((status == -ENOTCONN) || (status == -ENOTSUPP))
1561     break;
1562     @@ -5572,6 +5582,8 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
1563     bos = udev->bos;
1564     udev->bos = NULL;
1565    
1566     + mutex_lock(hcd->address0_mutex);
1567     +
1568     for (i = 0; i < SET_CONFIG_TRIES; ++i) {
1569    
1570     /* ep0 maxpacket size may change; let the HCD know about it.
1571     @@ -5581,6 +5593,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
1572     if (ret >= 0 || ret == -ENOTCONN || ret == -ENODEV)
1573     break;
1574     }
1575     + mutex_unlock(hcd->address0_mutex);
1576    
1577     if (ret < 0)
1578     goto re_enumerate;
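The hub.c hunks move the address0_mutex (and the port lock) out of hub_port_init() and into the enumeration retry loop of hub_port_connect(), and around the retry loop in usb_reset_and_verify_device(), so both locks are held across a whole attempt and are reliably dropped on every exit path; the retry_locked flag records whether the error path still owns them. A simplified userspace sketch of that lock-across-retry shape (pthread mutexes and the try_init() stub are stand-ins, not the USB core API):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t address0_mutex = PTHREAD_MUTEX_INITIALIZER;

    static int try_init(int attempt)
    {
            return attempt < 2 ? -1 : 0;    /* pretend the first two attempts fail */
    }

    int main(void)
    {
            bool retry_locked;
            int status = -1;

            for (int i = 0; i < 4; i++) {
                    /* Both locks are (re)taken at the top of every retry... */
                    pthread_mutex_lock(&port_lock);
                    pthread_mutex_lock(&address0_mutex);
                    retry_locked = true;

                    status = try_init(i);
                    if (status < 0)
                            goto loop;      /* the error path below must drop the locks */

                    pthread_mutex_unlock(&address0_mutex);
                    pthread_mutex_unlock(&port_lock);
                    retry_locked = false;

                    /* ...further per-attempt work runs unlocked... */
                    break;
    loop:
                    if (retry_locked) {
                            pthread_mutex_unlock(&address0_mutex);
                            pthread_mutex_unlock(&port_lock);
                    }
            }
            printf("status %d\n", status);
            return 0;
    }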
1579     diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1580     index 9a0f8ee8cbd9f..502931f658a8e 100644
1581     --- a/drivers/usb/serial/option.c
1582     +++ b/drivers/usb/serial/option.c
1583     @@ -1243,6 +1243,8 @@ static const struct usb_device_id option_ids[] = {
1584     .driver_info = NCTRL(2) },
1585     { USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */
1586     .driver_info = NCTRL(0) | ZLP },
1587     + { USB_DEVICE(TELIT_VENDOR_ID, 0x9200), /* Telit LE910S1 flashing device */
1588     + .driver_info = NCTRL(0) | ZLP },
1589     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
1590     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
1591     .driver_info = RSVD(1) },
1592     @@ -2072,6 +2074,9 @@ static const struct usb_device_id option_ids[] = {
1593     { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
1594     { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
1595     { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
1596     + { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
1597     + { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */
1598     + .driver_info = RSVD(4) },
1599     { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
1600     { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
1601     { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
1602     diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
1603     index 2ac966400c428..e282e8174a5d9 100644
1604     --- a/drivers/vhost/vsock.c
1605     +++ b/drivers/vhost/vsock.c
1606     @@ -406,7 +406,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
1607     else
1608     virtio_transport_free_pkt(pkt);
1609    
1610     - vhost_add_used(vq, head, sizeof(pkt->hdr) + len);
1611     + vhost_add_used(vq, head, 0);
1612     added = true;
1613     }
1614    
1615     diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
1616     index cb9b879631293..6c5c7d45d9f67 100644
1617     --- a/drivers/video/console/vgacon.c
1618     +++ b/drivers/video/console/vgacon.c
1619     @@ -420,11 +420,17 @@ static void vgacon_init(struct vc_data *c, int init)
1620     struct uni_pagedir *p;
1621    
1622     /*
1623     - * We cannot be loaded as a module, therefore init is always 1,
1624     - * but vgacon_init can be called more than once, and init will
1625     - * not be 1.
1626     + * We cannot be loaded as a module, therefore init will be 1
1627     + * if we are the default console, however if we are a fallback
1628     + * console, for example if fbcon has failed registration, then
1629     + * init will be 0, so we need to make sure our boot parameters
1630     + * have been copied to the console structure for vgacon_resize
1631     + * ultimately called by vc_resize. Any subsequent calls to
1632     + * vgacon_init init will have init set to 0 too.
1633     */
1634     c->vc_can_do_color = vga_can_do_color;
1635     + c->vc_scan_lines = vga_scan_lines;
1636     + c->vc_font.height = c->vc_cell_height = vga_video_font_height;
1637    
1638     /* set dimensions manually if init != 0 since vc_resize() will fail */
1639     if (init) {
1640     @@ -433,8 +439,6 @@ static void vgacon_init(struct vc_data *c, int init)
1641     } else
1642     vc_resize(c, vga_video_num_columns, vga_video_num_lines);
1643    
1644     - c->vc_scan_lines = vga_scan_lines;
1645     - c->vc_font.height = c->vc_cell_height = vga_video_font_height;
1646     c->vc_complement_mask = 0x7700;
1647     if (vga_512_chars)
1648     c->vc_hi_font_mask = 0x0800;
1649     diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
1650     index ba7590d75985e..d7f886dd7b550 100644
1651     --- a/drivers/xen/xenbus/xenbus_probe.c
1652     +++ b/drivers/xen/xenbus/xenbus_probe.c
1653     @@ -764,7 +764,7 @@ static struct notifier_block xenbus_resume_nb = {
1654    
1655     static int __init xenbus_init(void)
1656     {
1657     - int err = 0;
1658     + int err;
1659     uint64_t v = 0;
1660     xen_store_domain_type = XS_UNKNOWN;
1661    
1662     @@ -804,6 +804,29 @@ static int __init xenbus_init(void)
1663     err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
1664     if (err)
1665     goto out_error;
1666     + /*
1667     + * Uninitialized hvm_params are zero and return no error.
1668     + * Although it is theoretically possible to have
1669     + * HVM_PARAM_STORE_PFN set to zero on purpose, in reality it is
1670     + * not zero when valid. If zero, it means that Xenstore hasn't
1671     + * been properly initialized. Instead of attempting to map a
1672     + * wrong guest physical address return error.
1673     + *
1674     + * Also recognize all bits set as an invalid value.
1675     + */
1676     + if (!v || !~v) {
1677     + err = -ENOENT;
1678     + goto out_error;
1679     + }
1680     + /* Avoid truncation on 32-bit. */
1681     +#if BITS_PER_LONG == 32
1682     + if (v > ULONG_MAX) {
1683     + pr_err("%s: cannot handle HVM_PARAM_STORE_PFN=%llx > ULONG_MAX\n",
1684     + __func__, v);
1685     + err = -EINVAL;
1686     + goto out_error;
1687     + }
1688     +#endif
1689     xen_store_gfn = (unsigned long)v;
1690     xen_store_interface =
1691     xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
1692     @@ -832,8 +855,10 @@ static int __init xenbus_init(void)
1693     */
1694     proc_mkdir("xen", NULL);
1695     #endif
1696     + return 0;
1697    
1698     out_error:
1699     + xen_store_domain_type = XS_UNKNOWN;
1700     return err;
1701     }
1702    
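The xenbus_init() hunk stops trusting HVM_PARAM_STORE_PFN blindly: zero means Xenstore was never initialised, all-bits-set is likewise treated as invalid, and on 32-bit kernels anything above ULONG_MAX would be silently truncated, so all three cases now fail instead of mapping a bogus frame (and xen_store_domain_type is reset on every error path). A standalone sketch of just the validation (hypothetical helper, not the xenbus code):

    #include <errno.h>
    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Validate a PFN reported by the hypervisor before trusting it. */
    static int check_store_pfn(uint64_t v)
    {
            if (!v || !~v)
                    return -ENOENT;         /* 0 = never initialised, ~0 = invalid marker */
    #if ULONG_MAX < UINT64_MAX
            if (v > ULONG_MAX)
                    return -EINVAL;         /* would be truncated on a 32-bit build */
    #endif
            return 0;
    }

    int main(void)
    {
            printf("%d\n", check_store_pfn(0));             /* -ENOENT */
            printf("%d\n", check_store_pfn(UINT64_MAX));    /* -ENOENT */
            printf("%d\n", check_store_pfn(0x1234ULL));     /* 0 */
            return 0;
    }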
1703     diff --git a/fs/file.c b/fs/file.c
1704     index 82d3f925bab39..0e31a66207e86 100644
1705     --- a/fs/file.c
1706     +++ b/fs/file.c
1707     @@ -692,7 +692,7 @@ void do_close_on_exec(struct files_struct *files)
1708     spin_unlock(&files->file_lock);
1709     }
1710    
1711     -static struct file *__fget(unsigned int fd, fmode_t mask)
1712     +static struct file *__fget(unsigned int fd, fmode_t mask, unsigned int refs)
1713     {
1714     struct files_struct *files = current->files;
1715     struct file *file;
1716     @@ -707,23 +707,32 @@ loop:
1717     */
1718     if (file->f_mode & mask)
1719     file = NULL;
1720     - else if (!get_file_rcu(file))
1721     + else if (!get_file_rcu_many(file, refs))
1722     goto loop;
1723     + else if (__fcheck_files(files, fd) != file) {
1724     + fput_many(file, refs);
1725     + goto loop;
1726     + }
1727     }
1728     rcu_read_unlock();
1729    
1730     return file;
1731     }
1732    
1733     +struct file *fget_many(unsigned int fd, unsigned int refs)
1734     +{
1735     + return __fget(fd, FMODE_PATH, refs);
1736     +}
1737     +
1738     struct file *fget(unsigned int fd)
1739     {
1740     - return __fget(fd, FMODE_PATH);
1741     + return __fget(fd, FMODE_PATH, 1);
1742     }
1743     EXPORT_SYMBOL(fget);
1744    
1745     struct file *fget_raw(unsigned int fd)
1746     {
1747     - return __fget(fd, 0);
1748     + return __fget(fd, 0, 1);
1749     }
1750     EXPORT_SYMBOL(fget_raw);
1751    
1752     @@ -754,7 +763,7 @@ static unsigned long __fget_light(unsigned int fd, fmode_t mask)
1753     return 0;
1754     return (unsigned long)file;
1755     } else {
1756     - file = __fget(fd, mask);
1757     + file = __fget(fd, mask, 1);
1758     if (!file)
1759     return 0;
1760     return FDPUT_FPUT | (unsigned long)file;
1761     diff --git a/fs/file_table.c b/fs/file_table.c
1762     index ad17e05ebf95f..747bb386b4466 100644
1763     --- a/fs/file_table.c
1764     +++ b/fs/file_table.c
1765     @@ -261,9 +261,9 @@ void flush_delayed_fput(void)
1766    
1767     static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);
1768    
1769     -void fput(struct file *file)
1770     +void fput_many(struct file *file, unsigned int refs)
1771     {
1772     - if (atomic_long_dec_and_test(&file->f_count)) {
1773     + if (atomic_long_sub_and_test(refs, &file->f_count)) {
1774     struct task_struct *task = current;
1775    
1776     if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
1777     @@ -282,6 +282,11 @@ void fput(struct file *file)
1778     }
1779     }
1780    
1781     +void fput(struct file *file)
1782     +{
1783     + fput_many(file, 1);
1784     +}
1785     +
1786     /*
1787     * synchronous analog of fput(); for kernel threads that might be needed
1788     * in some umount() (and thus can't use flush_delayed_fput() without
1789     diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
1790     index d21e41735805d..585c52dbb2e39 100644
1791     --- a/fs/fuse/dev.c
1792     +++ b/fs/fuse/dev.c
1793     @@ -903,6 +903,12 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
1794     if (!(buf->flags & PIPE_BUF_FLAG_LRU))
1795     lru_cache_add_file(newpage);
1796    
1797     + /*
1798     + * Release while we have extra ref on stolen page. Otherwise
1799     + * anon_pipe_buf_release() might think the page can be reused.
1800     + */
1801     + pipe_buf_release(cs->pipe, buf);
1802     +
1803     err = 0;
1804     spin_lock(&cs->req->waitq.lock);
1805     if (test_bit(FR_ABORTED, &cs->req->flags))
1806     @@ -2040,8 +2046,12 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
1807    
1808     pipe_lock(pipe);
1809     out_free:
1810     - for (idx = 0; idx < nbuf; idx++)
1811     - pipe_buf_release(pipe, &bufs[idx]);
1812     + for (idx = 0; idx < nbuf; idx++) {
1813     + struct pipe_buffer *buf = &bufs[idx];
1814     +
1815     + if (buf->ops)
1816     + pipe_buf_release(pipe, buf);
1817     + }
1818     pipe_unlock(pipe);
1819    
1820     kfree(bufs);
1821     diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
1822     index 7efb9e0e9f25b..3038cefff15cc 100644
1823     --- a/fs/nfs/nfs42proc.c
1824     +++ b/fs/nfs/nfs42proc.c
1825     @@ -181,8 +181,9 @@ static ssize_t _nfs42_proc_copy(struct file *src,
1826     return status;
1827     }
1828    
1829     - truncate_pagecache_range(dst_inode, pos_dst,
1830     - pos_dst + res->write_res.count);
1831     + WARN_ON_ONCE(invalidate_inode_pages2_range(dst_inode->i_mapping,
1832     + pos_dst >> PAGE_SHIFT,
1833     + (pos_dst + res->write_res.count - 1) >> PAGE_SHIFT));
1834    
1835     return res->write_res.count;
1836     }
1837     diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
1838     index 8b2605882a201..335c34f0d1303 100644
1839     --- a/fs/nfs/nfs42xdr.c
1840     +++ b/fs/nfs/nfs42xdr.c
1841     @@ -593,8 +593,7 @@ static int nfs4_xdr_dec_clone(struct rpc_rqst *rqstp,
1842     status = decode_clone(xdr);
1843     if (status)
1844     goto out;
1845     - status = decode_getfattr(xdr, res->dst_fattr, res->server);
1846     -
1847     + decode_getfattr(xdr, res->dst_fattr, res->server);
1848     out:
1849     res->rpc_status = status;
1850     return status;
1851     diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
1852     index 8e8012769f3e9..e8b40835770cf 100644
1853     --- a/fs/proc/vmcore.c
1854     +++ b/fs/proc/vmcore.c
1855     @@ -105,14 +105,19 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
1856     nr_bytes = count;
1857    
1858     /* If pfn is not ram, return zeros for sparse dump files */
1859     - if (pfn_is_ram(pfn) == 0)
1860     - memset(buf, 0, nr_bytes);
1861     - else {
1862     + if (pfn_is_ram(pfn) == 0) {
1863     + tmp = 0;
1864     + if (!userbuf)
1865     + memset(buf, 0, nr_bytes);
1866     + else if (clear_user(buf, nr_bytes))
1867     + tmp = -EFAULT;
1868     + } else {
1869     tmp = copy_oldmem_page(pfn, buf, nr_bytes,
1870     offset, userbuf);
1871     - if (tmp < 0)
1872     - return tmp;
1873     }
1874     + if (tmp < 0)
1875     + return tmp;
1876     +
1877     *ppos += nr_bytes;
1878     count -= nr_bytes;
1879     buf += nr_bytes;
1880     diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
1881     index c6d6671876080..e9851100c0f7e 100644
1882     --- a/include/asm-generic/tlb.h
1883     +++ b/include/asm-generic/tlb.h
1884     @@ -123,6 +123,8 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
1885     unsigned long end);
1886     extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
1887     int page_size);
1888     +void tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
1889     + unsigned long size);
1890    
1891     static inline void __tlb_adjust_range(struct mmu_gather *tlb,
1892     unsigned long address)
1893     diff --git a/include/linux/file.h b/include/linux/file.h
1894     index 7444f5feda125..d5baf7194fb0b 100644
1895     --- a/include/linux/file.h
1896     +++ b/include/linux/file.h
1897     @@ -12,6 +12,7 @@
1898     struct file;
1899    
1900     extern void fput(struct file *);
1901     +extern void fput_many(struct file *, unsigned int);
1902    
1903     struct file_operations;
1904     struct vfsmount;
1905     @@ -40,6 +41,7 @@ static inline void fdput(struct fd fd)
1906     }
1907    
1908     extern struct file *fget(unsigned int fd);
1909     +extern struct file *fget_many(unsigned int fd, unsigned int refs);
1910     extern struct file *fget_raw(unsigned int fd);
1911     extern unsigned long __fdget(unsigned int fd);
1912     extern unsigned long __fdget_raw(unsigned int fd);
1913     diff --git a/include/linux/fs.h b/include/linux/fs.h
1914     index b8d65e0ab9341..9e4a75005280f 100644
1915     --- a/include/linux/fs.h
1916     +++ b/include/linux/fs.h
1917     @@ -939,7 +939,9 @@ static inline struct file *get_file(struct file *f)
1918     atomic_long_inc(&f->f_count);
1919     return f;
1920     }
1921     -#define get_file_rcu(x) atomic_long_inc_not_zero(&(x)->f_count)
1922     +#define get_file_rcu_many(x, cnt) \
1923     + atomic_long_add_unless(&(x)->f_count, (cnt), 0)
1924     +#define get_file_rcu(x) get_file_rcu_many((x), 1)
1925     #define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1)
1926     #define file_count(x) atomic_long_read(&(x)->f_count)
1927    
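fs/file.c, fs/file_table.c, include/linux/file.h and include/linux/fs.h together back-port fget_many()/fput_many(): a caller can take or drop several f_count references with one atomic operation, and get_file_rcu_many() refuses to resurrect a file whose count has already dropped to zero (the extra __fcheck_files() re-check guards against the descriptor being recycled under RCU). A portable C11-atomics sketch of the two counting primitives only (illustrative; the kernel uses atomic_long_add_unless() and atomic_long_sub_and_test()):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Grab 'refs' references at once, but never resurrect a count that hit 0. */
    static bool get_ref_many(atomic_long *count, long refs)
    {
            long old = atomic_load(count);

            do {
                    if (old == 0)
                            return false;   /* object is already on its way to being freed */
            } while (!atomic_compare_exchange_weak(count, &old, old + refs));

            return true;
    }

    /* Drop 'refs' references; return true if that released the last one. */
    static bool put_ref_many(atomic_long *count, long refs)
    {
            return atomic_fetch_sub(count, refs) == refs;
    }

    int main(void)
    {
            atomic_long f_count = 1;

            if (get_ref_many(&f_count, 3))                  /* like get_file_rcu_many() */
                    printf("count now %ld\n", atomic_load(&f_count));       /* 4 */

            if (put_ref_many(&f_count, 4))                  /* like fput_many() */
                    printf("last reference dropped, free the object\n");
            return 0;
    }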
1928     diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
1929     index 848e5796400e7..325e7cbb3d32d 100644
1930     --- a/include/linux/ipc_namespace.h
1931     +++ b/include/linux/ipc_namespace.h
1932     @@ -122,6 +122,16 @@ static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
1933     return ns;
1934     }
1935    
1936     +static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns)
1937     +{
1938     + if (ns) {
1939     + if (atomic_inc_not_zero(&ns->count))
1940     + return ns;
1941     + }
1942     +
1943     + return NULL;
1944     +}
1945     +
1946     extern void put_ipc_ns(struct ipc_namespace *ns);
1947     #else
1948     static inline struct ipc_namespace *copy_ipcs(unsigned long flags,
1949     @@ -138,6 +148,11 @@ static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
1950     return ns;
1951     }
1952    
1953     +static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns)
1954     +{
1955     + return ns;
1956     +}
1957     +
1958     static inline void put_ipc_ns(struct ipc_namespace *ns)
1959     {
1960     }
1961     diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
1962     index 4db62045f01ae..006ef813959b4 100644
1963     --- a/include/linux/kprobes.h
1964     +++ b/include/linux/kprobes.h
1965     @@ -192,6 +192,8 @@ struct kretprobe {
1966     raw_spinlock_t lock;
1967     };
1968    
1969     +#define KRETPROBE_MAX_DATA_SIZE 4096
1970     +
1971     struct kretprobe_instance {
1972     struct hlist_node hlist;
1973     struct kretprobe *rp;
1974     diff --git a/include/linux/sched.h b/include/linux/sched.h
1975     index f094882822a63..a6e658ba41c12 100644
1976     --- a/include/linux/sched.h
1977     +++ b/include/linux/sched.h
1978     @@ -3106,7 +3106,7 @@ static inline int thread_group_empty(struct task_struct *p)
1979     * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
1980     * subscriptions and synchronises with wait4(). Also used in procfs. Also
1981     * pins the final release of task.io_context. Also protects ->cpuset and
1982     - * ->cgroup.subsys[]. And ->vfork_done.
1983     + * ->cgroup.subsys[]. And ->vfork_done. And ->sysvshm.shm_clist.
1984     *
1985     * Nests both inside and outside of read_lock(&tasklist_lock).
1986     * It must not be nested with write_lock_irq(&tasklist_lock),
1987     diff --git a/include/linux/shm.h b/include/linux/shm.h
1988     index 04e8818296251..9c8b942bd67f0 100644
1989     --- a/include/linux/shm.h
1990     +++ b/include/linux/shm.h
1991     @@ -19,9 +19,18 @@ struct shmid_kernel /* private to the kernel */
1992     pid_t shm_lprid;
1993     struct user_struct *mlock_user;
1994    
1995     - /* The task created the shm object. NULL if the task is dead. */
1996     + /*
1997     + * The task created the shm object, for
1998     + * task_lock(shp->shm_creator)
1999     + */
2000     struct task_struct *shm_creator;
2001     - struct list_head shm_clist; /* list by creator */
2002     +
2003     + /*
2004     + * List by creator. task_lock(->shm_creator) required for read/write.
2005     + * If list_empty(), then the creator is dead already.
2006     + */
2007     + struct list_head shm_clist;
2008     + struct ipc_namespace *ns;
2009     };
2010    
2011     /* shm_mode upper byte flags */
2012     diff --git a/include/linux/siphash.h b/include/linux/siphash.h
2013     index bf21591a9e5e6..0cda61855d907 100644
2014     --- a/include/linux/siphash.h
2015     +++ b/include/linux/siphash.h
2016     @@ -27,9 +27,7 @@ static inline bool siphash_key_is_zero(const siphash_key_t *key)
2017     }
2018    
2019     u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key);
2020     -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2021     u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key);
2022     -#endif
2023    
2024     u64 siphash_1u64(const u64 a, const siphash_key_t *key);
2025     u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key);
2026     @@ -82,10 +80,9 @@ static inline u64 ___siphash_aligned(const __le64 *data, size_t len,
2027     static inline u64 siphash(const void *data, size_t len,
2028     const siphash_key_t *key)
2029     {
2030     -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2031     - if (!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
2032     + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
2033     + !IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
2034     return __siphash_unaligned(data, len, key);
2035     -#endif
2036     return ___siphash_aligned(data, len, key);
2037     }
2038    
2039     @@ -96,10 +93,8 @@ typedef struct {
2040    
2041     u32 __hsiphash_aligned(const void *data, size_t len,
2042     const hsiphash_key_t *key);
2043     -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2044     u32 __hsiphash_unaligned(const void *data, size_t len,
2045     const hsiphash_key_t *key);
2046     -#endif
2047    
2048     u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key);
2049     u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key);
2050     @@ -135,10 +130,9 @@ static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len,
2051     static inline u32 hsiphash(const void *data, size_t len,
2052     const hsiphash_key_t *key)
2053     {
2054     -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2055     - if (!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
2056     + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
2057     + !IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
2058     return __hsiphash_unaligned(data, len, key);
2059     -#endif
2060     return ___hsiphash_aligned(data, len, key);
2061     }
2062    
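The siphash.h change (together with the lib/siphash.c hunks further down) swaps the #ifdef around the alignment test for an IS_ENABLED() check: on architectures with efficient unaligned access every caller is routed to __siphash_unaligned()/__hsiphash_unaligned() at compile time and the aligned variants are no longer built at all, while other architectures keep the run-time alignment test. A small standalone sketch of that compile-time-constant dispatch (the macro name and the dummy hash bodies are placeholders, not the real SipHash code):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS; set to 0 to keep both paths. */
    #define HAVE_EFFICIENT_UNALIGNED_ACCESS 1

    /* Dummy bodies just so the sketch links and runs. */
    static uint64_t hash_aligned(const void *p, size_t len)   { (void)p; return len; }
    static uint64_t hash_unaligned(const void *p, size_t len) { (void)p; return len + 1; }

    static uint64_t hash(const void *data, size_t len)
    {
            /*
             * When the macro is 1 the condition is a compile-time constant, so
             * the compiler drops the aligned branch entirely -- the effect the
             * patch gets from IS_ENABLED() without any #ifdef in the callers.
             */
            if (HAVE_EFFICIENT_UNALIGNED_ACCESS ||
                ((uintptr_t)data & (sizeof(uint64_t) - 1)))
                    return hash_unaligned(data, len);
            return hash_aligned(data, len);
    }

    int main(void)
    {
            char buf[16] = "hello";

            printf("%llu\n", (unsigned long long)hash(buf, 5));
            return 0;
    }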
2063     diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
2064     index 2ba054fe14ac1..3bcce1523894e 100644
2065     --- a/include/net/nfc/nci_core.h
2066     +++ b/include/net/nfc/nci_core.h
2067     @@ -42,6 +42,7 @@ enum nci_flag {
2068     NCI_UP,
2069     NCI_DATA_EXCHANGE,
2070     NCI_DATA_EXCHANGE_TO,
2071     + NCI_UNREG,
2072     };
2073    
2074     /* NCI device states */
2075     diff --git a/include/net/nl802154.h b/include/net/nl802154.h
2076     index ddcee128f5d9a..145acb8f25095 100644
2077     --- a/include/net/nl802154.h
2078     +++ b/include/net/nl802154.h
2079     @@ -19,6 +19,8 @@
2080     *
2081     */
2082    
2083     +#include <linux/types.h>
2084     +
2085     #define NL802154_GENL_NAME "nl802154"
2086    
2087     enum nl802154_commands {
2088     @@ -150,10 +152,9 @@ enum nl802154_attrs {
2089     };
2090    
2091     enum nl802154_iftype {
2092     - /* for backwards compatibility TODO */
2093     - NL802154_IFTYPE_UNSPEC = -1,
2094     + NL802154_IFTYPE_UNSPEC = (~(__u32)0),
2095    
2096     - NL802154_IFTYPE_NODE,
2097     + NL802154_IFTYPE_NODE = 0,
2098     NL802154_IFTYPE_MONITOR,
2099     NL802154_IFTYPE_COORD,
2100    
2101     diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
2102     index 21f4fbd55e48e..276b81cf0daf5 100644
2103     --- a/include/xen/interface/io/ring.h
2104     +++ b/include/xen/interface/io/ring.h
2105     @@ -24,82 +24,79 @@ typedef unsigned int RING_IDX;
2106     * A ring contains as many entries as will fit, rounded down to the nearest
2107     * power of two (so we can mask with (size-1) to loop around).
2108     */
2109     -#define __CONST_RING_SIZE(_s, _sz) \
2110     - (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
2111     - sizeof(((struct _s##_sring *)0)->ring[0])))
2112     -
2113     +#define __CONST_RING_SIZE(_s, _sz) \
2114     + (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
2115     + sizeof(((struct _s##_sring *)0)->ring[0])))
2116     /*
2117     * The same for passing in an actual pointer instead of a name tag.
2118     */
2119     -#define __RING_SIZE(_s, _sz) \
2120     - (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
2121     +#define __RING_SIZE(_s, _sz) \
2122     + (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
2123    
2124     /*
2125     * Macros to make the correct C datatypes for a new kind of ring.
2126     *
2127     * To make a new ring datatype, you need to have two message structures,
2128     - * let's say struct request, and struct response already defined.
2129     + * let's say request_t, and response_t already defined.
2130     *
2131     * In a header where you want the ring datatype declared, you then do:
2132     *
2133     - * DEFINE_RING_TYPES(mytag, struct request, struct response);
2134     + * DEFINE_RING_TYPES(mytag, request_t, response_t);
2135     *
2136     * These expand out to give you a set of types, as you can see below.
2137     * The most important of these are:
2138     *
2139     - * struct mytag_sring - The shared ring.
2140     - * struct mytag_front_ring - The 'front' half of the ring.
2141     - * struct mytag_back_ring - The 'back' half of the ring.
2142     + * mytag_sring_t - The shared ring.
2143     + * mytag_front_ring_t - The 'front' half of the ring.
2144     + * mytag_back_ring_t - The 'back' half of the ring.
2145     *
2146     * To initialize a ring in your code you need to know the location and size
2147     * of the shared memory area (PAGE_SIZE, for instance). To initialise
2148     * the front half:
2149     *
2150     - * struct mytag_front_ring front_ring;
2151     - * SHARED_RING_INIT((struct mytag_sring *)shared_page);
2152     - * FRONT_RING_INIT(&front_ring, (struct mytag_sring *)shared_page,
2153     - * PAGE_SIZE);
2154     + * mytag_front_ring_t front_ring;
2155     + * SHARED_RING_INIT((mytag_sring_t *)shared_page);
2156     + * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
2157     *
2158     * Initializing the back follows similarly (note that only the front
2159     * initializes the shared ring):
2160     *
2161     - * struct mytag_back_ring back_ring;
2162     - * BACK_RING_INIT(&back_ring, (struct mytag_sring *)shared_page,
2163     - * PAGE_SIZE);
2164     + * mytag_back_ring_t back_ring;
2165     + * BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
2166     */
2167    
2168     -#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
2169     - \
2170     -/* Shared ring entry */ \
2171     -union __name##_sring_entry { \
2172     - __req_t req; \
2173     - __rsp_t rsp; \
2174     -}; \
2175     - \
2176     -/* Shared ring page */ \
2177     -struct __name##_sring { \
2178     - RING_IDX req_prod, req_event; \
2179     - RING_IDX rsp_prod, rsp_event; \
2180     - uint8_t pad[48]; \
2181     - union __name##_sring_entry ring[1]; /* variable-length */ \
2182     -}; \
2183     - \
2184     -/* "Front" end's private variables */ \
2185     -struct __name##_front_ring { \
2186     - RING_IDX req_prod_pvt; \
2187     - RING_IDX rsp_cons; \
2188     - unsigned int nr_ents; \
2189     - struct __name##_sring *sring; \
2190     -}; \
2191     - \
2192     -/* "Back" end's private variables */ \
2193     -struct __name##_back_ring { \
2194     - RING_IDX rsp_prod_pvt; \
2195     - RING_IDX req_cons; \
2196     - unsigned int nr_ents; \
2197     - struct __name##_sring *sring; \
2198     -};
2199     -
2200     +#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
2201     + \
2202     +/* Shared ring entry */ \
2203     +union __name##_sring_entry { \
2204     + __req_t req; \
2205     + __rsp_t rsp; \
2206     +}; \
2207     + \
2208     +/* Shared ring page */ \
2209     +struct __name##_sring { \
2210     + RING_IDX req_prod, req_event; \
2211     + RING_IDX rsp_prod, rsp_event; \
2212     + uint8_t __pad[48]; \
2213     + union __name##_sring_entry ring[1]; /* variable-length */ \
2214     +}; \
2215     + \
2216     +/* "Front" end's private variables */ \
2217     +struct __name##_front_ring { \
2218     + RING_IDX req_prod_pvt; \
2219     + RING_IDX rsp_cons; \
2220     + unsigned int nr_ents; \
2221     + struct __name##_sring *sring; \
2222     +}; \
2223     + \
2224     +/* "Back" end's private variables */ \
2225     +struct __name##_back_ring { \
2226     + RING_IDX rsp_prod_pvt; \
2227     + RING_IDX req_cons; \
2228     + unsigned int nr_ents; \
2229     + struct __name##_sring *sring; \
2230     +}; \
2231     + \
2232     /*
2233     * Macros for manipulating rings.
2234     *
2235     @@ -116,105 +113,99 @@ struct __name##_back_ring { \
2236     */
2237    
2238     /* Initialising empty rings */
2239     -#define SHARED_RING_INIT(_s) do { \
2240     - (_s)->req_prod = (_s)->rsp_prod = 0; \
2241     - (_s)->req_event = (_s)->rsp_event = 1; \
2242     - memset((_s)->pad, 0, sizeof((_s)->pad)); \
2243     +#define SHARED_RING_INIT(_s) do { \
2244     + (_s)->req_prod = (_s)->rsp_prod = 0; \
2245     + (_s)->req_event = (_s)->rsp_event = 1; \
2246     + (void)memset((_s)->__pad, 0, sizeof((_s)->__pad)); \
2247     } while(0)
2248    
2249     -#define FRONT_RING_INIT(_r, _s, __size) do { \
2250     - (_r)->req_prod_pvt = 0; \
2251     - (_r)->rsp_cons = 0; \
2252     - (_r)->nr_ents = __RING_SIZE(_s, __size); \
2253     - (_r)->sring = (_s); \
2254     +#define FRONT_RING_ATTACH(_r, _s, _i, __size) do { \
2255     + (_r)->req_prod_pvt = (_i); \
2256     + (_r)->rsp_cons = (_i); \
2257     + (_r)->nr_ents = __RING_SIZE(_s, __size); \
2258     + (_r)->sring = (_s); \
2259     } while (0)
2260    
2261     -#define BACK_RING_INIT(_r, _s, __size) do { \
2262     - (_r)->rsp_prod_pvt = 0; \
2263     - (_r)->req_cons = 0; \
2264     - (_r)->nr_ents = __RING_SIZE(_s, __size); \
2265     - (_r)->sring = (_s); \
2266     -} while (0)
2267     +#define FRONT_RING_INIT(_r, _s, __size) FRONT_RING_ATTACH(_r, _s, 0, __size)
2268    
2269     -/* Initialize to existing shared indexes -- for recovery */
2270     -#define FRONT_RING_ATTACH(_r, _s, __size) do { \
2271     - (_r)->sring = (_s); \
2272     - (_r)->req_prod_pvt = (_s)->req_prod; \
2273     - (_r)->rsp_cons = (_s)->rsp_prod; \
2274     - (_r)->nr_ents = __RING_SIZE(_s, __size); \
2275     +#define BACK_RING_ATTACH(_r, _s, _i, __size) do { \
2276     + (_r)->rsp_prod_pvt = (_i); \
2277     + (_r)->req_cons = (_i); \
2278     + (_r)->nr_ents = __RING_SIZE(_s, __size); \
2279     + (_r)->sring = (_s); \
2280     } while (0)
2281    
2282     -#define BACK_RING_ATTACH(_r, _s, __size) do { \
2283     - (_r)->sring = (_s); \
2284     - (_r)->rsp_prod_pvt = (_s)->rsp_prod; \
2285     - (_r)->req_cons = (_s)->req_prod; \
2286     - (_r)->nr_ents = __RING_SIZE(_s, __size); \
2287     -} while (0)
2288     +#define BACK_RING_INIT(_r, _s, __size) BACK_RING_ATTACH(_r, _s, 0, __size)
2289    
2290     /* How big is this ring? */
2291     -#define RING_SIZE(_r) \
2292     +#define RING_SIZE(_r) \
2293     ((_r)->nr_ents)
2294    
2295     /* Number of free requests (for use on front side only). */
2296     -#define RING_FREE_REQUESTS(_r) \
2297     +#define RING_FREE_REQUESTS(_r) \
2298     (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))
2299    
2300     /* Test if there is an empty slot available on the front ring.
2301     * (This is only meaningful from the front. )
2302     */
2303     -#define RING_FULL(_r) \
2304     +#define RING_FULL(_r) \
2305     (RING_FREE_REQUESTS(_r) == 0)
2306    
2307     /* Test if there are outstanding messages to be processed on a ring. */
2308     -#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
2309     +#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
2310     ((_r)->sring->rsp_prod - (_r)->rsp_cons)
2311    
2312     -#define RING_HAS_UNCONSUMED_REQUESTS(_r) \
2313     - ({ \
2314     - unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
2315     - unsigned int rsp = RING_SIZE(_r) - \
2316     - ((_r)->req_cons - (_r)->rsp_prod_pvt); \
2317     - req < rsp ? req : rsp; \
2318     - })
2319     +#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \
2320     + unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
2321     + unsigned int rsp = RING_SIZE(_r) - \
2322     + ((_r)->req_cons - (_r)->rsp_prod_pvt); \
2323     + req < rsp ? req : rsp; \
2324     +})
2325    
2326     /* Direct access to individual ring elements, by index. */
2327     -#define RING_GET_REQUEST(_r, _idx) \
2328     +#define RING_GET_REQUEST(_r, _idx) \
2329     (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
2330    
2331     +#define RING_GET_RESPONSE(_r, _idx) \
2332     + (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
2333     +
2334     /*
2335     - * Get a local copy of a request.
2336     + * Get a local copy of a request/response.
2337     *
2338     - * Use this in preference to RING_GET_REQUEST() so all processing is
2339     + * Use this in preference to RING_GET_{REQUEST,RESPONSE}() so all processing is
2340     * done on a local copy that cannot be modified by the other end.
2341     *
2342     * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
2343     - * to be ineffective where _req is a struct which consists of only bitfields.
2344     + * to be ineffective where dest is a struct which consists of only bitfields.
2345     */
2346     -#define RING_COPY_REQUEST(_r, _idx, _req) do { \
2347     - /* Use volatile to force the copy into _req. */ \
2348     - *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \
2349     +#define RING_COPY_(type, r, idx, dest) do { \
2350     + /* Use volatile to force the copy into dest. */ \
2351     + *(dest) = *(volatile typeof(dest))RING_GET_##type(r, idx); \
2352     } while (0)
2353    
2354     -#define RING_GET_RESPONSE(_r, _idx) \
2355     - (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
2356     +#define RING_COPY_REQUEST(r, idx, req) RING_COPY_(REQUEST, r, idx, req)
2357     +#define RING_COPY_RESPONSE(r, idx, rsp) RING_COPY_(RESPONSE, r, idx, rsp)
2358    
2359     /* Loop termination condition: Would the specified index overflow the ring? */
2360     -#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
2361     +#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
2362     (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
2363    
2364     /* Ill-behaved frontend determination: Can there be this many requests? */
2365     -#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
2366     +#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
2367     (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))
2368    
2369     +/* Ill-behaved backend determination: Can there be this many responses? */
2370     +#define RING_RESPONSE_PROD_OVERFLOW(_r, _prod) \
2371     + (((_prod) - (_r)->rsp_cons) > RING_SIZE(_r))
2372    
2373     -#define RING_PUSH_REQUESTS(_r) do { \
2374     - virt_wmb(); /* back sees requests /before/ updated producer index */ \
2375     - (_r)->sring->req_prod = (_r)->req_prod_pvt; \
2376     +#define RING_PUSH_REQUESTS(_r) do { \
2377     + virt_wmb(); /* back sees requests /before/ updated producer index */\
2378     + (_r)->sring->req_prod = (_r)->req_prod_pvt; \
2379     } while (0)
2380    
2381     -#define RING_PUSH_RESPONSES(_r) do { \
2382     - virt_wmb(); /* front sees responses /before/ updated producer index */ \
2383     - (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
2384     +#define RING_PUSH_RESPONSES(_r) do { \
2385     + virt_wmb(); /* front sees resps /before/ updated producer index */ \
2386     + (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
2387     } while (0)
2388    
2389     /*
2390     @@ -247,40 +238,40 @@ struct __name##_back_ring { \
2391     * field appropriately.
2392     */
2393    
2394     -#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
2395     - RING_IDX __old = (_r)->sring->req_prod; \
2396     - RING_IDX __new = (_r)->req_prod_pvt; \
2397     - virt_wmb(); /* back sees requests /before/ updated producer index */ \
2398     - (_r)->sring->req_prod = __new; \
2399     - virt_mb(); /* back sees new requests /before/ we check req_event */ \
2400     - (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
2401     - (RING_IDX)(__new - __old)); \
2402     +#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
2403     + RING_IDX __old = (_r)->sring->req_prod; \
2404     + RING_IDX __new = (_r)->req_prod_pvt; \
2405     + virt_wmb(); /* back sees requests /before/ updated producer index */\
2406     + (_r)->sring->req_prod = __new; \
2407     + virt_mb(); /* back sees new requests /before/ we check req_event */ \
2408     + (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
2409     + (RING_IDX)(__new - __old)); \
2410     } while (0)
2411    
2412     -#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
2413     - RING_IDX __old = (_r)->sring->rsp_prod; \
2414     - RING_IDX __new = (_r)->rsp_prod_pvt; \
2415     - virt_wmb(); /* front sees responses /before/ updated producer index */ \
2416     - (_r)->sring->rsp_prod = __new; \
2417     - virt_mb(); /* front sees new responses /before/ we check rsp_event */ \
2418     - (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
2419     - (RING_IDX)(__new - __old)); \
2420     +#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
2421     + RING_IDX __old = (_r)->sring->rsp_prod; \
2422     + RING_IDX __new = (_r)->rsp_prod_pvt; \
2423     + virt_wmb(); /* front sees resps /before/ updated producer index */ \
2424     + (_r)->sring->rsp_prod = __new; \
2425     + virt_mb(); /* front sees new resps /before/ we check rsp_event */ \
2426     + (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
2427     + (RING_IDX)(__new - __old)); \
2428     } while (0)
2429    
2430     -#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
2431     - (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
2432     - if (_work_to_do) break; \
2433     - (_r)->sring->req_event = (_r)->req_cons + 1; \
2434     - virt_mb(); \
2435     - (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
2436     +#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
2437     + (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
2438     + if (_work_to_do) break; \
2439     + (_r)->sring->req_event = (_r)->req_cons + 1; \
2440     + virt_mb(); \
2441     + (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
2442     } while (0)
2443    
2444     -#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
2445     - (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
2446     - if (_work_to_do) break; \
2447     - (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
2448     - virt_mb(); \
2449     - (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
2450     +#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
2451     + (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
2452     + if (_work_to_do) break; \
2453     + (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
2454     + virt_mb(); \
2455     + (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
2456     } while (0)
2457    
2458     #endif /* __XEN_PUBLIC_IO_RING_H__ */
2459     diff --git a/ipc/shm.c b/ipc/shm.c
2460     index 9c687cda9b0ab..74e0f2af99982 100644
2461     --- a/ipc/shm.c
2462     +++ b/ipc/shm.c
2463     @@ -90,6 +90,7 @@ static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
2464     {
2465     struct shmid_kernel *shp;
2466     shp = container_of(ipcp, struct shmid_kernel, shm_perm);
2467     + WARN_ON(ns != shp->ns);
2468    
2469     if (shp->shm_nattch) {
2470     shp->shm_perm.mode |= SHM_DEST;
2471     @@ -180,10 +181,43 @@ static void shm_rcu_free(struct rcu_head *head)
2472     ipc_rcu_free(head);
2473     }
2474    
2475     -static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
2476     +/*
2477     + * It has to be called with shp locked.
2478     + * It must be called before ipc_rmid()
2479     + */
2480     +static inline void shm_clist_rm(struct shmid_kernel *shp)
2481     {
2482     - list_del(&s->shm_clist);
2483     - ipc_rmid(&shm_ids(ns), &s->shm_perm);
2484     + struct task_struct *creator;
2485     +
2486     + /* ensure that shm_creator does not disappear */
2487     + rcu_read_lock();
2488     +
2489     + /*
2490     + * A concurrent exit_shm may do a list_del_init() as well.
2491     + * Just do nothing if exit_shm already did the work
2492     + */
2493     + if (!list_empty(&shp->shm_clist)) {
2494     + /*
2495     + * shp->shm_creator is guaranteed to be valid *only*
2496     + * if shp->shm_clist is not empty.
2497     + */
2498     + creator = shp->shm_creator;
2499     +
2500     + task_lock(creator);
2501     + /*
2502     + * list_del_init() is a nop if the entry was already removed
2503     + * from the list.
2504     + */
2505     + list_del_init(&shp->shm_clist);
2506     + task_unlock(creator);
2507     + }
2508     + rcu_read_unlock();
2509     +}
2510     +
2511     +static inline void shm_rmid(struct shmid_kernel *s)
2512     +{
2513     + shm_clist_rm(s);
2514     + ipc_rmid(&shm_ids(s->ns), &s->shm_perm);
2515     }
2516    
2517    
2518     @@ -238,7 +272,7 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
2519     shm_file = shp->shm_file;
2520     shp->shm_file = NULL;
2521     ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
2522     - shm_rmid(ns, shp);
2523     + shm_rmid(shp);
2524     shm_unlock(shp);
2525     if (!is_file_hugepages(shm_file))
2526     shmem_lock(shm_file, 0, shp->mlock_user);
2527     @@ -259,10 +293,10 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
2528     *
2529     * 2) sysctl kernel.shm_rmid_forced is set to 1.
2530     */
2531     -static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
2532     +static bool shm_may_destroy(struct shmid_kernel *shp)
2533     {
2534     return (shp->shm_nattch == 0) &&
2535     - (ns->shm_rmid_forced ||
2536     + (shp->ns->shm_rmid_forced ||
2537     (shp->shm_perm.mode & SHM_DEST));
2538     }
2539    
2540     @@ -293,7 +327,7 @@ static void shm_close(struct vm_area_struct *vma)
2541     shp->shm_lprid = task_tgid_vnr(current);
2542     shp->shm_dtim = get_seconds();
2543     shp->shm_nattch--;
2544     - if (shm_may_destroy(ns, shp))
2545     + if (shm_may_destroy(shp))
2546     shm_destroy(ns, shp);
2547     else
2548     shm_unlock(shp);
2549     @@ -314,10 +348,10 @@ static int shm_try_destroy_orphaned(int id, void *p, void *data)
2550     *
2551     * As shp->* are changed under rwsem, it's safe to skip shp locking.
2552     */
2553     - if (shp->shm_creator != NULL)
2554     + if (!list_empty(&shp->shm_clist))
2555     return 0;
2556    
2557     - if (shm_may_destroy(ns, shp)) {
2558     + if (shm_may_destroy(shp)) {
2559     shm_lock_by_ptr(shp);
2560     shm_destroy(ns, shp);
2561     }
2562     @@ -335,48 +369,97 @@ void shm_destroy_orphaned(struct ipc_namespace *ns)
2563     /* Locking assumes this will only be called with task == current */
2564     void exit_shm(struct task_struct *task)
2565     {
2566     - struct ipc_namespace *ns = task->nsproxy->ipc_ns;
2567     - struct shmid_kernel *shp, *n;
2568     + for (;;) {
2569     + struct shmid_kernel *shp;
2570     + struct ipc_namespace *ns;
2571    
2572     - if (list_empty(&task->sysvshm.shm_clist))
2573     - return;
2574     + task_lock(task);
2575     +
2576     + if (list_empty(&task->sysvshm.shm_clist)) {
2577     + task_unlock(task);
2578     + break;
2579     + }
2580     +
2581     + shp = list_first_entry(&task->sysvshm.shm_clist, struct shmid_kernel,
2582     + shm_clist);
2583    
2584     - /*
2585     - * If kernel.shm_rmid_forced is not set then only keep track of
2586     - * which shmids are orphaned, so that a later set of the sysctl
2587     - * can clean them up.
2588     - */
2589     - if (!ns->shm_rmid_forced) {
2590     - down_read(&shm_ids(ns).rwsem);
2591     - list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist)
2592     - shp->shm_creator = NULL;
2593     /*
2594     - * Only under read lock but we are only called on current
2595     - * so no entry on the list will be shared.
2596     + * 1) Get pointer to the ipc namespace. It is worth to say
2597     + * that this pointer is guaranteed to be valid because
2598     + * shp lifetime is always shorter than namespace lifetime
2599     + * in which shp lives.
2600     + * We taken task_lock it means that shp won't be freed.
2601     */
2602     - list_del(&task->sysvshm.shm_clist);
2603     - up_read(&shm_ids(ns).rwsem);
2604     - return;
2605     - }
2606     + ns = shp->ns;
2607    
2608     - /*
2609     - * Destroy all already created segments, that were not yet mapped,
2610     - * and mark any mapped as orphan to cover the sysctl toggling.
2611     - * Destroy is skipped if shm_may_destroy() returns false.
2612     - */
2613     - down_write(&shm_ids(ns).rwsem);
2614     - list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) {
2615     - shp->shm_creator = NULL;
2616     + /*
2617     + * 2) If kernel.shm_rmid_forced is not set then only keep track of
2618     + * which shmids are orphaned, so that a later set of the sysctl
2619     + * can clean them up.
2620     + */
2621     + if (!ns->shm_rmid_forced)
2622     + goto unlink_continue;
2623    
2624     - if (shm_may_destroy(ns, shp)) {
2625     - shm_lock_by_ptr(shp);
2626     - shm_destroy(ns, shp);
2627     + /*
2628     + * 3) get a reference to the namespace.
2629     + * The refcount could be already 0. If it is 0, then
2630     + * the shm objects will be free by free_ipc_work().
2631     + */
2632     + ns = get_ipc_ns_not_zero(ns);
2633     + if (!ns) {
2634     +unlink_continue:
2635     + list_del_init(&shp->shm_clist);
2636     + task_unlock(task);
2637     + continue;
2638     }
2639     - }
2640    
2641     - /* Remove the list head from any segments still attached. */
2642     - list_del(&task->sysvshm.shm_clist);
2643     - up_write(&shm_ids(ns).rwsem);
2644     + /*
2645     + * 4) get a reference to shp.
2646     + * This cannot fail: shm_clist_rm() is called before
2647     + * ipc_rmid(), thus the refcount cannot be 0.
2648     + */
2649     + WARN_ON(!ipc_rcu_getref(&shp->shm_perm));
2650     +
2651     + /*
2652     + * 5) unlink the shm segment from the list of segments
2653     + * created by current.
2654     + * This must be done last. After unlinking,
2655     + * only the refcounts obtained above prevent IPC_RMID
2656     + * from destroying the segment or the namespace.
2657     + */
2658     + list_del_init(&shp->shm_clist);
2659     +
2660     + task_unlock(task);
2661     +
2662     + /*
2663     + * 6) we have all references
2664     + * Thus lock & if needed destroy shp.
2665     + */
2666     + down_write(&shm_ids(ns).rwsem);
2667     + shm_lock_by_ptr(shp);
2668     + /*
2669     + * rcu_read_lock was implicitly taken in shm_lock_by_ptr, it's
2670     + * safe to call ipc_rcu_putref here
2671     + */
2672     + ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
2673     +
2674     + if (ipc_valid_object(&shp->shm_perm)) {
2675     + if (shm_may_destroy(shp))
2676     + shm_destroy(ns, shp);
2677     + else
2678     + shm_unlock(shp);
2679     + } else {
2680     + /*
2681     + * Someone else deleted the shp from namespace
2682     + * idr/kht while we have waited.
2683     + * Just unlock and continue.
2684     + */
2685     + shm_unlock(shp);
2686     + }
2687     +
2688     + up_write(&shm_ids(ns).rwsem);
2689     + put_ipc_ns(ns); /* paired with get_ipc_ns_not_zero */
2690     + }
2691     }
2692    
2693     static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2694     @@ -621,7 +704,11 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
2695     goto no_id;
2696     }
2697    
2698     + shp->ns = ns;
2699     +
2700     + task_lock(current);
2701     list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
2702     + task_unlock(current);
2703    
2704     /*
2705     * shmid gets reported as "inode#" in /proc/pid/maps.
2706     @@ -1270,7 +1357,8 @@ out_nattch:
2707     down_write(&shm_ids(ns).rwsem);
2708     shp = shm_lock(ns, shmid);
2709     shp->shm_nattch--;
2710     - if (shm_may_destroy(ns, shp))
2711     +
2712     + if (shm_may_destroy(shp))
2713     shm_destroy(ns, shp);
2714     else
2715     shm_unlock(shp);
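The ipc/shm.c rework ties each segment to its creating namespace (shp->ns), protects the per-task shm_clist with task_lock(), and rewrites exit_shm() as a pop-one-entry-at-a-time loop: unlink a single segment under task_lock(), drop that lock, then take the namespace and IPC locks to destroy it, and repeat until the list is empty. A much-simplified userspace sketch of that detach-and-process loop (a pthread mutex and a hand-rolled singly linked list stand in for task_lock() and the kernel list; none of the namespace refcounting is shown):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct seg {
            struct seg *next;
            int id;
    };

    static pthread_mutex_t task_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct seg *clist;       /* per-task list of created segments */

    /*
     * Hold the lock only long enough to unlink one entry, then do the heavy
     * work (namespace refcounting, rwsem, shm_destroy in the real code)
     * without it, and start over.
     */
    static void exit_segments(void)
    {
            for (;;) {
                    struct seg *s;

                    pthread_mutex_lock(&task_lock);
                    s = clist;
                    if (!s) {
                            pthread_mutex_unlock(&task_lock);
                            break;
                    }
                    clist = s->next;        /* list_del_init() in the patch */
                    pthread_mutex_unlock(&task_lock);

                    printf("tearing down segment %d\n", s->id);
                    free(s);
            }
    }

    int main(void)
    {
            for (int i = 0; i < 3; i++) {
                    struct seg *s = malloc(sizeof(*s));
                    s->id = i;
                    s->next = clist;
                    clist = s;
            }
            exit_segments();
            return 0;
    }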
2716     diff --git a/kernel/kprobes.c b/kernel/kprobes.c
2717     index 51867a2e537fa..fb2357d0dbc85 100644
2718     --- a/kernel/kprobes.c
2719     +++ b/kernel/kprobes.c
2720     @@ -1899,6 +1899,9 @@ int register_kretprobe(struct kretprobe *rp)
2721     }
2722     }
2723    
2724     + if (rp->data_size > KRETPROBE_MAX_DATA_SIZE)
2725     + return -E2BIG;
2726     +
2727     rp->kp.pre_handler = pre_handler_kretprobe;
2728     rp->kp.post_handler = NULL;
2729     rp->kp.fault_handler = NULL;
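The kprobes hunk (with KRETPROBE_MAX_DATA_SIZE from include/linux/kprobes.h above) rejects kretprobe registrations whose per-instance data_size is absurdly large before that size gets multiplied into the per-CPU instance allocations. A trivial standalone sketch of the bound check (hypothetical helper, not the kprobes API):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_DATA_SIZE 4096      /* mirrors KRETPROBE_MAX_DATA_SIZE in the hunk above */

    /* Refuse oversized per-instance data before it multiplies into a huge allocation. */
    static int register_probe(size_t data_size, unsigned int maxactive)
    {
            if (data_size > MAX_DATA_SIZE)
                    return -E2BIG;

            void *instances = calloc(maxactive, sizeof(void *) + data_size);
            if (!instances)
                    return -ENOMEM;

            /* ... register the probe and keep 'instances' somewhere ... */
            free(instances);
            return 0;
    }

    int main(void)
    {
            printf("%d\n", register_probe(128, 16));        /* 0 */
            printf("%d\n", register_probe(1 << 20, 16));    /* -E2BIG */
            return 0;
    }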
2730     diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
2731     index 7b393faf930f8..e938fd8db056b 100644
2732     --- a/kernel/power/hibernate.c
2733     +++ b/kernel/power/hibernate.c
2734     @@ -672,7 +672,7 @@ static int load_image_and_restore(void)
2735     goto Unlock;
2736    
2737     error = swsusp_read(&flags);
2738     - swsusp_close(FMODE_READ);
2739     + swsusp_close(FMODE_READ | FMODE_EXCL);
2740     if (!error)
2741     hibernation_restore(flags & SF_PLATFORM_MODE);
2742    
2743     @@ -866,7 +866,7 @@ static int software_resume(void)
2744     /* The snapshot device should not be opened while we're running */
2745     if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
2746     error = -EBUSY;
2747     - swsusp_close(FMODE_READ);
2748     + swsusp_close(FMODE_READ | FMODE_EXCL);
2749     goto Unlock;
2750     }
2751    
2752     @@ -900,7 +900,7 @@ static int software_resume(void)
2753     pr_debug("PM: Hibernation image not present or could not be loaded.\n");
2754     return error;
2755     Close_Finish:
2756     - swsusp_close(FMODE_READ);
2757     + swsusp_close(FMODE_READ | FMODE_EXCL);
2758     goto Finish;
2759     }
2760    
2761     diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
2762     index ecea27ba29e68..7eb25e915b723 100644
2763     --- a/kernel/trace/trace.h
2764     +++ b/kernel/trace/trace.h
2765     @@ -1161,14 +1161,26 @@ __event_trigger_test_discard(struct trace_event_file *file,
2766     if (eflags & EVENT_FILE_FL_TRIGGER_COND)
2767     *tt = event_triggers_call(file, entry);
2768    
2769     - if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2770     - (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2771     - !filter_match_preds(file->filter, entry))) {
2772     - __trace_event_discard_commit(buffer, event);
2773     - return true;
2774     - }
2775     + if (likely(!(file->flags & (EVENT_FILE_FL_SOFT_DISABLED |
2776     + EVENT_FILE_FL_FILTERED |
2777     + EVENT_FILE_FL_PID_FILTER))))
2778     + return false;
2779     +
2780     + if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
2781     + goto discard;
2782     +
2783     + if (file->flags & EVENT_FILE_FL_FILTERED &&
2784     + !filter_match_preds(file->filter, entry))
2785     + goto discard;
2786     +
2787     + if ((file->flags & EVENT_FILE_FL_PID_FILTER) &&
2788     + trace_event_ignore_this_pid(file))
2789     + goto discard;
2790    
2791     return false;
2792     + discard:
2793     + __trace_event_discard_commit(buffer, event);
2794     + return true;
2795     }
2796    
2797     /**
2798     diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
2799     index 1499b2c2799c7..7532208898d0d 100644
2800     --- a/kernel/trace/trace_events.c
2801     +++ b/kernel/trace/trace_events.c
2802     @@ -2241,12 +2241,19 @@ static struct trace_event_file *
2803     trace_create_new_event(struct trace_event_call *call,
2804     struct trace_array *tr)
2805     {
2806     + struct trace_pid_list *pid_list;
2807     struct trace_event_file *file;
2808    
2809     file = kmem_cache_alloc(file_cachep, GFP_TRACE);
2810     if (!file)
2811     return NULL;
2812    
2813     + pid_list = rcu_dereference_protected(tr->filtered_pids,
2814     + lockdep_is_held(&event_mutex));
2815     +
2816     + if (pid_list)
2817     + file->flags |= EVENT_FILE_FL_PID_FILTER;
2818     +
2819     file->event_call = call;
2820     file->tr = tr;
2821     atomic_set(&file->sm_ref, 0);
2822     diff --git a/lib/siphash.c b/lib/siphash.c
2823     index 3ae58b4edad61..e632ee40aac1a 100644
2824     --- a/lib/siphash.c
2825     +++ b/lib/siphash.c
2826     @@ -49,6 +49,7 @@
2827     SIPROUND; \
2828     return (v0 ^ v1) ^ (v2 ^ v3);
2829    
2830     +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2831     u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
2832     {
2833     const u8 *end = data + len - (len % sizeof(u64));
2834     @@ -80,8 +81,8 @@ u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
2835     POSTAMBLE
2836     }
2837     EXPORT_SYMBOL(__siphash_aligned);
2838     +#endif
2839    
2840     -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2841     u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
2842     {
2843     const u8 *end = data + len - (len % sizeof(u64));
2844     @@ -113,7 +114,6 @@ u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
2845     POSTAMBLE
2846     }
2847     EXPORT_SYMBOL(__siphash_unaligned);
2848     -#endif
2849    
2850     /**
2851     * siphash_1u64 - compute 64-bit siphash PRF value of a u64
2852     @@ -250,6 +250,7 @@ EXPORT_SYMBOL(siphash_3u32);
2853     HSIPROUND; \
2854     return (v0 ^ v1) ^ (v2 ^ v3);
2855    
2856     +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2857     u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
2858     {
2859     const u8 *end = data + len - (len % sizeof(u64));
2860     @@ -280,8 +281,8 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
2861     HPOSTAMBLE
2862     }
2863     EXPORT_SYMBOL(__hsiphash_aligned);
2864     +#endif
2865    
2866     -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2867     u32 __hsiphash_unaligned(const void *data, size_t len,
2868     const hsiphash_key_t *key)
2869     {
2870     @@ -313,7 +314,6 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
2871     HPOSTAMBLE
2872     }
2873     EXPORT_SYMBOL(__hsiphash_unaligned);
2874     -#endif
2875    
2876     /**
2877     * hsiphash_1u32 - compute 64-bit hsiphash PRF value of a u32
2878     @@ -418,6 +418,7 @@ EXPORT_SYMBOL(hsiphash_4u32);
2879     HSIPROUND; \
2880     return v1 ^ v3;
2881    
2882     +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2883     u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
2884     {
2885     const u8 *end = data + len - (len % sizeof(u32));
2886     @@ -438,8 +439,8 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
2887     HPOSTAMBLE
2888     }
2889     EXPORT_SYMBOL(__hsiphash_aligned);
2890     +#endif
2891    
2892     -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2893     u32 __hsiphash_unaligned(const void *data, size_t len,
2894     const hsiphash_key_t *key)
2895     {
2896     @@ -461,7 +462,6 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
2897     HPOSTAMBLE
2898     }
2899     EXPORT_SYMBOL(__hsiphash_unaligned);
2900     -#endif
2901    
2902     /**
2903     * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32
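Aside (illustrative sketch, not part of the applied diff): the #ifndef guards move from the unaligned helpers to the aligned ones because callers now fall back to __siphash_unaligned()/__hsiphash_unaligned() whenever CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is set, so those symbols must always be built and only the aligned-only variants may be compiled out. Roughly, the dispatch in include/linux/siphash.h ends up looking like this (simplified paraphrase, not the exact 4.9 header):

    static inline u64 siphash(const void *data, size_t len,
                              const siphash_key_t *key)
    {
            if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
                !IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
                    return __siphash_unaligned(data, len, key);
            return __siphash_aligned(data, len, key);
    }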
2904     diff --git a/mm/hugetlb.c b/mm/hugetlb.c
2905     index de89e9295f6c5..8aad9bd08462e 100644
2906     --- a/mm/hugetlb.c
2907     +++ b/mm/hugetlb.c
2908     @@ -3393,14 +3393,20 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
2909     struct page *page;
2910     struct hstate *h = hstate_vma(vma);
2911     unsigned long sz = huge_page_size(h);
2912     - const unsigned long mmun_start = start; /* For mmu_notifiers */
2913     - const unsigned long mmun_end = end; /* For mmu_notifiers */
2914     + unsigned long mmun_start = start; /* For mmu_notifiers */
2915     + unsigned long mmun_end = end; /* For mmu_notifiers */
2916     + bool force_flush = false;
2917    
2918     WARN_ON(!is_vm_hugetlb_page(vma));
2919     BUG_ON(start & ~huge_page_mask(h));
2920     BUG_ON(end & ~huge_page_mask(h));
2921    
2922     tlb_start_vma(tlb, vma);
2923     +
2924     + /*
2925     + * If sharing possible, alert mmu notifiers of worst case.
2926     + */
2927     + adjust_range_if_pmd_sharing_possible(vma, &mmun_start, &mmun_end);
2928     mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2929     address = start;
2930     for (; address < end; address += sz) {
2931     @@ -3411,6 +3417,8 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
2932     ptl = huge_pte_lock(h, mm, ptep);
2933     if (huge_pmd_unshare(mm, &address, ptep)) {
2934     spin_unlock(ptl);
2935     + tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
2936     + force_flush = true;
2937     continue;
2938     }
2939    
2940     @@ -3467,6 +3475,22 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
2941     }
2942     mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2943     tlb_end_vma(tlb, vma);
2944     +
2945     + /*
2946     + * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
2947     + * could defer the flush until now, since by holding i_mmap_rwsem we
2948     + * guaranteed that the last reference would not be dropped. But we must
2949     + * do the flushing before we return, as otherwise i_mmap_rwsem will be
2950     + * dropped and the last reference to the shared PMDs page might be
2951     + * dropped as well.
2952     + *
2953     + * In theory we could defer the freeing of the PMD pages as well, but
2954     + * huge_pmd_unshare() relies on the exact page_count for the PMD page to
2955     + * detect sharing, so we cannot defer the release of the page either.
2956     + * Instead, do flush now.
2957     + */
2958     + if (force_flush)
2959     + tlb_flush_mmu(tlb);
2960     }
2961    
2962     void __unmap_hugepage_range_final(struct mmu_gather *tlb,
2963     @@ -3493,12 +3517,23 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2964     {
2965     struct mm_struct *mm;
2966     struct mmu_gather tlb;
2967     + unsigned long tlb_start = start;
2968     + unsigned long tlb_end = end;
2969     +
2970     + /*
2971     + * If shared PMDs were possibly used within this vma range, adjust
2972     + * start/end for worst case tlb flushing.
2973     + * Note that we can not be sure if PMDs are shared until we try to
2974     + * unmap pages. However, we want to make sure TLB flushing covers
2975     + * the largest possible range.
2976     + */
2977     + adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);
2978    
2979     mm = vma->vm_mm;
2980    
2981     - tlb_gather_mmu(&tlb, mm, start, end);
2982     + tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
2983     __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
2984     - tlb_finish_mmu(&tlb, start, end);
2985     + tlb_finish_mmu(&tlb, tlb_start, tlb_end);
2986     }
2987    
2988     /*
2989     @@ -4186,11 +4221,21 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
2990     pte_t pte;
2991     struct hstate *h = hstate_vma(vma);
2992     unsigned long pages = 0;
2993     + unsigned long f_start = start;
2994     + unsigned long f_end = end;
2995     + bool shared_pmd = false;
2996     +
2997     + /*
2998     + * In the case of shared PMDs, the area to flush could be beyond
2999     + * start/end. Set f_start/f_end to cover the maximum possible
3000     + * range if PMD sharing is possible.
3001     + */
3002     + adjust_range_if_pmd_sharing_possible(vma, &f_start, &f_end);
3003    
3004     BUG_ON(address >= end);
3005     - flush_cache_range(vma, address, end);
3006     + flush_cache_range(vma, f_start, f_end);
3007    
3008     - mmu_notifier_invalidate_range_start(mm, start, end);
3009     + mmu_notifier_invalidate_range_start(mm, f_start, f_end);
3010     i_mmap_lock_write(vma->vm_file->f_mapping);
3011     for (; address < end; address += huge_page_size(h)) {
3012     spinlock_t *ptl;
3013     @@ -4201,6 +4246,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
3014     if (huge_pmd_unshare(mm, &address, ptep)) {
3015     pages++;
3016     spin_unlock(ptl);
3017     + shared_pmd = true;
3018     continue;
3019     }
3020     pte = huge_ptep_get(ptep);
3021     @@ -4235,12 +4281,18 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
3022     * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
3023     * may have cleared our pud entry and done put_page on the page table:
3024     * once we release i_mmap_rwsem, another task can do the final put_page
3025     - * and that page table be reused and filled with junk.
3026     + * and that page table be reused and filled with junk. If we actually
3027     + * did unshare a page of pmds, flush the range corresponding to the pud.
3028     */
3029     - flush_hugetlb_tlb_range(vma, start, end);
3030     - mmu_notifier_invalidate_range(mm, start, end);
3031     + if (shared_pmd) {
3032     + flush_hugetlb_tlb_range(vma, f_start, f_end);
3033     + mmu_notifier_invalidate_range(mm, f_start, f_end);
3034     + } else {
3035     + flush_hugetlb_tlb_range(vma, start, end);
3036     + mmu_notifier_invalidate_range(mm, start, end);
3037     + }
3038     i_mmap_unlock_write(vma->vm_file->f_mapping);
3039     - mmu_notifier_invalidate_range_end(mm, start, end);
3040     + mmu_notifier_invalidate_range_end(mm, f_start, f_end);
3041    
3042     return pages << h->order;
3043     }
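Aside (illustrative sketch, not part of the applied diff): all three hugetlb hunks widen the flushed and notified range to PUD granularity whenever page-table sharing is possible, because huge_pmd_unshare() tears down a whole PUD's worth of mappings at once. A minimal sketch of that widening; the helper name is hypothetical, the real logic lives in adjust_range_if_pmd_sharing_possible() in mm/hugetlb.c:

    #include <linux/mm.h>

    /* Widen [*start, *end) to PUD_SIZE alignment, but never beyond the vma,
     * so TLB flushes and mmu notifiers cover anything huge_pmd_unshare()
     * may have unmapped. */
    static void adjust_range_example(struct vm_area_struct *vma,
                                     unsigned long *start, unsigned long *end)
    {
            unsigned long a_start = *start & PUD_MASK;
            unsigned long a_end   = ALIGN(*end, PUD_SIZE);

            if (a_start >= vma->vm_start && a_end <= vma->vm_end) {
                    *start = a_start;
                    *end   = a_end;
            }
    }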
3044     diff --git a/mm/memory.c b/mm/memory.c
3045     index be592d434ad89..c2890dc104d9e 100644
3046     --- a/mm/memory.c
3047     +++ b/mm/memory.c
3048     @@ -320,6 +320,22 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
3049     return false;
3050     }
3051    
3052     +void tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
3053     + unsigned long size)
3054     +{
3055     + if (tlb->page_size != 0 && tlb->page_size != PMD_SIZE)
3056     + tlb_flush_mmu(tlb);
3057     +
3058     + tlb->page_size = PMD_SIZE;
3059     + tlb->start = min(tlb->start, address);
3060     + tlb->end = max(tlb->end, address + size);
3061     + /*
3062     + * Track the last address with which we adjusted the range. This
3063     + * will be used later to adjust again after a mmu_flush due to
3064     + * failed __tlb_remove_page
3065     + */
3066     + tlb->addr = address + size - PMD_SIZE;
3067     +}
3068     #endif /* HAVE_GENERIC_MMU_GATHER */
3069    
3070     #ifdef CONFIG_HAVE_RCU_TABLE_FREE
3071     diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
3072     index 6f3c529431865..7a2442623d6a6 100644
3073     --- a/net/ipv4/devinet.c
3074     +++ b/net/ipv4/devinet.c
3075     @@ -2271,7 +2271,7 @@ static int __devinet_sysctl_register(struct net *net, char *dev_name,
3076     free:
3077     kfree(t);
3078     out:
3079     - return -ENOBUFS;
3080     + return -ENOMEM;
3081     }
3082    
3083     static void __devinet_sysctl_unregister(struct ipv4_devconf *cnf)
3084     diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
3085     index 00397c6add202..d710c519a0357 100644
3086     --- a/net/ipv4/tcp_cubic.c
3087     +++ b/net/ipv4/tcp_cubic.c
3088     @@ -342,8 +342,6 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
3089     return;
3090    
3091     if (tcp_in_slow_start(tp)) {
3092     - if (hystart && after(ack, ca->end_seq))
3093     - bictcp_hystart_reset(sk);
3094     acked = tcp_slow_start(tp, acked);
3095     if (!acked)
3096     return;
3097     @@ -394,6 +392,9 @@ static void hystart_update(struct sock *sk, u32 delay)
3098     if (ca->found & hystart_detect)
3099     return;
3100    
3101     + if (after(tp->snd_una, ca->end_seq))
3102     + bictcp_hystart_reset(sk);
3103     +
3104     if (hystart_detect & HYSTART_ACK_TRAIN) {
3105     u32 now = bictcp_clock();
3106    
3107     diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
3108     index 19518a231e571..84eedbd5716d1 100644
3109     --- a/net/nfc/nci/core.c
3110     +++ b/net/nfc/nci/core.c
3111     @@ -486,6 +486,11 @@ static int nci_open_device(struct nci_dev *ndev)
3112    
3113     mutex_lock(&ndev->req_lock);
3114    
3115     + if (test_bit(NCI_UNREG, &ndev->flags)) {
3116     + rc = -ENODEV;
3117     + goto done;
3118     + }
3119     +
3120     if (test_bit(NCI_UP, &ndev->flags)) {
3121     rc = -EALREADY;
3122     goto done;
3123     @@ -549,6 +554,10 @@ done:
3124     static int nci_close_device(struct nci_dev *ndev)
3125     {
3126     nci_req_cancel(ndev, ENODEV);
3127     +
3128     + /* This mutex needs to be held as a barrier for the
3129     + * caller, nci_unregister_device()
3130     + */
3131     mutex_lock(&ndev->req_lock);
3132    
3133     if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
3134     @@ -586,8 +595,8 @@ static int nci_close_device(struct nci_dev *ndev)
3135     /* Flush cmd wq */
3136     flush_workqueue(ndev->cmd_wq);
3137    
3138     - /* Clear flags */
3139     - ndev->flags = 0;
3140     + /* Clear flags except NCI_UNREG */
3141     + ndev->flags &= BIT(NCI_UNREG);
3142    
3143     mutex_unlock(&ndev->req_lock);
3144    
3145     @@ -1271,6 +1280,12 @@ void nci_unregister_device(struct nci_dev *ndev)
3146     {
3147     struct nci_conn_info *conn_info, *n;
3148    
3149     + /* This set_bit is not protected by a specialized barrier;
3150     + * however, it is fine because the mutex_lock(&ndev->req_lock)
3151     + * in nci_close_device() will help to emit one.
3152     + */
3153     + set_bit(NCI_UNREG, &ndev->flags);
3154     +
3155     nci_close_device(ndev);
3156    
3157     destroy_workqueue(ndev->cmd_wq);
3158     diff --git a/net/rds/tcp.c b/net/rds/tcp.c
3159     index 2daba5316caa0..192f932bce0dd 100644
3160     --- a/net/rds/tcp.c
3161     +++ b/net/rds/tcp.c
3162     @@ -389,7 +389,7 @@ void rds_tcp_tune(struct socket *sock)
3163     sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
3164     }
3165     if (rtn->rcvbuf_size > 0) {
3166     - sk->sk_sndbuf = rtn->rcvbuf_size;
3167     + sk->sk_rcvbuf = rtn->rcvbuf_size;
3168     sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
3169     }
3170     release_sock(sk);
3171     diff --git a/sound/pci/ctxfi/ctamixer.c b/sound/pci/ctxfi/ctamixer.c
3172     index 5fcbb065d8702..d32685ce6c059 100644
3173     --- a/sound/pci/ctxfi/ctamixer.c
3174     +++ b/sound/pci/ctxfi/ctamixer.c
3175     @@ -27,16 +27,15 @@
3176    
3177     #define BLANK_SLOT 4094
3178    
3179     -static int amixer_master(struct rsc *rsc)
3180     +static void amixer_master(struct rsc *rsc)
3181     {
3182     rsc->conj = 0;
3183     - return rsc->idx = container_of(rsc, struct amixer, rsc)->idx[0];
3184     + rsc->idx = container_of(rsc, struct amixer, rsc)->idx[0];
3185     }
3186    
3187     -static int amixer_next_conj(struct rsc *rsc)
3188     +static void amixer_next_conj(struct rsc *rsc)
3189     {
3190     rsc->conj++;
3191     - return container_of(rsc, struct amixer, rsc)->idx[rsc->conj];
3192     }
3193    
3194     static int amixer_index(const struct rsc *rsc)
3195     @@ -335,16 +334,15 @@ int amixer_mgr_destroy(struct amixer_mgr *amixer_mgr)
3196    
3197     /* SUM resource management */
3198    
3199     -static int sum_master(struct rsc *rsc)
3200     +static void sum_master(struct rsc *rsc)
3201     {
3202     rsc->conj = 0;
3203     - return rsc->idx = container_of(rsc, struct sum, rsc)->idx[0];
3204     + rsc->idx = container_of(rsc, struct sum, rsc)->idx[0];
3205     }
3206    
3207     -static int sum_next_conj(struct rsc *rsc)
3208     +static void sum_next_conj(struct rsc *rsc)
3209     {
3210     rsc->conj++;
3211     - return container_of(rsc, struct sum, rsc)->idx[rsc->conj];
3212     }
3213    
3214     static int sum_index(const struct rsc *rsc)
3215     diff --git a/sound/pci/ctxfi/ctdaio.c b/sound/pci/ctxfi/ctdaio.c
3216     index 7f089cb433e17..df326b7663a2d 100644
3217     --- a/sound/pci/ctxfi/ctdaio.c
3218     +++ b/sound/pci/ctxfi/ctdaio.c
3219     @@ -55,12 +55,12 @@ static struct daio_rsc_idx idx_20k2[NUM_DAIOTYP] = {
3220     [SPDIFIO] = {.left = 0x05, .right = 0x85},
3221     };
3222    
3223     -static int daio_master(struct rsc *rsc)
3224     +static void daio_master(struct rsc *rsc)
3225     {
3226     /* Actually, this is not the resource index of DAIO.
3227     * For DAO, it is the input mapper index. And, for DAI,
3228     * it is the output time-slot index. */
3229     - return rsc->conj = rsc->idx;
3230     + rsc->conj = rsc->idx;
3231     }
3232    
3233     static int daio_index(const struct rsc *rsc)
3234     @@ -68,19 +68,19 @@ static int daio_index(const struct rsc *rsc)
3235     return rsc->conj;
3236     }
3237    
3238     -static int daio_out_next_conj(struct rsc *rsc)
3239     +static void daio_out_next_conj(struct rsc *rsc)
3240     {
3241     - return rsc->conj += 2;
3242     + rsc->conj += 2;
3243     }
3244    
3245     -static int daio_in_next_conj_20k1(struct rsc *rsc)
3246     +static void daio_in_next_conj_20k1(struct rsc *rsc)
3247     {
3248     - return rsc->conj += 0x200;
3249     + rsc->conj += 0x200;
3250     }
3251    
3252     -static int daio_in_next_conj_20k2(struct rsc *rsc)
3253     +static void daio_in_next_conj_20k2(struct rsc *rsc)
3254     {
3255     - return rsc->conj += 0x100;
3256     + rsc->conj += 0x100;
3257     }
3258    
3259     static const struct rsc_ops daio_out_rsc_ops = {
3260     diff --git a/sound/pci/ctxfi/ctresource.c b/sound/pci/ctxfi/ctresource.c
3261     index c5124c3c0fd19..f610c32ae5ad2 100644
3262     --- a/sound/pci/ctxfi/ctresource.c
3263     +++ b/sound/pci/ctxfi/ctresource.c
3264     @@ -113,18 +113,17 @@ static int audio_ring_slot(const struct rsc *rsc)
3265     return (rsc->conj << 4) + offset_in_audio_slot_block[rsc->type];
3266     }
3267    
3268     -static int rsc_next_conj(struct rsc *rsc)
3269     +static void rsc_next_conj(struct rsc *rsc)
3270     {
3271     unsigned int i;
3272     for (i = 0; (i < 8) && (!(rsc->msr & (0x1 << i))); )
3273     i++;
3274     rsc->conj += (AUDIO_SLOT_BLOCK_NUM >> i);
3275     - return rsc->conj;
3276     }
3277    
3278     -static int rsc_master(struct rsc *rsc)
3279     +static void rsc_master(struct rsc *rsc)
3280     {
3281     - return rsc->conj = rsc->idx;
3282     + rsc->conj = rsc->idx;
3283     }
3284    
3285     static const struct rsc_ops rsc_generic_ops = {
3286     diff --git a/sound/pci/ctxfi/ctresource.h b/sound/pci/ctxfi/ctresource.h
3287     index 736d9f7e9e165..29b6fe6de659c 100644
3288     --- a/sound/pci/ctxfi/ctresource.h
3289     +++ b/sound/pci/ctxfi/ctresource.h
3290     @@ -43,8 +43,8 @@ struct rsc {
3291     };
3292    
3293     struct rsc_ops {
3294     - int (*master)(struct rsc *rsc); /* Move to master resource */
3295     - int (*next_conj)(struct rsc *rsc); /* Move to next conjugate resource */
3296     + void (*master)(struct rsc *rsc); /* Move to master resource */
3297     + void (*next_conj)(struct rsc *rsc); /* Move to next conjugate resource */
3298     int (*index)(const struct rsc *rsc); /* Return the index of resource */
3299     /* Return the output slot number */
3300     int (*output_slot)(const struct rsc *rsc);
3301     diff --git a/sound/pci/ctxfi/ctsrc.c b/sound/pci/ctxfi/ctsrc.c
3302     index a5a72df298013..234a7e96fd08a 100644
3303     --- a/sound/pci/ctxfi/ctsrc.c
3304     +++ b/sound/pci/ctxfi/ctsrc.c
3305     @@ -594,16 +594,15 @@ int src_mgr_destroy(struct src_mgr *src_mgr)
3306    
3307     /* SRCIMP resource manager operations */
3308    
3309     -static int srcimp_master(struct rsc *rsc)
3310     +static void srcimp_master(struct rsc *rsc)
3311     {
3312     rsc->conj = 0;
3313     - return rsc->idx = container_of(rsc, struct srcimp, rsc)->idx[0];
3314     + rsc->idx = container_of(rsc, struct srcimp, rsc)->idx[0];
3315     }
3316    
3317     -static int srcimp_next_conj(struct rsc *rsc)
3318     +static void srcimp_next_conj(struct rsc *rsc)
3319     {
3320     rsc->conj++;
3321     - return container_of(rsc, struct srcimp, rsc)->idx[rsc->conj];
3322     }
3323    
3324     static int srcimp_index(const struct rsc *rsc)
3325     diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
3326     index e9c57bd3c02bf..6274a50026473 100644
3327     --- a/sound/soc/soc-topology.c
3328     +++ b/sound/soc/soc-topology.c
3329     @@ -2050,6 +2050,7 @@ EXPORT_SYMBOL_GPL(snd_soc_tplg_widget_remove_all);
3330     /* remove dynamic controls from the component driver */
3331     int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index)
3332     {
3333     + struct snd_card *card = comp->card->snd_card;
3334     struct snd_soc_dobj *dobj, *next_dobj;
3335     int pass = SOC_TPLG_PASS_END;
3336    
3337     @@ -2057,6 +2058,7 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index)
3338     while (pass >= SOC_TPLG_PASS_START) {
3339    
3340     /* remove mixer controls */
3341     + down_write(&card->controls_rwsem);
3342     list_for_each_entry_safe(dobj, next_dobj, &comp->dobj_list,
3343     list) {
3344    
3345     @@ -2090,6 +2092,7 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index)
3346     break;
3347     }
3348     }
3349     + up_write(&card->controls_rwsem);
3350     pass--;
3351     }
3352