Magellan Linux

Contents of /trunk/kernel-alx/patches-4.9/0318-4.9.219-all-fixes.patch



Revision 3593
Thu Aug 13 10:21:34 2020 UTC by niro
File size: 34130 bytes
linux-219
diff --git a/Makefile b/Makefile
index 1a491b3afc0c..26ad7b28a193 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 9
-SUBLEVEL = 218
+SUBLEVEL = 219
EXTRAVERSION =
NAME = Roaring Lionus

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 3b10b9395960..aba534959377 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -650,7 +650,7 @@ ENTRY(__boot_cpu_mode)
* with MMU turned off.
*/
ENTRY(__early_cpu_boot_status)
- .long 0
+ .quad 0

.popsection

diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 4bc701b32ce2..89bb6250633d 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -336,6 +336,13 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
struct blk_mq_hw_ctx *hctx;
int i;

+ /*
+ * __blk_mq_update_nr_hw_queues will update the nr_hw_queues and
+ * queue_hw_ctx after freeze the queue, so we use q_usage_counter
+ * to avoid race with it.
+ */
+ if (!percpu_ref_tryget(&q->q_usage_counter))
+ return;

queue_for_each_hw_ctx(q, hctx, i) {
struct blk_mq_tags *tags = hctx->tags;
@@ -351,7 +358,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
}
-
+ blk_queue_exit(q);
}

static unsigned int bt_unused_tags(const struct sbitmap_queue *bt)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 24fc09cf7f17..58be2eaa5aaa 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2346,6 +2346,10 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)

list_for_each_entry(q, &set->tag_list, tag_set_list)
blk_mq_unfreeze_queue(q);
+ /*
+ * Sync with blk_mq_queue_tag_busy_iter.
+ */
+ synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);

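The two block-layer hunks work as a pair: the iterator now bails out unless it can take a reference on q_usage_counter, and the resize path synchronizes after unfreezing so stragglers drain before queue_hw_ctx is reused. A compilable userspace analogue of that guard follows; every name in it is invented for the sketch, and a real percpu_ref folds the flag and the count into one atomic.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_long usage;           /* ~ q->q_usage_counter */
static atomic_bool frozen;          /* ~ the ref being killed for a resize */

static bool queue_ref_tryget(void)  /* ~ percpu_ref_tryget() */
{
    if (atomic_load(&frozen))
        return false;
    atomic_fetch_add(&usage, 1);
    return true;
}

static void queue_ref_put(void)     /* ~ blk_queue_exit() */
{
    atomic_fetch_sub(&usage, 1);
}

static void busy_iter(void)
{
    if (!queue_ref_tryget())
        return;                     /* resize in flight: skip, do not crash */
    puts("walking hardware queues");
    queue_ref_put();
}

int main(void)
{
    busy_iter();                    /* runs */
    atomic_store(&frozen, true);    /* resizer freezes the queue */
    busy_iter();                    /* bails out safely */
    return 0;
}
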
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 81b65d0e7563..4cbc73173701 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -2118,8 +2118,8 @@ struct batched_entropy {

/*
* Get a random word for internal kernel use only. The quality of the random
- * number is either as good as RDRAND or as good as /dev/urandom, with the
- * goal of being quite fast and not depleting entropy.
+ * number is good as /dev/urandom, but there is no backtrack protection, with
+ * the goal of being quite fast and not depleting entropy.
*/
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_long);
unsigned long get_random_long(void)
@@ -2127,9 +2127,6 @@ unsigned long get_random_long(void)
unsigned long ret;
struct batched_entropy *batch;

- if (arch_get_random_long(&ret))
- return ret;
-
batch = &get_cpu_var(batched_entropy_long);
if (batch->position % ARRAY_SIZE(batch->entropy_long) == 0) {
extract_crng((u8 *)batch->entropy_long);
@@ -2153,9 +2150,6 @@ unsigned int get_random_int(void)
unsigned int ret;
struct batched_entropy *batch;

- if (arch_get_random_int(&ret))
- return ret;
-
batch = &get_cpu_var(batched_entropy_int);
if (batch->position % ARRAY_SIZE(batch->entropy_int) == 0) {
extract_crng((u8 *)batch->entropy_int);
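
Both random.c hunks delete the arch_get_random_*() fast path, so callers always draw from the batched CRNG output. The batching that remains is simple: refill the whole array whenever the position wraps, then hand out one word per call. A standalone sketch, with fill_random() standing in for extract_crng() (not the kernel API):

#include <stdio.h>
#include <stdlib.h>

#define BATCH 16

static unsigned long batch[BATCH];
static unsigned int position;

/* stand-in for extract_crng(); the kernel pulls from its ChaCha20 CRNG */
static void fill_random(void *buf, size_t len)
{
    unsigned char *p = buf;
    for (size_t i = 0; i < len; i++)
        p[i] = rand() & 0xff;
}

static unsigned long get_batched_long(void)
{
    if (position % BATCH == 0)      /* batch exhausted: refill in one go */
        fill_random(batch, sizeof(batch));
    return batch[position++ % BATCH];
}

int main(void)
{
    printf("%lx %lx\n", get_batched_long(), get_batched_long());
    return 0;
}
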
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index d8601b138dc1..29abb600d7e1 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -107,7 +107,7 @@ static int update_config(struct clk_rcg2 *rcg)
}

WARN(1, "%s: rcg didn't update its configuration.", name);
- return 0;
+ return -EBUSY;
}

static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c
index a39b0343c197..401c218567af 100644
--- a/drivers/gpu/drm/bochs/bochs_hw.c
+++ b/drivers/gpu/drm/bochs/bochs_hw.c
@@ -97,10 +97,8 @@ int bochs_hw_init(struct drm_device *dev, uint32_t flags)
size = min(size, mem);
}

- if (pci_request_region(pdev, 0, "bochs-drm") != 0) {
- DRM_ERROR("Cannot request framebuffer\n");
- return -EBUSY;
- }
+ if (pci_request_region(pdev, 0, "bochs-drm") != 0)
+ DRM_WARN("Cannot request framebuffer, boot fb still active?\n");

bochs->fb_map = ioremap(addr, size);
if (bochs->fb_map == NULL) {
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index e05dda92398c..592ebcd440b6 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -431,6 +431,7 @@ static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx
if (idx > raw->curlen)
goto fail_len;
repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
+ idx++;
if (idx > raw->curlen)
goto fail_len;

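The one-line MST fix restores the idx++ that the length check below it depends on; without it, the second bounds check re-validates the offset of num_bytes instead of the byte after it. A hypothetical, compilable parser showing the advance-then-check pattern (slightly generalized to also guard the payload):

#include <stdio.h>

struct msg { const unsigned char *buf; int curlen; };

static int parse_reply(struct msg *m)
{
    int idx = 0;
    unsigned char num_bytes;

    if (idx >= m->curlen)
        return -1;
    num_bytes = m->buf[idx];
    idx++;                             /* the missing increment from the patch */
    if (idx + num_bytes > m->curlen)   /* now guards the payload, not the header */
        return -1;
    printf("payload of %u bytes at offset %d\n", num_bytes, idx);
    return 0;
}

int main(void)
{
    const unsigned char raw[] = { 3, 0xaa, 0xbb, 0xcc };
    struct msg m = { raw, sizeof(raw) };
    return parse_reply(&m);
}
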
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index d9230132dfbc..d71fa2d9a196 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -257,6 +257,8 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
unsigned int waitlink_offset = buffer->user_size - 16;
u32 return_target, return_dwords;
u32 link_target, link_dwords;
+ unsigned int new_flush_seq = READ_ONCE(gpu->mmu->flush_seq);
+ bool need_flush = gpu->flush_seq != new_flush_seq;

if (drm_debug & DRM_UT_DRIVER)
etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
@@ -269,14 +271,14 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
* need to append a mmu flush load state, followed by a new
* link to this buffer - a total of four additional words.
*/
- if (gpu->mmu->need_flush || gpu->switch_context) {
+ if (need_flush || gpu->switch_context) {
u32 target, extra_dwords;

/* link command */
extra_dwords = 1;

/* flush command */
- if (gpu->mmu->need_flush) {
+ if (need_flush) {
if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
extra_dwords += 1;
else
@@ -289,7 +291,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,

target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);

- if (gpu->mmu->need_flush) {
+ if (need_flush) {
/* Add the MMU flush */
if (gpu->mmu->version == ETNAVIV_IOMMU_V1) {
CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
@@ -309,7 +311,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
SYNC_RECIPIENT_PE);
}

- gpu->mmu->need_flush = false;
+ gpu->flush_seq = new_flush_seq;
}

if (gpu->switch_context) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index a336754698f8..dba0d769d17a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1313,7 +1313,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
gpu->active_fence = submit->fence;

if (gpu->lastctx != cmdbuf->ctx) {
- gpu->mmu->need_flush = true;
+ gpu->mmu->flush_seq++;
gpu->switch_context = true;
gpu->lastctx = cmdbuf->ctx;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 73c278dc3706..416940b254a6 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -135,6 +135,7 @@ struct etnaviv_gpu {
int irq;

struct etnaviv_iommu *mmu;
+ unsigned int flush_seq;

/* Power Control: */
struct clk *clk_bus;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index fe0e85b41310..ef9df6158dc1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -134,7 +134,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
*/
if (mmu->last_iova) {
mmu->last_iova = 0;
- mmu->need_flush = true;
+ mmu->flush_seq++;
continue;
}

@@ -197,7 +197,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
* associated commit requesting this mapping, and retry the
* allocation one more time.
*/
- mmu->need_flush = true;
+ mmu->flush_seq++;
}

return ret;
@@ -354,7 +354,7 @@ u32 etnaviv_iommu_get_cmdbuf_va(struct etnaviv_gpu *gpu,
* that the FE MMU prefetch won't load invalid entries.
*/
mmu->last_iova = buf->vram_node.start + buf->size + SZ_64K;
- gpu->mmu->need_flush = true;
+ mmu->flush_seq++;
mutex_unlock(&mmu->lock);

return (u32)buf->vram_node.start;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
index e787e49c9693..5bdc5f5601b1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
@@ -44,7 +44,7 @@ struct etnaviv_iommu {
struct list_head mappings;
struct drm_mm mm;
u32 last_iova;
- bool need_flush;
+ unsigned int flush_seq;
};

struct etnaviv_gem_object;
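
The etnaviv series above converts need_flush from a bool into a sequence counter: clearing a bool after flushing can silently discard a flush request that raced in between the read and the clear, while comparing a remembered sequence number against the current one cannot lose a request. A single-threaded illustration of the idea (the kernel pairs this with READ_ONCE and its command-buffer locking):

#include <stdio.h>

static unsigned int mmu_flush_seq;   /* bumped by anyone who needs a flush */
static unsigned int gpu_flush_seq;   /* last value the GPU acted on */

static void request_flush(void) { mmu_flush_seq++; }

static void queue_buffer(void)
{
    unsigned int new_seq = mmu_flush_seq;        /* READ_ONCE() in the patch */
    int need_flush = gpu_flush_seq != new_seq;

    if (need_flush) {
        /* ... emit MMU flush commands ... */
        gpu_flush_seq = new_seq;   /* a request arriving after the snapshot
                                      keeps mmu_flush_seq != gpu_flush_seq */
        printf("flushed up to seq %u\n", new_seq);
    }
}

int main(void)
{
    request_flush();
    queue_buffer();      /* flushes */
    queue_buffer();      /* no-op: sequences already equal */
    return 0;
}
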
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 795660e29b2c..569e8c45a59a 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -40,6 +40,46 @@ static bool use_pages(struct drm_gem_object *obj)
return !msm_obj->vram_node;
}

+/*
+ * Cache sync.. this is a bit over-complicated, to fit dma-mapping
+ * API. Really GPU cache is out of scope here (handled on cmdstream)
+ * and all we need to do is invalidate newly allocated pages before
+ * mapping to CPU as uncached/writecombine.
+ *
+ * On top of this, we have the added headache, that depending on
+ * display generation, the display's iommu may be wired up to either
+ * the toplevel drm device (mdss), or to the mdp sub-node, meaning
+ * that here we either have dma-direct or iommu ops.
+ *
+ * Let this be a cautionary tail of abstraction gone wrong.
+ */
+
+static void sync_for_device(struct msm_gem_object *msm_obj)
+{
+ struct device *dev = msm_obj->base.dev->dev;
+
+ if (get_dma_ops(dev)) {
+ dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ } else {
+ dma_map_sg(dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ }
+}
+
+static void sync_for_cpu(struct msm_gem_object *msm_obj)
+{
+ struct device *dev = msm_obj->base.dev->dev;
+
+ if (get_dma_ops(dev)) {
+ dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ } else {
+ dma_unmap_sg(dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ }
+}
+
/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj,
int npages)
@@ -106,8 +146,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
* because display controller, GPU, etc. are not coherent:
*/
if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
- dma_map_sg(dev->dev, msm_obj->sgt->sgl,
- msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ sync_for_device(msm_obj);
}

return msm_obj->pages;
@@ -124,9 +163,7 @@ static void put_pages(struct drm_gem_object *obj)
* GPU, etc. are not coherent:
*/
if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
- dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
- msm_obj->sgt->nents,
- DMA_BIDIRECTIONAL);
+ sync_for_cpu(msm_obj);

sg_free_table(msm_obj->sgt);
kfree(msm_obj->sgt);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 27653aad8f21..0a6cc78ebcf7 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2568,6 +2568,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
err2:
kfree(route->path_rec);
route->path_rec = NULL;
+ route->num_paths = 0;
err1:
kfree(work);
return ret;
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
index 621b60ab74ee..5df1e368096c 100644
--- a/drivers/infiniband/hw/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -670,7 +670,11 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
dd_dev_err(dd,
"Skipping sc2vl sysfs info, (err %d) port %u\n",
ret, port_num);
- goto bail;
+ /*
+ * Based on the documentation for kobject_init_and_add(), the
+ * caller should call kobject_put even if this call fails.
+ */
+ goto bail_sc2vl;
}
kobject_uevent(&ppd->sc2vl_kobj, KOBJ_ADD);

@@ -680,7 +684,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
dd_dev_err(dd,
"Skipping sl2sc sysfs info, (err %d) port %u\n",
ret, port_num);
- goto bail_sc2vl;
+ goto bail_sl2sc;
}
kobject_uevent(&ppd->sl2sc_kobj, KOBJ_ADD);

@@ -690,7 +694,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
dd_dev_err(dd,
"Skipping vl2mtu sysfs info, (err %d) port %u\n",
ret, port_num);
- goto bail_sl2sc;
+ goto bail_vl2mtu;
}
kobject_uevent(&ppd->vl2mtu_kobj, KOBJ_ADD);

@@ -700,7 +704,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
dd_dev_err(dd,
"Skipping Congestion Control sysfs info, (err %d) port %u\n",
ret, port_num);
- goto bail_vl2mtu;
+ goto bail_cc;
}

kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);
@@ -738,7 +742,6 @@ bail_sl2sc:
kobject_put(&ppd->sl2sc_kobj);
bail_sc2vl:
kobject_put(&ppd->sc2vl_kobj);
-bail:
return ret;
}

@@ -858,8 +861,13 @@ bail:
for (i = 0; i < ARRAY_SIZE(hfi1_attributes); ++i)
device_remove_file(&dev->dev, hfi1_attributes[i]);

- for (i = 0; i < dd->num_sdma; i++)
- kobject_del(&dd->per_sdma[i].kobj);
+ /*
+ * The function kobject_put() will call kobject_del() if the kobject
+ * has been added successfully. The sysfs files created under the
+ * kobject directory will also be removed during the process.
+ */
+ for (; i >= 0; i--)
+ kobject_put(&dd->per_sdma[i].kobj);

return ret;
}
@@ -872,6 +880,10 @@ void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd)
struct hfi1_pportdata *ppd;
int i;

+ /* Unwind operations in hfi1_verbs_register_sysfs() */
+ for (i = 0; i < dd->num_sdma; i++)
+ kobject_put(&dd->per_sdma[i].kobj);
+
for (i = 0; i < dd->num_pports; i++) {
ppd = &dd->pport[i];

diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 94b37c60fdd0..d0435c7631ff 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -147,7 +147,7 @@ static void slc_bump(struct slcan *sl)
u32 tmpid;
char *cmd = sl->rbuff;

- cf.can_id = 0;
+ memset(&cf, 0, sizeof(cf));

switch (*cmd) {
case 'r':
@@ -186,8 +186,6 @@ static void slc_bump(struct slcan *sl)
else
return;

- *(u64 *) (&cf.data) = 0; /* clear payload */
-
/* RTR frames may have a dlc > 0 but they never have any data bytes */
if (!(cf.can_id & CAN_RTR_FLAG)) {
for (i = 0; i < cf.can_dlc; i++) {
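
The slcan fix swaps two piecemeal clears (can_id, then the 8 payload bytes) for one memset over the whole frame, so struct padding and any fields the frame layout grows later can never carry stack garbage into the network stack. Standalone illustration with a stand-in frame struct; the field names here are only representative:

#include <stdint.h>
#include <string.h>

struct can_frame_like {
    uint32_t can_id;
    uint8_t  can_dlc;
    uint8_t  __pad;     /* padding/reserved bytes: cleared by the memset, */
    uint8_t  __res0;    /* missed by field-by-field initialisation       */
    uint8_t  __res1;
    uint8_t  data[8];
};

int main(void)
{
    struct can_frame_like cf;

    memset(&cf, 0, sizeof(cf));   /* one call clears id, dlc, padding, data */
    cf.can_id = 0x123;
    cf.can_dlc = 2;
    cf.data[0] = 0xde;
    cf.data[1] = 0xad;
    return cf.data[2];            /* provably zero, never stack garbage */
}
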
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index a3a8d7b62f3f..796571fccba7 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -976,6 +976,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
struct device_node *dn = pdev->dev.of_node;
struct b53_platform_data *pdata;
struct dsa_switch_ops *ops;
+ struct device_node *ports;
struct bcm_sf2_priv *priv;
struct b53_device *dev;
struct dsa_switch *ds;
@@ -1038,7 +1039,11 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
spin_lock_init(&priv->indir_lock);
mutex_init(&priv->stats_mutex);

- bcm_sf2_identify_ports(priv, dn->child);
+ ports = of_find_node_by_name(dn, "ports");
+ if (ports) {
+ bcm_sf2_identify_ports(priv, ports);
+ of_node_put(ports);
+ }

priv->irq0 = irq_of_parse_and_map(dn, 0);
priv->irq1 = irq_of_parse_and_map(dn, 1);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 093e58e94075..3a2edf9f51e2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -214,7 +214,7 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
reg++;
}

- while (reg <= perfect_addr_number) {
+ while (reg < perfect_addr_number) {
writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
writel(0, ioaddr + GMAC_ADDR_LOW(reg));
reg++;
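
The stmmac change is a one-character off-by-one repair: with reg counting from 0, `<=` walks one slot past the last perfect-filter register. The same bug shape, reduced to an array:

#include <assert.h>

#define PERFECT_ADDRS 8

int main(void)
{
    unsigned int regs[PERFECT_ADDRS] = { 0 };
    unsigned int reg = 1;             /* slots 1..7 left to clear */

    while (reg < PERFECT_ADDRS) {     /* `<=` would index regs[8]: out of bounds */
        regs[reg] = 0;
        reg++;
    }
    assert(reg == PERFECT_ADDRS);
    return 0;
}
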
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 16f074408813..adb38a4ec9ac 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -28,6 +28,7 @@
#include <linux/micrel_phy.h>
#include <linux/of.h>
#include <linux/clk.h>
+#include <linux/delay.h>

/* Operation Mode Strap Override */
#define MII_KSZPHY_OMSO 0x16
@@ -728,6 +729,12 @@ static int kszphy_resume(struct phy_device *phydev)
{
genphy_resume(phydev);

+ /* After switching from power-down to normal mode, an internal global
+ * reset is automatically generated. Wait a minimum of 1 ms before
+ * read/write access to the PHY registers.
+ */
+ usleep_range(1000, 2000);
+
/* Enable PHY Interrupts */
if (phy_interrupt_is_valid(phydev)) {
phydev->interrupts = PHY_INTERRUPT_ENABLED;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 712bd450f857..bf36eda082d6 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2996,7 +2996,6 @@ int dwc3_gadget_init(struct dwc3 *dwc)
dwc->gadget.speed = USB_SPEED_UNKNOWN;
dwc->gadget.sg_supported = true;
dwc->gadget.name = "dwc3-gadget";
- dwc->gadget.is_otg = dwc->dr_mode == USB_DR_MODE_OTG;

/*
* FIXME We might be setting max_speed to <SUPER, however versions
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index c42cbd19ff05..ec1640f3167b 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -85,7 +85,6 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
return 0;
}

-
static int ceph_sync_fs(struct super_block *sb, int wait)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
@@ -178,6 +177,26 @@ static match_table_t fsopt_tokens = {
{-1, NULL}
};

+/*
+ * Remove adjacent slashes and then the trailing slash, unless it is
+ * the only remaining character.
+ *
+ * E.g. "//dir1////dir2///" --> "/dir1/dir2", "///" --> "/".
+ */
+static void canonicalize_path(char *path)
+{
+ int i, j = 0;
+
+ for (i = 0; path[i] != '\0'; i++) {
+ if (path[i] != '/' || j < 1 || path[j - 1] != '/')
+ path[j++] = path[i];
+ }
+
+ if (j > 1 && path[j - 1] == '/')
+ j--;
+ path[j] = '\0';
+}
+
static int parse_fsopt_token(char *c, void *private)
{
struct ceph_mount_options *fsopt = private;
@@ -337,6 +356,7 @@ static int compare_mount_options(struct ceph_mount_options *new_fsopt,
ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name);
if (ret)
return ret;
+
ret = strcmp_null(fsopt1->mds_namespace, fsopt2->mds_namespace);
if (ret)
return ret;
@@ -396,13 +416,17 @@ static int parse_mount_options(struct ceph_mount_options **pfsopt,
*/
dev_name_end = strchr(dev_name, '/');
if (dev_name_end) {
- if (strlen(dev_name_end) > 1) {
- fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
- if (!fsopt->server_path) {
- err = -ENOMEM;
- goto out;
- }
+ /*
+ * The server_path will include the whole chars from userland
+ * including the leading '/'.
+ */
+ fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
+ if (!fsopt->server_path) {
+ err = -ENOMEM;
+ goto out;
}
+
+ canonicalize_path(fsopt->server_path);
} else {
dev_name_end = dev_name + strlen(dev_name);
}
@@ -725,7 +749,6 @@ static void destroy_caches(void)
ceph_fscache_unregister();
}

-
/*
* ceph_umount_begin - initiate forced umount. Tear down down the
* mount, skipping steps that may hang while waiting for server(s).
@@ -812,9 +835,6 @@ out:
return root;
}

-
-
-
/*
* mount: join the ceph cluster, and open root directory.
*/
@@ -828,18 +848,14 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc)
mutex_lock(&fsc->client->mount_mutex);

if (!fsc->sb->s_root) {
- const char *path;
+ const char *path = fsc->mount_options->server_path ?
+ fsc->mount_options->server_path + 1 : "";
+
err = __ceph_open_session(fsc->client, started);
if (err < 0)
goto out;

- if (!fsc->mount_options->server_path) {
- path = "";
- dout("mount opening path \\t\n");
- } else {
- path = fsc->mount_options->server_path + 1;
- dout("mount opening path %s\n", path);
- }
+ dout("mount opening path '%s'\n", path);

err = ceph_fs_debugfs_init(fsc);
if (err < 0)
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 9bd0d928057b..9f18635f78c7 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -70,7 +70,7 @@ struct ceph_mount_options {

char *snapdir_name; /* default ".snap" */
char *mds_namespace; /* default NULL */
- char *server_path; /* default "/" */
+ char *server_path; /* default NULL (means "/") */
};

struct ceph_fs_client {
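
canonicalize_path() in the ceph hunk above is self-contained C, so it can be lifted into a userspace harness as-is; this quick check confirms the two examples given in its comment:

#include <assert.h>
#include <string.h>

/* copied verbatim from the hunk above */
static void canonicalize_path(char *path)
{
    int i, j = 0;

    for (i = 0; path[i] != '\0'; i++) {
        if (path[i] != '/' || j < 1 || path[j - 1] != '/')
            path[j++] = path[i];
    }

    if (j > 1 && path[j - 1] == '/')
        j--;
    path[j] = '\0';
}

int main(void)
{
    char a[] = "//dir1////dir2///";
    char b[] = "///";

    canonicalize_path(a);
    canonicalize_path(b);
    assert(strcmp(a, "/dir1/dir2") == 0);
    assert(strcmp(b, "/") == 0);
    return 0;
}
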
diff --git a/include/uapi/linux/coresight-stm.h b/include/uapi/linux/coresight-stm.h
index 7e4272cf1fb2..741309cedd2c 100644
--- a/include/uapi/linux/coresight-stm.h
+++ b/include/uapi/linux/coresight-stm.h
@@ -1,8 +1,10 @@
#ifndef __UAPI_CORESIGHT_STM_H_
#define __UAPI_CORESIGHT_STM_H_

-#define STM_FLAG_TIMESTAMPED BIT(3)
-#define STM_FLAG_GUARANTEED BIT(7)
+#include <linux/const.h>
+
+#define STM_FLAG_TIMESTAMPED _BITUL(3)
+#define STM_FLAG_GUARANTEED _BITUL(7)

/*
* The CoreSight STM supports guaranteed and invariant timing
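
The UAPI header fix matters because BIT() is defined by kernel-internal headers and is not available to userspace, so a uapi header that uses it breaks userspace builds; _BITUL() comes from the exported <linux/const.h>. A userspace rendering, spelling out roughly what _BITUL() expands to:

#include <stdio.h>

#define MY_BITUL(x) (1UL << (x))          /* roughly what _BITUL(x) expands to */

#define STM_FLAG_TIMESTAMPED MY_BITUL(3)
#define STM_FLAG_GUARANTEED  MY_BITUL(7)

int main(void)
{
    printf("timestamped=0x%lx guaranteed=0x%lx\n",
           STM_FLAG_TIMESTAMPED, STM_FLAG_GUARANTEED);
    return 0;
}
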
diff --git a/kernel/padata.c b/kernel/padata.c
index 286c5142a0f7..6939111b3cbe 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -614,8 +614,8 @@ int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
struct cpumask *serial_mask, *parallel_mask;
int err = -EINVAL;

- mutex_lock(&pinst->lock);
get_online_cpus();
+ mutex_lock(&pinst->lock);

switch (cpumask_type) {
case PADATA_CPU_PARALLEL:
@@ -633,8 +633,8 @@ int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
- put_online_cpus();
mutex_unlock(&pinst->lock);
+ put_online_cpus();

return err;
}
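
The padata hunks impose one consistent order, CPU-hotplug lock first and pinst->lock second, on both the lock and the unlock paths; taking two locks in different orders on different paths is the classic ABBA deadlock recipe. A minimal pthread sketch of the rule (names chosen to mirror the kernel's, not actual API; build with -pthread):

#include <pthread.h>

static pthread_mutex_t cpu_hotplug = PTHREAD_MUTEX_INITIALIZER; /* ~ get_online_cpus() */
static pthread_mutex_t pinst_lock  = PTHREAD_MUTEX_INITIALIZER; /* ~ pinst->lock */

static void set_cpumask(void)
{
    pthread_mutex_lock(&cpu_hotplug);   /* outer lock always taken first */
    pthread_mutex_lock(&pinst_lock);
    /* ... rebuild cpumasks ... */
    pthread_mutex_unlock(&pinst_lock);  /* released in reverse order */
    pthread_mutex_unlock(&cpu_hotplug);
}

int main(void)
{
    set_cpumask();
    return 0;
}
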
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index da7a932922cb..a2be65bf5d8c 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2768,7 +2768,9 @@ int mpol_parse_str(char *str, struct mempolicy **mpol)
switch (mode) {
case MPOL_PREFERRED:
/*
- * Insist on a nodelist of one node only
+ * Insist on a nodelist of one node only, although later
+ * we use first_node(nodes) to grab a single node, so here
+ * nodelist (or nodes) cannot be empty.
*/
if (nodelist) {
char *rest = nodelist;
@@ -2776,6 +2778,8 @@ int mpol_parse_str(char *str, struct mempolicy **mpol)
rest++;
if (*rest)
goto out;
+ if (nodes_empty(nodes))
+ goto out;
}
break;
case MPOL_INTERLEAVE:
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 2f2cb5e27cdd..a8c63ef75f73 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -413,10 +413,8 @@ static int __rfcomm_create_dev(struct sock *sk, void __user *arg)
dlc = rfcomm_dlc_exists(&req.src, &req.dst, req.channel);
if (IS_ERR(dlc))
return PTR_ERR(dlc);
- else if (dlc) {
- rfcomm_dlc_put(dlc);
+ if (dlc)
return -EBUSY;
- }
dlc = rfcomm_dlc_alloc(GFP_KERNEL);
if (!dlc)
return -ENOMEM;
diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c
index 76d55a80f3b9..98074338cd83 100644
--- a/net/dsa/tag_brcm.c
+++ b/net/dsa/tag_brcm.c
@@ -84,8 +84,6 @@ static struct sk_buff *brcm_tag_xmit(struct sk_buff *skb, struct net_device *dev
brcm_tag[2] = BRCM_IG_DSTMAP2_MASK;
brcm_tag[3] = (1 << p->port) & BRCM_IG_DSTMAP1_MASK;

- skb->offload_fwd_mark = 1;
-
return skb;

out_free:
@@ -148,6 +146,8 @@ static int brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev,
skb->dev->stats.rx_packets++;
skb->dev->stats.rx_bytes += skb->len;

+ skb->offload_fwd_mark = 1;
+
netif_receive_skb(skb);

return 0;
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 36f0a8c581d0..a1a7ed6fc8dd 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -2256,6 +2256,7 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v)
" %Zd bytes, size of tnode: %Zd bytes.\n",
LEAF_SIZE, TNODE_SIZE(0));

+ rcu_read_lock();
for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
struct hlist_head *head = &net->ipv4.fib_table_hash[h];
struct fib_table *tb;
@@ -2275,7 +2276,9 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v)
trie_show_usage(seq, t->stats);
#endif
}
+ cond_resched_rcu();
}
+ rcu_read_unlock();

return 0;
}
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 65c47b1f0da4..dd5db4cc7d06 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -155,11 +155,8 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
cand = t;
}

- if (flags & TUNNEL_NO_KEY)
- goto skip_key_lookup;
-
hlist_for_each_entry_rcu(t, head, hash_node) {
- if (t->parms.i_key != key ||
+ if ((!(flags & TUNNEL_NO_KEY) && t->parms.i_key != key) ||
t->parms.iph.saddr != 0 ||
t->parms.iph.daddr != 0 ||
!(t->dev->flags & IFF_UP))
@@ -171,7 +168,6 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
cand = t;
}

-skip_key_lookup:
if (cand)
return cand;

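The ip_tunnel rewrite folds the TUNNEL_NO_KEY shortcut into the loop's first condition, so keyless lookups still run the saddr/daddr/IFF_UP checks that the goto used to skip. A truth-table check of the folded predicate:

#include <assert.h>
#include <stdbool.h>

#define TUNNEL_NO_KEY 0x1u

static bool key_mismatch(unsigned int flags, unsigned int i_key, unsigned int key)
{
    return !(flags & TUNNEL_NO_KEY) && i_key != key;
}

int main(void)
{
    assert(!key_mismatch(TUNNEL_NO_KEY, 1, 2)); /* keyless: never a mismatch */
    assert(key_mismatch(0, 1, 2));              /* keyed: differing keys skip entry */
    assert(!key_mismatch(0, 7, 7));             /* keyed: matching keys pass */
    return 0;
}
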
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 394a1ddb0782..7c3da29fad8e 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1351,6 +1351,9 @@ again:

hlist_del_init(&session->hlist);

+ if (test_and_set_bit(0, &session->dead))
+ goto again;
+
if (session->ref != NULL)
(*session->ref)(session);

@@ -1799,6 +1802,9 @@ EXPORT_SYMBOL_GPL(__l2tp_session_unhash);
*/
int l2tp_session_delete(struct l2tp_session *session)
{
+ if (test_and_set_bit(0, &session->dead))
+ return 0;
+
if (session->ref)
(*session->ref)(session);
__l2tp_session_unhash(session);
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 7cc49715606e..7c2037184b6c 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -84,6 +84,7 @@ struct l2tp_session_cfg {
struct l2tp_session {
int magic; /* should be
* L2TP_SESSION_MAGIC */
+ long dead;

struct l2tp_tunnel *tunnel; /* back pointer to tunnel
* context */
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 16b63e60396f..d919b3e6b548 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -437,11 +437,11 @@ static void pppol2tp_session_close(struct l2tp_session *session)

BUG_ON(session->magic != L2TP_SESSION_MAGIC);

- if (sock) {
+ if (sock)
inet_shutdown(sock, SEND_SHUTDOWN);
- /* Don't let the session go away before our socket does */
- l2tp_session_inc_refcount(session);
- }
+
+ /* Don't let the session go away before our socket does */
+ l2tp_session_inc_refcount(session);
}

/* Really kill the session socket. (Called from sock_put() if
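
Both l2tp call sites now gate teardown on test_and_set_bit(0, &session->dead): whichever path sets the bit first performs the teardown, the other backs off, and a session can no longer be destroyed twice. A userspace analogue with a C11 atomic flag:

#include <stdatomic.h>
#include <stdio.h>

struct session { atomic_flag dead; };

static void session_delete(struct session *s, const char *who)
{
    if (atomic_flag_test_and_set(&s->dead)) {  /* already being torn down */
        printf("%s: lost the race, backing off\n", who);
        return;
    }
    printf("%s: performing the one and only teardown\n", who);
}

int main(void)
{
    struct session s = { ATOMIC_FLAG_INIT };

    session_delete(&s, "tunnel-close path");   /* wins */
    session_delete(&s, "session-delete path"); /* no-op */
    return 0;
}
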
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 824ebbffea33..34ab7f92f064 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -235,7 +235,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
{
struct sctp_association *asoc = t->asoc;
struct dst_entry *dst = NULL;
- struct flowi6 *fl6 = &fl->u.ip6;
+ struct flowi _fl;
+ struct flowi6 *fl6 = &_fl.u.ip6;
struct sctp_bind_addr *bp;
struct ipv6_pinfo *np = inet6_sk(sk);
struct sctp_sockaddr_entry *laddr;
@@ -245,7 +246,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
__u8 matchlen = 0;
sctp_scope_t scope;

- memset(fl6, 0, sizeof(struct flowi6));
+ memset(&_fl, 0, sizeof(_fl));
fl6->daddr = daddr->v6.sin6_addr;
fl6->fl6_dport = daddr->v6.sin6_port;
fl6->flowi6_proto = IPPROTO_SCTP;
@@ -269,8 +270,11 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
rcu_read_unlock();

dst = ip6_dst_lookup_flow(sk, fl6, final_p);
- if (!asoc || saddr)
+ if (!asoc || saddr) {
+ t->dst = dst;
+ memcpy(fl, &_fl, sizeof(_fl));
goto out;
+ }

bp = &asoc->base.bind_addr;
scope = sctp_scope(daddr);
@@ -293,6 +297,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
if ((laddr->a.sa.sa_family == AF_INET6) &&
(sctp_v6_cmp_addr(&dst_saddr, &laddr->a))) {
rcu_read_unlock();
+ t->dst = dst;
+ memcpy(fl, &_fl, sizeof(_fl));
goto out;
}
}
@@ -331,6 +337,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
if (!IS_ERR_OR_NULL(dst))
dst_release(dst);
dst = bdst;
+ t->dst = dst;
+ memcpy(fl, &_fl, sizeof(_fl));
break;
}

@@ -344,6 +352,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
dst_release(dst);
dst = bdst;
matchlen = bmatchlen;
+ t->dst = dst;
+ memcpy(fl, &_fl, sizeof(_fl));
}
rcu_read_unlock();

@@ -352,14 +362,12 @@ out:
struct rt6_info *rt;

rt = (struct rt6_info *)dst;
- t->dst = dst;
t->dst_cookie = rt6_get_cookie(rt);
pr_debug("rt6_dst:%pI6/%d rt6_src:%pI6\n",
&rt->rt6i_dst.addr, rt->rt6i_dst.plen,
- &fl6->saddr);
+ &fl->u.ip6.saddr);
} else {
t->dst = NULL;
-
pr_debug("no route\n");
}
}
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 446503d3b80c..c5a2a538279b 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -430,14 +430,15 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
{
struct sctp_association *asoc = t->asoc;
struct rtable *rt;
- struct flowi4 *fl4 = &fl->u.ip4;
+ struct flowi _fl;
+ struct flowi4 *fl4 = &_fl.u.ip4;
struct sctp_bind_addr *bp;
struct sctp_sockaddr_entry *laddr;
struct dst_entry *dst = NULL;
union sctp_addr *daddr = &t->ipaddr;
union sctp_addr dst_saddr;

- memset(fl4, 0x0, sizeof(struct flowi4));
+ memset(&_fl, 0x0, sizeof(_fl));
fl4->daddr = daddr->v4.sin_addr.s_addr;
fl4->fl4_dport = daddr->v4.sin_port;
fl4->flowi4_proto = IPPROTO_SCTP;
@@ -455,8 +456,11 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
&fl4->saddr);

rt = ip_route_output_key(sock_net(sk), fl4);
- if (!IS_ERR(rt))
+ if (!IS_ERR(rt)) {
dst = &rt->dst;
+ t->dst = dst;
+ memcpy(fl, &_fl, sizeof(_fl));
+ }

/* If there is no association or if a source address is passed, no
* more validation is required.
@@ -519,27 +523,33 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr,
false);
if (!odev || odev->ifindex != fl4->flowi4_oif) {
- if (!dst)
+ if (!dst) {
dst = &rt->dst;
- else
+ t->dst = dst;
+ memcpy(fl, &_fl, sizeof(_fl));
+ } else {
dst_release(&rt->dst);
+ }
continue;
}

dst_release(dst);
dst = &rt->dst;
+ t->dst = dst;
+ memcpy(fl, &_fl, sizeof(_fl));
break;
}

out_unlock:
rcu_read_unlock();
out:
- t->dst = dst;
- if (dst)
+ if (dst) {
pr_debug("rt_dst:%pI4, rt_src:%pI4\n",
- &fl4->daddr, &fl4->saddr);
- else
+ &fl->u.ip4.daddr, &fl->u.ip4.saddr);
+ } else {
+ t->dst = NULL;
pr_debug("no route\n");
+ }
}

/* For v4, the source address is cached in the route entry(dst). So no need
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 21ec92011585..95f39dde1e08 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -173,29 +173,44 @@ static void sctp_clear_owner_w(struct sctp_chunk *chunk)
skb_orphan(chunk->skb);
}

+#define traverse_and_process() \
+do { \
+ msg = chunk->msg; \
+ if (msg == prev_msg) \
+ continue; \
+ list_for_each_entry(c, &msg->chunks, frag_list) { \
+ if ((clear && asoc->base.sk == c->skb->sk) || \
+ (!clear && asoc->base.sk != c->skb->sk)) \
+ cb(c); \
+ } \
+ prev_msg = msg; \
+} while (0)
+
static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
+ bool clear,
void (*cb)(struct sctp_chunk *))

{
+ struct sctp_datamsg *msg, *prev_msg = NULL;
struct sctp_outq *q = &asoc->outqueue;
+ struct sctp_chunk *chunk, *c;
struct sctp_transport *t;
- struct sctp_chunk *chunk;

list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
list_for_each_entry(chunk, &t->transmitted, transmitted_list)
- cb(chunk);
+ traverse_and_process();

list_for_each_entry(chunk, &q->retransmit, transmitted_list)
- cb(chunk);
+ traverse_and_process();

list_for_each_entry(chunk, &q->sacked, transmitted_list)
- cb(chunk);
+ traverse_and_process();

list_for_each_entry(chunk, &q->abandoned, transmitted_list)
- cb(chunk);
+ traverse_and_process();

list_for_each_entry(chunk, &q->out_chunk_list, list)
- cb(chunk);
+ traverse_and_process();
}

/* Verify that this is a valid address. */
@@ -7878,9 +7893,9 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
* paths won't try to lock it and then oldsk.
*/
lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
- sctp_for_each_tx_datachunk(assoc, sctp_clear_owner_w);
+ sctp_for_each_tx_datachunk(assoc, true, sctp_clear_owner_w);
sctp_assoc_migrate(assoc, newsk);
- sctp_for_each_tx_datachunk(assoc, sctp_set_owner_w);
+ sctp_for_each_tx_datachunk(assoc, false, sctp_set_owner_w);

/* If the association on the newsk is already closed before accept()
* is called, set RCV_SHUTDOWN flag.
diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c
index 794a3499e567..0dc1ab48fceb 100644
--- a/sound/soc/jz4740/jz4740-i2s.c
+++ b/sound/soc/jz4740/jz4740-i2s.c
@@ -92,7 +92,7 @@
#define JZ_AIC_I2S_STATUS_BUSY BIT(2)

#define JZ_AIC_CLK_DIV_MASK 0xf
-#define I2SDIV_DV_SHIFT 8
+#define I2SDIV_DV_SHIFT 0
#define I2SDIV_DV_MASK (0xf << I2SDIV_DV_SHIFT)
#define I2SDIV_IDV_SHIFT 8
#define I2SDIV_IDV_MASK (0xf << I2SDIV_IDV_SHIFT)
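
The jz4740 fix moves I2SDIV_DV down to bit 0; previously DV and IDV both used shift 8, so the two divider fields addressed the same register bits and writes to one clobbered the other. Printing the masks built from the patched defines makes the separation visible:

#include <stdio.h>

#define I2SDIV_DV_SHIFT  0                      /* the fixed value */
#define I2SDIV_DV_MASK   (0xf << I2SDIV_DV_SHIFT)
#define I2SDIV_IDV_SHIFT 8
#define I2SDIV_IDV_MASK  (0xf << I2SDIV_IDV_SHIFT)

int main(void)
{
    printf("DV mask  = 0x%03x\n", I2SDIV_DV_MASK);   /* 0x00f */
    printf("IDV mask = 0x%03x\n", I2SDIV_IDV_MASK);  /* 0xf00, now disjoint */
    return 0;
}
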
diff --git a/tools/accounting/getdelays.c b/tools/accounting/getdelays.c
index b5ca536e56a8..34df10a43ef0 100644
--- a/tools/accounting/getdelays.c
+++ b/tools/accounting/getdelays.c
@@ -135,7 +135,7 @@ static int send_cmd(int sd, __u16 nlmsg_type, __u32 nlmsg_pid,
msg.g.version = 0x1;
na = (struct nlattr *) GENLMSG_DATA(&msg);
na->nla_type = nla_type;
- na->nla_len = nla_len + 1 + NLA_HDRLEN;
+ na->nla_len = nla_len + NLA_HDRLEN;
memcpy(NLA_DATA(na), nla_data, nla_len);
msg.n.nlmsg_len += NLMSG_ALIGN(na->nla_len);

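The getdelays fix drops a stray "+ 1": a netlink attribute's nla_len is exactly header plus payload, and alignment padding is added separately (the NLMSG_ALIGN on the following line) when advancing through the message. A standalone check, assuming only the exported Linux netlink header:

#include <assert.h>
#include <linux/netlink.h>
#include <stdio.h>

int main(void)
{
    unsigned int payload = 6;                       /* e.g. a short string */
    unsigned short nla_len = payload + NLA_HDRLEN;  /* no "+ 1" */

    printf("nla_len=%u, on-wire stride=%u\n",
           (unsigned int)nla_len, (unsigned int)NLMSG_ALIGN(nla_len));
    assert(NLA_HDRLEN == NLA_ALIGN(sizeof(struct nlattr)));
    return 0;
}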