Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.9/0191-4.9.92-all-fixes.patch



Revision 3168 - Wed Aug 8 14:17:24 2018 UTC by niro
File size: 29379 byte(s)
-linux-4.9.92
diff --git a/Makefile b/Makefile
index db3d37e18723..3ab3b8203bf6 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 9
-SUBLEVEL = 91
+SUBLEVEL = 92
EXTRAVERSION =
NAME = Roaring Lionus

diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
index c6163874e4e7..c770ca37c9b2 100644
--- a/drivers/net/ethernet/arc/emac_rockchip.c
+++ b/drivers/net/ethernet/arc/emac_rockchip.c
@@ -169,8 +169,10 @@ static int emac_rockchip_probe(struct platform_device *pdev)
/* Optional regulator for PHY */
priv->regulator = devm_regulator_get_optional(dev, "phy");
if (IS_ERR(priv->regulator)) {
- if (PTR_ERR(priv->regulator) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ if (PTR_ERR(priv->regulator) == -EPROBE_DEFER) {
+ err = -EPROBE_DEFER;
+ goto out_clk_disable;
+ }
dev_err(dev, "no regulator found\n");
priv->regulator = NULL;
}
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 744ed6ddaf37..91fbba58d033 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -707,37 +707,33 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
struct bcm_sysport_tx_ring *ring)
{
struct net_device *ndev = priv->netdev;
- unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
unsigned int pkts_compl = 0, bytes_compl = 0;
+ unsigned int txbds_processed = 0;
struct bcm_sysport_cb *cb;
+ unsigned int txbds_ready;
+ unsigned int c_index;
u32 hw_ind;

/* Compute how many descriptors have been processed since last call */
hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
- ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);
-
- last_c_index = ring->c_index;
- num_tx_cbs = ring->size;
-
- c_index &= (num_tx_cbs - 1);
-
- if (c_index >= last_c_index)
- last_tx_cn = c_index - last_c_index;
- else
- last_tx_cn = num_tx_cbs - last_c_index + c_index;
+ txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;

netif_dbg(priv, tx_done, ndev,
- "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
- ring->index, c_index, last_tx_cn, last_c_index);
+ "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
+ ring->index, ring->c_index, c_index, txbds_ready);

- while (last_tx_cn-- > 0) {
- cb = ring->cbs + last_c_index;
+ while (txbds_processed < txbds_ready) {
+ cb = &ring->cbs[ring->clean_index];
bcm_sysport_tx_reclaim_one(priv, cb, &bytes_compl, &pkts_compl);

ring->desc_count++;
- last_c_index++;
- last_c_index &= (num_tx_cbs - 1);
+ txbds_processed++;
+
+ if (likely(ring->clean_index < ring->size - 1))
+ ring->clean_index++;
+ else
+ ring->clean_index = 0;
}

ring->c_index = c_index;
@@ -1207,6 +1203,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
ring->index = index;
ring->size = size;
+ ring->clean_index = 0;
ring->alloc_size = ring->size;
ring->desc_cpu = p;
ring->desc_count = ring->size;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 1c82e3da69a7..07b0aaa98de0 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -638,7 +638,7 @@ struct bcm_sysport_tx_ring {
unsigned int desc_count; /* Number of descriptors */
unsigned int curr_desc; /* Current descriptor */
unsigned int c_index; /* Last consumer index */
- unsigned int p_index; /* Current producer index */
+ unsigned int clean_index; /* Current clean index */
struct bcm_sysport_cb *cbs; /* Transmit control blocks */
struct dma_desc *desc_cpu; /* CPU view of the descriptor */
struct bcm_sysport_priv *priv; /* private context backpointer */
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index dd6e07c748f5..05e5b38e4891 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3533,6 +3533,8 @@ fec_drv_remove(struct platform_device *pdev)
fec_enet_mii_remove(fep);
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
of_node_put(fep->phy_node);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index a79e0a1100aa..111e1aab7d83 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -299,9 +299,9 @@ static void fill_tso_desc(struct hnae_ring *ring, void *priv,
mtu);
}

-int hns_nic_net_xmit_hw(struct net_device *ndev,
- struct sk_buff *skb,
- struct hns_nic_ring_data *ring_data)
+netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
+ struct sk_buff *skb,
+ struct hns_nic_ring_data *ring_data)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_ring *ring = ring_data->ring;
@@ -360,6 +360,10 @@ int hns_nic_net_xmit_hw(struct net_device *ndev,
dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
netdev_tx_sent_queue(dev_queue, skb->len);

+ netif_trans_update(ndev);
+ ndev->stats.tx_bytes += skb->len;
+ ndev->stats.tx_packets++;
+
wmb(); /* commit all data before submit */
assert(skb->queue_mapping < priv->ae_handle->q_num);
hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
@@ -1408,17 +1412,11 @@ static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
- int ret;

assert(skb->queue_mapping < ndev->ae_handle->q_num);
- ret = hns_nic_net_xmit_hw(ndev, skb,
- &tx_ring_data(priv, skb->queue_mapping));
- if (ret == NETDEV_TX_OK) {
- netif_trans_update(ndev);
- ndev->stats.tx_bytes += skb->len;
- ndev->stats.tx_packets++;
- }
- return (netdev_tx_t)ret;
+
+ return hns_nic_net_xmit_hw(ndev, skb,
+ &tx_ring_data(priv, skb->queue_mapping));
}

static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
index 5b412de350aa..7bc6a6ecd666 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
@@ -91,8 +91,8 @@ void hns_ethtool_set_ops(struct net_device *ndev);
void hns_nic_net_reset(struct net_device *ndev);
void hns_nic_net_reinit(struct net_device *netdev);
int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h);
-int hns_nic_net_xmit_hw(struct net_device *ndev,
- struct sk_buff *skb,
- struct hns_nic_ring_data *ring_data);
+netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
+ struct sk_buff *skb,
+ struct hns_nic_ring_data *ring_data);

#endif /**__HNS_ENET_H */
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 3f1971d485f3..2bd1282735b0 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -901,7 +901,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
/* set speed_in input in case RMII mode is used in 100Mbps */
if (phy->speed == 100)
mac_control |= BIT(15);
- else if (phy->speed == 10)
+ /* in band mode only works in 10Mbps RGMII mode */
+ else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
mac_control |= BIT(18); /* In Band mode */

if (priv->rx_pause)
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 114457921890..1e4969d90f1a 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -255,7 +255,7 @@ struct ppp_net {
/* Prototypes. */
static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
struct file *file, unsigned int cmd, unsigned long arg);
-static void ppp_xmit_process(struct ppp *ppp);
+static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb);
static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
static void ppp_push(struct ppp *ppp);
static void ppp_channel_push(struct channel *pch);
@@ -511,13 +511,12 @@ static ssize_t ppp_write(struct file *file, const char __user *buf,
goto out;
}

- skb_queue_tail(&pf->xq, skb);
-
switch (pf->kind) {
case INTERFACE:
- ppp_xmit_process(PF_TO_PPP(pf));
+ ppp_xmit_process(PF_TO_PPP(pf), skb);
break;
case CHANNEL:
+ skb_queue_tail(&pf->xq, skb);
ppp_channel_push(PF_TO_CHANNEL(pf));
break;
}
@@ -1261,8 +1260,8 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
put_unaligned_be16(proto, pp);

skb_scrub_packet(skb, !net_eq(ppp->ppp_net, dev_net(dev)));
- skb_queue_tail(&ppp->file.xq, skb);
- ppp_xmit_process(ppp);
+ ppp_xmit_process(ppp, skb);
+
return NETDEV_TX_OK;

outf:
@@ -1416,13 +1415,14 @@ static void ppp_setup(struct net_device *dev)
*/

/* Called to do any work queued up on the transmit side that can now be done */
-static void __ppp_xmit_process(struct ppp *ppp)
+static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
{
- struct sk_buff *skb;
-
ppp_xmit_lock(ppp);
if (!ppp->closing) {
ppp_push(ppp);
+
+ if (skb)
+ skb_queue_tail(&ppp->file.xq, skb);
while (!ppp->xmit_pending &&
(skb = skb_dequeue(&ppp->file.xq)))
ppp_send_frame(ppp, skb);
@@ -1436,7 +1436,7 @@ static void __ppp_xmit_process(struct ppp *ppp)
ppp_xmit_unlock(ppp);
}

-static void ppp_xmit_process(struct ppp *ppp)
+static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
{
local_bh_disable();

@@ -1444,7 +1444,7 @@ static void ppp_xmit_process(struct ppp *ppp)
goto err;

(*this_cpu_ptr(ppp->xmit_recursion))++;
- __ppp_xmit_process(ppp);
+ __ppp_xmit_process(ppp, skb);
(*this_cpu_ptr(ppp->xmit_recursion))--;

local_bh_enable();
@@ -1454,6 +1454,8 @@ static void ppp_xmit_process(struct ppp *ppp)
err:
local_bh_enable();

+ kfree_skb(skb);
+
if (net_ratelimit())
netdev_err(ppp->dev, "recursion detected\n");
}
@@ -1938,7 +1940,7 @@ static void __ppp_channel_push(struct channel *pch)
if (skb_queue_empty(&pch->file.xq)) {
ppp = pch->ppp;
if (ppp)
- __ppp_xmit_process(ppp);
+ __ppp_xmit_process(ppp, NULL);
}
}

diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 26681707fc7a..a0a9c9d39f01 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2403,7 +2403,7 @@ static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq,
if (!nlh) {
err = __send_and_alloc_skb(&skb, team, portid, send_func);
if (err)
- goto errout;
+ return err;
goto send_done;
}

@@ -2688,7 +2688,7 @@ static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq,
if (!nlh) {
err = __send_and_alloc_skb(&skb, team, portid, send_func);
if (err)
- goto errout;
+ return err;
goto send_done;
}

diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index cc28dda322b5..283416aefa56 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -522,8 +522,7 @@ static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
queue == card->qdio.no_in_queues - 1;
}

-
-static int qeth_issue_next_read(struct qeth_card *card)
+static int __qeth_issue_next_read(struct qeth_card *card)
{
int rc;
struct qeth_cmd_buffer *iob;
@@ -554,6 +553,17 @@ static int qeth_issue_next_read(struct qeth_card *card)
return rc;
}

+static int qeth_issue_next_read(struct qeth_card *card)
+{
+ int ret;
+
+ spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
+ ret = __qeth_issue_next_read(card);
+ spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
+
+ return ret;
+}
+
static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
{
struct qeth_reply *reply;
@@ -957,7 +967,7 @@ void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
spin_lock_irqsave(&card->thread_mask_lock, flags);
card->thread_running_mask &= ~thread;
spin_unlock_irqrestore(&card->thread_mask_lock, flags);
- wake_up(&card->wait_q);
+ wake_up_all(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);

@@ -1161,6 +1171,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
}
rc = qeth_get_problem(cdev, irb);
if (rc) {
+ card->read_or_write_problem = 1;
qeth_clear_ipacmd_list(card);
qeth_schedule_recovery(card);
goto out;
@@ -1179,7 +1190,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
return;
if (channel == &card->read &&
channel->state == CH_STATE_UP)
- qeth_issue_next_read(card);
+ __qeth_issue_next_read(card);

iob = channel->iob;
index = channel->buf_no;
@@ -4989,8 +5000,6 @@ static void qeth_core_free_card(struct qeth_card *card)
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
qeth_clean_channel(&card->read);
qeth_clean_channel(&card->write);
- if (card->dev)
- free_netdev(card->dev);
qeth_free_qdio_buffers(card);
unregister_service_level(&card->qeth_service_level);
kfree(card);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 5082dfeacb95..e94e9579914e 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1057,8 +1057,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
qeth_l2_set_offline(cgdev);

if (card->dev) {
- netif_napi_del(&card->napi);
unregister_netdev(card->dev);
+ free_netdev(card->dev);
card->dev = NULL;
}
return;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index a668e6b71a29..4ca161bdc696 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3192,8 +3192,8 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
qeth_l3_set_offline(cgdev);

if (card->dev) {
- netif_napi_del(&card->napi);
unregister_netdev(card->dev);
+ free_netdev(card->dev);
card->dev = NULL;
}

diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 7592ac8514d2..f61b37109e5c 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -2064,11 +2064,12 @@ sg_get_rq_mark(Sg_fd * sfp, int pack_id)
if ((1 == resp->done) && (!resp->sg_io_owned) &&
((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
resp->done = 2; /* guard against other readers */
- break;
+ write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ return resp;
}
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
- return resp;
+ return NULL;
}

/* always adds to end of list */
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 119054bc922b..2caacd9d2526 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -2429,39 +2429,21 @@ struct cgr_comp {
struct completion completion;
};

-static int qman_delete_cgr_thread(void *p)
+static void qman_delete_cgr_smp_call(void *p)
{
- struct cgr_comp *cgr_comp = (struct cgr_comp *)p;
- int ret;
-
- ret = qman_delete_cgr(cgr_comp->cgr);
- complete(&cgr_comp->completion);
-
- return ret;
+ qman_delete_cgr((struct qman_cgr *)p);
}

void qman_delete_cgr_safe(struct qman_cgr *cgr)
{
- struct task_struct *thread;
- struct cgr_comp cgr_comp;
-
preempt_disable();
if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
- init_completion(&cgr_comp.completion);
- cgr_comp.cgr = cgr;
- thread = kthread_create(qman_delete_cgr_thread, &cgr_comp,
- "cgr_del");
-
- if (IS_ERR(thread))
- goto out;
-
- kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]);
- wake_up_process(thread);
- wait_for_completion(&cgr_comp.completion);
+ smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
+ qman_delete_cgr_smp_call, cgr, true);
preempt_enable();
return;
}
-out:
+
qman_delete_cgr(cgr);
preempt_enable();
}
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 6fb1c34cf805..1619a3213af5 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -609,13 +609,13 @@ struct sock_cgroup_data {
* updaters and return part of the previous pointer as the prioidx or
* classid. Such races are short-lived and the result isn't critical.
*/
-static inline u16 sock_cgroup_prioidx(struct sock_cgroup_data *skcd)
+static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
{
/* fallback to 1 which is always the ID of the root cgroup */
return (skcd->is_data & 1) ? skcd->prioidx : 1;
}

-static inline u32 sock_cgroup_classid(struct sock_cgroup_data *skcd)
+static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
{
/* fallback to 0 which is the unconfigured default classid */
return (skcd->is_data & 1) ? skcd->classid : 0;
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 5c132d3188be..85d1ffc90285 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -706,8 +706,10 @@ static inline void *__rhashtable_insert_fast(
if (!key ||
(params.obj_cmpfn ?
params.obj_cmpfn(&arg, rht_obj(ht, head)) :
- rhashtable_compare(&arg, rht_obj(ht, head))))
+ rhashtable_compare(&arg, rht_obj(ht, head)))) {
+ pprev = &head->next;
continue;
+ }

data = rht_obj(ht, head);

diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index f18fc1a0321f..538f3c4458b0 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -675,6 +675,16 @@ static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
*to_free = skb;
}

+static inline void __qdisc_drop_all(struct sk_buff *skb,
+ struct sk_buff **to_free)
+{
+ if (skb->prev)
+ skb->prev->next = *to_free;
+ else
+ skb->next = *to_free;
+ *to_free = skb;
+}
+
static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
struct qdisc_skb_head *qh,
struct sk_buff **to_free)
@@ -795,6 +805,15 @@ static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
return NET_XMIT_DROP;
}

+static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
+ struct sk_buff **to_free)
+{
+ __qdisc_drop_all(skb, to_free);
+ qdisc_qstats_drop(sch);
+
+ return NET_XMIT_DROP;
+}
+
/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
long it will take to send a packet given its size.
*/
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index ed18aa4dceab..ea41820ab12e 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1210,10 +1210,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
* set the trigger type must match. Also all must
* agree on ONESHOT.
*/
- unsigned int oldtype = irqd_get_trigger_type(&desc->irq_data);
-
if (!((old->flags & new->flags) & IRQF_SHARED) ||
- (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
+ ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
((old->flags ^ new->flags) & IRQF_ONESHOT))
goto mismatch;

diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 32d0ad058380..895961c53385 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -448,8 +448,10 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
if (!key ||
(ht->p.obj_cmpfn ?
ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
- rhashtable_compare(&arg, rht_obj(ht, head))))
+ rhashtable_compare(&arg, rht_obj(ht, head)))) {
+ pprev = &head->next;
continue;
+ }

if (!ht->rhlist)
return rht_obj(ht, head);
diff --git a/net/core/dev.c b/net/core/dev.c
index 272f84ad16e0..07d2c93c9636 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3179,15 +3179,23 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
static void skb_update_prio(struct sk_buff *skb)
{
- struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
+ const struct netprio_map *map;
+ const struct sock *sk;
+ unsigned int prioidx;

- if (!skb->priority && skb->sk && map) {
- unsigned int prioidx =
- sock_cgroup_prioidx(&skb->sk->sk_cgrp_data);
+ if (skb->priority)
+ return;
+ map = rcu_dereference_bh(skb->dev->priomap);
+ if (!map)
+ return;
+ sk = skb_to_full_sk(skb);
+ if (!sk)
+ return;

- if (prioidx < map->priomap_len)
- skb->priority = map->priomap[prioidx];
- }
+ prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
+
+ if (prioidx < map->priomap_len)
+ skb->priority = map->priomap[prioidx];
}
#else
#define skb_update_prio(skb)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index a64515583bc1..c5ac9f48f058 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3717,7 +3717,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)

skb_queue_tail(&sk->sk_error_queue, skb);
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_data_ready(sk);
+ sk->sk_error_report(sk);
return 0;
}
EXPORT_SYMBOL(sock_queue_err_skb);
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 9d43c1f40274..ff3b058cf58c 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -789,6 +789,11 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (skb == NULL)
goto out_release;

+ if (sk->sk_state == DCCP_CLOSED) {
+ rc = -ENOTCONN;
+ goto out_discard;
+ }
+
skb_reserve(skb, sk->sk_prot->max_header);
rc = memcpy_from_msg(skb_put(skb, len), msg, len);
if (rc != 0)
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index d7efbf0dad20..83af5339e582 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -204,9 +204,13 @@ static inline void lowpan_netlink_fini(void)
static int lowpan_device_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
- struct net_device *wdev = netdev_notifier_info_to_dev(ptr);
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct wpan_dev *wpan_dev;

- if (wdev->type != ARPHRD_IEEE802154)
+ if (ndev->type != ARPHRD_IEEE802154)
+ return NOTIFY_DONE;
+ wpan_dev = ndev->ieee802154_ptr;
+ if (!wpan_dev)
return NOTIFY_DONE;

switch (event) {
@@ -215,8 +219,8 @@ static int lowpan_device_event(struct notifier_block *unused,
* also delete possible lowpan interfaces which belongs
* to the wpan interface.
*/
- if (wdev->ieee802154_ptr->lowpan_dev)
- lowpan_dellink(wdev->ieee802154_ptr->lowpan_dev, NULL);
+ if (wpan_dev->lowpan_dev)
+ lowpan_dellink(wpan_dev->lowpan_dev, NULL);
break;
default:
return NOTIFY_DONE;
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 631c0d0d7cf8..8effac0f2219 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -119,6 +119,9 @@ static void inet_frag_secret_rebuild(struct inet_frags *f)

static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
{
+ if (!hlist_unhashed(&q->list_evictor))
+ return false;
+
return q->net->low_thresh == 0 ||
frag_mem_limit(q->net) >= q->net->low_thresh;
}
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index fd1e6b8562e0..5ddd64995e73 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -242,7 +242,8 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
if (!ipv6_addr_v4mapped(&src_info->ipi6_addr))
return -EINVAL;
- ipc->oif = src_info->ipi6_ifindex;
+ if (src_info->ipi6_ifindex)
+ ipc->oif = src_info->ipi6_ifindex;
ipc->addr = src_info->ipi6_addr.s6_addr32[3];
continue;
}
@@ -272,7 +273,8 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo)))
return -EINVAL;
info = (struct in_pktinfo *)CMSG_DATA(cmsg);
- ipc->oif = info->ipi_ifindex;
+ if (info->ipi_ifindex)
+ ipc->oif = info->ipi_ifindex;
ipc->addr = info->ipi_spec_dst.s_addr;
break;
}
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 41c22cb33424..3fe80e104b58 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1516,7 +1516,8 @@ static void ndisc_fill_redirect_hdr_option(struct sk_buff *skb,
*(opt++) = (rd_len >> 3);
opt += 6;

- memcpy(opt, ipv6_hdr(orig_skb), rd_len - 8);
+ skb_copy_bits(orig_skb, skb_network_offset(orig_skb), opt,
+ rd_len - 8);
}

void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 91cbbf1c3f82..c2dfc32eb9f2 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -2418,9 +2418,11 @@ static int afiucv_iucv_init(void)
af_iucv_dev->driver = &af_iucv_driver;
err = device_register(af_iucv_dev);
if (err)
- goto out_driver;
+ goto out_iucv_dev;
return 0;

+out_iucv_dev:
+ put_device(af_iucv_dev);
out_driver:
driver_unregister(&af_iucv_driver);
out_iucv:
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 179cd9b1b1f4..63e6d08388ab 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1375,24 +1375,32 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
struct list_head *head;
int index = 0;
struct strp_callbacks cb;
- int err;
+ int err = 0;

csk = csock->sk;
if (!csk)
return -EINVAL;

+ lock_sock(csk);
+
/* Only allow TCP sockets to be attached for now */
if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) ||
- csk->sk_protocol != IPPROTO_TCP)
- return -EOPNOTSUPP;
+ csk->sk_protocol != IPPROTO_TCP) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }

/* Don't allow listeners or closed sockets */
- if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE)
- return -EOPNOTSUPP;
+ if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }

psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
- if (!psock)
- return -ENOMEM;
+ if (!psock) {
+ err = -ENOMEM;
+ goto out;
+ }

psock->mux = mux;
psock->sk = csk;
@@ -1406,7 +1414,7 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
err = strp_init(&psock->strp, csk, &cb);
if (err) {
kmem_cache_free(kcm_psockp, psock);
- return err;
+ goto out;
}

write_lock_bh(&csk->sk_callback_lock);
@@ -1418,7 +1426,8 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
write_unlock_bh(&csk->sk_callback_lock);
strp_done(&psock->strp);
kmem_cache_free(kcm_psockp, psock);
- return -EALREADY;
+ err = -EALREADY;
+ goto out;
}

psock->save_data_ready = csk->sk_data_ready;
@@ -1454,7 +1463,10 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
/* Schedule RX work in case there are already bytes queued */
strp_check_rcv(&psock->strp);

- return 0;
+out:
+ release_sock(csk);
+
+ return err;
}

static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info)
@@ -1506,6 +1518,7 @@ static void kcm_unattach(struct kcm_psock *psock)

if (WARN_ON(psock->rx_kcm)) {
write_unlock_bh(&csk->sk_callback_lock);
+ release_sock(csk);
return;
}

diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index cfc4dd8997e5..ead98e8e0b1f 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1612,9 +1612,14 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
encap = cfg->encap;

/* Quick sanity checks */
+ err = -EPROTONOSUPPORT;
+ if (sk->sk_type != SOCK_DGRAM) {
+ pr_debug("tunl %hu: fd %d wrong socket type\n",
+ tunnel_id, fd);
+ goto err;
+ }
switch (encap) {
case L2TP_ENCAPTYPE_UDP:
- err = -EPROTONOSUPPORT;
if (sk->sk_protocol != IPPROTO_UDP) {
pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
@@ -1622,7 +1627,6 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
}
break;
case L2TP_ENCAPTYPE_IP:
- err = -EPROTONOSUPPORT;
if (sk->sk_protocol != IPPROTO_L2TP) {
pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP);
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 11702016c900..9192a6143523 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -1128,7 +1128,7 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
if (!err)
delivered = true;
else if (err != -ESRCH)
- goto error;
+ return err;
return delivered ? 0 : -ESRCH;
error:
kfree_skb(skb);
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index af47bdf2f483..b6e3abe505ac 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -141,6 +141,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
break;
default:
+ ret = -EINVAL;
goto err_out;
}

diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index c73d58872cf8..e899d9eb76cb 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -513,7 +513,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
}

if (unlikely(sch->q.qlen >= sch->limit))
- return qdisc_drop(skb, sch, to_free);
+ return qdisc_drop_all(skb, sch, to_free);

qdisc_qstats_backlog_inc(sch, skb);