Contents of /trunk/kernel-alx/patches-4.14/0131-4.14.32-all-fixes.patch
Parent Directory | Revision Log
Revision 3238 -
(show annotations)
(download)
Fri Nov 9 12:14:58 2018 UTC (5 years, 10 months ago) by niro
File size: 44301 byte(s)
-added up to patches-4.14.79
1 | diff --git a/Makefile b/Makefile |
2 | index 99e31da48422..c4c681b53ff0 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,7 +1,7 @@ |
6 | # SPDX-License-Identifier: GPL-2.0 |
7 | VERSION = 4 |
8 | PATCHLEVEL = 14 |
9 | -SUBLEVEL = 31 |
10 | +SUBLEVEL = 32 |
11 | EXTRAVERSION = |
12 | NAME = Petit Gorille |
13 | |
14 | diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c |
15 | index c6163874e4e7..c770ca37c9b2 100644 |
16 | --- a/drivers/net/ethernet/arc/emac_rockchip.c |
17 | +++ b/drivers/net/ethernet/arc/emac_rockchip.c |
18 | @@ -169,8 +169,10 @@ static int emac_rockchip_probe(struct platform_device *pdev) |
19 | /* Optional regulator for PHY */ |
20 | priv->regulator = devm_regulator_get_optional(dev, "phy"); |
21 | if (IS_ERR(priv->regulator)) { |
22 | - if (PTR_ERR(priv->regulator) == -EPROBE_DEFER) |
23 | - return -EPROBE_DEFER; |
24 | + if (PTR_ERR(priv->regulator) == -EPROBE_DEFER) { |
25 | + err = -EPROBE_DEFER; |
26 | + goto out_clk_disable; |
27 | + } |
28 | dev_err(dev, "no regulator found\n"); |
29 | priv->regulator = NULL; |
30 | } |
31 | diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c |
32 | index eb441e5e2cd8..1e856e8b9a92 100644 |
33 | --- a/drivers/net/ethernet/broadcom/bcmsysport.c |
34 | +++ b/drivers/net/ethernet/broadcom/bcmsysport.c |
35 | @@ -855,10 +855,12 @@ static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring, |
36 | static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, |
37 | struct bcm_sysport_tx_ring *ring) |
38 | { |
39 | - unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs; |
40 | unsigned int pkts_compl = 0, bytes_compl = 0; |
41 | struct net_device *ndev = priv->netdev; |
42 | + unsigned int txbds_processed = 0; |
43 | struct bcm_sysport_cb *cb; |
44 | + unsigned int txbds_ready; |
45 | + unsigned int c_index; |
46 | u32 hw_ind; |
47 | |
48 | /* Clear status before servicing to reduce spurious interrupts */ |
49 | @@ -871,29 +873,23 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, |
50 | /* Compute how many descriptors have been processed since last call */ |
51 | hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index)); |
52 | c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK; |
53 | - ring->p_index = (hw_ind & RING_PROD_INDEX_MASK); |
54 | - |
55 | - last_c_index = ring->c_index; |
56 | - num_tx_cbs = ring->size; |
57 | - |
58 | - c_index &= (num_tx_cbs - 1); |
59 | - |
60 | - if (c_index >= last_c_index) |
61 | - last_tx_cn = c_index - last_c_index; |
62 | - else |
63 | - last_tx_cn = num_tx_cbs - last_c_index + c_index; |
64 | + txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK; |
65 | |
66 | netif_dbg(priv, tx_done, ndev, |
67 | - "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n", |
68 | - ring->index, c_index, last_tx_cn, last_c_index); |
69 | + "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n", |
70 | + ring->index, ring->c_index, c_index, txbds_ready); |
71 | |
72 | - while (last_tx_cn-- > 0) { |
73 | - cb = ring->cbs + last_c_index; |
74 | + while (txbds_processed < txbds_ready) { |
75 | + cb = &ring->cbs[ring->clean_index]; |
76 | bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl); |
77 | |
78 | ring->desc_count++; |
79 | - last_c_index++; |
80 | - last_c_index &= (num_tx_cbs - 1); |
81 | + txbds_processed++; |
82 | + |
83 | + if (likely(ring->clean_index < ring->size - 1)) |
84 | + ring->clean_index++; |
85 | + else |
86 | + ring->clean_index = 0; |
87 | } |
88 | |
89 | u64_stats_update_begin(&priv->syncp); |
90 | @@ -1406,6 +1402,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, |
91 | netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64); |
92 | ring->index = index; |
93 | ring->size = size; |
94 | + ring->clean_index = 0; |
95 | ring->alloc_size = ring->size; |
96 | ring->desc_cpu = p; |
97 | ring->desc_count = ring->size; |
98 | diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h |
99 | index 82e401df199e..a2006f5fc26f 100644 |
100 | --- a/drivers/net/ethernet/broadcom/bcmsysport.h |
101 | +++ b/drivers/net/ethernet/broadcom/bcmsysport.h |
102 | @@ -706,7 +706,7 @@ struct bcm_sysport_tx_ring { |
103 | unsigned int desc_count; /* Number of descriptors */ |
104 | unsigned int curr_desc; /* Current descriptor */ |
105 | unsigned int c_index; /* Last consumer index */ |
106 | - unsigned int p_index; /* Current producer index */ |
107 | + unsigned int clean_index; /* Current clean index */ |
108 | struct bcm_sysport_cb *cbs; /* Transmit control blocks */ |
109 | struct dma_desc *desc_cpu; /* CPU view of the descriptor */ |
110 | struct bcm_sysport_priv *priv; /* private context backpointer */ |
111 | diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c |
112 | index 42258060f142..4f6e9d3470d5 100644 |
113 | --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c |
114 | +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c |
115 | @@ -2022,7 +2022,6 @@ static inline int dpaa_xmit(struct dpaa_priv *priv, |
116 | } |
117 | |
118 | if (unlikely(err < 0)) { |
119 | - percpu_stats->tx_errors++; |
120 | percpu_stats->tx_fifo_errors++; |
121 | return err; |
122 | } |
123 | @@ -2292,7 +2291,6 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, |
124 | vaddr = phys_to_virt(addr); |
125 | prefetch(vaddr + qm_fd_get_offset(fd)); |
126 | |
127 | - fd_format = qm_fd_get_format(fd); |
128 | /* The only FD types that we may receive are contig and S/G */ |
129 | WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg)); |
130 | |
131 | @@ -2325,8 +2323,10 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, |
132 | |
133 | skb_len = skb->len; |
134 | |
135 | - if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) |
136 | + if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) { |
137 | + percpu_stats->rx_dropped++; |
138 | return qman_cb_dqrr_consume; |
139 | + } |
140 | |
141 | percpu_stats->rx_packets++; |
142 | percpu_stats->rx_bytes += skb_len; |
143 | @@ -2860,7 +2860,7 @@ static int dpaa_remove(struct platform_device *pdev) |
144 | struct device *dev; |
145 | int err; |
146 | |
147 | - dev = &pdev->dev; |
148 | + dev = pdev->dev.parent; |
149 | net_dev = dev_get_drvdata(dev); |
150 | |
151 | priv = netdev_priv(net_dev); |
152 | diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c |
153 | index 311539c6625f..eb2ea231c7ca 100644 |
154 | --- a/drivers/net/ethernet/freescale/fec_main.c |
155 | +++ b/drivers/net/ethernet/freescale/fec_main.c |
156 | @@ -3565,6 +3565,8 @@ fec_drv_remove(struct platform_device *pdev) |
157 | fec_enet_mii_remove(fep); |
158 | if (fep->reg_phy) |
159 | regulator_disable(fep->reg_phy); |
160 | + pm_runtime_put(&pdev->dev); |
161 | + pm_runtime_disable(&pdev->dev); |
162 | if (of_phy_is_fixed_link(np)) |
163 | of_phy_deregister_fixed_link(np); |
164 | of_node_put(fep->phy_node); |
165 | diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c |
166 | index 93728c694e6d..0a9adc5962fb 100644 |
167 | --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c |
168 | +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c |
169 | @@ -385,13 +385,13 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = { |
170 | |
171 | static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = { |
172 | MLXSW_SP_CPU_PORT_SB_CM, |
173 | + MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), |
174 | + MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), |
175 | + MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), |
176 | + MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), |
177 | + MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), |
178 | MLXSW_SP_CPU_PORT_SB_CM, |
179 | - MLXSW_SP_CPU_PORT_SB_CM, |
180 | - MLXSW_SP_CPU_PORT_SB_CM, |
181 | - MLXSW_SP_CPU_PORT_SB_CM, |
182 | - MLXSW_SP_CPU_PORT_SB_CM, |
183 | - MLXSW_SP_CPU_PORT_SB_CM, |
184 | - MLXSW_SP_SB_CM(10000, 0, 0), |
185 | + MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), |
186 | MLXSW_SP_CPU_PORT_SB_CM, |
187 | MLXSW_SP_CPU_PORT_SB_CM, |
188 | MLXSW_SP_CPU_PORT_SB_CM, |
189 | diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c |
190 | index 9d989c96278c..e41f28602535 100644 |
191 | --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c |
192 | +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c |
193 | @@ -1663,6 +1663,13 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn, |
194 | iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen); |
195 | |
196 | if (eth_type == ETH_P_IP) { |
197 | + if (iph->protocol != IPPROTO_TCP) { |
198 | + DP_NOTICE(p_hwfn, |
199 | + "Unexpected ip protocol on ll2 %x\n", |
200 | + iph->protocol); |
201 | + return -EINVAL; |
202 | + } |
203 | + |
204 | cm_info->local_ip[0] = ntohl(iph->daddr); |
205 | cm_info->remote_ip[0] = ntohl(iph->saddr); |
206 | cm_info->ip_version = TCP_IPV4; |
207 | @@ -1671,6 +1678,14 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn, |
208 | *payload_len = ntohs(iph->tot_len) - ip_hlen; |
209 | } else if (eth_type == ETH_P_IPV6) { |
210 | ip6h = (struct ipv6hdr *)iph; |
211 | + |
212 | + if (ip6h->nexthdr != IPPROTO_TCP) { |
213 | + DP_NOTICE(p_hwfn, |
214 | + "Unexpected ip protocol on ll2 %x\n", |
215 | + iph->protocol); |
216 | + return -EINVAL; |
217 | + } |
218 | + |
219 | for (i = 0; i < 4; i++) { |
220 | cm_info->local_ip[i] = |
221 | ntohl(ip6h->daddr.in6_u.u6_addr32[i]); |
222 | diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c |
223 | index e5ee9f274a71..6eab2c632c75 100644 |
224 | --- a/drivers/net/ethernet/qlogic/qede/qede_main.c |
225 | +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c |
226 | @@ -2066,8 +2066,6 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode, |
227 | link_params.link_up = true; |
228 | edev->ops->common->set_link(edev->cdev, &link_params); |
229 | |
230 | - qede_rdma_dev_event_open(edev); |
231 | - |
232 | edev->state = QEDE_STATE_OPEN; |
233 | |
234 | DP_INFO(edev, "Ending successfully qede load\n"); |
235 | @@ -2168,12 +2166,14 @@ static void qede_link_update(void *dev, struct qed_link_output *link) |
236 | DP_NOTICE(edev, "Link is up\n"); |
237 | netif_tx_start_all_queues(edev->ndev); |
238 | netif_carrier_on(edev->ndev); |
239 | + qede_rdma_dev_event_open(edev); |
240 | } |
241 | } else { |
242 | if (netif_carrier_ok(edev->ndev)) { |
243 | DP_NOTICE(edev, "Link is down\n"); |
244 | netif_tx_disable(edev->ndev); |
245 | netif_carrier_off(edev->ndev); |
246 | + qede_rdma_dev_event_close(edev); |
247 | } |
248 | } |
249 | } |
250 | diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c |
251 | index 14b646b3b084..a5bb7b19040e 100644 |
252 | --- a/drivers/net/ethernet/ti/cpsw.c |
253 | +++ b/drivers/net/ethernet/ti/cpsw.c |
254 | @@ -996,7 +996,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave, |
255 | /* set speed_in input in case RMII mode is used in 100Mbps */ |
256 | if (phy->speed == 100) |
257 | mac_control |= BIT(15); |
258 | - else if (phy->speed == 10) |
259 | + /* in band mode only works in 10Mbps RGMII mode */ |
260 | + else if ((phy->speed == 10) && phy_interface_is_rgmii(phy)) |
261 | mac_control |= BIT(18); /* In Band mode */ |
262 | |
263 | if (priv->rx_pause) |
264 | diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c |
265 | index 176fc0906bfe..0f35597553f4 100644 |
266 | --- a/drivers/net/macvlan.c |
267 | +++ b/drivers/net/macvlan.c |
268 | @@ -1037,7 +1037,7 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev, |
269 | lowerdev_features &= (features | ~NETIF_F_LRO); |
270 | features = netdev_increment_features(lowerdev_features, features, mask); |
271 | features |= ALWAYS_ON_FEATURES; |
272 | - features &= ~NETIF_F_NETNS_LOCAL; |
273 | + features &= (ALWAYS_ON_FEATURES | MACVLAN_FEATURES); |
274 | |
275 | return features; |
276 | } |
277 | diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c |
278 | index 39de77a8bb63..dba6d17ad885 100644 |
279 | --- a/drivers/net/phy/phy.c |
280 | +++ b/drivers/net/phy/phy.c |
281 | @@ -614,6 +614,91 @@ static void phy_error(struct phy_device *phydev) |
282 | phy_trigger_machine(phydev, false); |
283 | } |
284 | |
285 | +/** |
286 | + * phy_disable_interrupts - Disable the PHY interrupts from the PHY side |
287 | + * @phydev: target phy_device struct |
288 | + */ |
289 | +static int phy_disable_interrupts(struct phy_device *phydev) |
290 | +{ |
291 | + int err; |
292 | + |
293 | + /* Disable PHY interrupts */ |
294 | + err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED); |
295 | + if (err) |
296 | + goto phy_err; |
297 | + |
298 | + /* Clear the interrupt */ |
299 | + err = phy_clear_interrupt(phydev); |
300 | + if (err) |
301 | + goto phy_err; |
302 | + |
303 | + return 0; |
304 | + |
305 | +phy_err: |
306 | + phy_error(phydev); |
307 | + |
308 | + return err; |
309 | +} |
310 | + |
311 | +/** |
312 | + * phy_change - Called by the phy_interrupt to handle PHY changes |
313 | + * @phydev: phy_device struct that interrupted |
314 | + */ |
315 | +static irqreturn_t phy_change(struct phy_device *phydev) |
316 | +{ |
317 | + if (phy_interrupt_is_valid(phydev)) { |
318 | + if (phydev->drv->did_interrupt && |
319 | + !phydev->drv->did_interrupt(phydev)) |
320 | + goto ignore; |
321 | + |
322 | + if (phy_disable_interrupts(phydev)) |
323 | + goto phy_err; |
324 | + } |
325 | + |
326 | + mutex_lock(&phydev->lock); |
327 | + if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state)) |
328 | + phydev->state = PHY_CHANGELINK; |
329 | + mutex_unlock(&phydev->lock); |
330 | + |
331 | + if (phy_interrupt_is_valid(phydev)) { |
332 | + atomic_dec(&phydev->irq_disable); |
333 | + enable_irq(phydev->irq); |
334 | + |
335 | + /* Reenable interrupts */ |
336 | + if (PHY_HALTED != phydev->state && |
337 | + phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED)) |
338 | + goto irq_enable_err; |
339 | + } |
340 | + |
341 | + /* reschedule state queue work to run as soon as possible */ |
342 | + phy_trigger_machine(phydev, true); |
343 | + return IRQ_HANDLED; |
344 | + |
345 | +ignore: |
346 | + atomic_dec(&phydev->irq_disable); |
347 | + enable_irq(phydev->irq); |
348 | + return IRQ_NONE; |
349 | + |
350 | +irq_enable_err: |
351 | + disable_irq(phydev->irq); |
352 | + atomic_inc(&phydev->irq_disable); |
353 | +phy_err: |
354 | + phy_error(phydev); |
355 | + return IRQ_NONE; |
356 | +} |
357 | + |
358 | +/** |
359 | + * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes |
360 | + * @work: work_struct that describes the work to be done |
361 | + */ |
362 | +void phy_change_work(struct work_struct *work) |
363 | +{ |
364 | + struct phy_device *phydev = |
365 | + container_of(work, struct phy_device, phy_queue); |
366 | + |
367 | + phy_change(phydev); |
368 | +} |
369 | + |
370 | /** |
371 | * phy_interrupt - PHY interrupt handler |
372 | * @irq: interrupt line |
373 | @@ -632,9 +717,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat) |
374 | disable_irq_nosync(irq); |
375 | atomic_inc(&phydev->irq_disable); |
376 | |
377 | - phy_change(phydev); |
378 | - |
379 | - return IRQ_HANDLED; |
380 | + return phy_change(phydev); |
381 | } |
382 | |
383 | /** |
384 | @@ -651,32 +734,6 @@ static int phy_enable_interrupts(struct phy_device *phydev) |
385 | return phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED); |
386 | } |
387 | |
388 | -/** |
389 | - * phy_disable_interrupts - Disable the PHY interrupts from the PHY side |
390 | - * @phydev: target phy_device struct |
391 | - */ |
392 | -static int phy_disable_interrupts(struct phy_device *phydev) |
393 | -{ |
394 | - int err; |
395 | - |
396 | - /* Disable PHY interrupts */ |
397 | - err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED); |
398 | - if (err) |
399 | - goto phy_err; |
400 | - |
401 | - /* Clear the interrupt */ |
402 | - err = phy_clear_interrupt(phydev); |
403 | - if (err) |
404 | - goto phy_err; |
405 | - |
406 | - return 0; |
407 | - |
408 | -phy_err: |
409 | - phy_error(phydev); |
410 | - |
411 | - return err; |
412 | -} |
413 | - |
414 | /** |
415 | * phy_start_interrupts - request and enable interrupts for a PHY device |
416 | * @phydev: target phy_device struct |
417 | @@ -727,64 +784,6 @@ int phy_stop_interrupts(struct phy_device *phydev) |
418 | } |
419 | EXPORT_SYMBOL(phy_stop_interrupts); |
420 | |
421 | -/** |
422 | - * phy_change - Called by the phy_interrupt to handle PHY changes |
423 | - * @phydev: phy_device struct that interrupted |
424 | - */ |
425 | -void phy_change(struct phy_device *phydev) |
426 | -{ |
427 | - if (phy_interrupt_is_valid(phydev)) { |
428 | - if (phydev->drv->did_interrupt && |
429 | - !phydev->drv->did_interrupt(phydev)) |
430 | - goto ignore; |
431 | - |
432 | - if (phy_disable_interrupts(phydev)) |
433 | - goto phy_err; |
434 | - } |
435 | - |
436 | - mutex_lock(&phydev->lock); |
437 | - if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state)) |
438 | - phydev->state = PHY_CHANGELINK; |
439 | - mutex_unlock(&phydev->lock); |
440 | - |
441 | - if (phy_interrupt_is_valid(phydev)) { |
442 | - atomic_dec(&phydev->irq_disable); |
443 | - enable_irq(phydev->irq); |
444 | - |
445 | - /* Reenable interrupts */ |
446 | - if (PHY_HALTED != phydev->state && |
447 | - phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED)) |
448 | - goto irq_enable_err; |
449 | - } |
450 | - |
451 | - /* reschedule state queue work to run as soon as possible */ |
452 | - phy_trigger_machine(phydev, true); |
453 | - return; |
454 | - |
455 | -ignore: |
456 | - atomic_dec(&phydev->irq_disable); |
457 | - enable_irq(phydev->irq); |
458 | - return; |
459 | - |
460 | -irq_enable_err: |
461 | - disable_irq(phydev->irq); |
462 | - atomic_inc(&phydev->irq_disable); |
463 | -phy_err: |
464 | - phy_error(phydev); |
465 | -} |
466 | - |
467 | -/** |
468 | - * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes |
469 | - * @work: work_struct that describes the work to be done |
470 | - */ |
471 | -void phy_change_work(struct work_struct *work) |
472 | -{ |
473 | - struct phy_device *phydev = |
474 | - container_of(work, struct phy_device, phy_queue); |
475 | - |
476 | - phy_change(phydev); |
477 | -} |
478 | - |
479 | /** |
480 | * phy_stop - Bring down the PHY link, and stop checking the status |
481 | * @phydev: target phy_device struct |
482 | diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c |
483 | index d312b314825e..a1e7ea4d4b16 100644 |
484 | --- a/drivers/net/phy/phy_device.c |
485 | +++ b/drivers/net/phy/phy_device.c |
486 | @@ -999,10 +999,17 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, |
487 | err = sysfs_create_link(&phydev->mdio.dev.kobj, &dev->dev.kobj, |
488 | "attached_dev"); |
489 | if (!err) { |
490 | - err = sysfs_create_link(&dev->dev.kobj, &phydev->mdio.dev.kobj, |
491 | - "phydev"); |
492 | - if (err) |
493 | - goto error; |
494 | + err = sysfs_create_link_nowarn(&dev->dev.kobj, |
495 | + &phydev->mdio.dev.kobj, |
496 | + "phydev"); |
497 | + if (err) { |
498 | + dev_err(&dev->dev, "could not add device link to %s err %d\n", |
499 | + kobject_name(&phydev->mdio.dev.kobj), |
500 | + err); |
501 | + /* non-fatal - some net drivers can use one netdevice |
502 | + * with more then one phy |
503 | + */ |
504 | + } |
505 | |
506 | phydev->sysfs_links = true; |
507 | } |
508 | diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c |
509 | index 38cd2e8fae23..34b24d7e1e2f 100644 |
510 | --- a/drivers/net/ppp/ppp_generic.c |
511 | +++ b/drivers/net/ppp/ppp_generic.c |
512 | @@ -256,7 +256,7 @@ struct ppp_net { |
513 | /* Prototypes. */ |
514 | static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, |
515 | struct file *file, unsigned int cmd, unsigned long arg); |
516 | -static void ppp_xmit_process(struct ppp *ppp); |
517 | +static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb); |
518 | static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb); |
519 | static void ppp_push(struct ppp *ppp); |
520 | static void ppp_channel_push(struct channel *pch); |
521 | @@ -512,13 +512,12 @@ static ssize_t ppp_write(struct file *file, const char __user *buf, |
522 | goto out; |
523 | } |
524 | |
525 | - skb_queue_tail(&pf->xq, skb); |
526 | - |
527 | switch (pf->kind) { |
528 | case INTERFACE: |
529 | - ppp_xmit_process(PF_TO_PPP(pf)); |
530 | + ppp_xmit_process(PF_TO_PPP(pf), skb); |
531 | break; |
532 | case CHANNEL: |
533 | + skb_queue_tail(&pf->xq, skb); |
534 | ppp_channel_push(PF_TO_CHANNEL(pf)); |
535 | break; |
536 | } |
537 | @@ -1264,8 +1263,8 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev) |
538 | put_unaligned_be16(proto, pp); |
539 | |
540 | skb_scrub_packet(skb, !net_eq(ppp->ppp_net, dev_net(dev))); |
541 | - skb_queue_tail(&ppp->file.xq, skb); |
542 | - ppp_xmit_process(ppp); |
543 | + ppp_xmit_process(ppp, skb); |
544 | + |
545 | return NETDEV_TX_OK; |
546 | |
547 | outf: |
548 | @@ -1417,13 +1416,14 @@ static void ppp_setup(struct net_device *dev) |
549 | */ |
550 | |
551 | /* Called to do any work queued up on the transmit side that can now be done */ |
552 | -static void __ppp_xmit_process(struct ppp *ppp) |
553 | +static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb) |
554 | { |
555 | - struct sk_buff *skb; |
556 | - |
557 | ppp_xmit_lock(ppp); |
558 | if (!ppp->closing) { |
559 | ppp_push(ppp); |
560 | + |
561 | + if (skb) |
562 | + skb_queue_tail(&ppp->file.xq, skb); |
563 | while (!ppp->xmit_pending && |
564 | (skb = skb_dequeue(&ppp->file.xq))) |
565 | ppp_send_frame(ppp, skb); |
566 | @@ -1437,7 +1437,7 @@ static void __ppp_xmit_process(struct ppp *ppp) |
567 | ppp_xmit_unlock(ppp); |
568 | } |
569 | |
570 | -static void ppp_xmit_process(struct ppp *ppp) |
571 | +static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb) |
572 | { |
573 | local_bh_disable(); |
574 | |
575 | @@ -1445,7 +1445,7 @@ static void ppp_xmit_process(struct ppp *ppp) |
576 | goto err; |
577 | |
578 | (*this_cpu_ptr(ppp->xmit_recursion))++; |
579 | - __ppp_xmit_process(ppp); |
580 | + __ppp_xmit_process(ppp, skb); |
581 | (*this_cpu_ptr(ppp->xmit_recursion))--; |
582 | |
583 | local_bh_enable(); |
584 | @@ -1455,6 +1455,8 @@ static void ppp_xmit_process(struct ppp *ppp) |
585 | err: |
586 | local_bh_enable(); |
587 | |
588 | + kfree_skb(skb); |
589 | + |
590 | if (net_ratelimit()) |
591 | netdev_err(ppp->dev, "recursion detected\n"); |
592 | } |
593 | @@ -1939,7 +1941,7 @@ static void __ppp_channel_push(struct channel *pch) |
594 | if (skb_queue_empty(&pch->file.xq)) { |
595 | ppp = pch->ppp; |
596 | if (ppp) |
597 | - __ppp_xmit_process(ppp); |
598 | + __ppp_xmit_process(ppp, NULL); |
599 | } |
600 | } |
601 | |
602 | diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c |
603 | index ae53e899259f..23cd41c82210 100644 |
604 | --- a/drivers/net/team/team.c |
605 | +++ b/drivers/net/team/team.c |
606 | @@ -2394,7 +2394,7 @@ static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq, |
607 | if (!nlh) { |
608 | err = __send_and_alloc_skb(&skb, team, portid, send_func); |
609 | if (err) |
610 | - goto errout; |
611 | + return err; |
612 | goto send_done; |
613 | } |
614 | |
615 | @@ -2680,7 +2680,7 @@ static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq, |
616 | if (!nlh) { |
617 | err = __send_and_alloc_skb(&skb, team, portid, send_func); |
618 | if (err) |
619 | - goto errout; |
620 | + return err; |
621 | goto send_done; |
622 | } |
623 | |
624 | diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c |
625 | index 145b57762d8f..939b5b5e97ef 100644 |
626 | --- a/drivers/s390/net/qeth_core_main.c |
627 | +++ b/drivers/s390/net/qeth_core_main.c |
628 | @@ -526,8 +526,7 @@ static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue) |
629 | queue == card->qdio.no_in_queues - 1; |
630 | } |
631 | |
632 | - |
633 | -static int qeth_issue_next_read(struct qeth_card *card) |
634 | +static int __qeth_issue_next_read(struct qeth_card *card) |
635 | { |
636 | int rc; |
637 | struct qeth_cmd_buffer *iob; |
638 | @@ -558,6 +557,17 @@ static int qeth_issue_next_read(struct qeth_card *card) |
639 | return rc; |
640 | } |
641 | |
642 | +static int qeth_issue_next_read(struct qeth_card *card) |
643 | +{ |
644 | + int ret; |
645 | + |
646 | + spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card))); |
647 | + ret = __qeth_issue_next_read(card); |
648 | + spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card))); |
649 | + |
650 | + return ret; |
651 | +} |
652 | + |
653 | static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card) |
654 | { |
655 | struct qeth_reply *reply; |
656 | @@ -961,7 +971,7 @@ void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread) |
657 | spin_lock_irqsave(&card->thread_mask_lock, flags); |
658 | card->thread_running_mask &= ~thread; |
659 | spin_unlock_irqrestore(&card->thread_mask_lock, flags); |
660 | - wake_up(&card->wait_q); |
661 | + wake_up_all(&card->wait_q); |
662 | } |
663 | EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit); |
664 | |
665 | @@ -1165,6 +1175,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, |
666 | } |
667 | rc = qeth_get_problem(cdev, irb); |
668 | if (rc) { |
669 | + card->read_or_write_problem = 1; |
670 | qeth_clear_ipacmd_list(card); |
671 | qeth_schedule_recovery(card); |
672 | goto out; |
673 | @@ -1183,7 +1194,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, |
674 | return; |
675 | if (channel == &card->read && |
676 | channel->state == CH_STATE_UP) |
677 | - qeth_issue_next_read(card); |
678 | + __qeth_issue_next_read(card); |
679 | |
680 | iob = channel->iob; |
681 | index = channel->buf_no; |
682 | @@ -5061,8 +5072,6 @@ static void qeth_core_free_card(struct qeth_card *card) |
683 | QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); |
684 | qeth_clean_channel(&card->read); |
685 | qeth_clean_channel(&card->write); |
686 | - if (card->dev) |
687 | - free_netdev(card->dev); |
688 | qeth_free_qdio_buffers(card); |
689 | unregister_service_level(&card->qeth_service_level); |
690 | kfree(card); |
691 | diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c |
692 | index 5a973ebcb13c..521293b1f4fa 100644 |
693 | --- a/drivers/s390/net/qeth_l2_main.c |
694 | +++ b/drivers/s390/net/qeth_l2_main.c |
695 | @@ -935,8 +935,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev) |
696 | qeth_l2_set_offline(cgdev); |
697 | |
698 | if (card->dev) { |
699 | - netif_napi_del(&card->napi); |
700 | unregister_netdev(card->dev); |
701 | + free_netdev(card->dev); |
702 | card->dev = NULL; |
703 | } |
704 | return; |
705 | diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c |
706 | index 96576e729222..1c62cbbaa66f 100644 |
707 | --- a/drivers/s390/net/qeth_l3_main.c |
708 | +++ b/drivers/s390/net/qeth_l3_main.c |
709 | @@ -3046,8 +3046,8 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) |
710 | qeth_l3_set_offline(cgdev); |
711 | |
712 | if (card->dev) { |
713 | - netif_napi_del(&card->napi); |
714 | unregister_netdev(card->dev); |
715 | + free_netdev(card->dev); |
716 | card->dev = NULL; |
717 | } |
718 | |
719 | diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c |
720 | index 18eefc3f1abe..0c6065dba48a 100644 |
721 | --- a/drivers/soc/fsl/qbman/qman.c |
722 | +++ b/drivers/soc/fsl/qbman/qman.c |
723 | @@ -2414,39 +2414,21 @@ struct cgr_comp { |
724 | struct completion completion; |
725 | }; |
726 | |
727 | -static int qman_delete_cgr_thread(void *p) |
728 | +static void qman_delete_cgr_smp_call(void *p) |
729 | { |
730 | - struct cgr_comp *cgr_comp = (struct cgr_comp *)p; |
731 | - int ret; |
732 | - |
733 | - ret = qman_delete_cgr(cgr_comp->cgr); |
734 | - complete(&cgr_comp->completion); |
735 | - |
736 | - return ret; |
737 | + qman_delete_cgr((struct qman_cgr *)p); |
738 | } |
739 | |
740 | void qman_delete_cgr_safe(struct qman_cgr *cgr) |
741 | { |
742 | - struct task_struct *thread; |
743 | - struct cgr_comp cgr_comp; |
744 | - |
745 | preempt_disable(); |
746 | if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) { |
747 | - init_completion(&cgr_comp.completion); |
748 | - cgr_comp.cgr = cgr; |
749 | - thread = kthread_create(qman_delete_cgr_thread, &cgr_comp, |
750 | - "cgr_del"); |
751 | - |
752 | - if (IS_ERR(thread)) |
753 | - goto out; |
754 | - |
755 | - kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]); |
756 | - wake_up_process(thread); |
757 | - wait_for_completion(&cgr_comp.completion); |
758 | + smp_call_function_single(qman_cgr_cpus[cgr->cgrid], |
759 | + qman_delete_cgr_smp_call, cgr, true); |
760 | preempt_enable(); |
761 | return; |
762 | } |
763 | -out: |
764 | + |
765 | qman_delete_cgr(cgr); |
766 | preempt_enable(); |
767 | } |
768 | diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c |
769 | index aecb15f84557..808f018fa976 100644 |
770 | --- a/fs/sysfs/symlink.c |
771 | +++ b/fs/sysfs/symlink.c |
772 | @@ -107,6 +107,7 @@ int sysfs_create_link_nowarn(struct kobject *kobj, struct kobject *target, |
773 | { |
774 | return sysfs_do_create_link(kobj, target, name, 0); |
775 | } |
776 | +EXPORT_SYMBOL_GPL(sysfs_create_link_nowarn); |
777 | |
778 | /** |
779 | * sysfs_delete_link - remove symlink in object's directory. |
780 | diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h |
781 | index 1dff0a478b45..4e8f77504a57 100644 |
782 | --- a/include/linux/cgroup-defs.h |
783 | +++ b/include/linux/cgroup-defs.h |
784 | @@ -696,13 +696,13 @@ struct sock_cgroup_data { |
785 | * updaters and return part of the previous pointer as the prioidx or |
786 | * classid. Such races are short-lived and the result isn't critical. |
787 | */ |
788 | -static inline u16 sock_cgroup_prioidx(struct sock_cgroup_data *skcd) |
789 | +static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd) |
790 | { |
791 | /* fallback to 1 which is always the ID of the root cgroup */ |
792 | return (skcd->is_data & 1) ? skcd->prioidx : 1; |
793 | } |
794 | |
795 | -static inline u32 sock_cgroup_classid(struct sock_cgroup_data *skcd) |
796 | +static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd) |
797 | { |
798 | /* fallback to 0 which is the unconfigured default classid */ |
799 | return (skcd->is_data & 1) ? skcd->classid : 0; |
800 | diff --git a/include/linux/phy.h b/include/linux/phy.h |
801 | index 600076e1ce84..dca9e926b88f 100644 |
802 | --- a/include/linux/phy.h |
803 | +++ b/include/linux/phy.h |
804 | @@ -895,7 +895,6 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner); |
805 | int phy_drivers_register(struct phy_driver *new_driver, int n, |
806 | struct module *owner); |
807 | void phy_state_machine(struct work_struct *work); |
808 | -void phy_change(struct phy_device *phydev); |
809 | void phy_change_work(struct work_struct *work); |
810 | void phy_mac_interrupt(struct phy_device *phydev, int new_link); |
811 | void phy_start_machine(struct phy_device *phydev); |
812 | diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h |
813 | index 361c08e35dbc..7fd514f36e74 100644 |
814 | --- a/include/linux/rhashtable.h |
815 | +++ b/include/linux/rhashtable.h |
816 | @@ -750,8 +750,10 @@ static inline void *__rhashtable_insert_fast( |
817 | if (!key || |
818 | (params.obj_cmpfn ? |
819 | params.obj_cmpfn(&arg, rht_obj(ht, head)) : |
820 | - rhashtable_compare(&arg, rht_obj(ht, head)))) |
821 | + rhashtable_compare(&arg, rht_obj(ht, head)))) { |
822 | + pprev = &head->next; |
823 | continue; |
824 | + } |
825 | |
826 | data = rht_obj(ht, head); |
827 | |
828 | diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h |
829 | index 6073e8bae025..f59acacaa265 100644 |
830 | --- a/include/net/sch_generic.h |
831 | +++ b/include/net/sch_generic.h |
832 | @@ -723,6 +723,16 @@ static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free) |
833 | *to_free = skb; |
834 | } |
835 | |
836 | +static inline void __qdisc_drop_all(struct sk_buff *skb, |
837 | + struct sk_buff **to_free) |
838 | +{ |
839 | + if (skb->prev) |
840 | + skb->prev->next = *to_free; |
841 | + else |
842 | + skb->next = *to_free; |
843 | + *to_free = skb; |
844 | +} |
845 | + |
846 | static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch, |
847 | struct qdisc_skb_head *qh, |
848 | struct sk_buff **to_free) |
849 | @@ -843,6 +853,15 @@ static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch, |
850 | return NET_XMIT_DROP; |
851 | } |
852 | |
853 | +static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch, |
854 | + struct sk_buff **to_free) |
855 | +{ |
856 | + __qdisc_drop_all(skb, to_free); |
857 | + qdisc_qstats_drop(sch); |
858 | + |
859 | + return NET_XMIT_DROP; |
860 | +} |
861 | + |
862 | /* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how |
863 | long it will take to send a packet given its size. |
864 | */ |
865 | diff --git a/include/net/tcp.h b/include/net/tcp.h |
866 | index 0a13574134b8..d323d4fa742c 100644 |
867 | --- a/include/net/tcp.h |
868 | +++ b/include/net/tcp.h |
869 | @@ -1600,6 +1600,11 @@ enum tcp_chrono { |
870 | void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type); |
871 | void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type); |
872 | |
873 | +static inline void tcp_init_send_head(struct sock *sk) |
874 | +{ |
875 | + sk->sk_send_head = NULL; |
876 | +} |
877 | + |
878 | /* write queue abstraction */ |
879 | static inline void tcp_write_queue_purge(struct sock *sk) |
880 | { |
881 | @@ -1610,6 +1615,7 @@ static inline void tcp_write_queue_purge(struct sock *sk) |
882 | sk_wmem_free_skb(sk, skb); |
883 | sk_mem_reclaim(sk); |
884 | tcp_clear_all_retrans_hints(tcp_sk(sk)); |
885 | + tcp_init_send_head(sk); |
886 | } |
887 | |
888 | static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk) |
889 | @@ -1672,11 +1678,6 @@ static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unli |
890 | tcp_sk(sk)->highest_sack = NULL; |
891 | } |
892 | |
893 | -static inline void tcp_init_send_head(struct sock *sk) |
894 | -{ |
895 | - sk->sk_send_head = NULL; |
896 | -} |
897 | - |
898 | static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb) |
899 | { |
900 | __skb_queue_tail(&sk->sk_write_queue, skb); |
901 | diff --git a/lib/rhashtable.c b/lib/rhashtable.c |
902 | index ddd7dde87c3c..b734ce731a7a 100644 |
903 | --- a/lib/rhashtable.c |
904 | +++ b/lib/rhashtable.c |
905 | @@ -537,8 +537,10 @@ static void *rhashtable_lookup_one(struct rhashtable *ht, |
906 | if (!key || |
907 | (ht->p.obj_cmpfn ? |
908 | ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) : |
909 | - rhashtable_compare(&arg, rht_obj(ht, head)))) |
910 | + rhashtable_compare(&arg, rht_obj(ht, head)))) { |
911 | + pprev = &head->next; |
912 | continue; |
913 | + } |
914 | |
915 | if (!ht->rhlist) |
916 | return rht_obj(ht, head); |
917 | diff --git a/net/core/dev.c b/net/core/dev.c |
918 | index c75ef9d8105a..387af3415385 100644 |
919 | --- a/net/core/dev.c |
920 | +++ b/net/core/dev.c |
921 | @@ -3224,15 +3224,23 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, |
922 | #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) |
923 | static void skb_update_prio(struct sk_buff *skb) |
924 | { |
925 | - struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap); |
926 | + const struct netprio_map *map; |
927 | + const struct sock *sk; |
928 | + unsigned int prioidx; |
929 | |
930 | - if (!skb->priority && skb->sk && map) { |
931 | - unsigned int prioidx = |
932 | - sock_cgroup_prioidx(&skb->sk->sk_cgrp_data); |
933 | + if (skb->priority) |
934 | + return; |
935 | + map = rcu_dereference_bh(skb->dev->priomap); |
936 | + if (!map) |
937 | + return; |
938 | + sk = skb_to_full_sk(skb); |
939 | + if (!sk) |
940 | + return; |
941 | |
942 | - if (prioidx < map->priomap_len) |
943 | - skb->priority = map->priomap[prioidx]; |
944 | - } |
945 | + prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data); |
946 | + |
947 | + if (prioidx < map->priomap_len) |
948 | + skb->priority = map->priomap[prioidx]; |
949 | } |
950 | #else |
951 | #define skb_update_prio(skb) |
952 | diff --git a/net/core/devlink.c b/net/core/devlink.c |
953 | index 7d430c1d9c3e..5ba973311025 100644 |
954 | --- a/net/core/devlink.c |
955 | +++ b/net/core/devlink.c |
956 | @@ -1776,7 +1776,7 @@ static int devlink_dpipe_tables_fill(struct genl_info *info, |
957 | if (!nlh) { |
958 | err = devlink_dpipe_send_and_alloc_skb(&skb, info); |
959 | if (err) |
960 | - goto err_skb_send_alloc; |
961 | + return err; |
962 | goto send_done; |
963 | } |
964 | |
965 | @@ -1785,7 +1785,6 @@ static int devlink_dpipe_tables_fill(struct genl_info *info, |
966 | nla_put_failure: |
967 | err = -EMSGSIZE; |
968 | err_table_put: |
969 | -err_skb_send_alloc: |
970 | genlmsg_cancel(skb, hdr); |
971 | nlmsg_free(skb); |
972 | return err; |
973 | @@ -2051,7 +2050,7 @@ static int devlink_dpipe_entries_fill(struct genl_info *info, |
974 | table->counters_enabled, |
975 | &dump_ctx); |
976 | if (err) |
977 | - goto err_entries_dump; |
978 | + return err; |
979 | |
980 | send_done: |
981 | nlh = nlmsg_put(dump_ctx.skb, info->snd_portid, info->snd_seq, |
982 | @@ -2059,16 +2058,10 @@ static int devlink_dpipe_entries_fill(struct genl_info *info, |
983 | if (!nlh) { |
984 | err = devlink_dpipe_send_and_alloc_skb(&dump_ctx.skb, info); |
985 | if (err) |
986 | - goto err_skb_send_alloc; |
987 | + return err; |
988 | goto send_done; |
989 | } |
990 | return genlmsg_reply(dump_ctx.skb, info); |
991 | - |
992 | -err_entries_dump: |
993 | -err_skb_send_alloc: |
994 | - genlmsg_cancel(dump_ctx.skb, dump_ctx.hdr); |
995 | - nlmsg_free(dump_ctx.skb); |
996 | - return err; |
997 | } |
998 | |
999 | static int devlink_nl_cmd_dpipe_entries_get(struct sk_buff *skb, |
1000 | @@ -2207,7 +2200,7 @@ static int devlink_dpipe_headers_fill(struct genl_info *info, |
1001 | if (!nlh) { |
1002 | err = devlink_dpipe_send_and_alloc_skb(&skb, info); |
1003 | if (err) |
1004 | - goto err_skb_send_alloc; |
1005 | + return err; |
1006 | goto send_done; |
1007 | } |
1008 | return genlmsg_reply(skb, info); |
1009 | @@ -2215,7 +2208,6 @@ static int devlink_dpipe_headers_fill(struct genl_info *info, |
1010 | nla_put_failure: |
1011 | err = -EMSGSIZE; |
1012 | err_table_put: |
1013 | -err_skb_send_alloc: |
1014 | genlmsg_cancel(skb, hdr); |
1015 | nlmsg_free(skb); |
1016 | return err; |
1017 | diff --git a/net/core/skbuff.c b/net/core/skbuff.c |
1018 | index cc811add68c6..564beb7e6d1c 100644 |
1019 | --- a/net/core/skbuff.c |
1020 | +++ b/net/core/skbuff.c |
1021 | @@ -4171,7 +4171,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) |
1022 | |
1023 | skb_queue_tail(&sk->sk_error_queue, skb); |
1024 | if (!sock_flag(sk, SOCK_DEAD)) |
1025 | - sk->sk_data_ready(sk); |
1026 | + sk->sk_error_report(sk); |
1027 | return 0; |
1028 | } |
1029 | EXPORT_SYMBOL(sock_queue_err_skb); |
1030 | diff --git a/net/dccp/proto.c b/net/dccp/proto.c |
1031 | index 9d43c1f40274..ff3b058cf58c 100644 |
1032 | --- a/net/dccp/proto.c |
1033 | +++ b/net/dccp/proto.c |
1034 | @@ -789,6 +789,11 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) |
1035 | if (skb == NULL) |
1036 | goto out_release; |
1037 | |
1038 | + if (sk->sk_state == DCCP_CLOSED) { |
1039 | + rc = -ENOTCONN; |
1040 | + goto out_discard; |
1041 | + } |
1042 | + |
1043 | skb_reserve(skb, sk->sk_prot->max_header); |
1044 | rc = memcpy_from_msg(skb_put(skb, len), msg, len); |
1045 | if (rc != 0) |
1046 | diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c |
1047 | index 974765b7d92a..e9f0489e4229 100644 |
1048 | --- a/net/ieee802154/6lowpan/core.c |
1049 | +++ b/net/ieee802154/6lowpan/core.c |
1050 | @@ -206,9 +206,13 @@ static inline void lowpan_netlink_fini(void) |
1051 | static int lowpan_device_event(struct notifier_block *unused, |
1052 | unsigned long event, void *ptr) |
1053 | { |
1054 | - struct net_device *wdev = netdev_notifier_info_to_dev(ptr); |
1055 | + struct net_device *ndev = netdev_notifier_info_to_dev(ptr); |
1056 | + struct wpan_dev *wpan_dev; |
1057 | |
1058 | - if (wdev->type != ARPHRD_IEEE802154) |
1059 | + if (ndev->type != ARPHRD_IEEE802154) |
1060 | + return NOTIFY_DONE; |
1061 | + wpan_dev = ndev->ieee802154_ptr; |
1062 | + if (!wpan_dev) |
1063 | return NOTIFY_DONE; |
1064 | |
1065 | switch (event) { |
1066 | @@ -217,8 +221,8 @@ static int lowpan_device_event(struct notifier_block *unused, |
1067 | * also delete possible lowpan interfaces which belongs |
1068 | * to the wpan interface. |
1069 | */ |
1070 | - if (wdev->ieee802154_ptr->lowpan_dev) |
1071 | - lowpan_dellink(wdev->ieee802154_ptr->lowpan_dev, NULL); |
1072 | + if (wpan_dev->lowpan_dev) |
1073 | + lowpan_dellink(wpan_dev->lowpan_dev, NULL); |
1074 | break; |
1075 | default: |
1076 | return NOTIFY_DONE; |
1077 | diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c |
1078 | index af74d0433453..e691705f0a85 100644 |
1079 | --- a/net/ipv4/inet_fragment.c |
1080 | +++ b/net/ipv4/inet_fragment.c |
1081 | @@ -119,6 +119,9 @@ static void inet_frag_secret_rebuild(struct inet_frags *f) |
1082 | |
1083 | static bool inet_fragq_should_evict(const struct inet_frag_queue *q) |
1084 | { |
1085 | + if (!hlist_unhashed(&q->list_evictor)) |
1086 | + return false; |
1087 | + |
1088 | return q->net->low_thresh == 0 || |
1089 | frag_mem_limit(q->net) >= q->net->low_thresh; |
1090 | } |
1091 | diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c |
1092 | index f56aab54e0c8..1e70ed5244ea 100644 |
1093 | --- a/net/ipv4/ip_sockglue.c |
1094 | +++ b/net/ipv4/ip_sockglue.c |
1095 | @@ -258,7 +258,8 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc, |
1096 | src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg); |
1097 | if (!ipv6_addr_v4mapped(&src_info->ipi6_addr)) |
1098 | return -EINVAL; |
1099 | - ipc->oif = src_info->ipi6_ifindex; |
1100 | + if (src_info->ipi6_ifindex) |
1101 | + ipc->oif = src_info->ipi6_ifindex; |
1102 | ipc->addr = src_info->ipi6_addr.s6_addr32[3]; |
1103 | continue; |
1104 | } |
1105 | @@ -288,7 +289,8 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc, |
1106 | if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo))) |
1107 | return -EINVAL; |
1108 | info = (struct in_pktinfo *)CMSG_DATA(cmsg); |
1109 | - ipc->oif = info->ipi_ifindex; |
1110 | + if (info->ipi_ifindex) |
1111 | + ipc->oif = info->ipi_ifindex; |
1112 | ipc->addr = info->ipi_spec_dst.s_addr; |
1113 | break; |
1114 | } |
1115 | diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c |
1116 | index fe11128d7df4..38b9a6276a9d 100644 |
1117 | --- a/net/ipv4/tcp.c |
1118 | +++ b/net/ipv4/tcp.c |
1119 | @@ -3445,6 +3445,7 @@ int tcp_abort(struct sock *sk, int err) |
1120 | |
1121 | bh_unlock_sock(sk); |
1122 | local_bh_enable(); |
1123 | + tcp_write_queue_purge(sk); |
1124 | release_sock(sk); |
1125 | return 0; |
1126 | } |
1127 | diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c |
1128 | index 14ac7df95380..a845b7692c1b 100644 |
1129 | --- a/net/ipv4/tcp_timer.c |
1130 | +++ b/net/ipv4/tcp_timer.c |
1131 | @@ -36,6 +36,7 @@ static void tcp_write_err(struct sock *sk) |
1132 | sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT; |
1133 | sk->sk_error_report(sk); |
1134 | |
1135 | + tcp_write_queue_purge(sk); |
1136 | tcp_done(sk); |
1137 | __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT); |
1138 | } |
1139 | diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c |
1140 | index a1f918713006..287112da3c06 100644 |
1141 | --- a/net/ipv6/datagram.c |
1142 | +++ b/net/ipv6/datagram.c |
1143 | @@ -146,10 +146,12 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, |
1144 | struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; |
1145 | struct inet_sock *inet = inet_sk(sk); |
1146 | struct ipv6_pinfo *np = inet6_sk(sk); |
1147 | - struct in6_addr *daddr; |
1148 | + struct in6_addr *daddr, old_daddr; |
1149 | + __be32 fl6_flowlabel = 0; |
1150 | + __be32 old_fl6_flowlabel; |
1151 | + __be16 old_dport; |
1152 | int addr_type; |
1153 | int err; |
1154 | - __be32 fl6_flowlabel = 0; |
1155 | |
1156 | if (usin->sin6_family == AF_INET) { |
1157 | if (__ipv6_only_sock(sk)) |
1158 | @@ -239,9 +241,13 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, |
1159 | } |
1160 | } |
1161 | |
1162 | + /* save the current peer information before updating it */ |
1163 | + old_daddr = sk->sk_v6_daddr; |
1164 | + old_fl6_flowlabel = np->flow_label; |
1165 | + old_dport = inet->inet_dport; |
1166 | + |
1167 | sk->sk_v6_daddr = *daddr; |
1168 | np->flow_label = fl6_flowlabel; |
1169 | - |
1170 | inet->inet_dport = usin->sin6_port; |
1171 | |
1172 | /* |
1173 | @@ -251,11 +257,12 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, |
1174 | |
1175 | err = ip6_datagram_dst_update(sk, true); |
1176 | if (err) { |
1177 | - /* Reset daddr and dport so that udp_v6_early_demux() |
1178 | - * fails to find this socket |
1179 | + /* Restore the socket peer info, to keep it consistent with |
1180 | + * the old socket state |
1181 | */ |
1182 | - memset(&sk->sk_v6_daddr, 0, sizeof(sk->sk_v6_daddr)); |
1183 | - inet->inet_dport = 0; |
1184 | + sk->sk_v6_daddr = old_daddr; |
1185 | + np->flow_label = old_fl6_flowlabel; |
1186 | + inet->inet_dport = old_dport; |
1187 | goto out; |
1188 | } |
1189 | |
1190 | diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c |
1191 | index 2a937c8d19e9..dd28005efb97 100644 |
1192 | --- a/net/ipv6/ndisc.c |
1193 | +++ b/net/ipv6/ndisc.c |
1194 | @@ -1546,7 +1546,8 @@ static void ndisc_fill_redirect_hdr_option(struct sk_buff *skb, |
1195 | *(opt++) = (rd_len >> 3); |
1196 | opt += 6; |
1197 | |
1198 | - memcpy(opt, ipv6_hdr(orig_skb), rd_len - 8); |
1199 | + skb_copy_bits(orig_skb, skb_network_offset(orig_skb), opt, |
1200 | + rd_len - 8); |
1201 | } |
1202 | |
1203 | void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target) |
1204 | diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c |
1205 | index bd6cc688bd19..7a78dcfda68a 100644 |
1206 | --- a/net/ipv6/seg6_iptunnel.c |
1207 | +++ b/net/ipv6/seg6_iptunnel.c |
1208 | @@ -93,7 +93,8 @@ static void set_tun_src(struct net *net, struct net_device *dev, |
1209 | /* encapsulate an IPv6 packet within an outer IPv6 header with a given SRH */ |
1210 | int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto) |
1211 | { |
1212 | - struct net *net = dev_net(skb_dst(skb)->dev); |
1213 | + struct dst_entry *dst = skb_dst(skb); |
1214 | + struct net *net = dev_net(dst->dev); |
1215 | struct ipv6hdr *hdr, *inner_hdr; |
1216 | struct ipv6_sr_hdr *isrh; |
1217 | int hdrlen, tot_len, err; |
1218 | @@ -134,7 +135,7 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto) |
1219 | isrh->nexthdr = proto; |
1220 | |
1221 | hdr->daddr = isrh->segments[isrh->first_segment]; |
1222 | - set_tun_src(net, skb->dev, &hdr->daddr, &hdr->saddr); |
1223 | + set_tun_src(net, ip6_dst_idev(dst)->dev, &hdr->daddr, &hdr->saddr); |
1224 | |
1225 | #ifdef CONFIG_IPV6_SEG6_HMAC |
1226 | if (sr_has_hmac(isrh)) { |
1227 | @@ -418,7 +419,7 @@ static int seg6_build_state(struct nlattr *nla, |
1228 | |
1229 | slwt = seg6_lwt_lwtunnel(newts); |
1230 | |
1231 | - err = dst_cache_init(&slwt->cache, GFP_KERNEL); |
1232 | + err = dst_cache_init(&slwt->cache, GFP_ATOMIC); |
1233 | if (err) { |
1234 | kfree(newts); |
1235 | return err; |
1236 | diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c |
1237 | index 148533169b1d..ca98276c2709 100644 |
1238 | --- a/net/iucv/af_iucv.c |
1239 | +++ b/net/iucv/af_iucv.c |
1240 | @@ -2433,9 +2433,11 @@ static int afiucv_iucv_init(void) |
1241 | af_iucv_dev->driver = &af_iucv_driver; |
1242 | err = device_register(af_iucv_dev); |
1243 | if (err) |
1244 | - goto out_driver; |
1245 | + goto out_iucv_dev; |
1246 | return 0; |
1247 | |
1248 | +out_iucv_dev: |
1249 | + put_device(af_iucv_dev); |
1250 | out_driver: |
1251 | driver_unregister(&af_iucv_driver); |
1252 | out_iucv: |
1253 | diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c |
1254 | index 58d53b907d53..9db49805b7be 100644 |
1255 | --- a/net/kcm/kcmsock.c |
1256 | +++ b/net/kcm/kcmsock.c |
1257 | @@ -1381,24 +1381,32 @@ static int kcm_attach(struct socket *sock, struct socket *csock, |
1258 | .parse_msg = kcm_parse_func_strparser, |
1259 | .read_sock_done = kcm_read_sock_done, |
1260 | }; |
1261 | - int err; |
1262 | + int err = 0; |
1263 | |
1264 | csk = csock->sk; |
1265 | if (!csk) |
1266 | return -EINVAL; |
1267 | |
1268 | + lock_sock(csk); |
1269 | + |
1270 | /* Only allow TCP sockets to be attached for now */ |
1271 | if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) || |
1272 | - csk->sk_protocol != IPPROTO_TCP) |
1273 | - return -EOPNOTSUPP; |
1274 | + csk->sk_protocol != IPPROTO_TCP) { |
1275 | + err = -EOPNOTSUPP; |
1276 | + goto out; |
1277 | + } |
1278 | |
1279 | /* Don't allow listeners or closed sockets */ |
1280 | - if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE) |
1281 | - return -EOPNOTSUPP; |
1282 | + if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE) { |
1283 | + err = -EOPNOTSUPP; |
1284 | + goto out; |
1285 | + } |
1286 | |
1287 | psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL); |
1288 | - if (!psock) |
1289 | - return -ENOMEM; |
1290 | + if (!psock) { |
1291 | + err = -ENOMEM; |
1292 | + goto out; |
1293 | + } |
1294 | |
1295 | psock->mux = mux; |
1296 | psock->sk = csk; |
1297 | @@ -1407,7 +1415,7 @@ static int kcm_attach(struct socket *sock, struct socket *csock, |
1298 | err = strp_init(&psock->strp, csk, &cb); |
1299 | if (err) { |
1300 | kmem_cache_free(kcm_psockp, psock); |
1301 | - return err; |
1302 | + goto out; |
1303 | } |
1304 | |
1305 | write_lock_bh(&csk->sk_callback_lock); |
1306 | @@ -1419,7 +1427,8 @@ static int kcm_attach(struct socket *sock, struct socket *csock, |
1307 | write_unlock_bh(&csk->sk_callback_lock); |
1308 | strp_done(&psock->strp); |
1309 | kmem_cache_free(kcm_psockp, psock); |
1310 | - return -EALREADY; |
1311 | + err = -EALREADY; |
1312 | + goto out; |
1313 | } |
1314 | |
1315 | psock->save_data_ready = csk->sk_data_ready; |
1316 | @@ -1455,7 +1464,10 @@ static int kcm_attach(struct socket *sock, struct socket *csock, |
1317 | /* Schedule RX work in case there are already bytes queued */ |
1318 | strp_check_rcv(&psock->strp); |
1319 | |
1320 | - return 0; |
1321 | +out: |
1322 | + release_sock(csk); |
1323 | + |
1324 | + return err; |
1325 | } |
1326 | |
1327 | static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info) |
1328 | @@ -1507,6 +1519,7 @@ static void kcm_unattach(struct kcm_psock *psock) |
1329 | |
1330 | if (WARN_ON(psock->rx_kcm)) { |
1331 | write_unlock_bh(&csk->sk_callback_lock); |
1332 | + release_sock(csk); |
1333 | return; |
1334 | } |
1335 | |
1336 | diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c |
1337 | index af22aa8ae35b..490d7360222e 100644 |
1338 | --- a/net/l2tp/l2tp_core.c |
1339 | +++ b/net/l2tp/l2tp_core.c |
1340 | @@ -1562,9 +1562,14 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 |
1341 | encap = cfg->encap; |
1342 | |
1343 | /* Quick sanity checks */ |
1344 | + err = -EPROTONOSUPPORT; |
1345 | + if (sk->sk_type != SOCK_DGRAM) { |
1346 | + pr_debug("tunl %hu: fd %d wrong socket type\n", |
1347 | + tunnel_id, fd); |
1348 | + goto err; |
1349 | + } |
1350 | switch (encap) { |
1351 | case L2TP_ENCAPTYPE_UDP: |
1352 | - err = -EPROTONOSUPPORT; |
1353 | if (sk->sk_protocol != IPPROTO_UDP) { |
1354 | pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n", |
1355 | tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP); |
1356 | @@ -1572,7 +1577,6 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 |
1357 | } |
1358 | break; |
1359 | case L2TP_ENCAPTYPE_IP: |
1360 | - err = -EPROTONOSUPPORT; |
1361 | if (sk->sk_protocol != IPPROTO_L2TP) { |
1362 | pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n", |
1363 | tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP); |
1364 | diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c |
1365 | index 6f02499ef007..b9ce82c9440f 100644 |
1366 | --- a/net/netlink/genetlink.c |
1367 | +++ b/net/netlink/genetlink.c |
1368 | @@ -1106,7 +1106,7 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group, |
1369 | if (!err) |
1370 | delivered = true; |
1371 | else if (err != -ESRCH) |
1372 | - goto error; |
1373 | + return err; |
1374 | return delivered ? 0 : -ESRCH; |
1375 | error: |
1376 | kfree_skb(skb); |
1377 | diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c |
1378 | index 30c96274c638..22bf1a376b91 100644 |
1379 | --- a/net/sched/act_tunnel_key.c |
1380 | +++ b/net/sched/act_tunnel_key.c |
1381 | @@ -153,6 +153,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, |
1382 | metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX; |
1383 | break; |
1384 | default: |
1385 | + ret = -EINVAL; |
1386 | goto err_out; |
1387 | } |
1388 | |
1389 | diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c |
1390 | index b1266e75ca43..8c8df75dbead 100644 |
1391 | --- a/net/sched/sch_netem.c |
1392 | +++ b/net/sched/sch_netem.c |
1393 | @@ -513,7 +513,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, |
1394 | } |
1395 | |
1396 | if (unlikely(sch->q.qlen >= sch->limit)) |
1397 | - return qdisc_drop(skb, sch, to_free); |
1398 | + return qdisc_drop_all(skb, sch, to_free); |
1399 | |
1400 | qdisc_qstats_backlog_inc(sch, skb); |
1401 |