Contents of /trunk/kernel-alx/patches-4.19/0124-4.19.25-all-fixes.patch
Parent Directory | Revision Log
Revision 3403 -
(show annotations)
(download)
Fri Aug 2 11:47:36 2019 UTC (5 years, 1 month ago) by niro
File size: 29306 byte(s)
-linux-4.19.25
1 | diff --git a/Makefile b/Makefile |
2 | index 370ad0d34076..2caa131ff306 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,7 +1,7 @@ |
6 | # SPDX-License-Identifier: GPL-2.0 |
7 | VERSION = 4 |
8 | PATCHLEVEL = 19 |
9 | -SUBLEVEL = 24 |
10 | +SUBLEVEL = 25 |
11 | EXTRAVERSION = |
12 | NAME = "People's Front" |
13 | |
14 | diff --git a/crypto/af_alg.c b/crypto/af_alg.c |
15 | index 17eb09d222ff..ec78a04eb136 100644 |
16 | --- a/crypto/af_alg.c |
17 | +++ b/crypto/af_alg.c |
18 | @@ -122,8 +122,10 @@ static void alg_do_release(const struct af_alg_type *type, void *private) |
19 | |
20 | int af_alg_release(struct socket *sock) |
21 | { |
22 | - if (sock->sk) |
23 | + if (sock->sk) { |
24 | sock_put(sock->sk); |
25 | + sock->sk = NULL; |
26 | + } |
27 | return 0; |
28 | } |
29 | EXPORT_SYMBOL_GPL(af_alg_release); |
30 | diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c |
31 | index 0e30fa00204c..f9b8e3e23a8e 100644 |
32 | --- a/drivers/hwmon/lm80.c |
33 | +++ b/drivers/hwmon/lm80.c |
34 | @@ -393,8 +393,10 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr, |
35 | } |
36 | |
37 | rv = lm80_read_value(client, LM80_REG_FANDIV); |
38 | - if (rv < 0) |
39 | + if (rv < 0) { |
40 | + mutex_unlock(&data->update_lock); |
41 | return rv; |
42 | + } |
43 | reg = (rv & ~(3 << (2 * (nr + 1)))) |
44 | | (data->fan_div[nr] << (2 * (nr + 1))); |
45 | lm80_write_value(client, LM80_REG_FANDIV, reg); |
46 | diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c |
47 | index 211ed6cffd10..578978711887 100644 |
48 | --- a/drivers/isdn/mISDN/timerdev.c |
49 | +++ b/drivers/isdn/mISDN/timerdev.c |
50 | @@ -170,8 +170,8 @@ dev_expire_timer(struct timer_list *t) |
51 | spin_lock_irqsave(&timer->dev->lock, flags); |
52 | if (timer->id >= 0) |
53 | list_move_tail(&timer->list, &timer->dev->expired); |
54 | - spin_unlock_irqrestore(&timer->dev->lock, flags); |
55 | wake_up_interruptible(&timer->dev->wait); |
56 | + spin_unlock_irqrestore(&timer->dev->lock, flags); |
57 | } |
58 | |
59 | static int |
60 | diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c |
61 | index ef9deaa361c7..ddd98cdd33bc 100644 |
62 | --- a/drivers/mmc/host/meson-gx-mmc.c |
63 | +++ b/drivers/mmc/host/meson-gx-mmc.c |
64 | @@ -1286,7 +1286,8 @@ static int meson_mmc_probe(struct platform_device *pdev) |
65 | host->regs + SD_EMMC_IRQ_EN); |
66 | |
67 | ret = request_threaded_irq(host->irq, meson_mmc_irq, |
68 | - meson_mmc_irq_thread, IRQF_SHARED, NULL, host); |
69 | + meson_mmc_irq_thread, IRQF_SHARED, |
70 | + dev_name(&pdev->dev), host); |
71 | if (ret) |
72 | goto err_init_clk; |
73 | |
74 | diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c |
75 | index 258918d8a416..9f697a5b8e3d 100644 |
76 | --- a/drivers/net/dsa/mv88e6xxx/chip.c |
77 | +++ b/drivers/net/dsa/mv88e6xxx/chip.c |
78 | @@ -261,6 +261,7 @@ static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip) |
79 | unsigned int sub_irq; |
80 | unsigned int n; |
81 | u16 reg; |
82 | + u16 ctl1; |
83 | int err; |
84 | |
85 | mutex_lock(&chip->reg_lock); |
86 | @@ -270,13 +271,28 @@ static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip) |
87 | if (err) |
88 | goto out; |
89 | |
90 | - for (n = 0; n < chip->g1_irq.nirqs; ++n) { |
91 | - if (reg & (1 << n)) { |
92 | - sub_irq = irq_find_mapping(chip->g1_irq.domain, n); |
93 | - handle_nested_irq(sub_irq); |
94 | - ++nhandled; |
95 | + do { |
96 | + for (n = 0; n < chip->g1_irq.nirqs; ++n) { |
97 | + if (reg & (1 << n)) { |
98 | + sub_irq = irq_find_mapping(chip->g1_irq.domain, |
99 | + n); |
100 | + handle_nested_irq(sub_irq); |
101 | + ++nhandled; |
102 | + } |
103 | } |
104 | - } |
105 | + |
106 | + mutex_lock(&chip->reg_lock); |
107 | + err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &ctl1); |
108 | + if (err) |
109 | + goto unlock; |
110 | + err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, ®); |
111 | +unlock: |
112 | + mutex_unlock(&chip->reg_lock); |
113 | + if (err) |
114 | + goto out; |
115 | + ctl1 &= GENMASK(chip->g1_irq.nirqs, 0); |
116 | + } while (reg & ctl1); |
117 | + |
118 | out: |
119 | return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE); |
120 | } |
121 | diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c |
122 | index 697d9b374f5e..ae2f35039343 100644 |
123 | --- a/drivers/net/ethernet/marvell/sky2.c |
124 | +++ b/drivers/net/ethernet/marvell/sky2.c |
125 | @@ -5087,7 +5087,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
126 | INIT_WORK(&hw->restart_work, sky2_restart); |
127 | |
128 | pci_set_drvdata(pdev, hw); |
129 | - pdev->d3_delay = 200; |
130 | + pdev->d3_delay = 300; |
131 | |
132 | return 0; |
133 | |
134 | diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c |
135 | index d64cd8d44d83..a12b5710891e 100644 |
136 | --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c |
137 | +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c |
138 | @@ -841,8 +841,9 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, |
139 | for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { |
140 | bool configure = false; |
141 | bool pfc = false; |
142 | + u16 thres_cells; |
143 | + u16 delay_cells; |
144 | bool lossy; |
145 | - u16 thres; |
146 | |
147 | for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) { |
148 | if (prio_tc[j] == i) { |
149 | @@ -856,10 +857,11 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, |
150 | continue; |
151 | |
152 | lossy = !(pfc || pause_en); |
153 | - thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); |
154 | - delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc, |
155 | - pause_en); |
156 | - mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy); |
157 | + thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); |
158 | + delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, |
159 | + pfc, pause_en); |
160 | + mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres_cells + delay_cells, |
161 | + thres_cells, lossy); |
162 | } |
163 | |
164 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); |
165 | diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c |
166 | index 20299f6f65fc..736e29635b77 100644 |
167 | --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c |
168 | +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c |
169 | @@ -241,15 +241,18 @@ static inline void dwmac4_get_timestamp(void *desc, u32 ats, u64 *ts) |
170 | static int dwmac4_rx_check_timestamp(void *desc) |
171 | { |
172 | struct dma_desc *p = (struct dma_desc *)desc; |
173 | + unsigned int rdes0 = le32_to_cpu(p->des0); |
174 | + unsigned int rdes1 = le32_to_cpu(p->des1); |
175 | + unsigned int rdes3 = le32_to_cpu(p->des3); |
176 | u32 own, ctxt; |
177 | int ret = 1; |
178 | |
179 | - own = p->des3 & RDES3_OWN; |
180 | - ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR) |
181 | + own = rdes3 & RDES3_OWN; |
182 | + ctxt = ((rdes3 & RDES3_CONTEXT_DESCRIPTOR) |
183 | >> RDES3_CONTEXT_DESCRIPTOR_SHIFT); |
184 | |
185 | if (likely(!own && ctxt)) { |
186 | - if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff)) |
187 | + if ((rdes0 == 0xffffffff) && (rdes1 == 0xffffffff)) |
188 | /* Corrupted value */ |
189 | ret = -EINVAL; |
190 | else |
191 | diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c |
192 | index 5710864fa809..9caf79ba5ef1 100644 |
193 | --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c |
194 | +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c |
195 | @@ -692,25 +692,27 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev, |
196 | struct ethtool_eee *edata) |
197 | { |
198 | struct stmmac_priv *priv = netdev_priv(dev); |
199 | + int ret; |
200 | |
201 | - priv->eee_enabled = edata->eee_enabled; |
202 | - |
203 | - if (!priv->eee_enabled) |
204 | + if (!edata->eee_enabled) { |
205 | stmmac_disable_eee_mode(priv); |
206 | - else { |
207 | + } else { |
208 | /* We are asking for enabling the EEE but it is safe |
209 | * to verify all by invoking the eee_init function. |
210 | * In case of failure it will return an error. |
211 | */ |
212 | - priv->eee_enabled = stmmac_eee_init(priv); |
213 | - if (!priv->eee_enabled) |
214 | + edata->eee_enabled = stmmac_eee_init(priv); |
215 | + if (!edata->eee_enabled) |
216 | return -EOPNOTSUPP; |
217 | - |
218 | - /* Do not change tx_lpi_timer in case of failure */ |
219 | - priv->tx_lpi_timer = edata->tx_lpi_timer; |
220 | } |
221 | |
222 | - return phy_ethtool_set_eee(dev->phydev, edata); |
223 | + ret = phy_ethtool_set_eee(dev->phydev, edata); |
224 | + if (ret) |
225 | + return ret; |
226 | + |
227 | + priv->eee_enabled = edata->eee_enabled; |
228 | + priv->tx_lpi_timer = edata->tx_lpi_timer; |
229 | + return 0; |
230 | } |
231 | |
232 | static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv) |
233 | diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c |
234 | index 74a8782313cf..bd6084e315de 100644 |
235 | --- a/drivers/net/phy/xilinx_gmii2rgmii.c |
236 | +++ b/drivers/net/phy/xilinx_gmii2rgmii.c |
237 | @@ -44,7 +44,10 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev) |
238 | u16 val = 0; |
239 | int err; |
240 | |
241 | - err = priv->phy_drv->read_status(phydev); |
242 | + if (priv->phy_drv->read_status) |
243 | + err = priv->phy_drv->read_status(phydev); |
244 | + else |
245 | + err = genphy_read_status(phydev); |
246 | if (err < 0) |
247 | return err; |
248 | |
249 | diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c |
250 | index 27bd586b94b0..9fc9aed6ca9a 100644 |
251 | --- a/drivers/net/vxlan.c |
252 | +++ b/drivers/net/vxlan.c |
253 | @@ -2003,7 +2003,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, |
254 | struct pcpu_sw_netstats *tx_stats, *rx_stats; |
255 | union vxlan_addr loopback; |
256 | union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip; |
257 | - struct net_device *dev = skb->dev; |
258 | + struct net_device *dev; |
259 | int len = skb->len; |
260 | |
261 | tx_stats = this_cpu_ptr(src_vxlan->dev->tstats); |
262 | @@ -2023,9 +2023,15 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, |
263 | #endif |
264 | } |
265 | |
266 | + rcu_read_lock(); |
267 | + dev = skb->dev; |
268 | + if (unlikely(!(dev->flags & IFF_UP))) { |
269 | + kfree_skb(skb); |
270 | + goto drop; |
271 | + } |
272 | + |
273 | if (dst_vxlan->cfg.flags & VXLAN_F_LEARN) |
274 | - vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source, 0, |
275 | - vni); |
276 | + vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source, 0, vni); |
277 | |
278 | u64_stats_update_begin(&tx_stats->syncp); |
279 | tx_stats->tx_packets++; |
280 | @@ -2038,8 +2044,10 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, |
281 | rx_stats->rx_bytes += len; |
282 | u64_stats_update_end(&rx_stats->syncp); |
283 | } else { |
284 | +drop: |
285 | dev->stats.rx_dropped++; |
286 | } |
287 | + rcu_read_unlock(); |
288 | } |
289 | |
290 | static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev, |
291 | diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c |
292 | index afc4680c584f..7eb1549cea81 100644 |
293 | --- a/drivers/pci/pci.c |
294 | +++ b/drivers/pci/pci.c |
295 | @@ -6113,7 +6113,8 @@ static int __init pci_setup(char *str) |
296 | } else if (!strncmp(str, "pcie_scan_all", 13)) { |
297 | pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS); |
298 | } else if (!strncmp(str, "disable_acs_redir=", 18)) { |
299 | - disable_acs_redir_param = str + 18; |
300 | + disable_acs_redir_param = |
301 | + kstrdup(str + 18, GFP_KERNEL); |
302 | } else { |
303 | printk(KERN_ERR "PCI: Unknown option `%s'\n", |
304 | str); |
305 | diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c |
306 | index 3f7aad45d215..f1b730b77a31 100644 |
307 | --- a/drivers/target/target_core_transport.c |
308 | +++ b/drivers/target/target_core_transport.c |
309 | @@ -266,7 +266,7 @@ struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops) |
310 | } |
311 | ret = transport_init_session(se_sess); |
312 | if (ret < 0) { |
313 | - kfree(se_sess); |
314 | + kmem_cache_free(se_sess_cache, se_sess); |
315 | return ERR_PTR(ret); |
316 | } |
317 | se_sess->sup_prot_ops = sup_prot_ops; |
318 | diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c |
319 | index cf82e7266397..5eaeca805c95 100644 |
320 | --- a/drivers/vhost/vhost.c |
321 | +++ b/drivers/vhost/vhost.c |
322 | @@ -1784,7 +1784,7 @@ static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len) |
323 | |
324 | ret = translate_desc(vq, (uintptr_t)vq->used + used_offset, |
325 | len, iov, 64, VHOST_ACCESS_WO); |
326 | - if (ret) |
327 | + if (ret < 0) |
328 | return ret; |
329 | |
330 | for (i = 0; i < ret; i++) { |
331 | diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h |
332 | index 2b2a6dce1630..4c76fe2c8488 100644 |
333 | --- a/include/linux/netdev_features.h |
334 | +++ b/include/linux/netdev_features.h |
335 | @@ -11,6 +11,8 @@ |
336 | #define _LINUX_NETDEV_FEATURES_H |
337 | |
338 | #include <linux/types.h> |
339 | +#include <linux/bitops.h> |
340 | +#include <asm/byteorder.h> |
341 | |
342 | typedef u64 netdev_features_t; |
343 | |
344 | @@ -154,8 +156,26 @@ enum { |
345 | #define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX) |
346 | #define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX) |
347 | |
348 | -#define for_each_netdev_feature(mask_addr, bit) \ |
349 | - for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT) |
350 | +/* Finds the next feature with the highest number of the range of start till 0. |
351 | + */ |
352 | +static inline int find_next_netdev_feature(u64 feature, unsigned long start) |
353 | +{ |
354 | + /* like BITMAP_LAST_WORD_MASK() for u64 |
355 | + * this sets the most significant 64 - start to 0. |
356 | + */ |
357 | + feature &= ~0ULL >> (-start & ((sizeof(feature) * 8) - 1)); |
358 | + |
359 | + return fls64(feature) - 1; |
360 | +} |
361 | + |
362 | +/* This goes for the MSB to the LSB through the set feature bits, |
363 | + * mask_addr should be a u64 and bit an int |
364 | + */ |
365 | +#define for_each_netdev_feature(mask_addr, bit) \ |
366 | + for ((bit) = find_next_netdev_feature((mask_addr), \ |
367 | + NETDEV_FEATURE_COUNT); \ |
368 | + (bit) >= 0; \ |
369 | + (bit) = find_next_netdev_feature((mask_addr), (bit) - 1)) |
370 | |
371 | /* Features valid for ethtool to change */ |
372 | /* = all defined minus driver/device-class-related */ |
373 | diff --git a/include/net/ax25.h b/include/net/ax25.h |
374 | index 3f9aea8087e3..8b7eb46ad72d 100644 |
375 | --- a/include/net/ax25.h |
376 | +++ b/include/net/ax25.h |
377 | @@ -201,6 +201,18 @@ static inline void ax25_hold_route(ax25_route *ax25_rt) |
378 | |
379 | void __ax25_put_route(ax25_route *ax25_rt); |
380 | |
381 | +extern rwlock_t ax25_route_lock; |
382 | + |
383 | +static inline void ax25_route_lock_use(void) |
384 | +{ |
385 | + read_lock(&ax25_route_lock); |
386 | +} |
387 | + |
388 | +static inline void ax25_route_lock_unuse(void) |
389 | +{ |
390 | + read_unlock(&ax25_route_lock); |
391 | +} |
392 | + |
393 | static inline void ax25_put_route(ax25_route *ax25_rt) |
394 | { |
395 | if (refcount_dec_and_test(&ax25_rt->refcount)) |
396 | diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h |
397 | index 00b5e7825508..74ff688568a0 100644 |
398 | --- a/include/net/inetpeer.h |
399 | +++ b/include/net/inetpeer.h |
400 | @@ -39,6 +39,7 @@ struct inet_peer { |
401 | |
402 | u32 metrics[RTAX_MAX]; |
403 | u32 rate_tokens; /* rate limiting for ICMP */ |
404 | + u32 n_redirects; |
405 | unsigned long rate_last; |
406 | /* |
407 | * Once inet_peer is queued for deletion (refcnt == 0), following field |
408 | diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c |
409 | index 70417e9b932d..314bbc8010fb 100644 |
410 | --- a/net/ax25/ax25_ip.c |
411 | +++ b/net/ax25/ax25_ip.c |
412 | @@ -114,6 +114,7 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb) |
413 | dst = (ax25_address *)(bp + 1); |
414 | src = (ax25_address *)(bp + 8); |
415 | |
416 | + ax25_route_lock_use(); |
417 | route = ax25_get_route(dst, NULL); |
418 | if (route) { |
419 | digipeat = route->digipeat; |
420 | @@ -206,9 +207,8 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb) |
421 | ax25_queue_xmit(skb, dev); |
422 | |
423 | put: |
424 | - if (route) |
425 | - ax25_put_route(route); |
426 | |
427 | + ax25_route_lock_unuse(); |
428 | return NETDEV_TX_OK; |
429 | } |
430 | |
431 | diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c |
432 | index a0eff323af12..66f74c85cf6b 100644 |
433 | --- a/net/ax25/ax25_route.c |
434 | +++ b/net/ax25/ax25_route.c |
435 | @@ -40,7 +40,7 @@ |
436 | #include <linux/export.h> |
437 | |
438 | static ax25_route *ax25_route_list; |
439 | -static DEFINE_RWLOCK(ax25_route_lock); |
440 | +DEFINE_RWLOCK(ax25_route_lock); |
441 | |
442 | void ax25_rt_device_down(struct net_device *dev) |
443 | { |
444 | @@ -335,6 +335,7 @@ const struct seq_operations ax25_rt_seqops = { |
445 | * Find AX.25 route |
446 | * |
447 | * Only routes with a reference count of zero can be destroyed. |
448 | + * Must be called with ax25_route_lock read locked. |
449 | */ |
450 | ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev) |
451 | { |
452 | @@ -342,7 +343,6 @@ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev) |
453 | ax25_route *ax25_def_rt = NULL; |
454 | ax25_route *ax25_rt; |
455 | |
456 | - read_lock(&ax25_route_lock); |
457 | /* |
458 | * Bind to the physical interface we heard them on, or the default |
459 | * route if none is found; |
460 | @@ -365,11 +365,6 @@ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev) |
461 | if (ax25_spe_rt != NULL) |
462 | ax25_rt = ax25_spe_rt; |
463 | |
464 | - if (ax25_rt != NULL) |
465 | - ax25_hold_route(ax25_rt); |
466 | - |
467 | - read_unlock(&ax25_route_lock); |
468 | - |
469 | return ax25_rt; |
470 | } |
471 | |
472 | @@ -400,9 +395,12 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr) |
473 | ax25_route *ax25_rt; |
474 | int err = 0; |
475 | |
476 | - if ((ax25_rt = ax25_get_route(addr, NULL)) == NULL) |
477 | + ax25_route_lock_use(); |
478 | + ax25_rt = ax25_get_route(addr, NULL); |
479 | + if (!ax25_rt) { |
480 | + ax25_route_lock_unuse(); |
481 | return -EHOSTUNREACH; |
482 | - |
483 | + } |
484 | if ((ax25->ax25_dev = ax25_dev_ax25dev(ax25_rt->dev)) == NULL) { |
485 | err = -EHOSTUNREACH; |
486 | goto put; |
487 | @@ -437,8 +435,7 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr) |
488 | } |
489 | |
490 | put: |
491 | - ax25_put_route(ax25_rt); |
492 | - |
493 | + ax25_route_lock_unuse(); |
494 | return err; |
495 | } |
496 | |
497 | diff --git a/net/core/dev.c b/net/core/dev.c |
498 | index af097ca9cb4f..5c8c0a572ee9 100644 |
499 | --- a/net/core/dev.c |
500 | +++ b/net/core/dev.c |
501 | @@ -8039,7 +8039,7 @@ static netdev_features_t netdev_sync_upper_features(struct net_device *lower, |
502 | netdev_features_t feature; |
503 | int feature_bit; |
504 | |
505 | - for_each_netdev_feature(&upper_disables, feature_bit) { |
506 | + for_each_netdev_feature(upper_disables, feature_bit) { |
507 | feature = __NETIF_F_BIT(feature_bit); |
508 | if (!(upper->wanted_features & feature) |
509 | && (features & feature)) { |
510 | @@ -8059,7 +8059,7 @@ static void netdev_sync_lower_features(struct net_device *upper, |
511 | netdev_features_t feature; |
512 | int feature_bit; |
513 | |
514 | - for_each_netdev_feature(&upper_disables, feature_bit) { |
515 | + for_each_netdev_feature(upper_disables, feature_bit) { |
516 | feature = __NETIF_F_BIT(feature_bit); |
517 | if (!(features & feature) && (lower->features & feature)) { |
518 | netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n", |
519 | diff --git a/net/core/skbuff.c b/net/core/skbuff.c |
520 | index 589ec5b9ec5f..8656b1e20d35 100644 |
521 | --- a/net/core/skbuff.c |
522 | +++ b/net/core/skbuff.c |
523 | @@ -353,6 +353,8 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) |
524 | */ |
525 | void *netdev_alloc_frag(unsigned int fragsz) |
526 | { |
527 | + fragsz = SKB_DATA_ALIGN(fragsz); |
528 | + |
529 | return __netdev_alloc_frag(fragsz, GFP_ATOMIC); |
530 | } |
531 | EXPORT_SYMBOL(netdev_alloc_frag); |
532 | @@ -366,6 +368,8 @@ static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) |
533 | |
534 | void *napi_alloc_frag(unsigned int fragsz) |
535 | { |
536 | + fragsz = SKB_DATA_ALIGN(fragsz); |
537 | + |
538 | return __napi_alloc_frag(fragsz, GFP_ATOMIC); |
539 | } |
540 | EXPORT_SYMBOL(napi_alloc_frag); |
541 | diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c |
542 | index d757b9642d0d..be778599bfed 100644 |
543 | --- a/net/ipv4/inetpeer.c |
544 | +++ b/net/ipv4/inetpeer.c |
545 | @@ -216,6 +216,7 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base, |
546 | atomic_set(&p->rid, 0); |
547 | p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW; |
548 | p->rate_tokens = 0; |
549 | + p->n_redirects = 0; |
550 | /* 60*HZ is arbitrary, but chosen enough high so that the first |
551 | * calculation of tokens is at its maximum. |
552 | */ |
553 | diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic_main.c b/net/ipv4/netfilter/nf_nat_snmp_basic_main.c |
554 | index ac110c1d55b5..481437fc1eb2 100644 |
555 | --- a/net/ipv4/netfilter/nf_nat_snmp_basic_main.c |
556 | +++ b/net/ipv4/netfilter/nf_nat_snmp_basic_main.c |
557 | @@ -104,6 +104,8 @@ static void fast_csum(struct snmp_ctx *ctx, unsigned char offset) |
558 | int snmp_version(void *context, size_t hdrlen, unsigned char tag, |
559 | const void *data, size_t datalen) |
560 | { |
561 | + if (datalen != 1) |
562 | + return -EINVAL; |
563 | if (*(unsigned char *)data > 1) |
564 | return -ENOTSUPP; |
565 | return 1; |
566 | @@ -113,8 +115,11 @@ int snmp_helper(void *context, size_t hdrlen, unsigned char tag, |
567 | const void *data, size_t datalen) |
568 | { |
569 | struct snmp_ctx *ctx = (struct snmp_ctx *)context; |
570 | - __be32 *pdata = (__be32 *)data; |
571 | + __be32 *pdata; |
572 | |
573 | + if (datalen != 4) |
574 | + return -EINVAL; |
575 | + pdata = (__be32 *)data; |
576 | if (*pdata == ctx->from) { |
577 | pr_debug("%s: %pI4 to %pI4\n", __func__, |
578 | (void *)&ctx->from, (void *)&ctx->to); |
579 | diff --git a/net/ipv4/route.c b/net/ipv4/route.c |
580 | index 8501554e96a4..436b46c0e687 100644 |
581 | --- a/net/ipv4/route.c |
582 | +++ b/net/ipv4/route.c |
583 | @@ -887,13 +887,15 @@ void ip_rt_send_redirect(struct sk_buff *skb) |
584 | /* No redirected packets during ip_rt_redirect_silence; |
585 | * reset the algorithm. |
586 | */ |
587 | - if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) |
588 | + if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) { |
589 | peer->rate_tokens = 0; |
590 | + peer->n_redirects = 0; |
591 | + } |
592 | |
593 | /* Too many ignored redirects; do not send anything |
594 | * set dst.rate_last to the last seen redirected packet. |
595 | */ |
596 | - if (peer->rate_tokens >= ip_rt_redirect_number) { |
597 | + if (peer->n_redirects >= ip_rt_redirect_number) { |
598 | peer->rate_last = jiffies; |
599 | goto out_put_peer; |
600 | } |
601 | @@ -910,6 +912,7 @@ void ip_rt_send_redirect(struct sk_buff *skb) |
602 | icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw); |
603 | peer->rate_last = jiffies; |
604 | ++peer->rate_tokens; |
605 | + ++peer->n_redirects; |
606 | #ifdef CONFIG_IP_ROUTE_VERBOSE |
607 | if (log_martians && |
608 | peer->rate_tokens == ip_rt_redirect_number) |
609 | diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c |
610 | index 87fe44197aa1..432dc9af1172 100644 |
611 | --- a/net/ipv4/tcp.c |
612 | +++ b/net/ipv4/tcp.c |
613 | @@ -2519,6 +2519,7 @@ void tcp_write_queue_purge(struct sock *sk) |
614 | sk_mem_reclaim(sk); |
615 | tcp_clear_all_retrans_hints(tcp_sk(sk)); |
616 | tcp_sk(sk)->packets_out = 0; |
617 | + inet_csk(sk)->icsk_backoff = 0; |
618 | } |
619 | |
620 | int tcp_disconnect(struct sock *sk, int flags) |
621 | @@ -2567,7 +2568,6 @@ int tcp_disconnect(struct sock *sk, int flags) |
622 | tp->write_seq += tp->max_window + 2; |
623 | if (tp->write_seq == 0) |
624 | tp->write_seq = 1; |
625 | - icsk->icsk_backoff = 0; |
626 | tp->snd_cwnd = 2; |
627 | icsk->icsk_probes_out = 0; |
628 | tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; |
629 | diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c |
630 | index cd426313a298..3b83b157b0a1 100644 |
631 | --- a/net/ipv4/tcp_ipv4.c |
632 | +++ b/net/ipv4/tcp_ipv4.c |
633 | @@ -535,14 +535,15 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) |
634 | if (sock_owned_by_user(sk)) |
635 | break; |
636 | |
637 | + skb = tcp_rtx_queue_head(sk); |
638 | + if (WARN_ON_ONCE(!skb)) |
639 | + break; |
640 | + |
641 | icsk->icsk_backoff--; |
642 | icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) : |
643 | TCP_TIMEOUT_INIT; |
644 | icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX); |
645 | |
646 | - skb = tcp_rtx_queue_head(sk); |
647 | - BUG_ON(!skb); |
648 | - |
649 | tcp_mstamp_refresh(tp); |
650 | delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp); |
651 | remaining = icsk->icsk_rto - |
652 | diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c |
653 | index 3dfc50cd86d6..c57efd5c5b38 100644 |
654 | --- a/net/ipv6/addrconf.c |
655 | +++ b/net/ipv6/addrconf.c |
656 | @@ -1148,7 +1148,8 @@ check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires) |
657 | list_for_each_entry(ifa, &idev->addr_list, if_list) { |
658 | if (ifa == ifp) |
659 | continue; |
660 | - if (!ipv6_prefix_equal(&ifa->addr, &ifp->addr, |
661 | + if (ifa->prefix_len != ifp->prefix_len || |
662 | + !ipv6_prefix_equal(&ifa->addr, &ifp->addr, |
663 | ifp->prefix_len)) |
664 | continue; |
665 | if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE)) |
666 | diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c |
667 | index be04877b3827..faed98dab913 100644 |
668 | --- a/net/ipv6/ip6_gre.c |
669 | +++ b/net/ipv6/ip6_gre.c |
670 | @@ -1735,6 +1735,24 @@ static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[], |
671 | return 0; |
672 | } |
673 | |
674 | +static void ip6erspan_set_version(struct nlattr *data[], |
675 | + struct __ip6_tnl_parm *parms) |
676 | +{ |
677 | + parms->erspan_ver = 1; |
678 | + if (data[IFLA_GRE_ERSPAN_VER]) |
679 | + parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]); |
680 | + |
681 | + if (parms->erspan_ver == 1) { |
682 | + if (data[IFLA_GRE_ERSPAN_INDEX]) |
683 | + parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]); |
684 | + } else if (parms->erspan_ver == 2) { |
685 | + if (data[IFLA_GRE_ERSPAN_DIR]) |
686 | + parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]); |
687 | + if (data[IFLA_GRE_ERSPAN_HWID]) |
688 | + parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]); |
689 | + } |
690 | +} |
691 | + |
692 | static void ip6gre_netlink_parms(struct nlattr *data[], |
693 | struct __ip6_tnl_parm *parms) |
694 | { |
695 | @@ -1783,20 +1801,6 @@ static void ip6gre_netlink_parms(struct nlattr *data[], |
696 | |
697 | if (data[IFLA_GRE_COLLECT_METADATA]) |
698 | parms->collect_md = true; |
699 | - |
700 | - parms->erspan_ver = 1; |
701 | - if (data[IFLA_GRE_ERSPAN_VER]) |
702 | - parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]); |
703 | - |
704 | - if (parms->erspan_ver == 1) { |
705 | - if (data[IFLA_GRE_ERSPAN_INDEX]) |
706 | - parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]); |
707 | - } else if (parms->erspan_ver == 2) { |
708 | - if (data[IFLA_GRE_ERSPAN_DIR]) |
709 | - parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]); |
710 | - if (data[IFLA_GRE_ERSPAN_HWID]) |
711 | - parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]); |
712 | - } |
713 | } |
714 | |
715 | static int ip6gre_tap_init(struct net_device *dev) |
716 | @@ -2225,6 +2229,7 @@ static int ip6erspan_newlink(struct net *src_net, struct net_device *dev, |
717 | int err; |
718 | |
719 | ip6gre_netlink_parms(data, &nt->parms); |
720 | + ip6erspan_set_version(data, &nt->parms); |
721 | ign = net_generic(net, ip6gre_net_id); |
722 | |
723 | if (nt->parms.collect_md) { |
724 | @@ -2270,6 +2275,7 @@ static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[], |
725 | if (IS_ERR(t)) |
726 | return PTR_ERR(t); |
727 | |
728 | + ip6erspan_set_version(data, &p); |
729 | ip6gre_tunnel_unlink_md(ign, t); |
730 | ip6gre_tunnel_unlink(ign, t); |
731 | ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]); |
732 | diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c |
733 | index b6ea0fadb34f..c76c21604ffd 100644 |
734 | --- a/net/packet/af_packet.c |
735 | +++ b/net/packet/af_packet.c |
736 | @@ -2887,7 +2887,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) |
737 | goto out_free; |
738 | } else if (reserve) { |
739 | skb_reserve(skb, -reserve); |
740 | - if (len < reserve) |
741 | + if (len < reserve + sizeof(struct ipv6hdr) && |
742 | + dev->min_header_len != dev->hard_header_len) |
743 | skb_reset_network_header(skb); |
744 | } |
745 | |
746 | diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c |
747 | index c8b9082f4a9d..2d2ed6772fe4 100644 |
748 | --- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c |
749 | +++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c |
750 | @@ -44,7 +44,7 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum, |
751 | unsigned char *cksum, unsigned char *buf) |
752 | { |
753 | struct crypto_skcipher *cipher; |
754 | - unsigned char plain[8]; |
755 | + unsigned char *plain; |
756 | s32 code; |
757 | |
758 | dprintk("RPC: %s:\n", __func__); |
759 | @@ -53,6 +53,10 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum, |
760 | if (IS_ERR(cipher)) |
761 | return PTR_ERR(cipher); |
762 | |
763 | + plain = kmalloc(8, GFP_NOFS); |
764 | + if (!plain) |
765 | + return -ENOMEM; |
766 | + |
767 | plain[0] = (unsigned char) ((seqnum >> 24) & 0xff); |
768 | plain[1] = (unsigned char) ((seqnum >> 16) & 0xff); |
769 | plain[2] = (unsigned char) ((seqnum >> 8) & 0xff); |
770 | @@ -69,6 +73,7 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum, |
771 | code = krb5_encrypt(cipher, cksum, plain, buf, 8); |
772 | out: |
773 | crypto_free_skcipher(cipher); |
774 | + kfree(plain); |
775 | return code; |
776 | } |
777 | s32 |
778 | @@ -78,12 +83,17 @@ krb5_make_seq_num(struct krb5_ctx *kctx, |
779 | u32 seqnum, |
780 | unsigned char *cksum, unsigned char *buf) |
781 | { |
782 | - unsigned char plain[8]; |
783 | + unsigned char *plain; |
784 | + s32 code; |
785 | |
786 | if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) |
787 | return krb5_make_rc4_seq_num(kctx, direction, seqnum, |
788 | cksum, buf); |
789 | |
790 | + plain = kmalloc(8, GFP_NOFS); |
791 | + if (!plain) |
792 | + return -ENOMEM; |
793 | + |
794 | plain[0] = (unsigned char) (seqnum & 0xff); |
795 | plain[1] = (unsigned char) ((seqnum >> 8) & 0xff); |
796 | plain[2] = (unsigned char) ((seqnum >> 16) & 0xff); |
797 | @@ -94,7 +104,9 @@ krb5_make_seq_num(struct krb5_ctx *kctx, |
798 | plain[6] = direction; |
799 | plain[7] = direction; |
800 | |
801 | - return krb5_encrypt(key, cksum, plain, buf, 8); |
802 | + code = krb5_encrypt(key, cksum, plain, buf, 8); |
803 | + kfree(plain); |
804 | + return code; |
805 | } |
806 | |
807 | static s32 |
808 | @@ -102,7 +114,7 @@ krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum, |
809 | unsigned char *buf, int *direction, s32 *seqnum) |
810 | { |
811 | struct crypto_skcipher *cipher; |
812 | - unsigned char plain[8]; |
813 | + unsigned char *plain; |
814 | s32 code; |
815 | |
816 | dprintk("RPC: %s:\n", __func__); |
817 | @@ -115,20 +127,28 @@ krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum, |
818 | if (code) |
819 | goto out; |
820 | |
821 | + plain = kmalloc(8, GFP_NOFS); |
822 | + if (!plain) { |
823 | + code = -ENOMEM; |
824 | + goto out; |
825 | + } |
826 | + |
827 | code = krb5_decrypt(cipher, cksum, buf, plain, 8); |
828 | if (code) |
829 | - goto out; |
830 | + goto out_plain; |
831 | |
832 | if ((plain[4] != plain[5]) || (plain[4] != plain[6]) |
833 | || (plain[4] != plain[7])) { |
834 | code = (s32)KG_BAD_SEQ; |
835 | - goto out; |
836 | + goto out_plain; |
837 | } |
838 | |
839 | *direction = plain[4]; |
840 | |
841 | *seqnum = ((plain[0] << 24) | (plain[1] << 16) | |
842 | (plain[2] << 8) | (plain[3])); |
843 | +out_plain: |
844 | + kfree(plain); |
845 | out: |
846 | crypto_free_skcipher(cipher); |
847 | return code; |
848 | @@ -141,26 +161,33 @@ krb5_get_seq_num(struct krb5_ctx *kctx, |
849 | int *direction, u32 *seqnum) |
850 | { |
851 | s32 code; |
852 | - unsigned char plain[8]; |
853 | struct crypto_skcipher *key = kctx->seq; |
854 | + unsigned char *plain; |
855 | |
856 | dprintk("RPC: krb5_get_seq_num:\n"); |
857 | |
858 | if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) |
859 | return krb5_get_rc4_seq_num(kctx, cksum, buf, |
860 | direction, seqnum); |
861 | + plain = kmalloc(8, GFP_NOFS); |
862 | + if (!plain) |
863 | + return -ENOMEM; |
864 | |
865 | if ((code = krb5_decrypt(key, cksum, buf, plain, 8))) |
866 | - return code; |
867 | + goto out; |
868 | |
869 | if ((plain[4] != plain[5]) || (plain[4] != plain[6]) || |
870 | - (plain[4] != plain[7])) |
871 | - return (s32)KG_BAD_SEQ; |
872 | + (plain[4] != plain[7])) { |
873 | + code = (s32)KG_BAD_SEQ; |
874 | + goto out; |
875 | + } |
876 | |
877 | *direction = plain[4]; |
878 | |
879 | *seqnum = ((plain[0]) | |
880 | (plain[1] << 8) | (plain[2] << 16) | (plain[3] << 24)); |
881 | |
882 | - return 0; |
883 | +out: |
884 | + kfree(plain); |
885 | + return code; |
886 | } |
887 | diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c |
888 | index c361ce782412..c3d5ab01fba7 100644 |
889 | --- a/net/vmw_vsock/vmci_transport.c |
890 | +++ b/net/vmw_vsock/vmci_transport.c |
891 | @@ -1651,6 +1651,10 @@ static void vmci_transport_cleanup(struct work_struct *work) |
892 | |
893 | static void vmci_transport_destruct(struct vsock_sock *vsk) |
894 | { |
895 | + /* transport can be NULL if we hit a failure at init() time */ |
896 | + if (!vmci_trans(vsk)) |
897 | + return; |
898 | + |
899 | /* Ensure that the detach callback doesn't use the sk/vsk |
900 | * we are about to destruct. |
901 | */ |
902 | diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c |
903 | index d49aa79b7997..fef473c736fa 100644 |
904 | --- a/net/x25/af_x25.c |
905 | +++ b/net/x25/af_x25.c |
906 | @@ -352,17 +352,15 @@ static unsigned int x25_new_lci(struct x25_neigh *nb) |
907 | unsigned int lci = 1; |
908 | struct sock *sk; |
909 | |
910 | - read_lock_bh(&x25_list_lock); |
911 | - |
912 | - while ((sk = __x25_find_socket(lci, nb)) != NULL) { |
913 | + while ((sk = x25_find_socket(lci, nb)) != NULL) { |
914 | sock_put(sk); |
915 | if (++lci == 4096) { |
916 | lci = 0; |
917 | break; |
918 | } |
919 | + cond_resched(); |
920 | } |
921 | |
922 | - read_unlock_bh(&x25_list_lock); |
923 | return lci; |
924 | } |
925 |