Contents of /trunk/kernel-alx/patches-4.9/0259-4.9.160-all-fixes.patch
Parent Directory | Revision Log
Revision 3312 -
(show annotations)
(download)
Tue Mar 12 10:43:17 2019 UTC (5 years, 6 months ago) by niro
File size: 19217 byte(s)
-linux-4.9.160
1 | diff --git a/Makefile b/Makefile |
2 | index a452ead13b1e..af70503df3f4 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,6 +1,6 @@ |
6 | VERSION = 4 |
7 | PATCHLEVEL = 9 |
8 | -SUBLEVEL = 159 |
9 | +SUBLEVEL = 160 |
10 | EXTRAVERSION = |
11 | NAME = Roaring Lionus |
12 | |
13 | diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c |
14 | index cb6606a0470d..be60bd5bab78 100644 |
15 | --- a/drivers/hwmon/lm80.c |
16 | +++ b/drivers/hwmon/lm80.c |
17 | @@ -393,8 +393,10 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr, |
18 | } |
19 | |
20 | rv = lm80_read_value(client, LM80_REG_FANDIV); |
21 | - if (rv < 0) |
22 | + if (rv < 0) { |
23 | + mutex_unlock(&data->update_lock); |
24 | return rv; |
25 | + } |
26 | reg = (rv & ~(3 << (2 * (nr + 1)))) |
27 | | (data->fan_div[nr] << (2 * (nr + 1))); |
28 | lm80_write_value(client, LM80_REG_FANDIV, reg); |
29 | diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c |
30 | index 9438d7ec3308..8b29e97cf668 100644 |
31 | --- a/drivers/isdn/mISDN/timerdev.c |
32 | +++ b/drivers/isdn/mISDN/timerdev.c |
33 | @@ -168,8 +168,8 @@ dev_expire_timer(unsigned long data) |
34 | spin_lock_irqsave(&timer->dev->lock, flags); |
35 | if (timer->id >= 0) |
36 | list_move_tail(&timer->list, &timer->dev->expired); |
37 | - spin_unlock_irqrestore(&timer->dev->lock, flags); |
38 | wake_up_interruptible(&timer->dev->wait); |
39 | + spin_unlock_irqrestore(&timer->dev->lock, flags); |
40 | } |
41 | |
42 | static int |
43 | diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c |
44 | index 93ab0b3ad393..af11781fe5f9 100644 |
45 | --- a/drivers/net/ethernet/marvell/sky2.c |
46 | +++ b/drivers/net/ethernet/marvell/sky2.c |
47 | @@ -5079,7 +5079,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
48 | INIT_WORK(&hw->restart_work, sky2_restart); |
49 | |
50 | pci_set_drvdata(pdev, hw); |
51 | - pdev->d3_delay = 200; |
52 | + pdev->d3_delay = 300; |
53 | |
54 | return 0; |
55 | |
56 | diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c |
57 | index a601f8d43b75..f988c7573ba5 100644 |
58 | --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c |
59 | +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c |
60 | @@ -237,15 +237,18 @@ static inline u64 dwmac4_get_timestamp(void *desc, u32 ats) |
61 | static int dwmac4_rx_check_timestamp(void *desc) |
62 | { |
63 | struct dma_desc *p = (struct dma_desc *)desc; |
64 | + unsigned int rdes0 = le32_to_cpu(p->des0); |
65 | + unsigned int rdes1 = le32_to_cpu(p->des1); |
66 | + unsigned int rdes3 = le32_to_cpu(p->des3); |
67 | u32 own, ctxt; |
68 | int ret = 1; |
69 | |
70 | - own = p->des3 & RDES3_OWN; |
71 | - ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR) |
72 | + own = rdes3 & RDES3_OWN; |
73 | + ctxt = ((rdes3 & RDES3_CONTEXT_DESCRIPTOR) |
74 | >> RDES3_CONTEXT_DESCRIPTOR_SHIFT); |
75 | |
76 | if (likely(!own && ctxt)) { |
77 | - if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff)) |
78 | + if ((rdes0 == 0xffffffff) && (rdes1 == 0xffffffff)) |
79 | /* Corrupted value */ |
80 | ret = -EINVAL; |
81 | else |
82 | diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c |
83 | index c5d0142adda2..3519a8a589dd 100644 |
84 | --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c |
85 | +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c |
86 | @@ -676,25 +676,27 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev, |
87 | struct ethtool_eee *edata) |
88 | { |
89 | struct stmmac_priv *priv = netdev_priv(dev); |
90 | + int ret; |
91 | |
92 | - priv->eee_enabled = edata->eee_enabled; |
93 | - |
94 | - if (!priv->eee_enabled) |
95 | + if (!edata->eee_enabled) { |
96 | stmmac_disable_eee_mode(priv); |
97 | - else { |
98 | + } else { |
99 | /* We are asking for enabling the EEE but it is safe |
100 | * to verify all by invoking the eee_init function. |
101 | * In case of failure it will return an error. |
102 | */ |
103 | - priv->eee_enabled = stmmac_eee_init(priv); |
104 | - if (!priv->eee_enabled) |
105 | + edata->eee_enabled = stmmac_eee_init(priv); |
106 | + if (!edata->eee_enabled) |
107 | return -EOPNOTSUPP; |
108 | - |
109 | - /* Do not change tx_lpi_timer in case of failure */ |
110 | - priv->tx_lpi_timer = edata->tx_lpi_timer; |
111 | } |
112 | |
113 | - return phy_ethtool_set_eee(priv->phydev, edata); |
114 | + ret = phy_ethtool_set_eee(dev->phydev, edata); |
115 | + if (ret) |
116 | + return ret; |
117 | + |
118 | + priv->eee_enabled = edata->eee_enabled; |
119 | + priv->tx_lpi_timer = edata->tx_lpi_timer; |
120 | + return 0; |
121 | } |
122 | |
123 | static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv) |
124 | diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c |
125 | index 7a14e8170e82..aef525467af0 100644 |
126 | --- a/drivers/net/phy/xilinx_gmii2rgmii.c |
127 | +++ b/drivers/net/phy/xilinx_gmii2rgmii.c |
128 | @@ -42,7 +42,10 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev) |
129 | u16 val = 0; |
130 | int err; |
131 | |
132 | - err = priv->phy_drv->read_status(phydev); |
133 | + if (priv->phy_drv->read_status) |
134 | + err = priv->phy_drv->read_status(phydev); |
135 | + else |
136 | + err = genphy_read_status(phydev); |
137 | if (err < 0) |
138 | return err; |
139 | |
140 | diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c |
141 | index 28afdf22b88f..373713faa1f5 100644 |
142 | --- a/drivers/net/vxlan.c |
143 | +++ b/drivers/net/vxlan.c |
144 | @@ -1911,7 +1911,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, |
145 | struct pcpu_sw_netstats *tx_stats, *rx_stats; |
146 | union vxlan_addr loopback; |
147 | union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip; |
148 | - struct net_device *dev = skb->dev; |
149 | + struct net_device *dev; |
150 | int len = skb->len; |
151 | |
152 | tx_stats = this_cpu_ptr(src_vxlan->dev->tstats); |
153 | @@ -1931,8 +1931,15 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, |
154 | #endif |
155 | } |
156 | |
157 | + rcu_read_lock(); |
158 | + dev = skb->dev; |
159 | + if (unlikely(!(dev->flags & IFF_UP))) { |
160 | + kfree_skb(skb); |
161 | + goto drop; |
162 | + } |
163 | + |
164 | if (dst_vxlan->flags & VXLAN_F_LEARN) |
165 | - vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source); |
166 | + vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source); |
167 | |
168 | u64_stats_update_begin(&tx_stats->syncp); |
169 | tx_stats->tx_packets++; |
170 | @@ -1945,8 +1952,10 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, |
171 | rx_stats->rx_bytes += len; |
172 | u64_stats_update_end(&rx_stats->syncp); |
173 | } else { |
174 | +drop: |
175 | dev->stats.rx_dropped++; |
176 | } |
177 | + rcu_read_unlock(); |
178 | } |
179 | |
180 | static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, |
181 | diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c |
182 | index dc387a974325..2383caf88b67 100644 |
183 | --- a/drivers/vhost/vhost.c |
184 | +++ b/drivers/vhost/vhost.c |
185 | @@ -1696,7 +1696,7 @@ static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len) |
186 | |
187 | ret = translate_desc(vq, (uintptr_t)vq->used + used_offset, |
188 | len, iov, 64, VHOST_ACCESS_WO); |
189 | - if (ret) |
190 | + if (ret < 0) |
191 | return ret; |
192 | |
193 | for (i = 0; i < ret; i++) { |
194 | diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c |
195 | index 793d4d571d8d..5c5c389d8fed 100644 |
196 | --- a/fs/btrfs/extent_io.c |
197 | +++ b/fs/btrfs/extent_io.c |
198 | @@ -4463,29 +4463,25 @@ try_submit_last: |
199 | } |
200 | |
201 | /* |
202 | - * Sanity check for fiemap cache |
203 | + * Emit last fiemap cache |
204 | * |
205 | - * All fiemap cache should be submitted by emit_fiemap_extent() |
206 | - * Iteration should be terminated either by last fiemap extent or |
207 | - * fieinfo->fi_extents_max. |
208 | - * So no cached fiemap should exist. |
209 | + * The last fiemap cache may still be cached in the following case: |
210 | + * 0 4k 8k |
211 | + * |<- Fiemap range ->| |
212 | + * |<------------ First extent ----------->| |
213 | + * |
214 | + * In this case, the first extent range will be cached but not emitted. |
215 | + * So we must emit it before ending extent_fiemap(). |
216 | */ |
217 | -static int check_fiemap_cache(struct btrfs_fs_info *fs_info, |
218 | - struct fiemap_extent_info *fieinfo, |
219 | - struct fiemap_cache *cache) |
220 | +static int emit_last_fiemap_cache(struct btrfs_fs_info *fs_info, |
221 | + struct fiemap_extent_info *fieinfo, |
222 | + struct fiemap_cache *cache) |
223 | { |
224 | int ret; |
225 | |
226 | if (!cache->cached) |
227 | return 0; |
228 | |
229 | - /* Small and recoverbale problem, only to info developer */ |
230 | -#ifdef CONFIG_BTRFS_DEBUG |
231 | - WARN_ON(1); |
232 | -#endif |
233 | - btrfs_warn(fs_info, |
234 | - "unhandled fiemap cache detected: offset=%llu phys=%llu len=%llu flags=0x%x", |
235 | - cache->offset, cache->phys, cache->len, cache->flags); |
236 | ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys, |
237 | cache->len, cache->flags); |
238 | cache->cached = false; |
239 | @@ -4701,7 +4697,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, |
240 | } |
241 | out_free: |
242 | if (!ret) |
243 | - ret = check_fiemap_cache(root->fs_info, fieinfo, &cache); |
244 | + ret = emit_last_fiemap_cache(root->fs_info, fieinfo, &cache); |
245 | free_extent_map(em); |
246 | out: |
247 | btrfs_free_path(path); |
248 | diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h |
249 | index 9c6c8ef2e9e7..b692edeb0b90 100644 |
250 | --- a/include/linux/netdev_features.h |
251 | +++ b/include/linux/netdev_features.h |
252 | @@ -11,6 +11,8 @@ |
253 | #define _LINUX_NETDEV_FEATURES_H |
254 | |
255 | #include <linux/types.h> |
256 | +#include <linux/bitops.h> |
257 | +#include <asm/byteorder.h> |
258 | |
259 | typedef u64 netdev_features_t; |
260 | |
261 | @@ -137,8 +139,26 @@ enum { |
262 | #define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL) |
263 | #define NETIF_F_HW_TC __NETIF_F(HW_TC) |
264 | |
265 | -#define for_each_netdev_feature(mask_addr, bit) \ |
266 | - for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT) |
267 | +/* Finds the next feature with the highest number of the range of start till 0. |
268 | + */ |
269 | +static inline int find_next_netdev_feature(u64 feature, unsigned long start) |
270 | +{ |
271 | + /* like BITMAP_LAST_WORD_MASK() for u64 |
272 | + * this sets the most significant 64 - start to 0. |
273 | + */ |
274 | + feature &= ~0ULL >> (-start & ((sizeof(feature) * 8) - 1)); |
275 | + |
276 | + return fls64(feature) - 1; |
277 | +} |
278 | + |
279 | +/* This goes for the MSB to the LSB through the set feature bits, |
280 | + * mask_addr should be a u64 and bit an int |
281 | + */ |
282 | +#define for_each_netdev_feature(mask_addr, bit) \ |
283 | + for ((bit) = find_next_netdev_feature((mask_addr), \ |
284 | + NETDEV_FEATURE_COUNT); \ |
285 | + (bit) >= 0; \ |
286 | + (bit) = find_next_netdev_feature((mask_addr), (bit) - 1)) |
287 | |
288 | /* Features valid for ethtool to change */ |
289 | /* = all defined minus driver/device-class-related */ |
290 | diff --git a/include/net/ax25.h b/include/net/ax25.h |
291 | index e602f8177ebf..b507ce2b1952 100644 |
292 | --- a/include/net/ax25.h |
293 | +++ b/include/net/ax25.h |
294 | @@ -199,6 +199,18 @@ static inline void ax25_hold_route(ax25_route *ax25_rt) |
295 | |
296 | void __ax25_put_route(ax25_route *ax25_rt); |
297 | |
298 | +extern rwlock_t ax25_route_lock; |
299 | + |
300 | +static inline void ax25_route_lock_use(void) |
301 | +{ |
302 | + read_lock(&ax25_route_lock); |
303 | +} |
304 | + |
305 | +static inline void ax25_route_lock_unuse(void) |
306 | +{ |
307 | + read_unlock(&ax25_route_lock); |
308 | +} |
309 | + |
310 | static inline void ax25_put_route(ax25_route *ax25_rt) |
311 | { |
312 | if (atomic_dec_and_test(&ax25_rt->refcount)) |
313 | diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h |
314 | index 235c7811a86a..408d76f47bd2 100644 |
315 | --- a/include/net/inetpeer.h |
316 | +++ b/include/net/inetpeer.h |
317 | @@ -40,6 +40,7 @@ struct inet_peer { |
318 | |
319 | u32 metrics[RTAX_MAX]; |
320 | u32 rate_tokens; /* rate limiting for ICMP */ |
321 | + u32 n_redirects; |
322 | unsigned long rate_last; |
323 | union { |
324 | struct list_head gc_list; |
325 | diff --git a/include/net/tcp.h b/include/net/tcp.h |
326 | index c3f4f6a9e6c3..fed2a78fb8cb 100644 |
327 | --- a/include/net/tcp.h |
328 | +++ b/include/net/tcp.h |
329 | @@ -1526,6 +1526,7 @@ static inline void tcp_write_queue_purge(struct sock *sk) |
330 | sk_wmem_free_skb(sk, skb); |
331 | sk_mem_reclaim(sk); |
332 | tcp_clear_all_retrans_hints(tcp_sk(sk)); |
333 | + inet_csk(sk)->icsk_backoff = 0; |
334 | } |
335 | |
336 | static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk) |
337 | diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c |
338 | index 2fa3be965101..cd9a24e5b97a 100644 |
339 | --- a/net/ax25/ax25_ip.c |
340 | +++ b/net/ax25/ax25_ip.c |
341 | @@ -114,6 +114,7 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb) |
342 | dst = (ax25_address *)(bp + 1); |
343 | src = (ax25_address *)(bp + 8); |
344 | |
345 | + ax25_route_lock_use(); |
346 | route = ax25_get_route(dst, NULL); |
347 | if (route) { |
348 | digipeat = route->digipeat; |
349 | @@ -206,9 +207,8 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb) |
350 | ax25_queue_xmit(skb, dev); |
351 | |
352 | put: |
353 | - if (route) |
354 | - ax25_put_route(route); |
355 | |
356 | + ax25_route_lock_unuse(); |
357 | return NETDEV_TX_OK; |
358 | } |
359 | |
360 | diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c |
361 | index d39097737e38..149f82bd83fd 100644 |
362 | --- a/net/ax25/ax25_route.c |
363 | +++ b/net/ax25/ax25_route.c |
364 | @@ -40,7 +40,7 @@ |
365 | #include <linux/export.h> |
366 | |
367 | static ax25_route *ax25_route_list; |
368 | -static DEFINE_RWLOCK(ax25_route_lock); |
369 | +DEFINE_RWLOCK(ax25_route_lock); |
370 | |
371 | void ax25_rt_device_down(struct net_device *dev) |
372 | { |
373 | @@ -349,6 +349,7 @@ const struct file_operations ax25_route_fops = { |
374 | * Find AX.25 route |
375 | * |
376 | * Only routes with a reference count of zero can be destroyed. |
377 | + * Must be called with ax25_route_lock read locked. |
378 | */ |
379 | ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev) |
380 | { |
381 | @@ -356,7 +357,6 @@ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev) |
382 | ax25_route *ax25_def_rt = NULL; |
383 | ax25_route *ax25_rt; |
384 | |
385 | - read_lock(&ax25_route_lock); |
386 | /* |
387 | * Bind to the physical interface we heard them on, or the default |
388 | * route if none is found; |
389 | @@ -379,11 +379,6 @@ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev) |
390 | if (ax25_spe_rt != NULL) |
391 | ax25_rt = ax25_spe_rt; |
392 | |
393 | - if (ax25_rt != NULL) |
394 | - ax25_hold_route(ax25_rt); |
395 | - |
396 | - read_unlock(&ax25_route_lock); |
397 | - |
398 | return ax25_rt; |
399 | } |
400 | |
401 | @@ -414,9 +409,12 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr) |
402 | ax25_route *ax25_rt; |
403 | int err = 0; |
404 | |
405 | - if ((ax25_rt = ax25_get_route(addr, NULL)) == NULL) |
406 | + ax25_route_lock_use(); |
407 | + ax25_rt = ax25_get_route(addr, NULL); |
408 | + if (!ax25_rt) { |
409 | + ax25_route_lock_unuse(); |
410 | return -EHOSTUNREACH; |
411 | - |
412 | + } |
413 | if ((ax25->ax25_dev = ax25_dev_ax25dev(ax25_rt->dev)) == NULL) { |
414 | err = -EHOSTUNREACH; |
415 | goto put; |
416 | @@ -451,8 +449,7 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr) |
417 | } |
418 | |
419 | put: |
420 | - ax25_put_route(ax25_rt); |
421 | - |
422 | + ax25_route_lock_unuse(); |
423 | return err; |
424 | } |
425 | |
426 | diff --git a/net/core/dev.c b/net/core/dev.c |
427 | index 071c589f7994..8e187f90c85d 100644 |
428 | --- a/net/core/dev.c |
429 | +++ b/net/core/dev.c |
430 | @@ -6909,7 +6909,7 @@ static netdev_features_t netdev_sync_upper_features(struct net_device *lower, |
431 | netdev_features_t feature; |
432 | int feature_bit; |
433 | |
434 | - for_each_netdev_feature(&upper_disables, feature_bit) { |
435 | + for_each_netdev_feature(upper_disables, feature_bit) { |
436 | feature = __NETIF_F_BIT(feature_bit); |
437 | if (!(upper->wanted_features & feature) |
438 | && (features & feature)) { |
439 | @@ -6929,7 +6929,7 @@ static void netdev_sync_lower_features(struct net_device *upper, |
440 | netdev_features_t feature; |
441 | int feature_bit; |
442 | |
443 | - for_each_netdev_feature(&upper_disables, feature_bit) { |
444 | + for_each_netdev_feature(upper_disables, feature_bit) { |
445 | feature = __NETIF_F_BIT(feature_bit); |
446 | if (!(features & feature) && (lower->features & feature)) { |
447 | netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n", |
448 | diff --git a/net/core/skbuff.c b/net/core/skbuff.c |
449 | index 11501165f0df..4a71d78d0c6a 100644 |
450 | --- a/net/core/skbuff.c |
451 | +++ b/net/core/skbuff.c |
452 | @@ -383,6 +383,8 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) |
453 | */ |
454 | void *netdev_alloc_frag(unsigned int fragsz) |
455 | { |
456 | + fragsz = SKB_DATA_ALIGN(fragsz); |
457 | + |
458 | return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD); |
459 | } |
460 | EXPORT_SYMBOL(netdev_alloc_frag); |
461 | @@ -396,6 +398,8 @@ static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) |
462 | |
463 | void *napi_alloc_frag(unsigned int fragsz) |
464 | { |
465 | + fragsz = SKB_DATA_ALIGN(fragsz); |
466 | + |
467 | return __napi_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD); |
468 | } |
469 | EXPORT_SYMBOL(napi_alloc_frag); |
470 | diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c |
471 | index 86fa45809540..0c5862914f05 100644 |
472 | --- a/net/ipv4/inetpeer.c |
473 | +++ b/net/ipv4/inetpeer.c |
474 | @@ -448,6 +448,7 @@ relookup: |
475 | atomic_set(&p->rid, 0); |
476 | p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW; |
477 | p->rate_tokens = 0; |
478 | + p->n_redirects = 0; |
479 | /* 60*HZ is arbitrary, but chosen enough high so that the first |
480 | * calculation of tokens is at its maximum. |
481 | */ |
482 | diff --git a/net/ipv4/route.c b/net/ipv4/route.c |
483 | index 890141d32ab9..d606de65e2d0 100644 |
484 | --- a/net/ipv4/route.c |
485 | +++ b/net/ipv4/route.c |
486 | @@ -882,13 +882,15 @@ void ip_rt_send_redirect(struct sk_buff *skb) |
487 | /* No redirected packets during ip_rt_redirect_silence; |
488 | * reset the algorithm. |
489 | */ |
490 | - if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) |
491 | + if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) { |
492 | peer->rate_tokens = 0; |
493 | + peer->n_redirects = 0; |
494 | + } |
495 | |
496 | /* Too many ignored redirects; do not send anything |
497 | * set dst.rate_last to the last seen redirected packet. |
498 | */ |
499 | - if (peer->rate_tokens >= ip_rt_redirect_number) { |
500 | + if (peer->n_redirects >= ip_rt_redirect_number) { |
501 | peer->rate_last = jiffies; |
502 | goto out_put_peer; |
503 | } |
504 | @@ -905,6 +907,7 @@ void ip_rt_send_redirect(struct sk_buff *skb) |
505 | icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw); |
506 | peer->rate_last = jiffies; |
507 | ++peer->rate_tokens; |
508 | + ++peer->n_redirects; |
509 | #ifdef CONFIG_IP_ROUTE_VERBOSE |
510 | if (log_martians && |
511 | peer->rate_tokens == ip_rt_redirect_number) |
512 | diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c |
513 | index 9de77d946f5a..2ededb32b754 100644 |
514 | --- a/net/ipv4/tcp.c |
515 | +++ b/net/ipv4/tcp.c |
516 | @@ -2292,7 +2292,6 @@ int tcp_disconnect(struct sock *sk, int flags) |
517 | tp->write_seq += tp->max_window + 2; |
518 | if (tp->write_seq == 0) |
519 | tp->write_seq = 1; |
520 | - icsk->icsk_backoff = 0; |
521 | tp->snd_cwnd = 2; |
522 | icsk->icsk_probes_out = 0; |
523 | tp->packets_out = 0; |
524 | diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c |
525 | index 1ea0c91ba994..82c1064ff4aa 100644 |
526 | --- a/net/ipv4/tcp_ipv4.c |
527 | +++ b/net/ipv4/tcp_ipv4.c |
528 | @@ -464,14 +464,15 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) |
529 | if (sock_owned_by_user(sk)) |
530 | break; |
531 | |
532 | + skb = tcp_write_queue_head(sk); |
533 | + if (WARN_ON_ONCE(!skb)) |
534 | + break; |
535 | + |
536 | icsk->icsk_backoff--; |
537 | icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) : |
538 | TCP_TIMEOUT_INIT; |
539 | icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX); |
540 | |
541 | - skb = tcp_write_queue_head(sk); |
542 | - BUG_ON(!skb); |
543 | - |
544 | remaining = icsk->icsk_rto - |
545 | min(icsk->icsk_rto, |
546 | tcp_time_stamp - tcp_skb_timestamp(skb)); |
547 | diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c |
548 | index 8f79f0414bc3..4ce7f9195151 100644 |
549 | --- a/net/ipv6/addrconf.c |
550 | +++ b/net/ipv6/addrconf.c |
551 | @@ -1074,7 +1074,8 @@ check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires) |
552 | list_for_each_entry(ifa, &idev->addr_list, if_list) { |
553 | if (ifa == ifp) |
554 | continue; |
555 | - if (!ipv6_prefix_equal(&ifa->addr, &ifp->addr, |
556 | + if (ifa->prefix_len != ifp->prefix_len || |
557 | + !ipv6_prefix_equal(&ifa->addr, &ifp->addr, |
558 | ifp->prefix_len)) |
559 | continue; |
560 | if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE)) |
561 | diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c |
562 | index 008f3424dcbc..102bf9194662 100644 |
563 | --- a/net/vmw_vsock/vmci_transport.c |
564 | +++ b/net/vmw_vsock/vmci_transport.c |
565 | @@ -1656,6 +1656,10 @@ static void vmci_transport_cleanup(struct work_struct *work) |
566 | |
567 | static void vmci_transport_destruct(struct vsock_sock *vsk) |
568 | { |
569 | + /* transport can be NULL if we hit a failure at init() time */ |
570 | + if (!vmci_trans(vsk)) |
571 | + return; |
572 | + |
573 | /* Ensure that the detach callback doesn't use the sk/vsk |
574 | * we are about to destruct. |
575 | */ |
576 | diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c |
577 | index 007721632b07..0a7e5d992bba 100644 |
578 | --- a/net/x25/af_x25.c |
579 | +++ b/net/x25/af_x25.c |
580 | @@ -352,17 +352,15 @@ static unsigned int x25_new_lci(struct x25_neigh *nb) |
581 | unsigned int lci = 1; |
582 | struct sock *sk; |
583 | |
584 | - read_lock_bh(&x25_list_lock); |
585 | - |
586 | - while ((sk = __x25_find_socket(lci, nb)) != NULL) { |
587 | + while ((sk = x25_find_socket(lci, nb)) != NULL) { |
588 | sock_put(sk); |
589 | if (++lci == 4096) { |
590 | lci = 0; |
591 | break; |
592 | } |
593 | + cond_resched(); |
594 | } |
595 | |
596 | - read_unlock_bh(&x25_list_lock); |
597 | return lci; |
598 | } |
599 |