Contents of /trunk/kernel-magellan/patches-4.8/0113-4.8.14-all-fixes.patch
Parent Directory | Revision Log
Revision 2853 -
(show annotations)
(download)
Fri Jan 6 09:45:23 2017 UTC (7 years, 8 months ago) by niro
File size: 55012 byte(s)
-linux-4.8.14
1 | diff --git a/Makefile b/Makefile |
2 | index b38abe9adef8..6a7492473a0d 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,6 +1,6 @@ |
6 | VERSION = 4 |
7 | PATCHLEVEL = 8 |
8 | -SUBLEVEL = 13 |
9 | +SUBLEVEL = 14 |
10 | EXTRAVERSION = |
11 | NAME = Psychotic Stoned Sheep |
12 | |
13 | diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c |
14 | index c3c12efe0bc0..9c0c8fd0b292 100644 |
15 | --- a/arch/sparc/kernel/signal_32.c |
16 | +++ b/arch/sparc/kernel/signal_32.c |
17 | @@ -89,7 +89,7 @@ asmlinkage void do_sigreturn(struct pt_regs *regs) |
18 | sf = (struct signal_frame __user *) regs->u_regs[UREG_FP]; |
19 | |
20 | /* 1. Make sure we are not getting garbage from the user */ |
21 | - if (!invalid_frame_pointer(sf, sizeof(*sf))) |
22 | + if (invalid_frame_pointer(sf, sizeof(*sf))) |
23 | goto segv_and_exit; |
24 | |
25 | if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP])) |
26 | @@ -150,7 +150,7 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs) |
27 | |
28 | synchronize_user_stack(); |
29 | sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP]; |
30 | - if (!invalid_frame_pointer(sf, sizeof(*sf))) |
31 | + if (invalid_frame_pointer(sf, sizeof(*sf))) |
32 | goto segv; |
33 | |
34 | if (get_user(ufp, &sf->regs.u_regs[UREG_FP])) |
35 | diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c |
36 | index 7ac6b62fb7c1..05c770825386 100644 |
37 | --- a/arch/sparc/mm/init_64.c |
38 | +++ b/arch/sparc/mm/init_64.c |
39 | @@ -802,8 +802,10 @@ struct mdesc_mblock { |
40 | }; |
41 | static struct mdesc_mblock *mblocks; |
42 | static int num_mblocks; |
43 | +static int find_numa_node_for_addr(unsigned long pa, |
44 | + struct node_mem_mask *pnode_mask); |
45 | |
46 | -static unsigned long ra_to_pa(unsigned long addr) |
47 | +static unsigned long __init ra_to_pa(unsigned long addr) |
48 | { |
49 | int i; |
50 | |
51 | @@ -819,8 +821,11 @@ static unsigned long ra_to_pa(unsigned long addr) |
52 | return addr; |
53 | } |
54 | |
55 | -static int find_node(unsigned long addr) |
56 | +static int __init find_node(unsigned long addr) |
57 | { |
58 | + static bool search_mdesc = true; |
59 | + static struct node_mem_mask last_mem_mask = { ~0UL, ~0UL }; |
60 | + static int last_index; |
61 | int i; |
62 | |
63 | addr = ra_to_pa(addr); |
64 | @@ -830,13 +835,30 @@ static int find_node(unsigned long addr) |
65 | if ((addr & p->mask) == p->val) |
66 | return i; |
67 | } |
68 | - /* The following condition has been observed on LDOM guests.*/ |
69 | - WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node" |
70 | - " rule. Some physical memory will be owned by node 0."); |
71 | - return 0; |
72 | + /* The following condition has been observed on LDOM guests because |
73 | + * node_masks only contains the best latency mask and value. |
74 | + * LDOM guest's mdesc can contain a single latency group to |
75 | + * cover multiple address range. Print warning message only if the |
76 | + * address cannot be found in node_masks nor mdesc. |
77 | + */ |
78 | + if ((search_mdesc) && |
79 | + ((addr & last_mem_mask.mask) != last_mem_mask.val)) { |
80 | + /* find the available node in the mdesc */ |
81 | + last_index = find_numa_node_for_addr(addr, &last_mem_mask); |
82 | + numadbg("find_node: latency group for address 0x%lx is %d\n", |
83 | + addr, last_index); |
84 | + if ((last_index < 0) || (last_index >= num_node_masks)) { |
85 | + /* WARN_ONCE() and use default group 0 */ |
86 | + WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node rule. Some physical memory will be owned by node 0."); |
87 | + search_mdesc = false; |
88 | + last_index = 0; |
89 | + } |
90 | + } |
91 | + |
92 | + return last_index; |
93 | } |
94 | |
95 | -static u64 memblock_nid_range(u64 start, u64 end, int *nid) |
96 | +static u64 __init memblock_nid_range(u64 start, u64 end, int *nid) |
97 | { |
98 | *nid = find_node(start); |
99 | start += PAGE_SIZE; |
100 | @@ -1160,6 +1182,41 @@ int __node_distance(int from, int to) |
101 | return numa_latency[from][to]; |
102 | } |
103 | |
104 | +static int find_numa_node_for_addr(unsigned long pa, |
105 | + struct node_mem_mask *pnode_mask) |
106 | +{ |
107 | + struct mdesc_handle *md = mdesc_grab(); |
108 | + u64 node, arc; |
109 | + int i = 0; |
110 | + |
111 | + node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups"); |
112 | + if (node == MDESC_NODE_NULL) |
113 | + goto out; |
114 | + |
115 | + mdesc_for_each_node_by_name(md, node, "group") { |
116 | + mdesc_for_each_arc(arc, md, node, MDESC_ARC_TYPE_FWD) { |
117 | + u64 target = mdesc_arc_target(md, arc); |
118 | + struct mdesc_mlgroup *m = find_mlgroup(target); |
119 | + |
120 | + if (!m) |
121 | + continue; |
122 | + if ((pa & m->mask) == m->match) { |
123 | + if (pnode_mask) { |
124 | + pnode_mask->mask = m->mask; |
125 | + pnode_mask->val = m->match; |
126 | + } |
127 | + mdesc_release(md); |
128 | + return i; |
129 | + } |
130 | + } |
131 | + i++; |
132 | + } |
133 | + |
134 | +out: |
135 | + mdesc_release(md); |
136 | + return -1; |
137 | +} |
138 | + |
139 | static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp) |
140 | { |
141 | int i; |
142 | diff --git a/block/blk-map.c b/block/blk-map.c |
143 | index b8657fa8dc9a..27fd8d92892d 100644 |
144 | --- a/block/blk-map.c |
145 | +++ b/block/blk-map.c |
146 | @@ -118,6 +118,9 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, |
147 | struct iov_iter i; |
148 | int ret; |
149 | |
150 | + if (!iter_is_iovec(iter)) |
151 | + goto fail; |
152 | + |
153 | if (map_data) |
154 | copy = true; |
155 | else if (iov_iter_alignment(iter) & align) |
156 | @@ -140,6 +143,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, |
157 | |
158 | unmap_rq: |
159 | __blk_rq_unmap_user(bio); |
160 | +fail: |
161 | rq->bio = NULL; |
162 | return -EINVAL; |
163 | } |
164 | diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c |
165 | index bda37d336736..b081929e80bc 100644 |
166 | --- a/drivers/net/dsa/b53/b53_common.c |
167 | +++ b/drivers/net/dsa/b53/b53_common.c |
168 | @@ -904,9 +904,10 @@ static void b53_vlan_add(struct dsa_switch *ds, int port, |
169 | |
170 | vl->members |= BIT(port) | BIT(cpu_port); |
171 | if (untagged) |
172 | - vl->untag |= BIT(port) | BIT(cpu_port); |
173 | + vl->untag |= BIT(port); |
174 | else |
175 | - vl->untag &= ~(BIT(port) | BIT(cpu_port)); |
176 | + vl->untag &= ~BIT(port); |
177 | + vl->untag &= ~BIT(cpu_port); |
178 | |
179 | b53_set_vlan_entry(dev, vid, vl); |
180 | b53_fast_age_vlan(dev, vid); |
181 | @@ -915,8 +916,6 @@ static void b53_vlan_add(struct dsa_switch *ds, int port, |
182 | if (pvid) { |
183 | b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), |
184 | vlan->vid_end); |
185 | - b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port), |
186 | - vlan->vid_end); |
187 | b53_fast_age_vlan(dev, vid); |
188 | } |
189 | } |
190 | @@ -926,7 +925,6 @@ static int b53_vlan_del(struct dsa_switch *ds, int port, |
191 | { |
192 | struct b53_device *dev = ds_to_priv(ds); |
193 | bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; |
194 | - unsigned int cpu_port = dev->cpu_port; |
195 | struct b53_vlan *vl; |
196 | u16 vid; |
197 | u16 pvid; |
198 | @@ -939,8 +937,6 @@ static int b53_vlan_del(struct dsa_switch *ds, int port, |
199 | b53_get_vlan_entry(dev, vid, vl); |
200 | |
201 | vl->members &= ~BIT(port); |
202 | - if ((vl->members & BIT(cpu_port)) == BIT(cpu_port)) |
203 | - vl->members = 0; |
204 | |
205 | if (pvid == vid) { |
206 | if (is5325(dev) || is5365(dev)) |
207 | @@ -949,18 +945,14 @@ static int b53_vlan_del(struct dsa_switch *ds, int port, |
208 | pvid = 0; |
209 | } |
210 | |
211 | - if (untagged) { |
212 | + if (untagged) |
213 | vl->untag &= ~(BIT(port)); |
214 | - if ((vl->untag & BIT(cpu_port)) == BIT(cpu_port)) |
215 | - vl->untag = 0; |
216 | - } |
217 | |
218 | b53_set_vlan_entry(dev, vid, vl); |
219 | b53_fast_age_vlan(dev, vid); |
220 | } |
221 | |
222 | b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid); |
223 | - b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port), pvid); |
224 | b53_fast_age_vlan(dev, pvid); |
225 | |
226 | return 0; |
227 | diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c |
228 | index b2b838724a9b..4036865b7c08 100644 |
229 | --- a/drivers/net/dsa/bcm_sf2.c |
230 | +++ b/drivers/net/dsa/bcm_sf2.c |
231 | @@ -1167,6 +1167,7 @@ static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port, |
232 | struct phy_device *phydev) |
233 | { |
234 | struct bcm_sf2_priv *priv = ds_to_priv(ds); |
235 | + struct ethtool_eee *p = &priv->port_sts[port].eee; |
236 | u32 id_mode_dis = 0, port_mode; |
237 | const char *str = NULL; |
238 | u32 reg; |
239 | @@ -1241,6 +1242,9 @@ force_link: |
240 | reg |= DUPLX_MODE; |
241 | |
242 | core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port)); |
243 | + |
244 | + if (!phydev->is_pseudo_fixed_link) |
245 | + p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev); |
246 | } |
247 | |
248 | static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, |
249 | diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c |
250 | index 541456398dfb..842d8b90484e 100644 |
251 | --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c |
252 | +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c |
253 | @@ -1172,6 +1172,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, |
254 | struct bcmgenet_tx_ring *ring) |
255 | { |
256 | struct bcmgenet_priv *priv = netdev_priv(dev); |
257 | + struct device *kdev = &priv->pdev->dev; |
258 | struct enet_cb *tx_cb_ptr; |
259 | struct netdev_queue *txq; |
260 | unsigned int pkts_compl = 0; |
261 | @@ -1199,13 +1200,13 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, |
262 | if (tx_cb_ptr->skb) { |
263 | pkts_compl++; |
264 | bytes_compl += GENET_CB(tx_cb_ptr->skb)->bytes_sent; |
265 | - dma_unmap_single(&dev->dev, |
266 | + dma_unmap_single(kdev, |
267 | dma_unmap_addr(tx_cb_ptr, dma_addr), |
268 | dma_unmap_len(tx_cb_ptr, dma_len), |
269 | DMA_TO_DEVICE); |
270 | bcmgenet_free_cb(tx_cb_ptr); |
271 | } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) { |
272 | - dma_unmap_page(&dev->dev, |
273 | + dma_unmap_page(kdev, |
274 | dma_unmap_addr(tx_cb_ptr, dma_addr), |
275 | dma_unmap_len(tx_cb_ptr, dma_len), |
276 | DMA_TO_DEVICE); |
277 | @@ -1775,6 +1776,7 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv, |
278 | |
279 | static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv) |
280 | { |
281 | + struct device *kdev = &priv->pdev->dev; |
282 | struct enet_cb *cb; |
283 | int i; |
284 | |
285 | @@ -1782,7 +1784,7 @@ static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv) |
286 | cb = &priv->rx_cbs[i]; |
287 | |
288 | if (dma_unmap_addr(cb, dma_addr)) { |
289 | - dma_unmap_single(&priv->dev->dev, |
290 | + dma_unmap_single(kdev, |
291 | dma_unmap_addr(cb, dma_addr), |
292 | priv->rx_buf_len, DMA_FROM_DEVICE); |
293 | dma_unmap_addr_set(cb, dma_addr, 0); |
294 | diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c |
295 | index d954a97b0b0b..ef0dbcfa6ce6 100644 |
296 | --- a/drivers/net/ethernet/cadence/macb.c |
297 | +++ b/drivers/net/ethernet/cadence/macb.c |
298 | @@ -959,6 +959,7 @@ static inline void macb_init_rx_ring(struct macb *bp) |
299 | addr += bp->rx_buffer_size; |
300 | } |
301 | bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP); |
302 | + bp->rx_tail = 0; |
303 | } |
304 | |
305 | static int macb_rx(struct macb *bp, int budget) |
306 | @@ -1597,8 +1598,6 @@ static void macb_init_rings(struct macb *bp) |
307 | bp->queues[0].tx_head = 0; |
308 | bp->queues[0].tx_tail = 0; |
309 | bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); |
310 | - |
311 | - bp->rx_tail = 0; |
312 | } |
313 | |
314 | static void macb_reset_hw(struct macb *bp) |
315 | diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c |
316 | index 467138b423d3..d747e17d3429 100644 |
317 | --- a/drivers/net/ethernet/marvell/sky2.c |
318 | +++ b/drivers/net/ethernet/marvell/sky2.c |
319 | @@ -5220,6 +5220,19 @@ static SIMPLE_DEV_PM_OPS(sky2_pm_ops, sky2_suspend, sky2_resume); |
320 | |
321 | static void sky2_shutdown(struct pci_dev *pdev) |
322 | { |
323 | + struct sky2_hw *hw = pci_get_drvdata(pdev); |
324 | + int port; |
325 | + |
326 | + for (port = 0; port < hw->ports; port++) { |
327 | + struct net_device *ndev = hw->dev[port]; |
328 | + |
329 | + rtnl_lock(); |
330 | + if (netif_running(ndev)) { |
331 | + dev_close(ndev); |
332 | + netif_device_detach(ndev); |
333 | + } |
334 | + rtnl_unlock(); |
335 | + } |
336 | sky2_suspend(&pdev->dev); |
337 | pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev)); |
338 | pci_set_power_state(pdev, PCI_D3hot); |
339 | diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c |
340 | index 054e795df90f..92c9a95169c8 100644 |
341 | --- a/drivers/net/ethernet/renesas/sh_eth.c |
342 | +++ b/drivers/net/ethernet/renesas/sh_eth.c |
343 | @@ -518,7 +518,7 @@ static struct sh_eth_cpu_data r7s72100_data = { |
344 | |
345 | .ecsr_value = ECSR_ICD, |
346 | .ecsipr_value = ECSIPR_ICDIP, |
347 | - .eesipr_value = 0xff7f009f, |
348 | + .eesipr_value = 0xe77f009f, |
349 | |
350 | .tx_check = EESR_TC1 | EESR_FTC, |
351 | .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | |
352 | diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c |
353 | index 16af1ce99233..5ad706b99af8 100644 |
354 | --- a/drivers/net/geneve.c |
355 | +++ b/drivers/net/geneve.c |
356 | @@ -844,7 +844,6 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, |
357 | struct geneve_dev *geneve = netdev_priv(dev); |
358 | struct geneve_sock *gs4 = geneve->sock4; |
359 | struct rtable *rt = NULL; |
360 | - const struct iphdr *iip; /* interior IP header */ |
361 | int err = -EINVAL; |
362 | struct flowi4 fl4; |
363 | __u8 tos, ttl; |
364 | @@ -871,8 +870,6 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, |
365 | sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); |
366 | skb_reset_mac_header(skb); |
367 | |
368 | - iip = ip_hdr(skb); |
369 | - |
370 | if (info) { |
371 | const struct ip_tunnel_key *key = &info->key; |
372 | u8 *opts = NULL; |
373 | @@ -892,7 +889,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, |
374 | if (unlikely(err)) |
375 | goto tx_error; |
376 | |
377 | - tos = ip_tunnel_ecn_encap(key->tos, iip, skb); |
378 | + tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); |
379 | ttl = key->ttl; |
380 | df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; |
381 | } else { |
382 | @@ -901,7 +898,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, |
383 | if (unlikely(err)) |
384 | goto tx_error; |
385 | |
386 | - tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb); |
387 | + tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, ip_hdr(skb), skb); |
388 | ttl = geneve->ttl; |
389 | if (!ttl && IN_MULTICAST(ntohl(fl4.daddr))) |
390 | ttl = 1; |
391 | @@ -934,7 +931,6 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, |
392 | struct geneve_dev *geneve = netdev_priv(dev); |
393 | struct geneve_sock *gs6 = geneve->sock6; |
394 | struct dst_entry *dst = NULL; |
395 | - const struct iphdr *iip; /* interior IP header */ |
396 | int err = -EINVAL; |
397 | struct flowi6 fl6; |
398 | __u8 prio, ttl; |
399 | @@ -959,8 +955,6 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, |
400 | sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); |
401 | skb_reset_mac_header(skb); |
402 | |
403 | - iip = ip_hdr(skb); |
404 | - |
405 | if (info) { |
406 | const struct ip_tunnel_key *key = &info->key; |
407 | u8 *opts = NULL; |
408 | @@ -981,7 +975,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, |
409 | if (unlikely(err)) |
410 | goto tx_error; |
411 | |
412 | - prio = ip_tunnel_ecn_encap(key->tos, iip, skb); |
413 | + prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); |
414 | ttl = key->ttl; |
415 | label = info->key.label; |
416 | } else { |
417 | @@ -991,7 +985,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, |
418 | goto tx_error; |
419 | |
420 | prio = ip_tunnel_ecn_encap(ip6_tclass(fl6.flowlabel), |
421 | - iip, skb); |
422 | + ip_hdr(skb), skb); |
423 | ttl = geneve->ttl; |
424 | if (!ttl && ipv6_addr_is_multicast(&fl6.daddr)) |
425 | ttl = 1; |
426 | diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c |
427 | index c47ec0a04c8e..dd623f674487 100644 |
428 | --- a/drivers/net/usb/cdc_ether.c |
429 | +++ b/drivers/net/usb/cdc_ether.c |
430 | @@ -388,12 +388,6 @@ void usbnet_cdc_status(struct usbnet *dev, struct urb *urb) |
431 | case USB_CDC_NOTIFY_NETWORK_CONNECTION: |
432 | netif_dbg(dev, timer, dev->net, "CDC: carrier %s\n", |
433 | event->wValue ? "on" : "off"); |
434 | - |
435 | - /* Work-around for devices with broken off-notifications */ |
436 | - if (event->wValue && |
437 | - !test_bit(__LINK_STATE_NOCARRIER, &dev->net->state)) |
438 | - usbnet_link_change(dev, 0, 0); |
439 | - |
440 | usbnet_link_change(dev, !!event->wValue, 0); |
441 | break; |
442 | case USB_CDC_NOTIFY_SPEED_CHANGE: /* tx/rx rates */ |
443 | @@ -466,6 +460,36 @@ static int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb) |
444 | return 1; |
445 | } |
446 | |
447 | +/* Ensure correct link state |
448 | + * |
449 | + * Some devices (ZTE MF823/831/910) export two carrier on notifications when |
450 | + * connected. This causes the link state to be incorrect. Work around this by |
451 | + * always setting the state to off, then on. |
452 | + */ |
453 | +void usbnet_cdc_zte_status(struct usbnet *dev, struct urb *urb) |
454 | +{ |
455 | + struct usb_cdc_notification *event; |
456 | + |
457 | + if (urb->actual_length < sizeof(*event)) |
458 | + return; |
459 | + |
460 | + event = urb->transfer_buffer; |
461 | + |
462 | + if (event->bNotificationType != USB_CDC_NOTIFY_NETWORK_CONNECTION) { |
463 | + usbnet_cdc_status(dev, urb); |
464 | + return; |
465 | + } |
466 | + |
467 | + netif_dbg(dev, timer, dev->net, "CDC: carrier %s\n", |
468 | + event->wValue ? "on" : "off"); |
469 | + |
470 | + if (event->wValue && |
471 | + netif_carrier_ok(dev->net)) |
472 | + netif_carrier_off(dev->net); |
473 | + |
474 | + usbnet_link_change(dev, !!event->wValue, 0); |
475 | +} |
476 | + |
477 | static const struct driver_info cdc_info = { |
478 | .description = "CDC Ethernet Device", |
479 | .flags = FLAG_ETHER | FLAG_POINTTOPOINT, |
480 | @@ -481,7 +505,7 @@ static const struct driver_info zte_cdc_info = { |
481 | .flags = FLAG_ETHER | FLAG_POINTTOPOINT, |
482 | .bind = usbnet_cdc_zte_bind, |
483 | .unbind = usbnet_cdc_unbind, |
484 | - .status = usbnet_cdc_status, |
485 | + .status = usbnet_cdc_zte_status, |
486 | .set_rx_mode = usbnet_cdc_update_filter, |
487 | .manage_power = usbnet_manage_power, |
488 | .rx_fixup = usbnet_cdc_zte_rx_fixup, |
489 | diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c |
490 | index bf3fd34924bd..d8072092e5f0 100644 |
491 | --- a/drivers/net/virtio_net.c |
492 | +++ b/drivers/net/virtio_net.c |
493 | @@ -1468,6 +1468,11 @@ static void virtnet_free_queues(struct virtnet_info *vi) |
494 | netif_napi_del(&vi->rq[i].napi); |
495 | } |
496 | |
497 | + /* We called napi_hash_del() before netif_napi_del(), |
498 | + * we need to respect an RCU grace period before freeing vi->rq |
499 | + */ |
500 | + synchronize_net(); |
501 | + |
502 | kfree(vi->rq); |
503 | kfree(vi->sq); |
504 | } |
505 | diff --git a/include/linux/uio.h b/include/linux/uio.h |
506 | index 75b4aaf31a9d..944e7baf17eb 100644 |
507 | --- a/include/linux/uio.h |
508 | +++ b/include/linux/uio.h |
509 | @@ -102,12 +102,12 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages); |
510 | |
511 | const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags); |
512 | |
513 | -static inline size_t iov_iter_count(struct iov_iter *i) |
514 | +static inline size_t iov_iter_count(const struct iov_iter *i) |
515 | { |
516 | return i->count; |
517 | } |
518 | |
519 | -static inline bool iter_is_iovec(struct iov_iter *i) |
520 | +static inline bool iter_is_iovec(const struct iov_iter *i) |
521 | { |
522 | return !(i->type & (ITER_BVEC | ITER_KVEC)); |
523 | } |
524 | diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h |
525 | index d15214d673b2..2a1abbf8da74 100644 |
526 | --- a/include/net/gro_cells.h |
527 | +++ b/include/net/gro_cells.h |
528 | @@ -68,6 +68,9 @@ static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *de |
529 | struct gro_cell *cell = per_cpu_ptr(gcells->cells, i); |
530 | |
531 | __skb_queue_head_init(&cell->napi_skbs); |
532 | + |
533 | + set_bit(NAPI_STATE_NO_BUSY_POLL, &cell->napi.state); |
534 | + |
535 | netif_napi_add(dev, &cell->napi, gro_cell_poll, 64); |
536 | napi_enable(&cell->napi); |
537 | } |
538 | diff --git a/net/core/flow.c b/net/core/flow.c |
539 | index 3937b1b68d5b..18e8893d4be5 100644 |
540 | --- a/net/core/flow.c |
541 | +++ b/net/core/flow.c |
542 | @@ -95,7 +95,6 @@ static void flow_cache_gc_task(struct work_struct *work) |
543 | list_for_each_entry_safe(fce, n, &gc_list, u.gc_list) { |
544 | flow_entry_kill(fce, xfrm); |
545 | atomic_dec(&xfrm->flow_cache_gc_count); |
546 | - WARN_ON(atomic_read(&xfrm->flow_cache_gc_count) < 0); |
547 | } |
548 | } |
549 | |
550 | @@ -236,9 +235,8 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir, |
551 | if (fcp->hash_count > fc->high_watermark) |
552 | flow_cache_shrink(fc, fcp); |
553 | |
554 | - if (fcp->hash_count > 2 * fc->high_watermark || |
555 | - atomic_read(&net->xfrm.flow_cache_gc_count) > fc->high_watermark) { |
556 | - atomic_inc(&net->xfrm.flow_cache_genid); |
557 | + if (atomic_read(&net->xfrm.flow_cache_gc_count) > |
558 | + 2 * num_online_cpus() * fc->high_watermark) { |
559 | flo = ERR_PTR(-ENOBUFS); |
560 | goto ret_object; |
561 | } |
562 | diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c |
563 | index 2c2eb1b629b1..2e9a1c2818c7 100644 |
564 | --- a/net/core/net_namespace.c |
565 | +++ b/net/core/net_namespace.c |
566 | @@ -217,6 +217,8 @@ int peernet2id_alloc(struct net *net, struct net *peer) |
567 | bool alloc; |
568 | int id; |
569 | |
570 | + if (atomic_read(&net->count) == 0) |
571 | + return NETNSA_NSID_NOT_ASSIGNED; |
572 | spin_lock_irqsave(&net->nsid_lock, flags); |
573 | alloc = atomic_read(&peer->count) == 0 ? false : true; |
574 | id = __peernet2id_alloc(net, peer, &alloc); |
575 | diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c |
576 | index 189cc78c77eb..08c3702f7074 100644 |
577 | --- a/net/core/rtnetlink.c |
578 | +++ b/net/core/rtnetlink.c |
579 | @@ -1578,7 +1578,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) |
580 | head = &net->dev_index_head[h]; |
581 | hlist_for_each_entry(dev, head, index_hlist) { |
582 | if (link_dump_filtered(dev, master_idx, kind_ops)) |
583 | - continue; |
584 | + goto cont; |
585 | if (idx < s_idx) |
586 | goto cont; |
587 | err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, |
588 | @@ -2791,7 +2791,10 @@ nla_put_failure: |
589 | |
590 | static inline size_t rtnl_fdb_nlmsg_size(void) |
591 | { |
592 | - return NLMSG_ALIGN(sizeof(struct ndmsg)) + nla_total_size(ETH_ALEN); |
593 | + return NLMSG_ALIGN(sizeof(struct ndmsg)) + |
594 | + nla_total_size(ETH_ALEN) + /* NDA_LLADDR */ |
595 | + nla_total_size(sizeof(u16)) + /* NDA_VLAN */ |
596 | + 0; |
597 | } |
598 | |
599 | static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type, |
600 | diff --git a/net/core/sock.c b/net/core/sock.c |
601 | index 10acaccca5c8..ba27920b6bbc 100644 |
602 | --- a/net/core/sock.c |
603 | +++ b/net/core/sock.c |
604 | @@ -715,7 +715,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname, |
605 | val = min_t(u32, val, sysctl_wmem_max); |
606 | set_sndbuf: |
607 | sk->sk_userlocks |= SOCK_SNDBUF_LOCK; |
608 | - sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF); |
609 | + sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF); |
610 | /* Wake up sending tasks if we upped the value. */ |
611 | sk->sk_write_space(sk); |
612 | break; |
613 | @@ -751,7 +751,7 @@ set_rcvbuf: |
614 | * returning the value we actually used in getsockopt |
615 | * is the most desirable behavior. |
616 | */ |
617 | - sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF); |
618 | + sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF); |
619 | break; |
620 | |
621 | case SO_RCVBUFFORCE: |
622 | diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c |
623 | index b567c8725aea..edbe59d203ef 100644 |
624 | --- a/net/dccp/ipv4.c |
625 | +++ b/net/dccp/ipv4.c |
626 | @@ -700,6 +700,7 @@ int dccp_invalid_packet(struct sk_buff *skb) |
627 | { |
628 | const struct dccp_hdr *dh; |
629 | unsigned int cscov; |
630 | + u8 dccph_doff; |
631 | |
632 | if (skb->pkt_type != PACKET_HOST) |
633 | return 1; |
634 | @@ -721,18 +722,19 @@ int dccp_invalid_packet(struct sk_buff *skb) |
635 | /* |
636 | * If P.Data Offset is too small for packet type, drop packet and return |
637 | */ |
638 | - if (dh->dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) { |
639 | - DCCP_WARN("P.Data Offset(%u) too small\n", dh->dccph_doff); |
640 | + dccph_doff = dh->dccph_doff; |
641 | + if (dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) { |
642 | + DCCP_WARN("P.Data Offset(%u) too small\n", dccph_doff); |
643 | return 1; |
644 | } |
645 | /* |
646 | * If P.Data Offset is too too large for packet, drop packet and return |
647 | */ |
648 | - if (!pskb_may_pull(skb, dh->dccph_doff * sizeof(u32))) { |
649 | - DCCP_WARN("P.Data Offset(%u) too large\n", dh->dccph_doff); |
650 | + if (!pskb_may_pull(skb, dccph_doff * sizeof(u32))) { |
651 | + DCCP_WARN("P.Data Offset(%u) too large\n", dccph_doff); |
652 | return 1; |
653 | } |
654 | - |
655 | + dh = dccp_hdr(skb); |
656 | /* |
657 | * If P.type is not Data, Ack, or DataAck and P.X == 0 (the packet |
658 | * has short sequence numbers), drop packet and return |
659 | diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c |
660 | index f30bad9678f0..3bdecd2f9b88 100644 |
661 | --- a/net/dsa/dsa2.c |
662 | +++ b/net/dsa/dsa2.c |
663 | @@ -28,8 +28,10 @@ static struct dsa_switch_tree *dsa_get_dst(u32 tree) |
664 | struct dsa_switch_tree *dst; |
665 | |
666 | list_for_each_entry(dst, &dsa_switch_trees, list) |
667 | - if (dst->tree == tree) |
668 | + if (dst->tree == tree) { |
669 | + kref_get(&dst->refcount); |
670 | return dst; |
671 | + } |
672 | return NULL; |
673 | } |
674 | |
675 | diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c |
676 | index eebbc0f2baa8..ed22af67c58a 100644 |
677 | --- a/net/ipv4/af_inet.c |
678 | +++ b/net/ipv4/af_inet.c |
679 | @@ -1237,7 +1237,7 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb, |
680 | fixedid = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID); |
681 | |
682 | /* fixed ID is invalid if DF bit is not set */ |
683 | - if (fixedid && !(iph->frag_off & htons(IP_DF))) |
684 | + if (fixedid && !(ip_hdr(skb)->frag_off & htons(IP_DF))) |
685 | goto out; |
686 | } |
687 | |
688 | diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c |
689 | index d95631d09248..20fb25e3027b 100644 |
690 | --- a/net/ipv4/esp4.c |
691 | +++ b/net/ipv4/esp4.c |
692 | @@ -476,7 +476,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb) |
693 | esph = (void *)skb_push(skb, 4); |
694 | *seqhi = esph->spi; |
695 | esph->spi = esph->seq_no; |
696 | - esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.input.hi); |
697 | + esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi; |
698 | aead_request_set_callback(req, 0, esp_input_done_esn, skb); |
699 | } |
700 | |
701 | diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c |
702 | index 1b25daf8c7f1..9301308528f8 100644 |
703 | --- a/net/ipv4/fib_frontend.c |
704 | +++ b/net/ipv4/fib_frontend.c |
705 | @@ -157,7 +157,7 @@ static void fib_replace_table(struct net *net, struct fib_table *old, |
706 | |
707 | int fib_unmerge(struct net *net) |
708 | { |
709 | - struct fib_table *old, *new; |
710 | + struct fib_table *old, *new, *main_table; |
711 | |
712 | /* attempt to fetch local table if it has been allocated */ |
713 | old = fib_get_table(net, RT_TABLE_LOCAL); |
714 | @@ -168,11 +168,21 @@ int fib_unmerge(struct net *net) |
715 | if (!new) |
716 | return -ENOMEM; |
717 | |
718 | + /* table is already unmerged */ |
719 | + if (new == old) |
720 | + return 0; |
721 | + |
722 | /* replace merged table with clean table */ |
723 | - if (new != old) { |
724 | - fib_replace_table(net, old, new); |
725 | - fib_free_table(old); |
726 | - } |
727 | + fib_replace_table(net, old, new); |
728 | + fib_free_table(old); |
729 | + |
730 | + /* attempt to fetch main table if it has been allocated */ |
731 | + main_table = fib_get_table(net, RT_TABLE_MAIN); |
732 | + if (!main_table) |
733 | + return 0; |
734 | + |
735 | + /* flush local entries from main table */ |
736 | + fib_table_flush_external(main_table); |
737 | |
738 | return 0; |
739 | } |
740 | diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c |
741 | index 7ef703102dca..84fd727274cf 100644 |
742 | --- a/net/ipv4/fib_trie.c |
743 | +++ b/net/ipv4/fib_trie.c |
744 | @@ -681,6 +681,13 @@ static unsigned char update_suffix(struct key_vector *tn) |
745 | { |
746 | unsigned char slen = tn->pos; |
747 | unsigned long stride, i; |
748 | + unsigned char slen_max; |
749 | + |
750 | + /* only vector 0 can have a suffix length greater than or equal to |
751 | + * tn->pos + tn->bits, the second highest node will have a suffix |
752 | + * length at most of tn->pos + tn->bits - 1 |
753 | + */ |
754 | + slen_max = min_t(unsigned char, tn->pos + tn->bits - 1, tn->slen); |
755 | |
756 | /* search though the list of children looking for nodes that might |
757 | * have a suffix greater than the one we currently have. This is |
758 | @@ -698,12 +705,8 @@ static unsigned char update_suffix(struct key_vector *tn) |
759 | slen = n->slen; |
760 | i &= ~(stride - 1); |
761 | |
762 | - /* if slen covers all but the last bit we can stop here |
763 | - * there will be nothing longer than that since only node |
764 | - * 0 and 1 << (bits - 1) could have that as their suffix |
765 | - * length. |
766 | - */ |
767 | - if ((slen + 1) >= (tn->pos + tn->bits)) |
768 | + /* stop searching if we have hit the maximum possible value */ |
769 | + if (slen >= slen_max) |
770 | break; |
771 | } |
772 | |
773 | @@ -875,39 +878,27 @@ static struct key_vector *resize(struct trie *t, struct key_vector *tn) |
774 | return collapse(t, tn); |
775 | |
776 | /* update parent in case halve failed */ |
777 | - tp = node_parent(tn); |
778 | - |
779 | - /* Return if at least one deflate was run */ |
780 | - if (max_work != MAX_WORK) |
781 | - return tp; |
782 | - |
783 | - /* push the suffix length to the parent node */ |
784 | - if (tn->slen > tn->pos) { |
785 | - unsigned char slen = update_suffix(tn); |
786 | - |
787 | - if (slen > tp->slen) |
788 | - tp->slen = slen; |
789 | - } |
790 | - |
791 | - return tp; |
792 | + return node_parent(tn); |
793 | } |
794 | |
795 | -static void leaf_pull_suffix(struct key_vector *tp, struct key_vector *l) |
796 | +static void node_pull_suffix(struct key_vector *tn, unsigned char slen) |
797 | { |
798 | - while ((tp->slen > tp->pos) && (tp->slen > l->slen)) { |
799 | - if (update_suffix(tp) > l->slen) |
800 | + unsigned char node_slen = tn->slen; |
801 | + |
802 | + while ((node_slen > tn->pos) && (node_slen > slen)) { |
803 | + slen = update_suffix(tn); |
804 | + if (node_slen == slen) |
805 | break; |
806 | - tp = node_parent(tp); |
807 | + |
808 | + tn = node_parent(tn); |
809 | + node_slen = tn->slen; |
810 | } |
811 | } |
812 | |
813 | -static void leaf_push_suffix(struct key_vector *tn, struct key_vector *l) |
814 | +static void node_push_suffix(struct key_vector *tn, unsigned char slen) |
815 | { |
816 | - /* if this is a new leaf then tn will be NULL and we can sort |
817 | - * out parent suffix lengths as a part of trie_rebalance |
818 | - */ |
819 | - while (tn->slen < l->slen) { |
820 | - tn->slen = l->slen; |
821 | + while (tn->slen < slen) { |
822 | + tn->slen = slen; |
823 | tn = node_parent(tn); |
824 | } |
825 | } |
826 | @@ -1028,6 +1019,7 @@ static int fib_insert_node(struct trie *t, struct key_vector *tp, |
827 | } |
828 | |
829 | /* Case 3: n is NULL, and will just insert a new leaf */ |
830 | + node_push_suffix(tp, new->fa_slen); |
831 | NODE_INIT_PARENT(l, tp); |
832 | put_child_root(tp, key, l); |
833 | trie_rebalance(t, tp); |
834 | @@ -1069,7 +1061,7 @@ static int fib_insert_alias(struct trie *t, struct key_vector *tp, |
835 | /* if we added to the tail node then we need to update slen */ |
836 | if (l->slen < new->fa_slen) { |
837 | l->slen = new->fa_slen; |
838 | - leaf_push_suffix(tp, l); |
839 | + node_push_suffix(tp, new->fa_slen); |
840 | } |
841 | |
842 | return 0; |
843 | @@ -1470,6 +1462,8 @@ static void fib_remove_alias(struct trie *t, struct key_vector *tp, |
844 | * out parent suffix lengths as a part of trie_rebalance |
845 | */ |
846 | if (hlist_empty(&l->leaf)) { |
847 | + if (tp->slen == l->slen) |
848 | + node_pull_suffix(tp, tp->pos); |
849 | put_child_root(tp, l->key, NULL); |
850 | node_free(l); |
851 | trie_rebalance(t, tp); |
852 | @@ -1482,7 +1476,7 @@ static void fib_remove_alias(struct trie *t, struct key_vector *tp, |
853 | |
854 | /* update the trie with the latest suffix length */ |
855 | l->slen = fa->fa_slen; |
856 | - leaf_pull_suffix(tp, l); |
857 | + node_pull_suffix(tp, fa->fa_slen); |
858 | } |
859 | |
860 | /* Caller must hold RTNL. */ |
861 | @@ -1713,8 +1707,10 @@ struct fib_table *fib_trie_unmerge(struct fib_table *oldtb) |
862 | local_l = fib_find_node(lt, &local_tp, l->key); |
863 | |
864 | if (fib_insert_alias(lt, local_tp, local_l, new_fa, |
865 | - NULL, l->key)) |
866 | + NULL, l->key)) { |
867 | + kmem_cache_free(fn_alias_kmem, new_fa); |
868 | goto out; |
869 | + } |
870 | } |
871 | |
872 | /* stop loop if key wrapped back to 0 */ |
873 | @@ -1751,6 +1747,10 @@ void fib_table_flush_external(struct fib_table *tb) |
874 | if (IS_TRIE(pn)) |
875 | break; |
876 | |
877 | + /* update the suffix to address pulled leaves */ |
878 | + if (pn->slen > pn->pos) |
879 | + update_suffix(pn); |
880 | + |
881 | /* resize completed node */ |
882 | pn = resize(t, pn); |
883 | cindex = get_index(pkey, pn); |
884 | @@ -1826,6 +1826,10 @@ int fib_table_flush(struct fib_table *tb) |
885 | if (IS_TRIE(pn)) |
886 | break; |
887 | |
888 | + /* update the suffix to address pulled leaves */ |
889 | + if (pn->slen > pn->pos) |
890 | + update_suffix(pn); |
891 | + |
892 | /* resize completed node */ |
893 | pn = resize(t, pn); |
894 | cindex = get_index(pkey, pn); |
895 | diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c |
896 | index 307daed9a4b9..f4790c30c543 100644 |
897 | --- a/net/ipv4/ip_output.c |
898 | +++ b/net/ipv4/ip_output.c |
899 | @@ -98,6 +98,9 @@ int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb) |
900 | |
901 | iph->tot_len = htons(skb->len); |
902 | ip_send_check(iph); |
903 | + |
904 | + skb->protocol = htons(ETH_P_IP); |
905 | + |
906 | return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, |
907 | net, sk, skb, NULL, skb_dst(skb)->dev, |
908 | dst_output); |
909 | diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c |
910 | index 66ddcb60519a..dcdd5aed7eb1 100644 |
911 | --- a/net/ipv4/ping.c |
912 | +++ b/net/ipv4/ping.c |
913 | @@ -662,6 +662,10 @@ int ping_common_sendmsg(int family, struct msghdr *msg, size_t len, |
914 | if (len > 0xFFFF) |
915 | return -EMSGSIZE; |
916 | |
917 | + /* Must have at least a full ICMP header. */ |
918 | + if (len < icmph_len) |
919 | + return -EINVAL; |
920 | + |
921 | /* |
922 | * Check the flags. |
923 | */ |
924 | diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c |
925 | index c0d71e7d663e..a2d54f5b0fe0 100644 |
926 | --- a/net/ipv4/udp.c |
927 | +++ b/net/ipv4/udp.c |
928 | @@ -1451,7 +1451,7 @@ static void udp_v4_rehash(struct sock *sk) |
929 | udp_lib_rehash(sk, new_hash); |
930 | } |
931 | |
932 | -static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) |
933 | +int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) |
934 | { |
935 | int rc; |
936 | |
937 | diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h |
938 | index 7e0fe4bdd967..feb50a16398d 100644 |
939 | --- a/net/ipv4/udp_impl.h |
940 | +++ b/net/ipv4/udp_impl.h |
941 | @@ -25,7 +25,7 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, |
942 | int flags, int *addr_len); |
943 | int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, |
944 | int flags); |
945 | -int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); |
946 | +int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); |
947 | void udp_destroy_sock(struct sock *sk); |
948 | |
949 | #ifdef CONFIG_PROC_FS |
950 | diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c |
951 | index 2eea073e27ef..705d9fbf0bcf 100644 |
952 | --- a/net/ipv4/udplite.c |
953 | +++ b/net/ipv4/udplite.c |
954 | @@ -50,7 +50,7 @@ struct proto udplite_prot = { |
955 | .sendmsg = udp_sendmsg, |
956 | .recvmsg = udp_recvmsg, |
957 | .sendpage = udp_sendpage, |
958 | - .backlog_rcv = udp_queue_rcv_skb, |
959 | + .backlog_rcv = __udp_queue_rcv_skb, |
960 | .hash = udp_lib_hash, |
961 | .unhash = udp_lib_unhash, |
962 | .get_port = udp_v4_get_port, |
963 | diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c |
964 | index f5432d65e6bf..8f2e36f8afd9 100644 |
965 | --- a/net/ipv6/addrconf.c |
966 | +++ b/net/ipv6/addrconf.c |
967 | @@ -163,7 +163,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx, |
968 | |
969 | static void addrconf_dad_start(struct inet6_ifaddr *ifp); |
970 | static void addrconf_dad_work(struct work_struct *w); |
971 | -static void addrconf_dad_completed(struct inet6_ifaddr *ifp); |
972 | +static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id); |
973 | static void addrconf_dad_run(struct inet6_dev *idev); |
974 | static void addrconf_rs_timer(unsigned long data); |
975 | static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa); |
976 | @@ -2893,6 +2893,7 @@ static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr, |
977 | spin_lock_bh(&ifp->lock); |
978 | ifp->flags &= ~IFA_F_TENTATIVE; |
979 | spin_unlock_bh(&ifp->lock); |
980 | + rt_genid_bump_ipv6(dev_net(idev->dev)); |
981 | ipv6_ifa_notify(RTM_NEWADDR, ifp); |
982 | in6_ifa_put(ifp); |
983 | } |
984 | @@ -3736,7 +3737,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp) |
985 | { |
986 | struct inet6_dev *idev = ifp->idev; |
987 | struct net_device *dev = idev->dev; |
988 | - bool notify = false; |
989 | + bool bump_id, notify = false; |
990 | |
991 | addrconf_join_solict(dev, &ifp->addr); |
992 | |
993 | @@ -3751,11 +3752,12 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp) |
994 | idev->cnf.accept_dad < 1 || |
995 | !(ifp->flags&IFA_F_TENTATIVE) || |
996 | ifp->flags & IFA_F_NODAD) { |
997 | + bump_id = ifp->flags & IFA_F_TENTATIVE; |
998 | ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED); |
999 | spin_unlock(&ifp->lock); |
1000 | read_unlock_bh(&idev->lock); |
1001 | |
1002 | - addrconf_dad_completed(ifp); |
1003 | + addrconf_dad_completed(ifp, bump_id); |
1004 | return; |
1005 | } |
1006 | |
1007 | @@ -3815,8 +3817,8 @@ static void addrconf_dad_work(struct work_struct *w) |
1008 | struct inet6_ifaddr, |
1009 | dad_work); |
1010 | struct inet6_dev *idev = ifp->idev; |
1011 | + bool bump_id, disable_ipv6 = false; |
1012 | struct in6_addr mcaddr; |
1013 | - bool disable_ipv6 = false; |
1014 | |
1015 | enum { |
1016 | DAD_PROCESS, |
1017 | @@ -3886,11 +3888,12 @@ static void addrconf_dad_work(struct work_struct *w) |
1018 | * DAD was successful |
1019 | */ |
1020 | |
1021 | + bump_id = ifp->flags & IFA_F_TENTATIVE; |
1022 | ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED); |
1023 | spin_unlock(&ifp->lock); |
1024 | write_unlock_bh(&idev->lock); |
1025 | |
1026 | - addrconf_dad_completed(ifp); |
1027 | + addrconf_dad_completed(ifp, bump_id); |
1028 | |
1029 | goto out; |
1030 | } |
1031 | @@ -3927,7 +3930,7 @@ static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp) |
1032 | return true; |
1033 | } |
1034 | |
1035 | -static void addrconf_dad_completed(struct inet6_ifaddr *ifp) |
1036 | +static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id) |
1037 | { |
1038 | struct net_device *dev = ifp->idev->dev; |
1039 | struct in6_addr lladdr; |
1040 | @@ -3978,6 +3981,9 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp) |
1041 | spin_unlock(&ifp->lock); |
1042 | write_unlock_bh(&ifp->idev->lock); |
1043 | } |
1044 | + |
1045 | + if (bump_id) |
1046 | + rt_genid_bump_ipv6(dev_net(dev)); |
1047 | } |
1048 | |
1049 | static void addrconf_dad_run(struct inet6_dev *idev) |
1050 | diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c |
1051 | index 060a60b2f8a6..111ba55fd512 100644 |
1052 | --- a/net/ipv6/esp6.c |
1053 | +++ b/net/ipv6/esp6.c |
1054 | @@ -418,7 +418,7 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) |
1055 | esph = (void *)skb_push(skb, 4); |
1056 | *seqhi = esph->spi; |
1057 | esph->spi = esph->seq_no; |
1058 | - esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.input.hi); |
1059 | + esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi; |
1060 | aead_request_set_callback(req, 0, esp_input_done_esn, skb); |
1061 | } |
1062 | |
1063 | diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c |
1064 | index a09418bda1f8..93294cf4525c 100644 |
1065 | --- a/net/ipv6/ip6_offload.c |
1066 | +++ b/net/ipv6/ip6_offload.c |
1067 | @@ -98,7 +98,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, |
1068 | segs = ops->callbacks.gso_segment(skb, features); |
1069 | } |
1070 | |
1071 | - if (IS_ERR(segs)) |
1072 | + if (IS_ERR_OR_NULL(segs)) |
1073 | goto out; |
1074 | |
1075 | for (skb = segs; skb; skb = skb->next) { |
1076 | diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c |
1077 | index 41489f39c456..da4e7b377812 100644 |
1078 | --- a/net/ipv6/ip6_tunnel.c |
1079 | +++ b/net/ipv6/ip6_tunnel.c |
1080 | @@ -1014,6 +1014,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield, |
1081 | int mtu; |
1082 | unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen; |
1083 | unsigned int max_headroom = psh_hlen; |
1084 | + bool use_cache = false; |
1085 | int err = -1; |
1086 | |
1087 | /* NBMA tunnel */ |
1088 | @@ -1038,7 +1039,15 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield, |
1089 | |
1090 | memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr)); |
1091 | neigh_release(neigh); |
1092 | - } else if (!fl6->flowi6_mark) |
1093 | + } else if (!(t->parms.flags & |
1094 | + (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) { |
1095 | + /* enable the cache only only if the routing decision does |
1096 | + * not depend on the current inner header value |
1097 | + */ |
1098 | + use_cache = true; |
1099 | + } |
1100 | + |
1101 | + if (use_cache) |
1102 | dst = dst_cache_get(&t->dst_cache); |
1103 | |
1104 | if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr)) |
1105 | @@ -1113,7 +1122,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield, |
1106 | skb = new_skb; |
1107 | } |
1108 | |
1109 | - if (!fl6->flowi6_mark && ndst) |
1110 | + if (use_cache && ndst) |
1111 | dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr); |
1112 | skb_dst_set(skb, dst); |
1113 | |
1114 | @@ -1134,7 +1143,6 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield, |
1115 | if (err) |
1116 | return err; |
1117 | |
1118 | - skb->protocol = htons(ETH_P_IPV6); |
1119 | skb_push(skb, sizeof(struct ipv6hdr)); |
1120 | skb_reset_network_header(skb); |
1121 | ipv6h = ipv6_hdr(skb); |
1122 | diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c |
1123 | index 462f2a76b5c2..1d184322a7b1 100644 |
1124 | --- a/net/ipv6/output_core.c |
1125 | +++ b/net/ipv6/output_core.c |
1126 | @@ -148,6 +148,8 @@ int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb) |
1127 | ipv6_hdr(skb)->payload_len = htons(len); |
1128 | IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr); |
1129 | |
1130 | + skb->protocol = htons(ETH_P_IPV6); |
1131 | + |
1132 | return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, |
1133 | net, sk, skb, NULL, skb_dst(skb)->dev, |
1134 | dst_output); |
1135 | diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c |
1136 | index c2a8656c22eb..fa39ab8ec1fc 100644 |
1137 | --- a/net/ipv6/udp.c |
1138 | +++ b/net/ipv6/udp.c |
1139 | @@ -514,7 +514,7 @@ out: |
1140 | return; |
1141 | } |
1142 | |
1143 | -static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) |
1144 | +int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) |
1145 | { |
1146 | int rc; |
1147 | |
1148 | diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h |
1149 | index 0682c031ccdc..3c1dbc9f74cf 100644 |
1150 | --- a/net/ipv6/udp_impl.h |
1151 | +++ b/net/ipv6/udp_impl.h |
1152 | @@ -26,7 +26,7 @@ int compat_udpv6_getsockopt(struct sock *sk, int level, int optname, |
1153 | int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len); |
1154 | int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, |
1155 | int flags, int *addr_len); |
1156 | -int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); |
1157 | +int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); |
1158 | void udpv6_destroy_sock(struct sock *sk); |
1159 | |
1160 | void udp_v6_clear_sk(struct sock *sk, int size); |
1161 | diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c |
1162 | index fd6ef414899b..af2895c77ed6 100644 |
1163 | --- a/net/ipv6/udplite.c |
1164 | +++ b/net/ipv6/udplite.c |
1165 | @@ -45,7 +45,7 @@ struct proto udplitev6_prot = { |
1166 | .getsockopt = udpv6_getsockopt, |
1167 | .sendmsg = udpv6_sendmsg, |
1168 | .recvmsg = udpv6_recvmsg, |
1169 | - .backlog_rcv = udpv6_queue_rcv_skb, |
1170 | + .backlog_rcv = __udpv6_queue_rcv_skb, |
1171 | .hash = udp_lib_hash, |
1172 | .unhash = udp_lib_unhash, |
1173 | .get_port = udp_v6_get_port, |
1174 | diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c |
1175 | index 42de4ccd159f..d0e906d39642 100644 |
1176 | --- a/net/l2tp/l2tp_ip.c |
1177 | +++ b/net/l2tp/l2tp_ip.c |
1178 | @@ -251,8 +251,6 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
1179 | int ret; |
1180 | int chk_addr_ret; |
1181 | |
1182 | - if (!sock_flag(sk, SOCK_ZAPPED)) |
1183 | - return -EINVAL; |
1184 | if (addr_len < sizeof(struct sockaddr_l2tpip)) |
1185 | return -EINVAL; |
1186 | if (addr->l2tp_family != AF_INET) |
1187 | @@ -267,6 +265,9 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
1188 | read_unlock_bh(&l2tp_ip_lock); |
1189 | |
1190 | lock_sock(sk); |
1191 | + if (!sock_flag(sk, SOCK_ZAPPED)) |
1192 | + goto out; |
1193 | + |
1194 | if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip)) |
1195 | goto out; |
1196 | |
1197 | diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c |
1198 | index ea2ae6664cc8..b9c6a412b806 100644 |
1199 | --- a/net/l2tp/l2tp_ip6.c |
1200 | +++ b/net/l2tp/l2tp_ip6.c |
1201 | @@ -269,8 +269,6 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
1202 | int addr_type; |
1203 | int err; |
1204 | |
1205 | - if (!sock_flag(sk, SOCK_ZAPPED)) |
1206 | - return -EINVAL; |
1207 | if (addr->l2tp_family != AF_INET6) |
1208 | return -EINVAL; |
1209 | if (addr_len < sizeof(*addr)) |
1210 | @@ -296,6 +294,9 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
1211 | lock_sock(sk); |
1212 | |
1213 | err = -EINVAL; |
1214 | + if (!sock_flag(sk, SOCK_ZAPPED)) |
1215 | + goto out_unlock; |
1216 | + |
1217 | if (sk->sk_state != TCP_CLOSE) |
1218 | goto out_unlock; |
1219 | |
1220 | diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c |
1221 | index 62bea4591054..246f29d365c0 100644 |
1222 | --- a/net/netlink/af_netlink.c |
1223 | +++ b/net/netlink/af_netlink.c |
1224 | @@ -329,7 +329,6 @@ static void netlink_sock_destruct(struct sock *sk) |
1225 | if (nlk->cb_running) { |
1226 | if (nlk->cb.done) |
1227 | nlk->cb.done(&nlk->cb); |
1228 | - |
1229 | module_put(nlk->cb.module); |
1230 | kfree_skb(nlk->cb.skb); |
1231 | } |
1232 | @@ -346,6 +345,14 @@ static void netlink_sock_destruct(struct sock *sk) |
1233 | WARN_ON(nlk_sk(sk)->groups); |
1234 | } |
1235 | |
1236 | +static void netlink_sock_destruct_work(struct work_struct *work) |
1237 | +{ |
1238 | + struct netlink_sock *nlk = container_of(work, struct netlink_sock, |
1239 | + work); |
1240 | + |
1241 | + sk_free(&nlk->sk); |
1242 | +} |
1243 | + |
1244 | /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on |
1245 | * SMP. Look, when several writers sleep and reader wakes them up, all but one |
1246 | * immediately hit write lock and grab all the cpus. Exclusive sleep solves |
1247 | @@ -648,8 +655,18 @@ out_module: |
1248 | static void deferred_put_nlk_sk(struct rcu_head *head) |
1249 | { |
1250 | struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu); |
1251 | + struct sock *sk = &nlk->sk; |
1252 | + |
1253 | + if (!atomic_dec_and_test(&sk->sk_refcnt)) |
1254 | + return; |
1255 | + |
1256 | + if (nlk->cb_running && nlk->cb.done) { |
1257 | + INIT_WORK(&nlk->work, netlink_sock_destruct_work); |
1258 | + schedule_work(&nlk->work); |
1259 | + return; |
1260 | + } |
1261 | |
1262 | - sock_put(&nlk->sk); |
1263 | + sk_free(sk); |
1264 | } |
1265 | |
1266 | static int netlink_release(struct socket *sock) |
1267 | diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h |
1268 | index 3cfd6cc60504..4fdb38318977 100644 |
1269 | --- a/net/netlink/af_netlink.h |
1270 | +++ b/net/netlink/af_netlink.h |
1271 | @@ -3,6 +3,7 @@ |
1272 | |
1273 | #include <linux/rhashtable.h> |
1274 | #include <linux/atomic.h> |
1275 | +#include <linux/workqueue.h> |
1276 | #include <net/sock.h> |
1277 | |
1278 | #define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8) |
1279 | @@ -33,6 +34,7 @@ struct netlink_sock { |
1280 | |
1281 | struct rhash_head node; |
1282 | struct rcu_head rcu; |
1283 | + struct work_struct work; |
1284 | }; |
1285 | |
1286 | static inline struct netlink_sock *nlk_sk(struct sock *sk) |
1287 | diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c |
1288 | index d2238b204691..dd2332390c45 100644 |
1289 | --- a/net/packet/af_packet.c |
1290 | +++ b/net/packet/af_packet.c |
1291 | @@ -3648,19 +3648,25 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv |
1292 | |
1293 | if (optlen != sizeof(val)) |
1294 | return -EINVAL; |
1295 | - if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) |
1296 | - return -EBUSY; |
1297 | if (copy_from_user(&val, optval, sizeof(val))) |
1298 | return -EFAULT; |
1299 | switch (val) { |
1300 | case TPACKET_V1: |
1301 | case TPACKET_V2: |
1302 | case TPACKET_V3: |
1303 | - po->tp_version = val; |
1304 | - return 0; |
1305 | + break; |
1306 | default: |
1307 | return -EINVAL; |
1308 | } |
1309 | + lock_sock(sk); |
1310 | + if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { |
1311 | + ret = -EBUSY; |
1312 | + } else { |
1313 | + po->tp_version = val; |
1314 | + ret = 0; |
1315 | + } |
1316 | + release_sock(sk); |
1317 | + return ret; |
1318 | } |
1319 | case PACKET_RESERVE: |
1320 | { |
1321 | @@ -4164,6 +4170,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, |
1322 | /* Added to avoid minimal code churn */ |
1323 | struct tpacket_req *req = &req_u->req; |
1324 | |
1325 | + lock_sock(sk); |
1326 | /* Opening a Tx-ring is NOT supported in TPACKET_V3 */ |
1327 | if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) { |
1328 | net_warn_ratelimited("Tx-ring is not supported.\n"); |
1329 | @@ -4245,7 +4252,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, |
1330 | goto out; |
1331 | } |
1332 | |
1333 | - lock_sock(sk); |
1334 | |
1335 | /* Detach socket from network */ |
1336 | spin_lock(&po->bind_lock); |
1337 | @@ -4294,11 +4300,11 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, |
1338 | if (!tx_ring) |
1339 | prb_shutdown_retire_blk_timer(po, rb_queue); |
1340 | } |
1341 | - release_sock(sk); |
1342 | |
1343 | if (pg_vec) |
1344 | free_pg_vec(pg_vec, order, req->tp_block_nr); |
1345 | out: |
1346 | + release_sock(sk); |
1347 | return err; |
1348 | } |
1349 | |
1350 | diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c |
1351 | index b54d56d4959b..cf9b2fe8eac6 100644 |
1352 | --- a/net/sched/act_pedit.c |
1353 | +++ b/net/sched/act_pedit.c |
1354 | @@ -108,6 +108,17 @@ static void tcf_pedit_cleanup(struct tc_action *a, int bind) |
1355 | kfree(keys); |
1356 | } |
1357 | |
1358 | +static bool offset_valid(struct sk_buff *skb, int offset) |
1359 | +{ |
1360 | + if (offset > 0 && offset > skb->len) |
1361 | + return false; |
1362 | + |
1363 | + if (offset < 0 && -offset > skb_headroom(skb)) |
1364 | + return false; |
1365 | + |
1366 | + return true; |
1367 | +} |
1368 | + |
1369 | static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a, |
1370 | struct tcf_result *res) |
1371 | { |
1372 | @@ -134,6 +145,11 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a, |
1373 | if (tkey->offmask) { |
1374 | char *d, _d; |
1375 | |
1376 | + if (!offset_valid(skb, off + tkey->at)) { |
1377 | + pr_info("tc filter pedit 'at' offset %d out of bounds\n", |
1378 | + off + tkey->at); |
1379 | + goto bad; |
1380 | + } |
1381 | d = skb_header_pointer(skb, off + tkey->at, 1, |
1382 | &_d); |
1383 | if (!d) |
1384 | @@ -146,10 +162,10 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a, |
1385 | " offset must be on 32 bit boundaries\n"); |
1386 | goto bad; |
1387 | } |
1388 | - if (offset > 0 && offset > skb->len) { |
1389 | - pr_info("tc filter pedit" |
1390 | - " offset %d can't exceed pkt length %d\n", |
1391 | - offset, skb->len); |
1392 | + |
1393 | + if (!offset_valid(skb, off + offset)) { |
1394 | + pr_info("tc filter pedit offset %d out of bounds\n", |
1395 | + offset); |
1396 | goto bad; |
1397 | } |
1398 | |
1399 | diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c |
1400 | index 0b8c3ace671f..1bf1f4517db6 100644 |
1401 | --- a/net/sched/cls_basic.c |
1402 | +++ b/net/sched/cls_basic.c |
1403 | @@ -62,9 +62,6 @@ static unsigned long basic_get(struct tcf_proto *tp, u32 handle) |
1404 | struct basic_head *head = rtnl_dereference(tp->root); |
1405 | struct basic_filter *f; |
1406 | |
1407 | - if (head == NULL) |
1408 | - return 0UL; |
1409 | - |
1410 | list_for_each_entry(f, &head->flist, link) { |
1411 | if (f->handle == handle) { |
1412 | l = (unsigned long) f; |
1413 | @@ -109,7 +106,6 @@ static bool basic_destroy(struct tcf_proto *tp, bool force) |
1414 | tcf_unbind_filter(tp, &f->res); |
1415 | call_rcu(&f->rcu, basic_delete_filter); |
1416 | } |
1417 | - RCU_INIT_POINTER(tp->root, NULL); |
1418 | kfree_rcu(head, rcu); |
1419 | return true; |
1420 | } |
1421 | diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c |
1422 | index c3002c2c68bb..dbec45848fe1 100644 |
1423 | --- a/net/sched/cls_bpf.c |
1424 | +++ b/net/sched/cls_bpf.c |
1425 | @@ -200,7 +200,6 @@ static bool cls_bpf_destroy(struct tcf_proto *tp, bool force) |
1426 | call_rcu(&prog->rcu, __cls_bpf_delete_prog); |
1427 | } |
1428 | |
1429 | - RCU_INIT_POINTER(tp->root, NULL); |
1430 | kfree_rcu(head, rcu); |
1431 | return true; |
1432 | } |
1433 | @@ -211,9 +210,6 @@ static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle) |
1434 | struct cls_bpf_prog *prog; |
1435 | unsigned long ret = 0UL; |
1436 | |
1437 | - if (head == NULL) |
1438 | - return 0UL; |
1439 | - |
1440 | list_for_each_entry(prog, &head->plist, link) { |
1441 | if (prog->handle == handle) { |
1442 | ret = (unsigned long) prog; |
1443 | diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c |
1444 | index 4c85bd3a750c..c104c2019feb 100644 |
1445 | --- a/net/sched/cls_cgroup.c |
1446 | +++ b/net/sched/cls_cgroup.c |
1447 | @@ -130,11 +130,10 @@ static bool cls_cgroup_destroy(struct tcf_proto *tp, bool force) |
1448 | |
1449 | if (!force) |
1450 | return false; |
1451 | - |
1452 | - if (head) { |
1453 | - RCU_INIT_POINTER(tp->root, NULL); |
1454 | + /* Head can still be NULL due to cls_cgroup_init(). */ |
1455 | + if (head) |
1456 | call_rcu(&head->rcu, cls_cgroup_destroy_rcu); |
1457 | - } |
1458 | + |
1459 | return true; |
1460 | } |
1461 | |
1462 | diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c |
1463 | index fbfec6a18839..d7ba2b4ff0f3 100644 |
1464 | --- a/net/sched/cls_flow.c |
1465 | +++ b/net/sched/cls_flow.c |
1466 | @@ -583,7 +583,6 @@ static bool flow_destroy(struct tcf_proto *tp, bool force) |
1467 | list_del_rcu(&f->list); |
1468 | call_rcu(&f->rcu, flow_destroy_filter); |
1469 | } |
1470 | - RCU_INIT_POINTER(tp->root, NULL); |
1471 | kfree_rcu(head, rcu); |
1472 | return true; |
1473 | } |
1474 | diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c |
1475 | index 5060801a2f6d..a411571c4d4a 100644 |
1476 | --- a/net/sched/cls_flower.c |
1477 | +++ b/net/sched/cls_flower.c |
1478 | @@ -13,6 +13,7 @@ |
1479 | #include <linux/init.h> |
1480 | #include <linux/module.h> |
1481 | #include <linux/rhashtable.h> |
1482 | +#include <linux/workqueue.h> |
1483 | |
1484 | #include <linux/if_ether.h> |
1485 | #include <linux/in6.h> |
1486 | @@ -55,7 +56,10 @@ struct cls_fl_head { |
1487 | bool mask_assigned; |
1488 | struct list_head filters; |
1489 | struct rhashtable_params ht_params; |
1490 | - struct rcu_head rcu; |
1491 | + union { |
1492 | + struct work_struct work; |
1493 | + struct rcu_head rcu; |
1494 | + }; |
1495 | }; |
1496 | |
1497 | struct cls_fl_filter { |
1498 | @@ -239,6 +243,24 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f) |
1499 | dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc); |
1500 | } |
1501 | |
1502 | +static void fl_destroy_sleepable(struct work_struct *work) |
1503 | +{ |
1504 | + struct cls_fl_head *head = container_of(work, struct cls_fl_head, |
1505 | + work); |
1506 | + if (head->mask_assigned) |
1507 | + rhashtable_destroy(&head->ht); |
1508 | + kfree(head); |
1509 | + module_put(THIS_MODULE); |
1510 | +} |
1511 | + |
1512 | +static void fl_destroy_rcu(struct rcu_head *rcu) |
1513 | +{ |
1514 | + struct cls_fl_head *head = container_of(rcu, struct cls_fl_head, rcu); |
1515 | + |
1516 | + INIT_WORK(&head->work, fl_destroy_sleepable); |
1517 | + schedule_work(&head->work); |
1518 | +} |
1519 | + |
1520 | static bool fl_destroy(struct tcf_proto *tp, bool force) |
1521 | { |
1522 | struct cls_fl_head *head = rtnl_dereference(tp->root); |
1523 | @@ -252,10 +274,9 @@ static bool fl_destroy(struct tcf_proto *tp, bool force) |
1524 | list_del_rcu(&f->list); |
1525 | call_rcu(&f->rcu, fl_destroy_filter); |
1526 | } |
1527 | - RCU_INIT_POINTER(tp->root, NULL); |
1528 | - if (head->mask_assigned) |
1529 | - rhashtable_destroy(&head->ht); |
1530 | - kfree_rcu(head, rcu); |
1531 | + |
1532 | + __module_get(THIS_MODULE); |
1533 | + call_rcu(&head->rcu, fl_destroy_rcu); |
1534 | return true; |
1535 | } |
1536 | |
1537 | diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c |
1538 | index 25927b6c4436..f935429bd5ef 100644 |
1539 | --- a/net/sched/cls_matchall.c |
1540 | +++ b/net/sched/cls_matchall.c |
1541 | @@ -114,7 +114,6 @@ static bool mall_destroy(struct tcf_proto *tp, bool force) |
1542 | |
1543 | call_rcu(&f->rcu, mall_destroy_filter); |
1544 | } |
1545 | - RCU_INIT_POINTER(tp->root, NULL); |
1546 | kfree_rcu(head, rcu); |
1547 | return true; |
1548 | } |
1549 | diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h |
1550 | index f9c9fc075fe6..9992dfac6938 100644 |
1551 | --- a/net/sched/cls_rsvp.h |
1552 | +++ b/net/sched/cls_rsvp.h |
1553 | @@ -152,7 +152,8 @@ static int rsvp_classify(struct sk_buff *skb, const struct tcf_proto *tp, |
1554 | return -1; |
1555 | nhptr = ip_hdr(skb); |
1556 | #endif |
1557 | - |
1558 | + if (unlikely(!head)) |
1559 | + return -1; |
1560 | restart: |
1561 | |
1562 | #if RSVP_DST_LEN == 4 |
1563 | diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c |
1564 | index 944c8ff45055..403746b20263 100644 |
1565 | --- a/net/sched/cls_tcindex.c |
1566 | +++ b/net/sched/cls_tcindex.c |
1567 | @@ -503,7 +503,6 @@ static bool tcindex_destroy(struct tcf_proto *tp, bool force) |
1568 | walker.fn = tcindex_destroy_element; |
1569 | tcindex_walk(tp, &walker); |
1570 | |
1571 | - RCU_INIT_POINTER(tp->root, NULL); |
1572 | call_rcu(&p->rcu, __tcindex_destroy); |
1573 | return true; |
1574 | } |
1575 | diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c |
1576 | index 65b1bbf133bd..616769983bcd 100644 |
1577 | --- a/net/tipc/bearer.c |
1578 | +++ b/net/tipc/bearer.c |
1579 | @@ -402,6 +402,10 @@ int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b, |
1580 | dev = dev_get_by_name(net, driver_name); |
1581 | if (!dev) |
1582 | return -ENODEV; |
1583 | + if (tipc_mtu_bad(dev, 0)) { |
1584 | + dev_put(dev); |
1585 | + return -EINVAL; |
1586 | + } |
1587 | |
1588 | /* Associate TIPC bearer with L2 bearer */ |
1589 | rcu_assign_pointer(b->media_ptr, dev); |
1590 | @@ -606,8 +610,6 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt, |
1591 | if (!b) |
1592 | return NOTIFY_DONE; |
1593 | |
1594 | - b->mtu = dev->mtu; |
1595 | - |
1596 | switch (evt) { |
1597 | case NETDEV_CHANGE: |
1598 | if (netif_carrier_ok(dev)) |
1599 | @@ -621,6 +623,11 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt, |
1600 | tipc_reset_bearer(net, b); |
1601 | break; |
1602 | case NETDEV_CHANGEMTU: |
1603 | + if (tipc_mtu_bad(dev, 0)) { |
1604 | + bearer_disable(net, b); |
1605 | + break; |
1606 | + } |
1607 | + b->mtu = dev->mtu; |
1608 | tipc_reset_bearer(net, b); |
1609 | break; |
1610 | case NETDEV_CHANGEADDR: |
1611 | diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h |
1612 | index 43757f1f9cb3..d93f1f1a21e6 100644 |
1613 | --- a/net/tipc/bearer.h |
1614 | +++ b/net/tipc/bearer.h |
1615 | @@ -39,6 +39,7 @@ |
1616 | |
1617 | #include "netlink.h" |
1618 | #include "core.h" |
1619 | +#include "msg.h" |
1620 | #include <net/genetlink.h> |
1621 | |
1622 | #define MAX_MEDIA 3 |
1623 | @@ -59,6 +60,9 @@ |
1624 | #define TIPC_MEDIA_TYPE_IB 2 |
1625 | #define TIPC_MEDIA_TYPE_UDP 3 |
1626 | |
1627 | +/* minimum bearer MTU */ |
1628 | +#define TIPC_MIN_BEARER_MTU (MAX_H_SIZE + INT_H_SIZE) |
1629 | + |
1630 | /** |
1631 | * struct tipc_media_addr - destination address used by TIPC bearers |
1632 | * @value: address info (format defined by media) |
1633 | @@ -213,4 +217,13 @@ void tipc_bearer_xmit(struct net *net, u32 bearer_id, |
1634 | void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id, |
1635 | struct sk_buff_head *xmitq); |
1636 | |
1637 | +/* check if device MTU is too low for tipc headers */ |
1638 | +static inline bool tipc_mtu_bad(struct net_device *dev, unsigned int reserve) |
1639 | +{ |
1640 | + if (dev->mtu >= TIPC_MIN_BEARER_MTU + reserve) |
1641 | + return false; |
1642 | + netdev_warn(dev, "MTU too low for tipc bearer\n"); |
1643 | + return true; |
1644 | +} |
1645 | + |
1646 | #endif /* _TIPC_BEARER_H */ |
1647 | diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c |
1648 | index ae7e14cae085..f60f346e75b3 100644 |
1649 | --- a/net/tipc/udp_media.c |
1650 | +++ b/net/tipc/udp_media.c |
1651 | @@ -372,6 +372,11 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b, |
1652 | udp_conf.local_ip.s_addr = htonl(INADDR_ANY); |
1653 | udp_conf.use_udp_checksums = false; |
1654 | ub->ifindex = dev->ifindex; |
1655 | + if (tipc_mtu_bad(dev, sizeof(struct iphdr) + |
1656 | + sizeof(struct udphdr))) { |
1657 | + err = -EINVAL; |
1658 | + goto err; |
1659 | + } |
1660 | b->mtu = dev->mtu - sizeof(struct iphdr) |
1661 | - sizeof(struct udphdr); |
1662 | #if IS_ENABLED(CONFIG_IPV6) |
1663 | diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c |
1664 | index 8309687a56b0..568f307afdcf 100644 |
1665 | --- a/net/unix/af_unix.c |
1666 | +++ b/net/unix/af_unix.c |
1667 | @@ -2199,7 +2199,8 @@ out: |
1668 | * Sleep until more data has arrived. But check for races.. |
1669 | */ |
1670 | static long unix_stream_data_wait(struct sock *sk, long timeo, |
1671 | - struct sk_buff *last, unsigned int last_len) |
1672 | + struct sk_buff *last, unsigned int last_len, |
1673 | + bool freezable) |
1674 | { |
1675 | struct sk_buff *tail; |
1676 | DEFINE_WAIT(wait); |
1677 | @@ -2220,7 +2221,10 @@ static long unix_stream_data_wait(struct sock *sk, long timeo, |
1678 | |
1679 | sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); |
1680 | unix_state_unlock(sk); |
1681 | - timeo = freezable_schedule_timeout(timeo); |
1682 | + if (freezable) |
1683 | + timeo = freezable_schedule_timeout(timeo); |
1684 | + else |
1685 | + timeo = schedule_timeout(timeo); |
1686 | unix_state_lock(sk); |
1687 | |
1688 | if (sock_flag(sk, SOCK_DEAD)) |
1689 | @@ -2250,7 +2254,8 @@ struct unix_stream_read_state { |
1690 | unsigned int splice_flags; |
1691 | }; |
1692 | |
1693 | -static int unix_stream_read_generic(struct unix_stream_read_state *state) |
1694 | +static int unix_stream_read_generic(struct unix_stream_read_state *state, |
1695 | + bool freezable) |
1696 | { |
1697 | struct scm_cookie scm; |
1698 | struct socket *sock = state->socket; |
1699 | @@ -2330,7 +2335,7 @@ again: |
1700 | mutex_unlock(&u->iolock); |
1701 | |
1702 | timeo = unix_stream_data_wait(sk, timeo, last, |
1703 | - last_len); |
1704 | + last_len, freezable); |
1705 | |
1706 | if (signal_pending(current)) { |
1707 | err = sock_intr_errno(timeo); |
1708 | @@ -2472,7 +2477,7 @@ static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg, |
1709 | .flags = flags |
1710 | }; |
1711 | |
1712 | - return unix_stream_read_generic(&state); |
1713 | + return unix_stream_read_generic(&state, true); |
1714 | } |
1715 | |
1716 | static ssize_t skb_unix_socket_splice(struct sock *sk, |
1717 | @@ -2518,7 +2523,7 @@ static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos, |
1718 | flags & SPLICE_F_NONBLOCK) |
1719 | state.flags = MSG_DONTWAIT; |
1720 | |
1721 | - return unix_stream_read_generic(&state); |
1722 | + return unix_stream_read_generic(&state, false); |
1723 | } |
1724 | |
1725 | static int unix_shutdown(struct socket *sock, int mode) |