Contents of /trunk/kernel-alx/patches-3.14/0121-3.14.22-all-fixes.patch
Parent Directory | Revision Log
Revision 2506 -
(show annotations)
(download)
Fri Oct 17 07:55:45 2014 UTC (9 years, 11 months ago) by niro
File size: 58204 byte(s)
-patches for 3.14
1 | diff --git a/Makefile b/Makefile |
2 | index 41e6e19fe2e9..a59980eb4557 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,6 +1,6 @@ |
6 | VERSION = 3 |
7 | PATCHLEVEL = 14 |
8 | -SUBLEVEL = 21 |
9 | +SUBLEVEL = 22 |
10 | EXTRAVERSION = |
11 | NAME = Remembering Coco |
12 | |
13 | diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c |
14 | index 0378328f47a7..a4127453baae 100644 |
15 | --- a/drivers/crypto/caam/caamhash.c |
16 | +++ b/drivers/crypto/caam/caamhash.c |
17 | @@ -1348,9 +1348,9 @@ static int ahash_update_first(struct ahash_request *req) |
18 | struct device *jrdev = ctx->jrdev; |
19 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | |
20 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; |
21 | - u8 *next_buf = state->buf_0 + state->current_buf * |
22 | - CAAM_MAX_HASH_BLOCK_SIZE; |
23 | - int *next_buflen = &state->buflen_0 + state->current_buf; |
24 | + u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0; |
25 | + int *next_buflen = state->current_buf ? |
26 | + &state->buflen_1 : &state->buflen_0; |
27 | int to_hash; |
28 | u32 *sh_desc = ctx->sh_desc_update_first, *desc; |
29 | dma_addr_t ptr = ctx->sh_desc_update_first_dma; |
30 | diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c |
31 | index a95b322f0924..cc38948cf65d 100644 |
32 | --- a/drivers/net/bonding/bond_main.c |
33 | +++ b/drivers/net/bonding/bond_main.c |
34 | @@ -3624,8 +3624,14 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev |
35 | else |
36 | bond_xmit_slave_id(bond, skb, 0); |
37 | } else { |
38 | - slave_id = bond_rr_gen_slave_id(bond); |
39 | - bond_xmit_slave_id(bond, skb, slave_id % bond->slave_cnt); |
40 | + int slave_cnt = ACCESS_ONCE(bond->slave_cnt); |
41 | + |
42 | + if (likely(slave_cnt)) { |
43 | + slave_id = bond_rr_gen_slave_id(bond); |
44 | + bond_xmit_slave_id(bond, skb, slave_id % slave_cnt); |
45 | + } else { |
46 | + dev_kfree_skb_any(skb); |
47 | + } |
48 | } |
49 | |
50 | return NETDEV_TX_OK; |
51 | @@ -3656,8 +3662,13 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d |
52 | static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev) |
53 | { |
54 | struct bonding *bond = netdev_priv(bond_dev); |
55 | + int slave_cnt = ACCESS_ONCE(bond->slave_cnt); |
56 | |
57 | - bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb, bond->slave_cnt)); |
58 | + if (likely(slave_cnt)) |
59 | + bond_xmit_slave_id(bond, skb, |
60 | + bond_xmit_hash(bond, skb, bond->slave_cnt)); |
61 | + else |
62 | + dev_kfree_skb_any(skb); |
63 | |
64 | return NETDEV_TX_OK; |
65 | } |
66 | diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c |
67 | index a210766279d3..9373f1f59605 100644 |
68 | --- a/drivers/net/ethernet/broadcom/tg3.c |
69 | +++ b/drivers/net/ethernet/broadcom/tg3.c |
70 | @@ -6923,7 +6923,8 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) |
71 | skb->protocol = eth_type_trans(skb, tp->dev); |
72 | |
73 | if (len > (tp->dev->mtu + ETH_HLEN) && |
74 | - skb->protocol != htons(ETH_P_8021Q)) { |
75 | + skb->protocol != htons(ETH_P_8021Q) && |
76 | + skb->protocol != htons(ETH_P_8021AD)) { |
77 | dev_kfree_skb(skb); |
78 | goto drop_it_no_recycle; |
79 | } |
80 | @@ -7915,8 +7916,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) |
81 | |
82 | entry = tnapi->tx_prod; |
83 | base_flags = 0; |
84 | - if (skb->ip_summed == CHECKSUM_PARTIAL) |
85 | - base_flags |= TXD_FLAG_TCPUDP_CSUM; |
86 | |
87 | mss = skb_shinfo(skb)->gso_size; |
88 | if (mss) { |
89 | @@ -7932,6 +7931,13 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) |
90 | |
91 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN; |
92 | |
93 | + /* HW/FW can not correctly segment packets that have been |
94 | + * vlan encapsulated. |
95 | + */ |
96 | + if (skb->protocol == htons(ETH_P_8021Q) || |
97 | + skb->protocol == htons(ETH_P_8021AD)) |
98 | + return tg3_tso_bug(tp, skb); |
99 | + |
100 | if (!skb_is_gso_v6(skb)) { |
101 | iph->check = 0; |
102 | iph->tot_len = htons(mss + hdr_len); |
103 | @@ -7978,6 +7984,17 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) |
104 | base_flags |= tsflags << 12; |
105 | } |
106 | } |
107 | + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { |
108 | + /* HW/FW can not correctly checksum packets that have been |
109 | + * vlan encapsulated. |
110 | + */ |
111 | + if (skb->protocol == htons(ETH_P_8021Q) || |
112 | + skb->protocol == htons(ETH_P_8021AD)) { |
113 | + if (skb_checksum_help(skb)) |
114 | + goto drop; |
115 | + } else { |
116 | + base_flags |= TXD_FLAG_TCPUDP_CSUM; |
117 | + } |
118 | } |
119 | |
120 | if (tg3_flag(tp, USE_JUMBO_BDFLAG) && |
121 | diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c |
122 | index d0c38e01e99f..0085476a0258 100644 |
123 | --- a/drivers/net/ethernet/cadence/macb.c |
124 | +++ b/drivers/net/ethernet/cadence/macb.c |
125 | @@ -30,7 +30,6 @@ |
126 | #include <linux/of_device.h> |
127 | #include <linux/of_mdio.h> |
128 | #include <linux/of_net.h> |
129 | -#include <linux/pinctrl/consumer.h> |
130 | |
131 | #include "macb.h" |
132 | |
133 | @@ -1810,7 +1809,6 @@ static int __init macb_probe(struct platform_device *pdev) |
134 | struct phy_device *phydev; |
135 | u32 config; |
136 | int err = -ENXIO; |
137 | - struct pinctrl *pinctrl; |
138 | const char *mac; |
139 | |
140 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
141 | @@ -1819,15 +1817,6 @@ static int __init macb_probe(struct platform_device *pdev) |
142 | goto err_out; |
143 | } |
144 | |
145 | - pinctrl = devm_pinctrl_get_select_default(&pdev->dev); |
146 | - if (IS_ERR(pinctrl)) { |
147 | - err = PTR_ERR(pinctrl); |
148 | - if (err == -EPROBE_DEFER) |
149 | - goto err_out; |
150 | - |
151 | - dev_warn(&pdev->dev, "No pinctrl provided\n"); |
152 | - } |
153 | - |
154 | err = -ENOMEM; |
155 | dev = alloc_etherdev(sizeof(*bp)); |
156 | if (!dev) |
157 | diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c |
158 | index b901371ca361..5d3206d5cb07 100644 |
159 | --- a/drivers/net/ethernet/intel/i40e/i40e_main.c |
160 | +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c |
161 | @@ -4024,6 +4024,9 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) |
162 | DCB_CAP_DCBX_VER_IEEE; |
163 | pf->flags |= I40E_FLAG_DCB_ENABLED; |
164 | } |
165 | + } else { |
166 | + dev_info(&pf->pdev->dev, "AQ Querying DCB configuration failed: %d\n", |
167 | + pf->hw.aq.asq_last_status); |
168 | } |
169 | |
170 | out: |
171 | @@ -8003,7 +8006,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
172 | if (err) { |
173 | dev_info(&pdev->dev, "init_pf_dcb failed: %d\n", err); |
174 | pf->flags &= ~I40E_FLAG_DCB_ENABLED; |
175 | - goto err_init_dcb; |
176 | + /* Continue without DCB enabled */ |
177 | } |
178 | #endif /* CONFIG_I40E_DCB */ |
179 | |
180 | @@ -8119,9 +8122,6 @@ err_vsis: |
181 | err_switch_setup: |
182 | i40e_reset_interrupt_capability(pf); |
183 | del_timer_sync(&pf->service_timer); |
184 | -#ifdef CONFIG_I40E_DCB |
185 | -err_init_dcb: |
186 | -#endif /* CONFIG_I40E_DCB */ |
187 | err_mac_addr: |
188 | err_configure_lan_hmc: |
189 | (void)i40e_shutdown_lan_hmc(hw); |
190 | diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c |
191 | index 68026f7e8ba3..4a474dd9c910 100644 |
192 | --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c |
193 | +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c |
194 | @@ -872,6 +872,10 @@ static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type) |
195 | return -ENOMEM; |
196 | dmatest_bus = pci_map_page(mgp->pdev, dmatest_page, 0, PAGE_SIZE, |
197 | DMA_BIDIRECTIONAL); |
198 | + if (unlikely(pci_dma_mapping_error(mgp->pdev, dmatest_bus))) { |
199 | + __free_page(dmatest_page); |
200 | + return -ENOMEM; |
201 | + } |
202 | |
203 | /* Run a small DMA test. |
204 | * The magic multipliers to the length tell the firmware |
205 | @@ -1293,6 +1297,7 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx, |
206 | int bytes, int watchdog) |
207 | { |
208 | struct page *page; |
209 | + dma_addr_t bus; |
210 | int idx; |
211 | #if MYRI10GE_ALLOC_SIZE > 4096 |
212 | int end_offset; |
213 | @@ -1317,11 +1322,21 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx, |
214 | rx->watchdog_needed = 1; |
215 | return; |
216 | } |
217 | + |
218 | + bus = pci_map_page(mgp->pdev, page, 0, |
219 | + MYRI10GE_ALLOC_SIZE, |
220 | + PCI_DMA_FROMDEVICE); |
221 | + if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) { |
222 | + __free_pages(page, MYRI10GE_ALLOC_ORDER); |
223 | + if (rx->fill_cnt - rx->cnt < 16) |
224 | + rx->watchdog_needed = 1; |
225 | + return; |
226 | + } |
227 | + |
228 | rx->page = page; |
229 | rx->page_offset = 0; |
230 | - rx->bus = pci_map_page(mgp->pdev, page, 0, |
231 | - MYRI10GE_ALLOC_SIZE, |
232 | - PCI_DMA_FROMDEVICE); |
233 | + rx->bus = bus; |
234 | + |
235 | } |
236 | rx->info[idx].page = rx->page; |
237 | rx->info[idx].page_offset = rx->page_offset; |
238 | @@ -2765,6 +2780,35 @@ myri10ge_submit_req(struct myri10ge_tx_buf *tx, struct mcp_kreq_ether_send *src, |
239 | mb(); |
240 | } |
241 | |
242 | +static void myri10ge_unmap_tx_dma(struct myri10ge_priv *mgp, |
243 | + struct myri10ge_tx_buf *tx, int idx) |
244 | +{ |
245 | + unsigned int len; |
246 | + int last_idx; |
247 | + |
248 | + /* Free any DMA resources we've alloced and clear out the skb slot */ |
249 | + last_idx = (idx + 1) & tx->mask; |
250 | + idx = tx->req & tx->mask; |
251 | + do { |
252 | + len = dma_unmap_len(&tx->info[idx], len); |
253 | + if (len) { |
254 | + if (tx->info[idx].skb != NULL) |
255 | + pci_unmap_single(mgp->pdev, |
256 | + dma_unmap_addr(&tx->info[idx], |
257 | + bus), len, |
258 | + PCI_DMA_TODEVICE); |
259 | + else |
260 | + pci_unmap_page(mgp->pdev, |
261 | + dma_unmap_addr(&tx->info[idx], |
262 | + bus), len, |
263 | + PCI_DMA_TODEVICE); |
264 | + dma_unmap_len_set(&tx->info[idx], len, 0); |
265 | + tx->info[idx].skb = NULL; |
266 | + } |
267 | + idx = (idx + 1) & tx->mask; |
268 | + } while (idx != last_idx); |
269 | +} |
270 | + |
271 | /* |
272 | * Transmit a packet. We need to split the packet so that a single |
273 | * segment does not cross myri10ge->tx_boundary, so this makes segment |
274 | @@ -2788,7 +2832,7 @@ static netdev_tx_t myri10ge_xmit(struct sk_buff *skb, |
275 | u32 low; |
276 | __be32 high_swapped; |
277 | unsigned int len; |
278 | - int idx, last_idx, avail, frag_cnt, frag_idx, count, mss, max_segments; |
279 | + int idx, avail, frag_cnt, frag_idx, count, mss, max_segments; |
280 | u16 pseudo_hdr_offset, cksum_offset, queue; |
281 | int cum_len, seglen, boundary, rdma_count; |
282 | u8 flags, odd_flag; |
283 | @@ -2885,9 +2929,12 @@ again: |
284 | |
285 | /* map the skb for DMA */ |
286 | len = skb_headlen(skb); |
287 | + bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE); |
288 | + if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) |
289 | + goto drop; |
290 | + |
291 | idx = tx->req & tx->mask; |
292 | tx->info[idx].skb = skb; |
293 | - bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE); |
294 | dma_unmap_addr_set(&tx->info[idx], bus, bus); |
295 | dma_unmap_len_set(&tx->info[idx], len, len); |
296 | |
297 | @@ -2986,12 +3033,16 @@ again: |
298 | break; |
299 | |
300 | /* map next fragment for DMA */ |
301 | - idx = (count + tx->req) & tx->mask; |
302 | frag = &skb_shinfo(skb)->frags[frag_idx]; |
303 | frag_idx++; |
304 | len = skb_frag_size(frag); |
305 | bus = skb_frag_dma_map(&mgp->pdev->dev, frag, 0, len, |
306 | DMA_TO_DEVICE); |
307 | + if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) { |
308 | + myri10ge_unmap_tx_dma(mgp, tx, idx); |
309 | + goto drop; |
310 | + } |
311 | + idx = (count + tx->req) & tx->mask; |
312 | dma_unmap_addr_set(&tx->info[idx], bus, bus); |
313 | dma_unmap_len_set(&tx->info[idx], len, len); |
314 | } |
315 | @@ -3022,31 +3073,8 @@ again: |
316 | return NETDEV_TX_OK; |
317 | |
318 | abort_linearize: |
319 | - /* Free any DMA resources we've alloced and clear out the skb |
320 | - * slot so as to not trip up assertions, and to avoid a |
321 | - * double-free if linearizing fails */ |
322 | + myri10ge_unmap_tx_dma(mgp, tx, idx); |
323 | |
324 | - last_idx = (idx + 1) & tx->mask; |
325 | - idx = tx->req & tx->mask; |
326 | - tx->info[idx].skb = NULL; |
327 | - do { |
328 | - len = dma_unmap_len(&tx->info[idx], len); |
329 | - if (len) { |
330 | - if (tx->info[idx].skb != NULL) |
331 | - pci_unmap_single(mgp->pdev, |
332 | - dma_unmap_addr(&tx->info[idx], |
333 | - bus), len, |
334 | - PCI_DMA_TODEVICE); |
335 | - else |
336 | - pci_unmap_page(mgp->pdev, |
337 | - dma_unmap_addr(&tx->info[idx], |
338 | - bus), len, |
339 | - PCI_DMA_TODEVICE); |
340 | - dma_unmap_len_set(&tx->info[idx], len, 0); |
341 | - tx->info[idx].skb = NULL; |
342 | - } |
343 | - idx = (idx + 1) & tx->mask; |
344 | - } while (idx != last_idx); |
345 | if (skb_is_gso(skb)) { |
346 | netdev_err(mgp->dev, "TSO but wanted to linearize?!?!?\n"); |
347 | goto drop; |
348 | diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c |
349 | index d6fce9750b95..3c1c33ceffba 100644 |
350 | --- a/drivers/net/hyperv/netvsc_drv.c |
351 | +++ b/drivers/net/hyperv/netvsc_drv.c |
352 | @@ -146,6 +146,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) |
353 | struct hv_netvsc_packet *packet; |
354 | int ret; |
355 | unsigned int i, num_pages, npg_data; |
356 | + u32 skb_length = skb->len; |
357 | |
358 | /* Add multipages for skb->data and additional 2 for RNDIS */ |
359 | npg_data = (((unsigned long)skb->data + skb_headlen(skb) - 1) |
360 | @@ -216,7 +217,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) |
361 | ret = rndis_filter_send(net_device_ctx->device_ctx, |
362 | packet); |
363 | if (ret == 0) { |
364 | - net->stats.tx_bytes += skb->len; |
365 | + net->stats.tx_bytes += skb_length; |
366 | net->stats.tx_packets++; |
367 | } else { |
368 | kfree(packet); |
369 | diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c |
370 | index 7f1abb7c18f2..fbf7dcdc22b0 100644 |
371 | --- a/drivers/net/macvlan.c |
372 | +++ b/drivers/net/macvlan.c |
373 | @@ -709,6 +709,7 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev, |
374 | features, |
375 | mask); |
376 | features |= ALWAYS_ON_FEATURES; |
377 | + features &= ~NETIF_F_NETNS_LOCAL; |
378 | |
379 | return features; |
380 | } |
381 | diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c |
382 | index 3381c4f91a8c..0c6adaaf898c 100644 |
383 | --- a/drivers/net/macvtap.c |
384 | +++ b/drivers/net/macvtap.c |
385 | @@ -112,17 +112,15 @@ out: |
386 | return err; |
387 | } |
388 | |
389 | +/* Requires RTNL */ |
390 | static int macvtap_set_queue(struct net_device *dev, struct file *file, |
391 | struct macvtap_queue *q) |
392 | { |
393 | struct macvlan_dev *vlan = netdev_priv(dev); |
394 | - int err = -EBUSY; |
395 | |
396 | - rtnl_lock(); |
397 | if (vlan->numqueues == MAX_MACVTAP_QUEUES) |
398 | - goto out; |
399 | + return -EBUSY; |
400 | |
401 | - err = 0; |
402 | rcu_assign_pointer(q->vlan, vlan); |
403 | rcu_assign_pointer(vlan->taps[vlan->numvtaps], q); |
404 | sock_hold(&q->sk); |
405 | @@ -136,9 +134,7 @@ static int macvtap_set_queue(struct net_device *dev, struct file *file, |
406 | vlan->numvtaps++; |
407 | vlan->numqueues++; |
408 | |
409 | -out: |
410 | - rtnl_unlock(); |
411 | - return err; |
412 | + return 0; |
413 | } |
414 | |
415 | static int macvtap_disable_queue(struct macvtap_queue *q) |
416 | @@ -454,11 +450,12 @@ static void macvtap_sock_destruct(struct sock *sk) |
417 | static int macvtap_open(struct inode *inode, struct file *file) |
418 | { |
419 | struct net *net = current->nsproxy->net_ns; |
420 | - struct net_device *dev = dev_get_by_macvtap_minor(iminor(inode)); |
421 | + struct net_device *dev; |
422 | struct macvtap_queue *q; |
423 | - int err; |
424 | + int err = -ENODEV; |
425 | |
426 | - err = -ENODEV; |
427 | + rtnl_lock(); |
428 | + dev = dev_get_by_macvtap_minor(iminor(inode)); |
429 | if (!dev) |
430 | goto out; |
431 | |
432 | @@ -498,6 +495,7 @@ out: |
433 | if (dev) |
434 | dev_put(dev); |
435 | |
436 | + rtnl_unlock(); |
437 | return err; |
438 | } |
439 | |
440 | diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c |
441 | index 26d8c29b59de..979fe433278c 100644 |
442 | --- a/drivers/net/team/team.c |
443 | +++ b/drivers/net/team/team.c |
444 | @@ -647,7 +647,7 @@ static void team_notify_peers(struct team *team) |
445 | { |
446 | if (!team->notify_peers.count || !netif_running(team->dev)) |
447 | return; |
448 | - atomic_set(&team->notify_peers.count_pending, team->notify_peers.count); |
449 | + atomic_add(team->notify_peers.count, &team->notify_peers.count_pending); |
450 | schedule_delayed_work(&team->notify_peers.dw, 0); |
451 | } |
452 | |
453 | @@ -687,7 +687,7 @@ static void team_mcast_rejoin(struct team *team) |
454 | { |
455 | if (!team->mcast_rejoin.count || !netif_running(team->dev)) |
456 | return; |
457 | - atomic_set(&team->mcast_rejoin.count_pending, team->mcast_rejoin.count); |
458 | + atomic_add(team->mcast_rejoin.count, &team->mcast_rejoin.count_pending); |
459 | schedule_delayed_work(&team->mcast_rejoin.dw, 0); |
460 | } |
461 | |
462 | diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c |
463 | index 40ad25d7f28b..9b40532041cb 100644 |
464 | --- a/drivers/net/vxlan.c |
465 | +++ b/drivers/net/vxlan.c |
466 | @@ -1334,7 +1334,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb) |
467 | } else if (vxlan->flags & VXLAN_F_L3MISS) { |
468 | union vxlan_addr ipa = { |
469 | .sin.sin_addr.s_addr = tip, |
470 | - .sa.sa_family = AF_INET, |
471 | + .sin.sin_family = AF_INET, |
472 | }; |
473 | |
474 | vxlan_ip_miss(dev, &ipa); |
475 | @@ -1495,7 +1495,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb) |
476 | } else if (vxlan->flags & VXLAN_F_L3MISS) { |
477 | union vxlan_addr ipa = { |
478 | .sin6.sin6_addr = msg->target, |
479 | - .sa.sa_family = AF_INET6, |
480 | + .sin6.sin6_family = AF_INET6, |
481 | }; |
482 | |
483 | vxlan_ip_miss(dev, &ipa); |
484 | @@ -1528,7 +1528,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb) |
485 | if (!n && (vxlan->flags & VXLAN_F_L3MISS)) { |
486 | union vxlan_addr ipa = { |
487 | .sin.sin_addr.s_addr = pip->daddr, |
488 | - .sa.sa_family = AF_INET, |
489 | + .sin.sin_family = AF_INET, |
490 | }; |
491 | |
492 | vxlan_ip_miss(dev, &ipa); |
493 | @@ -1549,7 +1549,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb) |
494 | if (!n && (vxlan->flags & VXLAN_F_L3MISS)) { |
495 | union vxlan_addr ipa = { |
496 | .sin6.sin6_addr = pip6->daddr, |
497 | - .sa.sa_family = AF_INET6, |
498 | + .sin6.sin6_family = AF_INET6, |
499 | }; |
500 | |
501 | vxlan_ip_miss(dev, &ipa); |
502 | diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c |
503 | index 0ff3e3624d4c..feda34404ed0 100644 |
504 | --- a/drivers/tty/serial/8250/8250_pci.c |
505 | +++ b/drivers/tty/serial/8250/8250_pci.c |
506 | @@ -1766,6 +1766,7 @@ pci_wch_ch353_setup(struct serial_private *priv, |
507 | #define PCI_DEVICE_ID_COMMTECH_4222PCIE 0x0022 |
508 | #define PCI_DEVICE_ID_BROADCOM_TRUMANAGE 0x160a |
509 | #define PCI_DEVICE_ID_AMCC_ADDIDATA_APCI7800 0x818e |
510 | +#define PCI_DEVICE_ID_INTEL_QRK_UART 0x0936 |
511 | |
512 | #define PCI_VENDOR_ID_SUNIX 0x1fd4 |
513 | #define PCI_DEVICE_ID_SUNIX_1999 0x1999 |
514 | @@ -1876,6 +1877,13 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = { |
515 | .subdevice = PCI_ANY_ID, |
516 | .setup = byt_serial_setup, |
517 | }, |
518 | + { |
519 | + .vendor = PCI_VENDOR_ID_INTEL, |
520 | + .device = PCI_DEVICE_ID_INTEL_QRK_UART, |
521 | + .subvendor = PCI_ANY_ID, |
522 | + .subdevice = PCI_ANY_ID, |
523 | + .setup = pci_default_setup, |
524 | + }, |
525 | /* |
526 | * ITE |
527 | */ |
528 | @@ -2710,6 +2718,7 @@ enum pci_board_num_t { |
529 | pbn_ADDIDATA_PCIe_8_3906250, |
530 | pbn_ce4100_1_115200, |
531 | pbn_byt, |
532 | + pbn_qrk, |
533 | pbn_omegapci, |
534 | pbn_NETMOS9900_2s_115200, |
535 | pbn_brcm_trumanage, |
536 | @@ -3456,6 +3465,12 @@ static struct pciserial_board pci_boards[] = { |
537 | .uart_offset = 0x80, |
538 | .reg_shift = 2, |
539 | }, |
540 | + [pbn_qrk] = { |
541 | + .flags = FL_BASE0, |
542 | + .num_ports = 1, |
543 | + .base_baud = 2764800, |
544 | + .reg_shift = 2, |
545 | + }, |
546 | [pbn_omegapci] = { |
547 | .flags = FL_BASE0, |
548 | .num_ports = 8, |
549 | @@ -5150,6 +5165,12 @@ static struct pci_device_id serial_pci_tbl[] = { |
550 | pbn_byt }, |
551 | |
552 | /* |
553 | + * Intel Quark x1000 |
554 | + */ |
555 | + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_QRK_UART, |
556 | + PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
557 | + pbn_qrk }, |
558 | + /* |
559 | * Cronyx Omega PCI |
560 | */ |
561 | { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_CRONYX_OMEGA, |
562 | diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c |
563 | index 263612ce1f62..445d62a4316a 100644 |
564 | --- a/drivers/usb/core/hub.c |
565 | +++ b/drivers/usb/core/hub.c |
566 | @@ -1948,8 +1948,10 @@ void usb_set_device_state(struct usb_device *udev, |
567 | || new_state == USB_STATE_SUSPENDED) |
568 | ; /* No change to wakeup settings */ |
569 | else if (new_state == USB_STATE_CONFIGURED) |
570 | - wakeup = udev->actconfig->desc.bmAttributes |
571 | - & USB_CONFIG_ATT_WAKEUP; |
572 | + wakeup = (udev->quirks & |
573 | + USB_QUIRK_IGNORE_REMOTE_WAKEUP) ? 0 : |
574 | + udev->actconfig->desc.bmAttributes & |
575 | + USB_CONFIG_ATT_WAKEUP; |
576 | else |
577 | wakeup = 0; |
578 | } |
579 | diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c |
580 | index 739ee8e8bdfd..5144d11d032c 100644 |
581 | --- a/drivers/usb/core/quirks.c |
582 | +++ b/drivers/usb/core/quirks.c |
583 | @@ -160,6 +160,10 @@ static const struct usb_device_id usb_interface_quirk_list[] = { |
584 | { USB_VENDOR_AND_INTERFACE_INFO(0x046d, USB_CLASS_VIDEO, 1, 0), |
585 | .driver_info = USB_QUIRK_RESET_RESUME }, |
586 | |
587 | + /* ASUS Base Station(T100) */ |
588 | + { USB_DEVICE(0x0b05, 0x17e0), .driver_info = |
589 | + USB_QUIRK_IGNORE_REMOTE_WAKEUP }, |
590 | + |
591 | { } /* terminating entry must be last */ |
592 | }; |
593 | |
594 | diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c |
595 | index 7a109eae9b9a..85f5215871de 100644 |
596 | --- a/drivers/usb/musb/musb_dsps.c |
597 | +++ b/drivers/usb/musb/musb_dsps.c |
598 | @@ -707,6 +707,7 @@ static int dsps_suspend(struct device *dev) |
599 | struct musb *musb = platform_get_drvdata(glue->musb); |
600 | void __iomem *mbase = musb->ctrl_base; |
601 | |
602 | + del_timer_sync(&glue->timer); |
603 | glue->context.control = dsps_readl(mbase, wrp->control); |
604 | glue->context.epintr = dsps_readl(mbase, wrp->epintr_set); |
605 | glue->context.coreintr = dsps_readl(mbase, wrp->coreintr_set); |
606 | @@ -732,6 +733,7 @@ static int dsps_resume(struct device *dev) |
607 | dsps_writel(mbase, wrp->mode, glue->context.mode); |
608 | dsps_writel(mbase, wrp->tx_mode, glue->context.tx_mode); |
609 | dsps_writel(mbase, wrp->rx_mode, glue->context.rx_mode); |
610 | + setup_timer(&glue->timer, otg_timer, (unsigned long) musb); |
611 | |
612 | return 0; |
613 | } |
614 | diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c |
615 | index 330df5ce435b..63b2af2a87c0 100644 |
616 | --- a/drivers/usb/serial/cp210x.c |
617 | +++ b/drivers/usb/serial/cp210x.c |
618 | @@ -122,6 +122,7 @@ static const struct usb_device_id id_table[] = { |
619 | { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */ |
620 | { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ |
621 | { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ |
622 | + { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ |
623 | { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ |
624 | { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ |
625 | { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ |
626 | @@ -155,6 +156,7 @@ static const struct usb_device_id id_table[] = { |
627 | { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ |
628 | { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ |
629 | { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */ |
630 | + { USB_DEVICE(0x1D6F, 0x0010) }, /* Seluxit ApS RF Dongle */ |
631 | { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */ |
632 | { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */ |
633 | { USB_DEVICE(0x1FB9, 0x0100) }, /* Lake Shore Model 121 Current Source */ |
634 | diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h |
635 | index 72ba6f5cbc8d..2abe67bd4df8 100644 |
636 | --- a/include/linux/if_vlan.h |
637 | +++ b/include/linux/if_vlan.h |
638 | @@ -186,7 +186,6 @@ vlan_dev_get_egress_qos_mask(struct net_device *dev, u32 skprio) |
639 | } |
640 | |
641 | extern bool vlan_do_receive(struct sk_buff **skb); |
642 | -extern struct sk_buff *vlan_untag(struct sk_buff *skb); |
643 | |
644 | extern int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid); |
645 | extern void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid); |
646 | @@ -228,11 +227,6 @@ static inline bool vlan_do_receive(struct sk_buff **skb) |
647 | return false; |
648 | } |
649 | |
650 | -static inline struct sk_buff *vlan_untag(struct sk_buff *skb) |
651 | -{ |
652 | - return skb; |
653 | -} |
654 | - |
655 | static inline int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid) |
656 | { |
657 | return 0; |
658 | diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h |
659 | index 15ede6a823a6..ad8f85908a56 100644 |
660 | --- a/include/linux/skbuff.h |
661 | +++ b/include/linux/skbuff.h |
662 | @@ -2458,6 +2458,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen); |
663 | void skb_scrub_packet(struct sk_buff *skb, bool xnet); |
664 | unsigned int skb_gso_transport_seglen(const struct sk_buff *skb); |
665 | struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); |
666 | +struct sk_buff *skb_vlan_untag(struct sk_buff *skb); |
667 | |
668 | struct skb_checksum_ops { |
669 | __wsum (*update)(const void *mem, int len, __wsum wsum); |
670 | diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h |
671 | index 52f944dfe2fd..49587dc22f5d 100644 |
672 | --- a/include/linux/usb/quirks.h |
673 | +++ b/include/linux/usb/quirks.h |
674 | @@ -30,4 +30,7 @@ |
675 | descriptor */ |
676 | #define USB_QUIRK_DELAY_INIT 0x00000040 |
677 | |
678 | +/* device generates spurious wakeup, ignore remote wakeup capability */ |
679 | +#define USB_QUIRK_IGNORE_REMOTE_WAKEUP 0x00000200 |
680 | + |
681 | #endif /* __LINUX_USB_QUIRKS_H */ |
682 | diff --git a/include/net/dst.h b/include/net/dst.h |
683 | index 77eb53fabfb0..909032821c37 100644 |
684 | --- a/include/net/dst.h |
685 | +++ b/include/net/dst.h |
686 | @@ -466,6 +466,7 @@ void dst_init(void); |
687 | /* Flags for xfrm_lookup flags argument. */ |
688 | enum { |
689 | XFRM_LOOKUP_ICMP = 1 << 0, |
690 | + XFRM_LOOKUP_QUEUE = 1 << 1, |
691 | }; |
692 | |
693 | struct flowi; |
694 | @@ -476,7 +477,16 @@ static inline struct dst_entry *xfrm_lookup(struct net *net, |
695 | int flags) |
696 | { |
697 | return dst_orig; |
698 | -} |
699 | +} |
700 | + |
701 | +static inline struct dst_entry *xfrm_lookup_route(struct net *net, |
702 | + struct dst_entry *dst_orig, |
703 | + const struct flowi *fl, |
704 | + struct sock *sk, |
705 | + int flags) |
706 | +{ |
707 | + return dst_orig; |
708 | +} |
709 | |
710 | static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst) |
711 | { |
712 | @@ -488,6 +498,10 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, |
713 | const struct flowi *fl, struct sock *sk, |
714 | int flags); |
715 | |
716 | +struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig, |
717 | + const struct flowi *fl, struct sock *sk, |
718 | + int flags); |
719 | + |
720 | /* skb attached with this dst needs transformation if dst->xfrm is valid */ |
721 | static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst) |
722 | { |
723 | diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h |
724 | index c55aeed41ace..cf9272807788 100644 |
725 | --- a/include/net/inet_connection_sock.h |
726 | +++ b/include/net/inet_connection_sock.h |
727 | @@ -62,6 +62,7 @@ struct inet_connection_sock_af_ops { |
728 | void (*addr2sockaddr)(struct sock *sk, struct sockaddr *); |
729 | int (*bind_conflict)(const struct sock *sk, |
730 | const struct inet_bind_bucket *tb, bool relax); |
731 | + void (*mtu_reduced)(struct sock *sk); |
732 | }; |
733 | |
734 | /** inet_connection_sock - INET connection oriented sock |
735 | diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h |
736 | index 4b7cd695e431..cfcbc3f627bd 100644 |
737 | --- a/include/net/sctp/command.h |
738 | +++ b/include/net/sctp/command.h |
739 | @@ -115,7 +115,7 @@ typedef enum { |
740 | * analysis of the state functions, but in reality just taken from |
741 | * thin air in the hopes othat we don't trigger a kernel panic. |
742 | */ |
743 | -#define SCTP_MAX_NUM_COMMANDS 14 |
744 | +#define SCTP_MAX_NUM_COMMANDS 20 |
745 | |
746 | typedef union { |
747 | __s32 i32; |
748 | diff --git a/include/net/sock.h b/include/net/sock.h |
749 | index 2f7bc435c93d..f66b2b19a6e4 100644 |
750 | --- a/include/net/sock.h |
751 | +++ b/include/net/sock.h |
752 | @@ -969,7 +969,6 @@ struct proto { |
753 | struct sk_buff *skb); |
754 | |
755 | void (*release_cb)(struct sock *sk); |
756 | - void (*mtu_reduced)(struct sock *sk); |
757 | |
758 | /* Keeping track of sk's, looking them up, and port selection methods. */ |
759 | void (*hash)(struct sock *sk); |
760 | diff --git a/include/net/tcp.h b/include/net/tcp.h |
761 | index 743accec6c76..1f0d8479e15f 100644 |
762 | --- a/include/net/tcp.h |
763 | +++ b/include/net/tcp.h |
764 | @@ -453,6 +453,7 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th); |
765 | */ |
766 | |
767 | void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb); |
768 | +void tcp_v4_mtu_reduced(struct sock *sk); |
769 | int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb); |
770 | struct sock *tcp_create_openreq_child(struct sock *sk, |
771 | struct request_sock *req, |
772 | @@ -720,8 +721,10 @@ struct tcp_skb_cb { |
773 | #define TCPCB_SACKED_RETRANS 0x02 /* SKB retransmitted */ |
774 | #define TCPCB_LOST 0x04 /* SKB is lost */ |
775 | #define TCPCB_TAGBITS 0x07 /* All tag bits */ |
776 | +#define TCPCB_REPAIRED 0x10 /* SKB repaired (no skb_mstamp) */ |
777 | #define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */ |
778 | -#define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS) |
779 | +#define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \ |
780 | + TCPCB_REPAIRED) |
781 | |
782 | __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */ |
783 | /* 1 byte hole */ |
784 | diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c |
785 | index 7e57135c7cc4..5d56e05d83dd 100644 |
786 | --- a/net/8021q/vlan_core.c |
787 | +++ b/net/8021q/vlan_core.c |
788 | @@ -106,59 +106,6 @@ u16 vlan_dev_vlan_id(const struct net_device *dev) |
789 | } |
790 | EXPORT_SYMBOL(vlan_dev_vlan_id); |
791 | |
792 | -static struct sk_buff *vlan_reorder_header(struct sk_buff *skb) |
793 | -{ |
794 | - if (skb_cow(skb, skb_headroom(skb)) < 0) { |
795 | - kfree_skb(skb); |
796 | - return NULL; |
797 | - } |
798 | - |
799 | - memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN); |
800 | - skb->mac_header += VLAN_HLEN; |
801 | - return skb; |
802 | -} |
803 | - |
804 | -struct sk_buff *vlan_untag(struct sk_buff *skb) |
805 | -{ |
806 | - struct vlan_hdr *vhdr; |
807 | - u16 vlan_tci; |
808 | - |
809 | - if (unlikely(vlan_tx_tag_present(skb))) { |
810 | - /* vlan_tci is already set-up so leave this for another time */ |
811 | - return skb; |
812 | - } |
813 | - |
814 | - skb = skb_share_check(skb, GFP_ATOMIC); |
815 | - if (unlikely(!skb)) |
816 | - goto err_free; |
817 | - |
818 | - if (unlikely(!pskb_may_pull(skb, VLAN_HLEN))) |
819 | - goto err_free; |
820 | - |
821 | - vhdr = (struct vlan_hdr *) skb->data; |
822 | - vlan_tci = ntohs(vhdr->h_vlan_TCI); |
823 | - __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); |
824 | - |
825 | - skb_pull_rcsum(skb, VLAN_HLEN); |
826 | - vlan_set_encap_proto(skb, vhdr); |
827 | - |
828 | - skb = vlan_reorder_header(skb); |
829 | - if (unlikely(!skb)) |
830 | - goto err_free; |
831 | - |
832 | - skb_reset_network_header(skb); |
833 | - skb_reset_transport_header(skb); |
834 | - skb_reset_mac_len(skb); |
835 | - |
836 | - return skb; |
837 | - |
838 | -err_free: |
839 | - kfree_skb(skb); |
840 | - return NULL; |
841 | -} |
842 | -EXPORT_SYMBOL(vlan_untag); |
843 | - |
844 | - |
845 | /* |
846 | * vlan info and vid list |
847 | */ |
848 | diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h |
849 | index f2d254b69353..4acfc3eef56a 100644 |
850 | --- a/net/bridge/br_private.h |
851 | +++ b/net/bridge/br_private.h |
852 | @@ -302,6 +302,9 @@ struct br_input_skb_cb { |
853 | int igmp; |
854 | int mrouters_only; |
855 | #endif |
856 | +#ifdef CONFIG_BRIDGE_VLAN_FILTERING |
857 | + bool vlan_filtered; |
858 | +#endif |
859 | }; |
860 | |
861 | #define BR_INPUT_SKB_CB(__skb) ((struct br_input_skb_cb *)(__skb)->cb) |
862 | diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c |
863 | index b1c637208497..e1bd2539f9dc 100644 |
864 | --- a/net/bridge/br_vlan.c |
865 | +++ b/net/bridge/br_vlan.c |
866 | @@ -125,7 +125,8 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br, |
867 | { |
868 | u16 vid; |
869 | |
870 | - if (!br->vlan_enabled) |
871 | + /* If this packet was not filtered at input, let it pass */ |
872 | + if (!BR_INPUT_SKB_CB(skb)->vlan_filtered) |
873 | goto out; |
874 | |
875 | /* Vlan filter table must be configured at this point. The |
876 | @@ -163,8 +164,10 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v, |
877 | /* If VLAN filtering is disabled on the bridge, all packets are |
878 | * permitted. |
879 | */ |
880 | - if (!br->vlan_enabled) |
881 | + if (!br->vlan_enabled) { |
882 | + BR_INPUT_SKB_CB(skb)->vlan_filtered = false; |
883 | return true; |
884 | + } |
885 | |
886 | /* If there are no vlan in the permitted list, all packets are |
887 | * rejected. |
888 | @@ -172,6 +175,8 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v, |
889 | if (!v) |
890 | goto drop; |
891 | |
892 | + BR_INPUT_SKB_CB(skb)->vlan_filtered = true; |
893 | + |
894 | /* If vlan tx offload is disabled on bridge device and frame was |
895 | * sent from vlan device on the bridge device, it does not have |
896 | * HW accelerated vlan tag. |
897 | @@ -179,7 +184,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v, |
898 | if (unlikely(!vlan_tx_tag_present(skb) && |
899 | (skb->protocol == htons(ETH_P_8021Q) || |
900 | skb->protocol == htons(ETH_P_8021AD)))) { |
901 | - skb = vlan_untag(skb); |
902 | + skb = skb_vlan_untag(skb); |
903 | if (unlikely(!skb)) |
904 | return false; |
905 | } |
906 | @@ -228,7 +233,8 @@ bool br_allowed_egress(struct net_bridge *br, |
907 | { |
908 | u16 vid; |
909 | |
910 | - if (!br->vlan_enabled) |
911 | + /* If this packet was not filtered at input, let it pass */ |
912 | + if (!BR_INPUT_SKB_CB(skb)->vlan_filtered) |
913 | return true; |
914 | |
915 | if (!v) |
916 | @@ -247,6 +253,7 @@ bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid) |
917 | struct net_bridge *br = p->br; |
918 | struct net_port_vlans *v; |
919 | |
920 | + /* If filtering was disabled at input, let it pass. */ |
921 | if (!br->vlan_enabled) |
922 | return true; |
923 | |
924 | diff --git a/net/core/dev.c b/net/core/dev.c |
925 | index 37bddf729e77..3ed11a555834 100644 |
926 | --- a/net/core/dev.c |
927 | +++ b/net/core/dev.c |
928 | @@ -3554,7 +3554,7 @@ another_round: |
929 | |
930 | if (skb->protocol == cpu_to_be16(ETH_P_8021Q) || |
931 | skb->protocol == cpu_to_be16(ETH_P_8021AD)) { |
932 | - skb = vlan_untag(skb); |
933 | + skb = skb_vlan_untag(skb); |
934 | if (unlikely(!skb)) |
935 | goto unlock; |
936 | } |
937 | diff --git a/net/core/netpoll.c b/net/core/netpoll.c |
938 | index df9e6b1a9759..723fa7da8025 100644 |
939 | --- a/net/core/netpoll.c |
940 | +++ b/net/core/netpoll.c |
941 | @@ -788,7 +788,7 @@ int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo) |
942 | } |
943 | |
944 | if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) { |
945 | - skb = vlan_untag(skb); |
946 | + skb = skb_vlan_untag(skb); |
947 | if (unlikely(!skb)) |
948 | goto out; |
949 | } |
950 | diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c |
951 | index aef1500ebc05..b0db904f083d 100644 |
952 | --- a/net/core/rtnetlink.c |
953 | +++ b/net/core/rtnetlink.c |
954 | @@ -799,7 +799,8 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev, |
955 | (nla_total_size(sizeof(struct ifla_vf_mac)) + |
956 | nla_total_size(sizeof(struct ifla_vf_vlan)) + |
957 | nla_total_size(sizeof(struct ifla_vf_tx_rate)) + |
958 | - nla_total_size(sizeof(struct ifla_vf_spoofchk))); |
959 | + nla_total_size(sizeof(struct ifla_vf_spoofchk)) + |
960 | + nla_total_size(sizeof(struct ifla_vf_link_state))); |
961 | return size; |
962 | } else |
963 | return 0; |
964 | diff --git a/net/core/skbuff.c b/net/core/skbuff.c |
965 | index 8f6391bbf509..baf6fc457df9 100644 |
966 | --- a/net/core/skbuff.c |
967 | +++ b/net/core/skbuff.c |
968 | @@ -62,6 +62,7 @@ |
969 | #include <linux/scatterlist.h> |
970 | #include <linux/errqueue.h> |
971 | #include <linux/prefetch.h> |
972 | +#include <linux/if_vlan.h> |
973 | |
974 | #include <net/protocol.h> |
975 | #include <net/dst.h> |
976 | @@ -3139,6 +3140,9 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) |
977 | NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; |
978 | goto done; |
979 | } |
980 | + /* switch back to head shinfo */ |
981 | + pinfo = skb_shinfo(p); |
982 | + |
983 | if (pinfo->frag_list) |
984 | goto merge; |
985 | if (skb_gro_len(p) != pinfo->gso_size) |
986 | @@ -3963,3 +3967,55 @@ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) |
987 | return shinfo->gso_size; |
988 | } |
989 | EXPORT_SYMBOL_GPL(skb_gso_transport_seglen); |
990 | + |
991 | +static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) |
992 | +{ |
993 | + if (skb_cow(skb, skb_headroom(skb)) < 0) { |
994 | + kfree_skb(skb); |
995 | + return NULL; |
996 | + } |
997 | + |
998 | + memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN); |
999 | + skb->mac_header += VLAN_HLEN; |
1000 | + return skb; |
1001 | +} |
1002 | + |
1003 | +struct sk_buff *skb_vlan_untag(struct sk_buff *skb) |
1004 | +{ |
1005 | + struct vlan_hdr *vhdr; |
1006 | + u16 vlan_tci; |
1007 | + |
1008 | + if (unlikely(vlan_tx_tag_present(skb))) { |
1009 | + /* vlan_tci is already set-up so leave this for another time */ |
1010 | + return skb; |
1011 | + } |
1012 | + |
1013 | + skb = skb_share_check(skb, GFP_ATOMIC); |
1014 | + if (unlikely(!skb)) |
1015 | + goto err_free; |
1016 | + |
1017 | + if (unlikely(!pskb_may_pull(skb, VLAN_HLEN))) |
1018 | + goto err_free; |
1019 | + |
1020 | + vhdr = (struct vlan_hdr *)skb->data; |
1021 | + vlan_tci = ntohs(vhdr->h_vlan_TCI); |
1022 | + __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); |
1023 | + |
1024 | + skb_pull_rcsum(skb, VLAN_HLEN); |
1025 | + vlan_set_encap_proto(skb, vhdr); |
1026 | + |
1027 | + skb = skb_reorder_vlan_header(skb); |
1028 | + if (unlikely(!skb)) |
1029 | + goto err_free; |
1030 | + |
1031 | + skb_reset_network_header(skb); |
1032 | + skb_reset_transport_header(skb); |
1033 | + skb_reset_mac_len(skb); |
1034 | + |
1035 | + return skb; |
1036 | + |
1037 | +err_free: |
1038 | + kfree_skb(skb); |
1039 | + return NULL; |
1040 | +} |
1041 | +EXPORT_SYMBOL(skb_vlan_untag); |
1042 | diff --git a/net/ipv4/route.c b/net/ipv4/route.c |
1043 | index ca5a01ed8ed6..487bb6252520 100644 |
1044 | --- a/net/ipv4/route.c |
1045 | +++ b/net/ipv4/route.c |
1046 | @@ -2268,9 +2268,9 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4, |
1047 | return rt; |
1048 | |
1049 | if (flp4->flowi4_proto) |
1050 | - rt = (struct rtable *) xfrm_lookup(net, &rt->dst, |
1051 | - flowi4_to_flowi(flp4), |
1052 | - sk, 0); |
1053 | + rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst, |
1054 | + flowi4_to_flowi(flp4), |
1055 | + sk, 0); |
1056 | |
1057 | return rt; |
1058 | } |
1059 | diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c |
1060 | index b48fba0aaa92..f7d71ec72a47 100644 |
1061 | --- a/net/ipv4/tcp.c |
1062 | +++ b/net/ipv4/tcp.c |
1063 | @@ -1175,13 +1175,6 @@ new_segment: |
1064 | goto wait_for_memory; |
1065 | |
1066 | /* |
1067 | - * All packets are restored as if they have |
1068 | - * already been sent. |
1069 | - */ |
1070 | - if (tp->repair) |
1071 | - TCP_SKB_CB(skb)->when = tcp_time_stamp; |
1072 | - |
1073 | - /* |
1074 | * Check whether we can use HW checksum. |
1075 | */ |
1076 | if (sk->sk_route_caps & NETIF_F_ALL_CSUM) |
1077 | @@ -1190,6 +1183,13 @@ new_segment: |
1078 | skb_entail(sk, skb); |
1079 | copy = size_goal; |
1080 | max = size_goal; |
1081 | + |
1082 | + /* All packets are restored as if they have |
1083 | + * already been sent. skb_mstamp isn't set to |
1084 | + * avoid wrong rtt estimation. |
1085 | + */ |
1086 | + if (tp->repair) |
1087 | + TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED; |
1088 | } |
1089 | |
1090 | /* Try to append data to the end of skb. */ |
1091 | diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c |
1092 | index 3898694d0300..22917918fa80 100644 |
1093 | --- a/net/ipv4/tcp_input.c |
1094 | +++ b/net/ipv4/tcp_input.c |
1095 | @@ -2678,7 +2678,6 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack) |
1096 | */ |
1097 | static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack) |
1098 | { |
1099 | - struct inet_connection_sock *icsk = inet_csk(sk); |
1100 | struct tcp_sock *tp = tcp_sk(sk); |
1101 | bool recovered = !before(tp->snd_una, tp->high_seq); |
1102 | |
1103 | @@ -2704,12 +2703,9 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack) |
1104 | |
1105 | if (recovered) { |
1106 | /* F-RTO RFC5682 sec 3.1 step 2.a and 1st part of step 3.a */ |
1107 | - icsk->icsk_retransmits = 0; |
1108 | tcp_try_undo_recovery(sk); |
1109 | return; |
1110 | } |
1111 | - if (flag & FLAG_DATA_ACKED) |
1112 | - icsk->icsk_retransmits = 0; |
1113 | if (tcp_is_reno(tp)) { |
1114 | /* A Reno DUPACK means new data in F-RTO step 2.b above are |
1115 | * delivered. Lower inflight to clock out (re)tranmissions. |
1116 | @@ -3398,8 +3394,10 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) |
1117 | icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) |
1118 | tcp_rearm_rto(sk); |
1119 | |
1120 | - if (after(ack, prior_snd_una)) |
1121 | + if (after(ack, prior_snd_una)) { |
1122 | flag |= FLAG_SND_UNA_ADVANCED; |
1123 | + icsk->icsk_retransmits = 0; |
1124 | + } |
1125 | |
1126 | prior_fackets = tp->fackets_out; |
1127 | prior_in_flight = tcp_packets_in_flight(tp); |
1128 | diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c |
1129 | index 1e4eac779f51..a782d5be132e 100644 |
1130 | --- a/net/ipv4/tcp_ipv4.c |
1131 | +++ b/net/ipv4/tcp_ipv4.c |
1132 | @@ -269,7 +269,7 @@ EXPORT_SYMBOL(tcp_v4_connect); |
1133 | * It can be called through tcp_release_cb() if socket was owned by user |
1134 | * at the time tcp_v4_err() was called to handle ICMP message. |
1135 | */ |
1136 | -static void tcp_v4_mtu_reduced(struct sock *sk) |
1137 | +void tcp_v4_mtu_reduced(struct sock *sk) |
1138 | { |
1139 | struct dst_entry *dst; |
1140 | struct inet_sock *inet = inet_sk(sk); |
1141 | @@ -300,6 +300,7 @@ static void tcp_v4_mtu_reduced(struct sock *sk) |
1142 | tcp_simple_retransmit(sk); |
1143 | } /* else let the usual retransmit timer handle it */ |
1144 | } |
1145 | +EXPORT_SYMBOL(tcp_v4_mtu_reduced); |
1146 | |
1147 | static void do_redirect(struct sk_buff *skb, struct sock *sk) |
1148 | { |
1149 | @@ -2117,6 +2118,7 @@ const struct inet_connection_sock_af_ops ipv4_specific = { |
1150 | .compat_setsockopt = compat_ip_setsockopt, |
1151 | .compat_getsockopt = compat_ip_getsockopt, |
1152 | #endif |
1153 | + .mtu_reduced = tcp_v4_mtu_reduced, |
1154 | }; |
1155 | EXPORT_SYMBOL(ipv4_specific); |
1156 | |
1157 | @@ -2736,7 +2738,6 @@ struct proto tcp_prot = { |
1158 | .sendpage = tcp_sendpage, |
1159 | .backlog_rcv = tcp_v4_do_rcv, |
1160 | .release_cb = tcp_release_cb, |
1161 | - .mtu_reduced = tcp_v4_mtu_reduced, |
1162 | .hash = inet_hash, |
1163 | .unhash = inet_unhash, |
1164 | .get_port = inet_csk_get_port, |
1165 | diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c |
1166 | index b3d1addd816b..91b98e5a17aa 100644 |
1167 | --- a/net/ipv4/tcp_output.c |
1168 | +++ b/net/ipv4/tcp_output.c |
1169 | @@ -787,7 +787,7 @@ void tcp_release_cb(struct sock *sk) |
1170 | __sock_put(sk); |
1171 | } |
1172 | if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) { |
1173 | - sk->sk_prot->mtu_reduced(sk); |
1174 | + inet_csk(sk)->icsk_af_ops->mtu_reduced(sk); |
1175 | __sock_put(sk); |
1176 | } |
1177 | } |
1178 | @@ -1876,8 +1876,11 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, |
1179 | tso_segs = tcp_init_tso_segs(sk, skb, mss_now); |
1180 | BUG_ON(!tso_segs); |
1181 | |
1182 | - if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) |
1183 | + if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) { |
1184 | + /* "when" is used as a start point for the retransmit timer */ |
1185 | + TCP_SKB_CB(skb)->when = tcp_time_stamp; |
1186 | goto repair; /* Skip network transmission */ |
1187 | + } |
1188 | |
1189 | cwnd_quota = tcp_cwnd_test(tp, skb); |
1190 | if (!cwnd_quota) { |
1191 | @@ -2066,9 +2069,7 @@ void tcp_send_loss_probe(struct sock *sk) |
1192 | if (WARN_ON(!skb || !tcp_skb_pcount(skb))) |
1193 | goto rearm_timer; |
1194 | |
1195 | - /* Probe with zero data doesn't trigger fast recovery. */ |
1196 | - if (skb->len > 0) |
1197 | - err = __tcp_retransmit_skb(sk, skb); |
1198 | + err = __tcp_retransmit_skb(sk, skb); |
1199 | |
1200 | /* Record snd_nxt for loss detection. */ |
1201 | if (likely(!err)) |
1202 | diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c |
1203 | index 6c7fa0853fc7..3f0ec063d7f8 100644 |
1204 | --- a/net/ipv6/addrconf.c |
1205 | +++ b/net/ipv6/addrconf.c |
1206 | @@ -1684,14 +1684,12 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp) |
1207 | addrconf_mod_dad_work(ifp, 0); |
1208 | } |
1209 | |
1210 | -/* Join to solicited addr multicast group. */ |
1211 | - |
1212 | +/* Join to solicited addr multicast group. |
1213 | + * caller must hold RTNL */ |
1214 | void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr) |
1215 | { |
1216 | struct in6_addr maddr; |
1217 | |
1218 | - ASSERT_RTNL(); |
1219 | - |
1220 | if (dev->flags&(IFF_LOOPBACK|IFF_NOARP)) |
1221 | return; |
1222 | |
1223 | @@ -1699,12 +1697,11 @@ void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr) |
1224 | ipv6_dev_mc_inc(dev, &maddr); |
1225 | } |
1226 | |
1227 | +/* caller must hold RTNL */ |
1228 | void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr) |
1229 | { |
1230 | struct in6_addr maddr; |
1231 | |
1232 | - ASSERT_RTNL(); |
1233 | - |
1234 | if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP)) |
1235 | return; |
1236 | |
1237 | @@ -1712,12 +1709,11 @@ void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr) |
1238 | __ipv6_dev_mc_dec(idev, &maddr); |
1239 | } |
1240 | |
1241 | +/* caller must hold RTNL */ |
1242 | static void addrconf_join_anycast(struct inet6_ifaddr *ifp) |
1243 | { |
1244 | struct in6_addr addr; |
1245 | |
1246 | - ASSERT_RTNL(); |
1247 | - |
1248 | if (ifp->prefix_len >= 127) /* RFC 6164 */ |
1249 | return; |
1250 | ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); |
1251 | @@ -1726,12 +1722,11 @@ static void addrconf_join_anycast(struct inet6_ifaddr *ifp) |
1252 | ipv6_dev_ac_inc(ifp->idev->dev, &addr); |
1253 | } |
1254 | |
1255 | +/* caller must hold RTNL */ |
1256 | static void addrconf_leave_anycast(struct inet6_ifaddr *ifp) |
1257 | { |
1258 | struct in6_addr addr; |
1259 | |
1260 | - ASSERT_RTNL(); |
1261 | - |
1262 | if (ifp->prefix_len >= 127) /* RFC 6164 */ |
1263 | return; |
1264 | ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); |
1265 | diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c |
1266 | index 210183244689..ff2de7d9d8e6 100644 |
1267 | --- a/net/ipv6/anycast.c |
1268 | +++ b/net/ipv6/anycast.c |
1269 | @@ -77,6 +77,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr) |
1270 | pac->acl_next = NULL; |
1271 | pac->acl_addr = *addr; |
1272 | |
1273 | + rtnl_lock(); |
1274 | rcu_read_lock(); |
1275 | if (ifindex == 0) { |
1276 | struct rt6_info *rt; |
1277 | @@ -137,6 +138,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr) |
1278 | |
1279 | error: |
1280 | rcu_read_unlock(); |
1281 | + rtnl_unlock(); |
1282 | if (pac) |
1283 | sock_kfree_s(sk, pac, sizeof(*pac)); |
1284 | return err; |
1285 | @@ -171,11 +173,13 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) |
1286 | |
1287 | spin_unlock_bh(&ipv6_sk_ac_lock); |
1288 | |
1289 | + rtnl_lock(); |
1290 | rcu_read_lock(); |
1291 | dev = dev_get_by_index_rcu(net, pac->acl_ifindex); |
1292 | if (dev) |
1293 | ipv6_dev_ac_dec(dev, &pac->acl_addr); |
1294 | rcu_read_unlock(); |
1295 | + rtnl_unlock(); |
1296 | |
1297 | sock_kfree_s(sk, pac, sizeof(*pac)); |
1298 | return 0; |
1299 | @@ -198,6 +202,7 @@ void ipv6_sock_ac_close(struct sock *sk) |
1300 | spin_unlock_bh(&ipv6_sk_ac_lock); |
1301 | |
1302 | prev_index = 0; |
1303 | + rtnl_lock(); |
1304 | rcu_read_lock(); |
1305 | while (pac) { |
1306 | struct ipv6_ac_socklist *next = pac->acl_next; |
1307 | @@ -212,6 +217,7 @@ void ipv6_sock_ac_close(struct sock *sk) |
1308 | pac = next; |
1309 | } |
1310 | rcu_read_unlock(); |
1311 | + rtnl_unlock(); |
1312 | } |
1313 | |
1314 | static void aca_put(struct ifacaddr6 *ac) |
1315 | @@ -233,6 +239,8 @@ int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr) |
1316 | struct rt6_info *rt; |
1317 | int err; |
1318 | |
1319 | + ASSERT_RTNL(); |
1320 | + |
1321 | idev = in6_dev_get(dev); |
1322 | |
1323 | if (idev == NULL) |
1324 | @@ -302,6 +310,8 @@ int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr) |
1325 | { |
1326 | struct ifacaddr6 *aca, *prev_aca; |
1327 | |
1328 | + ASSERT_RTNL(); |
1329 | + |
1330 | write_lock_bh(&idev->lock); |
1331 | prev_aca = NULL; |
1332 | for (aca = idev->ac_list; aca; aca = aca->aca_next) { |
1333 | diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c |
1334 | index 2465d18e8a26..cb57aa862177 100644 |
1335 | --- a/net/ipv6/ip6_gre.c |
1336 | +++ b/net/ipv6/ip6_gre.c |
1337 | @@ -787,7 +787,7 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev) |
1338 | encap_limit = t->parms.encap_limit; |
1339 | |
1340 | memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); |
1341 | - fl6.flowi6_proto = IPPROTO_IPIP; |
1342 | + fl6.flowi6_proto = IPPROTO_GRE; |
1343 | |
1344 | dsfield = ipv4_get_dsfield(iph); |
1345 | |
1346 | @@ -837,7 +837,7 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev) |
1347 | encap_limit = t->parms.encap_limit; |
1348 | |
1349 | memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); |
1350 | - fl6.flowi6_proto = IPPROTO_IPV6; |
1351 | + fl6.flowi6_proto = IPPROTO_GRE; |
1352 | |
1353 | dsfield = ipv6_get_dsfield(ipv6h); |
1354 | if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) |
1355 | diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c |
1356 | index 073e5a6fc631..12f7ef0f243a 100644 |
1357 | --- a/net/ipv6/ip6_output.c |
1358 | +++ b/net/ipv6/ip6_output.c |
1359 | @@ -1008,7 +1008,7 @@ struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, |
1360 | if (final_dst) |
1361 | fl6->daddr = *final_dst; |
1362 | |
1363 | - return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); |
1364 | + return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); |
1365 | } |
1366 | EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow); |
1367 | |
1368 | @@ -1040,7 +1040,7 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, |
1369 | if (final_dst) |
1370 | fl6->daddr = *final_dst; |
1371 | |
1372 | - return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); |
1373 | + return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); |
1374 | } |
1375 | EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow); |
1376 | |
1377 | diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c |
1378 | index 08b367c6b9cf..761e4586ab5f 100644 |
1379 | --- a/net/ipv6/mcast.c |
1380 | +++ b/net/ipv6/mcast.c |
1381 | @@ -172,6 +172,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) |
1382 | mc_lst->next = NULL; |
1383 | mc_lst->addr = *addr; |
1384 | |
1385 | + rtnl_lock(); |
1386 | rcu_read_lock(); |
1387 | if (ifindex == 0) { |
1388 | struct rt6_info *rt; |
1389 | @@ -185,6 +186,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) |
1390 | |
1391 | if (dev == NULL) { |
1392 | rcu_read_unlock(); |
1393 | + rtnl_unlock(); |
1394 | sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); |
1395 | return -ENODEV; |
1396 | } |
1397 | @@ -202,6 +204,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) |
1398 | |
1399 | if (err) { |
1400 | rcu_read_unlock(); |
1401 | + rtnl_unlock(); |
1402 | sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); |
1403 | return err; |
1404 | } |
1405 | @@ -212,6 +215,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) |
1406 | spin_unlock(&ipv6_sk_mc_lock); |
1407 | |
1408 | rcu_read_unlock(); |
1409 | + rtnl_unlock(); |
1410 | |
1411 | return 0; |
1412 | } |
1413 | @@ -229,6 +233,7 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) |
1414 | if (!ipv6_addr_is_multicast(addr)) |
1415 | return -EINVAL; |
1416 | |
1417 | + rtnl_lock(); |
1418 | spin_lock(&ipv6_sk_mc_lock); |
1419 | for (lnk = &np->ipv6_mc_list; |
1420 | (mc_lst = rcu_dereference_protected(*lnk, |
1421 | @@ -252,12 +257,15 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) |
1422 | } else |
1423 | (void) ip6_mc_leave_src(sk, mc_lst, NULL); |
1424 | rcu_read_unlock(); |
1425 | + rtnl_unlock(); |
1426 | + |
1427 | atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc); |
1428 | kfree_rcu(mc_lst, rcu); |
1429 | return 0; |
1430 | } |
1431 | } |
1432 | spin_unlock(&ipv6_sk_mc_lock); |
1433 | + rtnl_unlock(); |
1434 | |
1435 | return -EADDRNOTAVAIL; |
1436 | } |
1437 | @@ -302,6 +310,7 @@ void ipv6_sock_mc_close(struct sock *sk) |
1438 | if (!rcu_access_pointer(np->ipv6_mc_list)) |
1439 | return; |
1440 | |
1441 | + rtnl_lock(); |
1442 | spin_lock(&ipv6_sk_mc_lock); |
1443 | while ((mc_lst = rcu_dereference_protected(np->ipv6_mc_list, |
1444 | lockdep_is_held(&ipv6_sk_mc_lock))) != NULL) { |
1445 | @@ -328,6 +337,7 @@ void ipv6_sock_mc_close(struct sock *sk) |
1446 | spin_lock(&ipv6_sk_mc_lock); |
1447 | } |
1448 | spin_unlock(&ipv6_sk_mc_lock); |
1449 | + rtnl_unlock(); |
1450 | } |
1451 | |
1452 | int ip6_mc_source(int add, int omode, struct sock *sk, |
1453 | @@ -845,6 +855,8 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr) |
1454 | struct ifmcaddr6 *mc; |
1455 | struct inet6_dev *idev; |
1456 | |
1457 | + ASSERT_RTNL(); |
1458 | + |
1459 | /* we need to take a reference on idev */ |
1460 | idev = in6_dev_get(dev); |
1461 | |
1462 | @@ -916,6 +928,8 @@ int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr) |
1463 | { |
1464 | struct ifmcaddr6 *ma, **map; |
1465 | |
1466 | + ASSERT_RTNL(); |
1467 | + |
1468 | write_lock_bh(&idev->lock); |
1469 | for (map = &idev->mc_list; (ma=*map) != NULL; map = &ma->next) { |
1470 | if (ipv6_addr_equal(&ma->mca_addr, addr)) { |
1471 | diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c |
1472 | index fe548ba72687..b12b11b123ff 100644 |
1473 | --- a/net/ipv6/sit.c |
1474 | +++ b/net/ipv6/sit.c |
1475 | @@ -101,19 +101,19 @@ static struct ip_tunnel *ipip6_tunnel_lookup(struct net *net, |
1476 | for_each_ip_tunnel_rcu(t, sitn->tunnels_r_l[h0 ^ h1]) { |
1477 | if (local == t->parms.iph.saddr && |
1478 | remote == t->parms.iph.daddr && |
1479 | - (!dev || !t->parms.link || dev->iflink == t->parms.link) && |
1480 | + (!dev || !t->parms.link || dev->ifindex == t->parms.link) && |
1481 | (t->dev->flags & IFF_UP)) |
1482 | return t; |
1483 | } |
1484 | for_each_ip_tunnel_rcu(t, sitn->tunnels_r[h0]) { |
1485 | if (remote == t->parms.iph.daddr && |
1486 | - (!dev || !t->parms.link || dev->iflink == t->parms.link) && |
1487 | + (!dev || !t->parms.link || dev->ifindex == t->parms.link) && |
1488 | (t->dev->flags & IFF_UP)) |
1489 | return t; |
1490 | } |
1491 | for_each_ip_tunnel_rcu(t, sitn->tunnels_l[h1]) { |
1492 | if (local == t->parms.iph.saddr && |
1493 | - (!dev || !t->parms.link || dev->iflink == t->parms.link) && |
1494 | + (!dev || !t->parms.link || dev->ifindex == t->parms.link) && |
1495 | (t->dev->flags & IFF_UP)) |
1496 | return t; |
1497 | } |
1498 | diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c |
1499 | index 889079b2ea85..a4f890dd223a 100644 |
1500 | --- a/net/ipv6/tcp_ipv6.c |
1501 | +++ b/net/ipv6/tcp_ipv6.c |
1502 | @@ -1668,6 +1668,7 @@ static const struct inet_connection_sock_af_ops ipv6_specific = { |
1503 | .compat_setsockopt = compat_ipv6_setsockopt, |
1504 | .compat_getsockopt = compat_ipv6_getsockopt, |
1505 | #endif |
1506 | + .mtu_reduced = tcp_v6_mtu_reduced, |
1507 | }; |
1508 | |
1509 | #ifdef CONFIG_TCP_MD5SIG |
1510 | @@ -1699,6 +1700,7 @@ static const struct inet_connection_sock_af_ops ipv6_mapped = { |
1511 | .compat_setsockopt = compat_ipv6_setsockopt, |
1512 | .compat_getsockopt = compat_ipv6_getsockopt, |
1513 | #endif |
1514 | + .mtu_reduced = tcp_v4_mtu_reduced, |
1515 | }; |
1516 | |
1517 | #ifdef CONFIG_TCP_MD5SIG |
1518 | @@ -1935,7 +1937,6 @@ struct proto tcpv6_prot = { |
1519 | .sendpage = tcp_sendpage, |
1520 | .backlog_rcv = tcp_v6_do_rcv, |
1521 | .release_cb = tcp_release_cb, |
1522 | - .mtu_reduced = tcp_v6_mtu_reduced, |
1523 | .hash = tcp_v6_hash, |
1524 | .unhash = inet_unhash, |
1525 | .get_port = inet_csk_get_port, |
1526 | diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c |
1527 | index 1e05bbde47ba..da8d067d6107 100644 |
1528 | --- a/net/l2tp/l2tp_ppp.c |
1529 | +++ b/net/l2tp/l2tp_ppp.c |
1530 | @@ -758,7 +758,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, |
1531 | /* If PMTU discovery was enabled, use the MTU that was discovered */ |
1532 | dst = sk_dst_get(tunnel->sock); |
1533 | if (dst != NULL) { |
1534 | - u32 pmtu = dst_mtu(__sk_dst_get(tunnel->sock)); |
1535 | + u32 pmtu = dst_mtu(dst); |
1536 | + |
1537 | if (pmtu != 0) |
1538 | session->mtu = session->mru = pmtu - |
1539 | PPPOL2TP_HEADER_OVERHEAD; |
1540 | diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c |
1541 | index 0dfe894afd48..c375d731587f 100644 |
1542 | --- a/net/netlink/af_netlink.c |
1543 | +++ b/net/netlink/af_netlink.c |
1544 | @@ -205,7 +205,7 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb, |
1545 | nskb->protocol = htons((u16) sk->sk_protocol); |
1546 | nskb->pkt_type = netlink_is_kernel(sk) ? |
1547 | PACKET_KERNEL : PACKET_USER; |
1548 | - |
1549 | + skb_reset_network_header(nskb); |
1550 | ret = dev_queue_xmit(nskb); |
1551 | if (unlikely(ret > 0)) |
1552 | ret = net_xmit_errno(ret); |
1553 | diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c |
1554 | index 2c77e7b1a913..600c7646b3d3 100644 |
1555 | --- a/net/openvswitch/actions.c |
1556 | +++ b/net/openvswitch/actions.c |
1557 | @@ -42,6 +42,9 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, |
1558 | |
1559 | static int make_writable(struct sk_buff *skb, int write_len) |
1560 | { |
1561 | + if (!pskb_may_pull(skb, write_len)) |
1562 | + return -ENOMEM; |
1563 | + |
1564 | if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) |
1565 | return 0; |
1566 | |
1567 | @@ -70,6 +73,8 @@ static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci) |
1568 | |
1569 | vlan_set_encap_proto(skb, vhdr); |
1570 | skb->mac_header += VLAN_HLEN; |
1571 | + if (skb_network_offset(skb) < ETH_HLEN) |
1572 | + skb_set_network_header(skb, ETH_HLEN); |
1573 | skb_reset_mac_len(skb); |
1574 | |
1575 | return 0; |
1576 | diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c |
1577 | index 48a6a93db296..48b181797d7b 100644 |
1578 | --- a/net/packet/af_packet.c |
1579 | +++ b/net/packet/af_packet.c |
1580 | @@ -635,6 +635,7 @@ static void init_prb_bdqc(struct packet_sock *po, |
1581 | p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov); |
1582 | p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv; |
1583 | |
1584 | + p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv); |
1585 | prb_init_ft_ops(p1, req_u); |
1586 | prb_setup_retire_blk_timer(po, tx_ring); |
1587 | prb_open_block(p1, pbd); |
1588 | @@ -1946,6 +1947,18 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, |
1589 | if ((int)snaplen < 0) |
1590 | snaplen = 0; |
1591 | } |
1592 | + } else if (unlikely(macoff + snaplen > |
1593 | + GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) { |
1594 | + u32 nval; |
1595 | + |
1596 | + nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff; |
1597 | + pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n", |
1598 | + snaplen, nval, macoff); |
1599 | + snaplen = nval; |
1600 | + if (unlikely((int)snaplen < 0)) { |
1601 | + snaplen = 0; |
1602 | + macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len; |
1603 | + } |
1604 | } |
1605 | spin_lock(&sk->sk_receive_queue.lock); |
1606 | h.raw = packet_current_rx_frame(po, skb, |
1607 | @@ -3779,6 +3792,10 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, |
1608 | goto out; |
1609 | if (unlikely(req->tp_block_size & (PAGE_SIZE - 1))) |
1610 | goto out; |
1611 | + if (po->tp_version >= TPACKET_V3 && |
1612 | + (int)(req->tp_block_size - |
1613 | + BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0) |
1614 | + goto out; |
1615 | if (unlikely(req->tp_frame_size < po->tp_hdrlen + |
1616 | po->tp_reserve)) |
1617 | goto out; |
1618 | diff --git a/net/packet/internal.h b/net/packet/internal.h |
1619 | index eb9580a6b25f..cdddf6a30399 100644 |
1620 | --- a/net/packet/internal.h |
1621 | +++ b/net/packet/internal.h |
1622 | @@ -29,6 +29,7 @@ struct tpacket_kbdq_core { |
1623 | char *pkblk_start; |
1624 | char *pkblk_end; |
1625 | int kblk_size; |
1626 | + unsigned int max_frame_len; |
1627 | unsigned int knum_blocks; |
1628 | uint64_t knxt_seq_num; |
1629 | char *prev; |
1630 | diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c |
1631 | index 5170a1ff95a1..7194fe8589b0 100644 |
1632 | --- a/net/sctp/sm_statefuns.c |
1633 | +++ b/net/sctp/sm_statefuns.c |
1634 | @@ -1775,9 +1775,22 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(struct net *net, |
1635 | /* Update the content of current association. */ |
1636 | sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); |
1637 | sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); |
1638 | - sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, |
1639 | - SCTP_STATE(SCTP_STATE_ESTABLISHED)); |
1640 | - sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); |
1641 | + if (sctp_state(asoc, SHUTDOWN_PENDING) && |
1642 | + (sctp_sstate(asoc->base.sk, CLOSING) || |
1643 | + sock_flag(asoc->base.sk, SOCK_DEAD))) { |
1644 | + /* if were currently in SHUTDOWN_PENDING, but the socket |
1645 | + * has been closed by user, don't transition to ESTABLISHED. |
1646 | + * Instead trigger SHUTDOWN bundled with COOKIE_ACK. |
1647 | + */ |
1648 | + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); |
1649 | + return sctp_sf_do_9_2_start_shutdown(net, ep, asoc, |
1650 | + SCTP_ST_CHUNK(0), NULL, |
1651 | + commands); |
1652 | + } else { |
1653 | + sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, |
1654 | + SCTP_STATE(SCTP_STATE_ESTABLISHED)); |
1655 | + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); |
1656 | + } |
1657 | return SCTP_DISPOSITION_CONSUME; |
1658 | |
1659 | nomem_ev: |
1660 | diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c |
1661 | index 1d5c7bf29938..59cf325f2772 100644 |
1662 | --- a/net/xfrm/xfrm_policy.c |
1663 | +++ b/net/xfrm/xfrm_policy.c |
1664 | @@ -41,6 +41,11 @@ |
1665 | |
1666 | static struct dst_entry *xfrm_policy_sk_bundles; |
1667 | |
1668 | +struct xfrm_flo { |
1669 | + struct dst_entry *dst_orig; |
1670 | + u8 flags; |
1671 | +}; |
1672 | + |
1673 | static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock); |
1674 | static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO] |
1675 | __read_mostly; |
1676 | @@ -1889,13 +1894,14 @@ static int xdst_queue_output(struct sk_buff *skb) |
1677 | } |
1678 | |
1679 | static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net, |
1680 | - struct dst_entry *dst, |
1681 | + struct xfrm_flo *xflo, |
1682 | const struct flowi *fl, |
1683 | int num_xfrms, |
1684 | u16 family) |
1685 | { |
1686 | int err; |
1687 | struct net_device *dev; |
1688 | + struct dst_entry *dst; |
1689 | struct dst_entry *dst1; |
1690 | struct xfrm_dst *xdst; |
1691 | |
1692 | @@ -1903,9 +1909,12 @@ static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net, |
1693 | if (IS_ERR(xdst)) |
1694 | return xdst; |
1695 | |
1696 | - if (net->xfrm.sysctl_larval_drop || num_xfrms <= 0) |
1697 | + if (!(xflo->flags & XFRM_LOOKUP_QUEUE) || |
1698 | + net->xfrm.sysctl_larval_drop || |
1699 | + num_xfrms <= 0) |
1700 | return xdst; |
1701 | |
1702 | + dst = xflo->dst_orig; |
1703 | dst1 = &xdst->u.dst; |
1704 | dst_hold(dst); |
1705 | xdst->route = dst; |
1706 | @@ -1947,7 +1956,7 @@ static struct flow_cache_object * |
1707 | xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, |
1708 | struct flow_cache_object *oldflo, void *ctx) |
1709 | { |
1710 | - struct dst_entry *dst_orig = (struct dst_entry *)ctx; |
1711 | + struct xfrm_flo *xflo = (struct xfrm_flo *)ctx; |
1712 | struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; |
1713 | struct xfrm_dst *xdst, *new_xdst; |
1714 | int num_pols = 0, num_xfrms = 0, i, err, pol_dead; |
1715 | @@ -1988,7 +1997,8 @@ xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, |
1716 | goto make_dummy_bundle; |
1717 | } |
1718 | |
1719 | - new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig); |
1720 | + new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, |
1721 | + xflo->dst_orig); |
1722 | if (IS_ERR(new_xdst)) { |
1723 | err = PTR_ERR(new_xdst); |
1724 | if (err != -EAGAIN) |
1725 | @@ -2022,7 +2032,7 @@ make_dummy_bundle: |
1726 | /* We found policies, but there's no bundles to instantiate: |
1727 | * either because the policy blocks, has no transformations or |
1728 | * we could not build template (no xfrm_states).*/ |
1729 | - xdst = xfrm_create_dummy_bundle(net, dst_orig, fl, num_xfrms, family); |
1730 | + xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family); |
1731 | if (IS_ERR(xdst)) { |
1732 | xfrm_pols_put(pols, num_pols); |
1733 | return ERR_CAST(xdst); |
1734 | @@ -2121,13 +2131,18 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, |
1735 | } |
1736 | |
1737 | if (xdst == NULL) { |
1738 | + struct xfrm_flo xflo; |
1739 | + |
1740 | + xflo.dst_orig = dst_orig; |
1741 | + xflo.flags = flags; |
1742 | + |
1743 | /* To accelerate a bit... */ |
1744 | if ((dst_orig->flags & DST_NOXFRM) || |
1745 | !net->xfrm.policy_count[XFRM_POLICY_OUT]) |
1746 | goto nopol; |
1747 | |
1748 | flo = flow_cache_lookup(net, fl, family, dir, |
1749 | - xfrm_bundle_lookup, dst_orig); |
1750 | + xfrm_bundle_lookup, &xflo); |
1751 | if (flo == NULL) |
1752 | goto nopol; |
1753 | if (IS_ERR(flo)) { |
1754 | @@ -2155,7 +2170,7 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, |
1755 | xfrm_pols_put(pols, drop_pols); |
1756 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); |
1757 | |
1758 | - return make_blackhole(net, family, dst_orig); |
1759 | + return ERR_PTR(-EREMOTE); |
1760 | } |
1761 | |
1762 | err = -EAGAIN; |
1763 | @@ -2212,6 +2227,23 @@ dropdst: |
1764 | } |
1765 | EXPORT_SYMBOL(xfrm_lookup); |
1766 | |
1767 | +/* Callers of xfrm_lookup_route() must ensure a call to dst_output(). |
1768 | + * Otherwise we may send out blackholed packets. |
1769 | + */ |
1770 | +struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig, |
1771 | + const struct flowi *fl, |
1772 | + struct sock *sk, int flags) |
1773 | +{ |
1774 | + struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk, |
1775 | + flags | XFRM_LOOKUP_QUEUE); |
1776 | + |
1777 | + if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE) |
1778 | + return make_blackhole(net, dst_orig->ops->family, dst_orig); |
1779 | + |
1780 | + return dst; |
1781 | +} |
1782 | +EXPORT_SYMBOL(xfrm_lookup_route); |
1783 | + |
1784 | static inline int |
1785 | xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl) |
1786 | { |
1787 | @@ -2477,7 +2509,7 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family) |
1788 | |
1789 | skb_dst_force(skb); |
1790 | |
1791 | - dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0); |
1792 | + dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE); |
1793 | if (IS_ERR(dst)) { |
1794 | res = 0; |
1795 | dst = NULL; |