Contents of /trunk/kernel-alx/patches-3.10/0157-3.10.58-all-fixes.patch
Parent Directory | Revision Log
Revision 2645 -
(show annotations)
(download)
Tue Jul 21 16:20:20 2015 UTC (9 years, 2 months ago) by niro
File size: 22812 byte(s)
-linux-3.10.58
1 | diff --git a/Makefile b/Makefile |
2 | index 9df630a513b7..c27454b8ca3e 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,6 +1,6 @@ |
6 | VERSION = 3 |
7 | PATCHLEVEL = 10 |
8 | -SUBLEVEL = 57 |
9 | +SUBLEVEL = 58 |
10 | EXTRAVERSION = |
11 | NAME = TOSSUG Baby Fish |
12 | |
13 | diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c |
14 | index 4942ddf9c8ae..3de4069f020e 100644 |
15 | --- a/drivers/net/ethernet/broadcom/tg3.c |
16 | +++ b/drivers/net/ethernet/broadcom/tg3.c |
17 | @@ -6767,7 +6767,8 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) |
18 | skb->protocol = eth_type_trans(skb, tp->dev); |
19 | |
20 | if (len > (tp->dev->mtu + ETH_HLEN) && |
21 | - skb->protocol != htons(ETH_P_8021Q)) { |
22 | + skb->protocol != htons(ETH_P_8021Q) && |
23 | + skb->protocol != htons(ETH_P_8021AD)) { |
24 | dev_kfree_skb(skb); |
25 | goto drop_it_no_recycle; |
26 | } |
27 | @@ -7759,8 +7760,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) |
28 | |
29 | entry = tnapi->tx_prod; |
30 | base_flags = 0; |
31 | - if (skb->ip_summed == CHECKSUM_PARTIAL) |
32 | - base_flags |= TXD_FLAG_TCPUDP_CSUM; |
33 | |
34 | mss = skb_shinfo(skb)->gso_size; |
35 | if (mss) { |
36 | @@ -7776,6 +7775,13 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) |
37 | |
38 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN; |
39 | |
40 | + /* HW/FW can not correctly segment packets that have been |
41 | + * vlan encapsulated. |
42 | + */ |
43 | + if (skb->protocol == htons(ETH_P_8021Q) || |
44 | + skb->protocol == htons(ETH_P_8021AD)) |
45 | + return tg3_tso_bug(tp, skb); |
46 | + |
47 | if (!skb_is_gso_v6(skb)) { |
48 | iph->check = 0; |
49 | iph->tot_len = htons(mss + hdr_len); |
50 | @@ -7822,6 +7828,17 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) |
51 | base_flags |= tsflags << 12; |
52 | } |
53 | } |
54 | + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { |
55 | + /* HW/FW can not correctly checksum packets that have been |
56 | + * vlan encapsulated. |
57 | + */ |
58 | + if (skb->protocol == htons(ETH_P_8021Q) || |
59 | + skb->protocol == htons(ETH_P_8021AD)) { |
60 | + if (skb_checksum_help(skb)) |
61 | + goto drop; |
62 | + } else { |
63 | + base_flags |= TXD_FLAG_TCPUDP_CSUM; |
64 | + } |
65 | } |
66 | |
67 | if (tg3_flag(tp, USE_JUMBO_BDFLAG) && |
68 | diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c |
69 | index 7be9788ed0f6..4fb93c5b5563 100644 |
70 | --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c |
71 | +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c |
72 | @@ -856,6 +856,10 @@ static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type) |
73 | return -ENOMEM; |
74 | dmatest_bus = pci_map_page(mgp->pdev, dmatest_page, 0, PAGE_SIZE, |
75 | DMA_BIDIRECTIONAL); |
76 | + if (unlikely(pci_dma_mapping_error(mgp->pdev, dmatest_bus))) { |
77 | + __free_page(dmatest_page); |
78 | + return -ENOMEM; |
79 | + } |
80 | |
81 | /* Run a small DMA test. |
82 | * The magic multipliers to the length tell the firmware |
83 | @@ -1191,6 +1195,7 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx, |
84 | int bytes, int watchdog) |
85 | { |
86 | struct page *page; |
87 | + dma_addr_t bus; |
88 | int idx; |
89 | #if MYRI10GE_ALLOC_SIZE > 4096 |
90 | int end_offset; |
91 | @@ -1215,11 +1220,21 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx, |
92 | rx->watchdog_needed = 1; |
93 | return; |
94 | } |
95 | + |
96 | + bus = pci_map_page(mgp->pdev, page, 0, |
97 | + MYRI10GE_ALLOC_SIZE, |
98 | + PCI_DMA_FROMDEVICE); |
99 | + if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) { |
100 | + __free_pages(page, MYRI10GE_ALLOC_ORDER); |
101 | + if (rx->fill_cnt - rx->cnt < 16) |
102 | + rx->watchdog_needed = 1; |
103 | + return; |
104 | + } |
105 | + |
106 | rx->page = page; |
107 | rx->page_offset = 0; |
108 | - rx->bus = pci_map_page(mgp->pdev, page, 0, |
109 | - MYRI10GE_ALLOC_SIZE, |
110 | - PCI_DMA_FROMDEVICE); |
111 | + rx->bus = bus; |
112 | + |
113 | } |
114 | rx->info[idx].page = rx->page; |
115 | rx->info[idx].page_offset = rx->page_offset; |
116 | @@ -2576,6 +2591,35 @@ myri10ge_submit_req(struct myri10ge_tx_buf *tx, struct mcp_kreq_ether_send *src, |
117 | mb(); |
118 | } |
119 | |
120 | +static void myri10ge_unmap_tx_dma(struct myri10ge_priv *mgp, |
121 | + struct myri10ge_tx_buf *tx, int idx) |
122 | +{ |
123 | + unsigned int len; |
124 | + int last_idx; |
125 | + |
126 | + /* Free any DMA resources we've alloced and clear out the skb slot */ |
127 | + last_idx = (idx + 1) & tx->mask; |
128 | + idx = tx->req & tx->mask; |
129 | + do { |
130 | + len = dma_unmap_len(&tx->info[idx], len); |
131 | + if (len) { |
132 | + if (tx->info[idx].skb != NULL) |
133 | + pci_unmap_single(mgp->pdev, |
134 | + dma_unmap_addr(&tx->info[idx], |
135 | + bus), len, |
136 | + PCI_DMA_TODEVICE); |
137 | + else |
138 | + pci_unmap_page(mgp->pdev, |
139 | + dma_unmap_addr(&tx->info[idx], |
140 | + bus), len, |
141 | + PCI_DMA_TODEVICE); |
142 | + dma_unmap_len_set(&tx->info[idx], len, 0); |
143 | + tx->info[idx].skb = NULL; |
144 | + } |
145 | + idx = (idx + 1) & tx->mask; |
146 | + } while (idx != last_idx); |
147 | +} |
148 | + |
149 | /* |
150 | * Transmit a packet. We need to split the packet so that a single |
151 | * segment does not cross myri10ge->tx_boundary, so this makes segment |
152 | @@ -2599,7 +2643,7 @@ static netdev_tx_t myri10ge_xmit(struct sk_buff *skb, |
153 | u32 low; |
154 | __be32 high_swapped; |
155 | unsigned int len; |
156 | - int idx, last_idx, avail, frag_cnt, frag_idx, count, mss, max_segments; |
157 | + int idx, avail, frag_cnt, frag_idx, count, mss, max_segments; |
158 | u16 pseudo_hdr_offset, cksum_offset, queue; |
159 | int cum_len, seglen, boundary, rdma_count; |
160 | u8 flags, odd_flag; |
161 | @@ -2696,9 +2740,12 @@ again: |
162 | |
163 | /* map the skb for DMA */ |
164 | len = skb_headlen(skb); |
165 | + bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE); |
166 | + if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) |
167 | + goto drop; |
168 | + |
169 | idx = tx->req & tx->mask; |
170 | tx->info[idx].skb = skb; |
171 | - bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE); |
172 | dma_unmap_addr_set(&tx->info[idx], bus, bus); |
173 | dma_unmap_len_set(&tx->info[idx], len, len); |
174 | |
175 | @@ -2797,12 +2844,16 @@ again: |
176 | break; |
177 | |
178 | /* map next fragment for DMA */ |
179 | - idx = (count + tx->req) & tx->mask; |
180 | frag = &skb_shinfo(skb)->frags[frag_idx]; |
181 | frag_idx++; |
182 | len = skb_frag_size(frag); |
183 | bus = skb_frag_dma_map(&mgp->pdev->dev, frag, 0, len, |
184 | DMA_TO_DEVICE); |
185 | + if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) { |
186 | + myri10ge_unmap_tx_dma(mgp, tx, idx); |
187 | + goto drop; |
188 | + } |
189 | + idx = (count + tx->req) & tx->mask; |
190 | dma_unmap_addr_set(&tx->info[idx], bus, bus); |
191 | dma_unmap_len_set(&tx->info[idx], len, len); |
192 | } |
193 | @@ -2833,31 +2884,8 @@ again: |
194 | return NETDEV_TX_OK; |
195 | |
196 | abort_linearize: |
197 | - /* Free any DMA resources we've alloced and clear out the skb |
198 | - * slot so as to not trip up assertions, and to avoid a |
199 | - * double-free if linearizing fails */ |
200 | + myri10ge_unmap_tx_dma(mgp, tx, idx); |
201 | |
202 | - last_idx = (idx + 1) & tx->mask; |
203 | - idx = tx->req & tx->mask; |
204 | - tx->info[idx].skb = NULL; |
205 | - do { |
206 | - len = dma_unmap_len(&tx->info[idx], len); |
207 | - if (len) { |
208 | - if (tx->info[idx].skb != NULL) |
209 | - pci_unmap_single(mgp->pdev, |
210 | - dma_unmap_addr(&tx->info[idx], |
211 | - bus), len, |
212 | - PCI_DMA_TODEVICE); |
213 | - else |
214 | - pci_unmap_page(mgp->pdev, |
215 | - dma_unmap_addr(&tx->info[idx], |
216 | - bus), len, |
217 | - PCI_DMA_TODEVICE); |
218 | - dma_unmap_len_set(&tx->info[idx], len, 0); |
219 | - tx->info[idx].skb = NULL; |
220 | - } |
221 | - idx = (idx + 1) & tx->mask; |
222 | - } while (idx != last_idx); |
223 | if (skb_is_gso(skb)) { |
224 | netdev_err(mgp->dev, "TSO but wanted to linearize?!?!?\n"); |
225 | goto drop; |
226 | diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c |
227 | index aea78fc2e48f..59e9c56e5b8a 100644 |
228 | --- a/drivers/net/hyperv/netvsc_drv.c |
229 | +++ b/drivers/net/hyperv/netvsc_drv.c |
230 | @@ -138,6 +138,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) |
231 | struct hv_netvsc_packet *packet; |
232 | int ret; |
233 | unsigned int i, num_pages, npg_data; |
234 | + u32 skb_length = skb->len; |
235 | |
236 | /* Add multipages for skb->data and additional 2 for RNDIS */ |
237 | npg_data = (((unsigned long)skb->data + skb_headlen(skb) - 1) |
238 | @@ -208,7 +209,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) |
239 | ret = rndis_filter_send(net_device_ctx->device_ctx, |
240 | packet); |
241 | if (ret == 0) { |
242 | - net->stats.tx_bytes += skb->len; |
243 | + net->stats.tx_bytes += skb_length; |
244 | net->stats.tx_packets++; |
245 | } else { |
246 | kfree(packet); |
247 | diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c |
248 | index b5d42fee8a84..c9f56ffdba9a 100644 |
249 | --- a/drivers/usb/core/hub.c |
250 | +++ b/drivers/usb/core/hub.c |
251 | @@ -1951,8 +1951,10 @@ void usb_set_device_state(struct usb_device *udev, |
252 | || new_state == USB_STATE_SUSPENDED) |
253 | ; /* No change to wakeup settings */ |
254 | else if (new_state == USB_STATE_CONFIGURED) |
255 | - wakeup = udev->actconfig->desc.bmAttributes |
256 | - & USB_CONFIG_ATT_WAKEUP; |
257 | + wakeup = (udev->quirks & |
258 | + USB_QUIRK_IGNORE_REMOTE_WAKEUP) ? 0 : |
259 | + udev->actconfig->desc.bmAttributes & |
260 | + USB_CONFIG_ATT_WAKEUP; |
261 | else |
262 | wakeup = 0; |
263 | } |
264 | diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c |
265 | index 1053eb651b2f..a301b3fa622b 100644 |
266 | --- a/drivers/usb/core/quirks.c |
267 | +++ b/drivers/usb/core/quirks.c |
268 | @@ -162,6 +162,10 @@ static const struct usb_device_id usb_interface_quirk_list[] = { |
269 | { USB_VENDOR_AND_INTERFACE_INFO(0x046d, USB_CLASS_VIDEO, 1, 0), |
270 | .driver_info = USB_QUIRK_RESET_RESUME }, |
271 | |
272 | + /* ASUS Base Station(T100) */ |
273 | + { USB_DEVICE(0x0b05, 0x17e0), .driver_info = |
274 | + USB_QUIRK_IGNORE_REMOTE_WAKEUP }, |
275 | + |
276 | { } /* terminating entry must be last */ |
277 | }; |
278 | |
279 | diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c |
280 | index b14379659e35..b22a4bc308e2 100644 |
281 | --- a/drivers/usb/serial/cp210x.c |
282 | +++ b/drivers/usb/serial/cp210x.c |
283 | @@ -122,6 +122,7 @@ static const struct usb_device_id id_table[] = { |
284 | { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */ |
285 | { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ |
286 | { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ |
287 | + { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ |
288 | { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ |
289 | { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ |
290 | { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ |
291 | @@ -155,6 +156,7 @@ static const struct usb_device_id id_table[] = { |
292 | { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ |
293 | { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ |
294 | { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */ |
295 | + { USB_DEVICE(0x1D6F, 0x0010) }, /* Seluxit ApS RF Dongle */ |
296 | { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */ |
297 | { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */ |
298 | { USB_DEVICE(0x1FB9, 0x0100) }, /* Lake Shore Model 121 Current Source */ |
299 | diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h |
300 | index 52f944dfe2fd..49587dc22f5d 100644 |
301 | --- a/include/linux/usb/quirks.h |
302 | +++ b/include/linux/usb/quirks.h |
303 | @@ -30,4 +30,7 @@ |
304 | descriptor */ |
305 | #define USB_QUIRK_DELAY_INIT 0x00000040 |
306 | |
307 | +/* device generates spurious wakeup, ignore remote wakeup capability */ |
308 | +#define USB_QUIRK_IGNORE_REMOTE_WAKEUP 0x00000200 |
309 | + |
310 | #endif /* __LINUX_USB_QUIRKS_H */ |
311 | diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h |
312 | index de2c78529afa..0a8f6f961baa 100644 |
313 | --- a/include/net/inet_connection_sock.h |
314 | +++ b/include/net/inet_connection_sock.h |
315 | @@ -62,6 +62,7 @@ struct inet_connection_sock_af_ops { |
316 | void (*addr2sockaddr)(struct sock *sk, struct sockaddr *); |
317 | int (*bind_conflict)(const struct sock *sk, |
318 | const struct inet_bind_bucket *tb, bool relax); |
319 | + void (*mtu_reduced)(struct sock *sk); |
320 | }; |
321 | |
322 | /** inet_connection_sock - INET connection oriented sock |
323 | diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h |
324 | index 35247271e557..5f39c1cc0766 100644 |
325 | --- a/include/net/sctp/command.h |
326 | +++ b/include/net/sctp/command.h |
327 | @@ -118,7 +118,7 @@ typedef enum { |
328 | * analysis of the state functions, but in reality just taken from |
329 | * thin air in the hopes othat we don't trigger a kernel panic. |
330 | */ |
331 | -#define SCTP_MAX_NUM_COMMANDS 14 |
332 | +#define SCTP_MAX_NUM_COMMANDS 20 |
333 | |
334 | typedef union { |
335 | __s32 i32; |
336 | diff --git a/include/net/sock.h b/include/net/sock.h |
337 | index 26b15c0780be..c0aad07160ef 100644 |
338 | --- a/include/net/sock.h |
339 | +++ b/include/net/sock.h |
340 | @@ -932,7 +932,6 @@ struct proto { |
341 | struct sk_buff *skb); |
342 | |
343 | void (*release_cb)(struct sock *sk); |
344 | - void (*mtu_reduced)(struct sock *sk); |
345 | |
346 | /* Keeping track of sk's, looking them up, and port selection methods. */ |
347 | void (*hash)(struct sock *sk); |
348 | diff --git a/include/net/tcp.h b/include/net/tcp.h |
349 | index 6f87f0873843..29a1a63cd303 100644 |
350 | --- a/include/net/tcp.h |
351 | +++ b/include/net/tcp.h |
352 | @@ -460,6 +460,7 @@ extern const u8 *tcp_parse_md5sig_option(const struct tcphdr *th); |
353 | */ |
354 | |
355 | extern void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb); |
356 | +void tcp_v4_mtu_reduced(struct sock *sk); |
357 | extern int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb); |
358 | extern struct sock * tcp_create_openreq_child(struct sock *sk, |
359 | struct request_sock *req, |
360 | diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c |
361 | index 5d87806d3ade..e025c1c788a1 100644 |
362 | --- a/net/ipv4/tcp_ipv4.c |
363 | +++ b/net/ipv4/tcp_ipv4.c |
364 | @@ -268,7 +268,7 @@ EXPORT_SYMBOL(tcp_v4_connect); |
365 | * It can be called through tcp_release_cb() if socket was owned by user |
366 | * at the time tcp_v4_err() was called to handle ICMP message. |
367 | */ |
368 | -static void tcp_v4_mtu_reduced(struct sock *sk) |
369 | +void tcp_v4_mtu_reduced(struct sock *sk) |
370 | { |
371 | struct dst_entry *dst; |
372 | struct inet_sock *inet = inet_sk(sk); |
373 | @@ -298,6 +298,7 @@ static void tcp_v4_mtu_reduced(struct sock *sk) |
374 | tcp_simple_retransmit(sk); |
375 | } /* else let the usual retransmit timer handle it */ |
376 | } |
377 | +EXPORT_SYMBOL(tcp_v4_mtu_reduced); |
378 | |
379 | static void do_redirect(struct sk_buff *skb, struct sock *sk) |
380 | { |
381 | @@ -2142,6 +2143,7 @@ const struct inet_connection_sock_af_ops ipv4_specific = { |
382 | .compat_setsockopt = compat_ip_setsockopt, |
383 | .compat_getsockopt = compat_ip_getsockopt, |
384 | #endif |
385 | + .mtu_reduced = tcp_v4_mtu_reduced, |
386 | }; |
387 | EXPORT_SYMBOL(ipv4_specific); |
388 | |
389 | @@ -2867,7 +2869,6 @@ struct proto tcp_prot = { |
390 | .sendpage = tcp_sendpage, |
391 | .backlog_rcv = tcp_v4_do_rcv, |
392 | .release_cb = tcp_release_cb, |
393 | - .mtu_reduced = tcp_v4_mtu_reduced, |
394 | .hash = inet_hash, |
395 | .unhash = inet_unhash, |
396 | .get_port = inet_csk_get_port, |
397 | diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c |
398 | index 56e29f0e230e..11ef25c9cf43 100644 |
399 | --- a/net/ipv4/tcp_output.c |
400 | +++ b/net/ipv4/tcp_output.c |
401 | @@ -775,7 +775,7 @@ void tcp_release_cb(struct sock *sk) |
402 | __sock_put(sk); |
403 | } |
404 | if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) { |
405 | - sk->sk_prot->mtu_reduced(sk); |
406 | + inet_csk(sk)->icsk_af_ops->mtu_reduced(sk); |
407 | __sock_put(sk); |
408 | } |
409 | } |
410 | @@ -2036,9 +2036,7 @@ void tcp_send_loss_probe(struct sock *sk) |
411 | if (WARN_ON(!skb || !tcp_skb_pcount(skb))) |
412 | goto rearm_timer; |
413 | |
414 | - /* Probe with zero data doesn't trigger fast recovery. */ |
415 | - if (skb->len > 0) |
416 | - err = __tcp_retransmit_skb(sk, skb); |
417 | + err = __tcp_retransmit_skb(sk, skb); |
418 | |
419 | /* Record snd_nxt for loss detection. */ |
420 | if (likely(!err)) |
421 | diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c |
422 | index 7bcdd0df68db..d0912acd9522 100644 |
423 | --- a/net/ipv6/addrconf.c |
424 | +++ b/net/ipv6/addrconf.c |
425 | @@ -2691,8 +2691,18 @@ static void init_loopback(struct net_device *dev) |
426 | if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE)) |
427 | continue; |
428 | |
429 | - if (sp_ifa->rt) |
430 | - continue; |
431 | + if (sp_ifa->rt) { |
432 | + /* This dst has been added to garbage list when |
433 | + * lo device down, release this obsolete dst and |
434 | + * reallocate a new router for ifa. |
435 | + */ |
436 | + if (sp_ifa->rt->dst.obsolete > 0) { |
437 | + ip6_rt_put(sp_ifa->rt); |
438 | + sp_ifa->rt = NULL; |
439 | + } else { |
440 | + continue; |
441 | + } |
442 | + } |
443 | |
444 | sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0); |
445 | |
446 | diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c |
447 | index 7dca7c43fdf1..250a73e77f57 100644 |
448 | --- a/net/ipv6/ip6_gre.c |
449 | +++ b/net/ipv6/ip6_gre.c |
450 | @@ -787,7 +787,7 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev) |
451 | encap_limit = t->parms.encap_limit; |
452 | |
453 | memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); |
454 | - fl6.flowi6_proto = IPPROTO_IPIP; |
455 | + fl6.flowi6_proto = IPPROTO_GRE; |
456 | |
457 | dsfield = ipv4_get_dsfield(iph); |
458 | |
459 | @@ -837,7 +837,7 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev) |
460 | encap_limit = t->parms.encap_limit; |
461 | |
462 | memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); |
463 | - fl6.flowi6_proto = IPPROTO_IPV6; |
464 | + fl6.flowi6_proto = IPPROTO_GRE; |
465 | |
466 | dsfield = ipv6_get_dsfield(ipv6h); |
467 | if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) |
468 | diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c |
469 | index 8d22460a811b..4ddf67c6355b 100644 |
470 | --- a/net/ipv6/sit.c |
471 | +++ b/net/ipv6/sit.c |
472 | @@ -101,19 +101,19 @@ static struct ip_tunnel *ipip6_tunnel_lookup(struct net *net, |
473 | for_each_ip_tunnel_rcu(t, sitn->tunnels_r_l[h0 ^ h1]) { |
474 | if (local == t->parms.iph.saddr && |
475 | remote == t->parms.iph.daddr && |
476 | - (!dev || !t->parms.link || dev->iflink == t->parms.link) && |
477 | + (!dev || !t->parms.link || dev->ifindex == t->parms.link) && |
478 | (t->dev->flags & IFF_UP)) |
479 | return t; |
480 | } |
481 | for_each_ip_tunnel_rcu(t, sitn->tunnels_r[h0]) { |
482 | if (remote == t->parms.iph.daddr && |
483 | - (!dev || !t->parms.link || dev->iflink == t->parms.link) && |
484 | + (!dev || !t->parms.link || dev->ifindex == t->parms.link) && |
485 | (t->dev->flags & IFF_UP)) |
486 | return t; |
487 | } |
488 | for_each_ip_tunnel_rcu(t, sitn->tunnels_l[h1]) { |
489 | if (local == t->parms.iph.saddr && |
490 | - (!dev || !t->parms.link || dev->iflink == t->parms.link) && |
491 | + (!dev || !t->parms.link || dev->ifindex == t->parms.link) && |
492 | (t->dev->flags & IFF_UP)) |
493 | return t; |
494 | } |
495 | diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c |
496 | index 66c718854e5a..1a87659a6139 100644 |
497 | --- a/net/ipv6/tcp_ipv6.c |
498 | +++ b/net/ipv6/tcp_ipv6.c |
499 | @@ -1651,6 +1651,7 @@ static const struct inet_connection_sock_af_ops ipv6_specific = { |
500 | .compat_setsockopt = compat_ipv6_setsockopt, |
501 | .compat_getsockopt = compat_ipv6_getsockopt, |
502 | #endif |
503 | + .mtu_reduced = tcp_v6_mtu_reduced, |
504 | }; |
505 | |
506 | #ifdef CONFIG_TCP_MD5SIG |
507 | @@ -1682,6 +1683,7 @@ static const struct inet_connection_sock_af_ops ipv6_mapped = { |
508 | .compat_setsockopt = compat_ipv6_setsockopt, |
509 | .compat_getsockopt = compat_ipv6_getsockopt, |
510 | #endif |
511 | + .mtu_reduced = tcp_v4_mtu_reduced, |
512 | }; |
513 | |
514 | #ifdef CONFIG_TCP_MD5SIG |
515 | @@ -1919,7 +1921,6 @@ struct proto tcpv6_prot = { |
516 | .sendpage = tcp_sendpage, |
517 | .backlog_rcv = tcp_v6_do_rcv, |
518 | .release_cb = tcp_release_cb, |
519 | - .mtu_reduced = tcp_v6_mtu_reduced, |
520 | .hash = tcp_v6_hash, |
521 | .unhash = inet_unhash, |
522 | .get_port = inet_csk_get_port, |
523 | diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c |
524 | index 164fa9dcd97d..c3ae2411650c 100644 |
525 | --- a/net/l2tp/l2tp_ppp.c |
526 | +++ b/net/l2tp/l2tp_ppp.c |
527 | @@ -756,7 +756,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, |
528 | /* If PMTU discovery was enabled, use the MTU that was discovered */ |
529 | dst = sk_dst_get(tunnel->sock); |
530 | if (dst != NULL) { |
531 | - u32 pmtu = dst_mtu(__sk_dst_get(tunnel->sock)); |
532 | + u32 pmtu = dst_mtu(dst); |
533 | + |
534 | if (pmtu != 0) |
535 | session->mtu = session->mru = pmtu - |
536 | PPPOL2TP_HEADER_OVERHEAD; |
537 | diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c |
538 | index 894b6cbdd929..c4779ca59032 100644 |
539 | --- a/net/openvswitch/actions.c |
540 | +++ b/net/openvswitch/actions.c |
541 | @@ -40,6 +40,9 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, |
542 | |
543 | static int make_writable(struct sk_buff *skb, int write_len) |
544 | { |
545 | + if (!pskb_may_pull(skb, write_len)) |
546 | + return -ENOMEM; |
547 | + |
548 | if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) |
549 | return 0; |
550 | |
551 | @@ -68,6 +71,8 @@ static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci) |
552 | |
553 | vlan_set_encap_proto(skb, vhdr); |
554 | skb->mac_header += VLAN_HLEN; |
555 | + if (skb_network_offset(skb) < ETH_HLEN) |
556 | + skb_set_network_header(skb, ETH_HLEN); |
557 | skb_reset_mac_len(skb); |
558 | |
559 | return 0; |
560 | diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c |
561 | index e8b5a0dfca21..81b4b816f131 100644 |
562 | --- a/net/packet/af_packet.c |
563 | +++ b/net/packet/af_packet.c |
564 | @@ -565,6 +565,7 @@ static void init_prb_bdqc(struct packet_sock *po, |
565 | p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov); |
566 | p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv; |
567 | |
568 | + p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv); |
569 | prb_init_ft_ops(p1, req_u); |
570 | prb_setup_retire_blk_timer(po, tx_ring); |
571 | prb_open_block(p1, pbd); |
572 | @@ -1803,6 +1804,18 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, |
573 | if ((int)snaplen < 0) |
574 | snaplen = 0; |
575 | } |
576 | + } else if (unlikely(macoff + snaplen > |
577 | + GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) { |
578 | + u32 nval; |
579 | + |
580 | + nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff; |
581 | + pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n", |
582 | + snaplen, nval, macoff); |
583 | + snaplen = nval; |
584 | + if (unlikely((int)snaplen < 0)) { |
585 | + snaplen = 0; |
586 | + macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len; |
587 | + } |
588 | } |
589 | spin_lock(&sk->sk_receive_queue.lock); |
590 | h.raw = packet_current_rx_frame(po, skb, |
591 | @@ -3642,6 +3655,10 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, |
592 | goto out; |
593 | if (unlikely(req->tp_block_size & (PAGE_SIZE - 1))) |
594 | goto out; |
595 | + if (po->tp_version >= TPACKET_V3 && |
596 | + (int)(req->tp_block_size - |
597 | + BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0) |
598 | + goto out; |
599 | if (unlikely(req->tp_frame_size < po->tp_hdrlen + |
600 | po->tp_reserve)) |
601 | goto out; |
602 | diff --git a/net/packet/internal.h b/net/packet/internal.h |
603 | index 1035fa2d909c..ca086c0c2c08 100644 |
604 | --- a/net/packet/internal.h |
605 | +++ b/net/packet/internal.h |
606 | @@ -29,6 +29,7 @@ struct tpacket_kbdq_core { |
607 | char *pkblk_start; |
608 | char *pkblk_end; |
609 | int kblk_size; |
610 | + unsigned int max_frame_len; |
611 | unsigned int knum_blocks; |
612 | uint64_t knxt_seq_num; |
613 | char *prev; |
614 | diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c |
615 | index 6eb26403de6a..edc204b05c82 100644 |
616 | --- a/net/sctp/sm_statefuns.c |
617 | +++ b/net/sctp/sm_statefuns.c |
618 | @@ -1782,9 +1782,22 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(struct net *net, |
619 | /* Update the content of current association. */ |
620 | sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); |
621 | sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); |
622 | - sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, |
623 | - SCTP_STATE(SCTP_STATE_ESTABLISHED)); |
624 | - sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); |
625 | + if (sctp_state(asoc, SHUTDOWN_PENDING) && |
626 | + (sctp_sstate(asoc->base.sk, CLOSING) || |
627 | + sock_flag(asoc->base.sk, SOCK_DEAD))) { |
628 | + /* if were currently in SHUTDOWN_PENDING, but the socket |
629 | + * has been closed by user, don't transition to ESTABLISHED. |
630 | + * Instead trigger SHUTDOWN bundled with COOKIE_ACK. |
631 | + */ |
632 | + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); |
633 | + return sctp_sf_do_9_2_start_shutdown(net, ep, asoc, |
634 | + SCTP_ST_CHUNK(0), NULL, |
635 | + commands); |
636 | + } else { |
637 | + sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, |
638 | + SCTP_STATE(SCTP_STATE_ESTABLISHED)); |
639 | + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); |
640 | + } |
641 | return SCTP_DISPOSITION_CONSUME; |
642 | |
643 | nomem_ev: |