Contents of /trunk/kernel-alx/patches-4.4/0149-4.4.50-all-fixes.patch
Parent Directory | Revision Log
Revision 2885 -
(show annotations)
(download)
Mon Mar 27 13:49:23 2017 UTC (7 years, 6 months ago) by niro
File size: 24123 byte(s)
linux-4.4.50
1 | diff --git a/Makefile b/Makefile |
2 | index 5fab6d4068b5..10993715abb8 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,6 +1,6 @@ |
6 | VERSION = 4 |
7 | PATCHLEVEL = 4 |
8 | -SUBLEVEL = 49 |
9 | +SUBLEVEL = 50 |
10 | EXTRAVERSION = |
11 | NAME = Blurry Fish Butt |
12 | |
13 | diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c |
14 | index bbff8ec6713e..28a4b34310b2 100644 |
15 | --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c |
16 | +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c |
17 | @@ -502,8 +502,11 @@ void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv) |
18 | return; |
19 | |
20 | for (ring = 0; ring < priv->rx_ring_num; ring++) { |
21 | - if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) |
22 | + if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) { |
23 | + local_bh_disable(); |
24 | napi_reschedule(&priv->rx_cq[ring]->napi); |
25 | + local_bh_enable(); |
26 | + } |
27 | } |
28 | } |
29 | |
30 | diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c |
31 | index dc7d970bd1c0..effcdbfb06e9 100644 |
32 | --- a/drivers/net/loopback.c |
33 | +++ b/drivers/net/loopback.c |
34 | @@ -164,6 +164,7 @@ static void loopback_setup(struct net_device *dev) |
35 | { |
36 | dev->mtu = 64 * 1024; |
37 | dev->hard_header_len = ETH_HLEN; /* 14 */ |
38 | + dev->min_header_len = ETH_HLEN; /* 14 */ |
39 | dev->addr_len = ETH_ALEN; /* 6 */ |
40 | dev->type = ARPHRD_LOOPBACK; /* 0x0001*/ |
41 | dev->flags = IFF_LOOPBACK; |
42 | diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c |
43 | index 159a68782bec..79de9608ac48 100644 |
44 | --- a/drivers/net/macvtap.c |
45 | +++ b/drivers/net/macvtap.c |
46 | @@ -725,7 +725,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, |
47 | ssize_t n; |
48 | |
49 | if (q->flags & IFF_VNET_HDR) { |
50 | - vnet_hdr_len = q->vnet_hdr_sz; |
51 | + vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz); |
52 | |
53 | err = -EINVAL; |
54 | if (len < vnet_hdr_len) |
55 | @@ -865,7 +865,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q, |
56 | |
57 | if (q->flags & IFF_VNET_HDR) { |
58 | struct virtio_net_hdr vnet_hdr; |
59 | - vnet_hdr_len = q->vnet_hdr_sz; |
60 | + vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz); |
61 | if (iov_iter_count(iter) < vnet_hdr_len) |
62 | return -EINVAL; |
63 | |
64 | diff --git a/drivers/net/tun.c b/drivers/net/tun.c |
65 | index 111b972e3053..c31d8e74f131 100644 |
66 | --- a/drivers/net/tun.c |
67 | +++ b/drivers/net/tun.c |
68 | @@ -1108,9 +1108,11 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, |
69 | } |
70 | |
71 | if (tun->flags & IFF_VNET_HDR) { |
72 | - if (len < tun->vnet_hdr_sz) |
73 | + int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); |
74 | + |
75 | + if (len < vnet_hdr_sz) |
76 | return -EINVAL; |
77 | - len -= tun->vnet_hdr_sz; |
78 | + len -= vnet_hdr_sz; |
79 | |
80 | n = copy_from_iter(&gso, sizeof(gso), from); |
81 | if (n != sizeof(gso)) |
82 | @@ -1122,7 +1124,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, |
83 | |
84 | if (tun16_to_cpu(tun, gso.hdr_len) > len) |
85 | return -EINVAL; |
86 | - iov_iter_advance(from, tun->vnet_hdr_sz - sizeof(gso)); |
87 | + iov_iter_advance(from, vnet_hdr_sz - sizeof(gso)); |
88 | } |
89 | |
90 | if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) { |
91 | @@ -1301,7 +1303,7 @@ static ssize_t tun_put_user(struct tun_struct *tun, |
92 | vlan_hlen = VLAN_HLEN; |
93 | |
94 | if (tun->flags & IFF_VNET_HDR) |
95 | - vnet_hdr_sz = tun->vnet_hdr_sz; |
96 | + vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); |
97 | |
98 | total = skb->len + vlan_hlen + vnet_hdr_sz; |
99 | |
100 | diff --git a/include/linux/can/core.h b/include/linux/can/core.h |
101 | index a0875001b13c..df08a41d5be5 100644 |
102 | --- a/include/linux/can/core.h |
103 | +++ b/include/linux/can/core.h |
104 | @@ -45,10 +45,9 @@ struct can_proto { |
105 | extern int can_proto_register(const struct can_proto *cp); |
106 | extern void can_proto_unregister(const struct can_proto *cp); |
107 | |
108 | -extern int can_rx_register(struct net_device *dev, canid_t can_id, |
109 | - canid_t mask, |
110 | - void (*func)(struct sk_buff *, void *), |
111 | - void *data, char *ident); |
112 | +int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask, |
113 | + void (*func)(struct sk_buff *, void *), |
114 | + void *data, char *ident, struct sock *sk); |
115 | |
116 | extern void can_rx_unregister(struct net_device *dev, canid_t can_id, |
117 | canid_t mask, |
118 | diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h |
119 | index 93a6a2c66d15..4035bbe40971 100644 |
120 | --- a/include/linux/netdevice.h |
121 | +++ b/include/linux/netdevice.h |
122 | @@ -1399,6 +1399,7 @@ enum netdev_priv_flags { |
123 | * @mtu: Interface MTU value |
124 | * @type: Interface hardware type |
125 | * @hard_header_len: Maximum hardware header length. |
126 | + * @min_header_len: Minimum hardware header length |
127 | * |
128 | * @needed_headroom: Extra headroom the hardware may need, but not in all |
129 | * cases can this be guaranteed |
130 | @@ -1619,6 +1620,7 @@ struct net_device { |
131 | unsigned int mtu; |
132 | unsigned short type; |
133 | unsigned short hard_header_len; |
134 | + unsigned short min_header_len; |
135 | |
136 | unsigned short needed_headroom; |
137 | unsigned short needed_tailroom; |
138 | @@ -2541,6 +2543,8 @@ static inline bool dev_validate_header(const struct net_device *dev, |
139 | { |
140 | if (likely(len >= dev->hard_header_len)) |
141 | return true; |
142 | + if (len < dev->min_header_len) |
143 | + return false; |
144 | |
145 | if (capable(CAP_SYS_RAWIO)) { |
146 | memset(ll_header + len, 0, dev->hard_header_len - len); |
147 | diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h |
148 | index 3ebb168b9afc..a34b141f125f 100644 |
149 | --- a/include/net/cipso_ipv4.h |
150 | +++ b/include/net/cipso_ipv4.h |
151 | @@ -309,6 +309,10 @@ static inline int cipso_v4_validate(const struct sk_buff *skb, |
152 | } |
153 | |
154 | for (opt_iter = 6; opt_iter < opt_len;) { |
155 | + if (opt_iter + 1 == opt_len) { |
156 | + err_offset = opt_iter; |
157 | + goto out; |
158 | + } |
159 | tag_len = opt[opt_iter + 1]; |
160 | if ((tag_len == 0) || (tag_len > (opt_len - opt_iter))) { |
161 | err_offset = opt_iter + 1; |
162 | diff --git a/net/can/af_can.c b/net/can/af_can.c |
163 | index 166d436196c1..928f58064098 100644 |
164 | --- a/net/can/af_can.c |
165 | +++ b/net/can/af_can.c |
166 | @@ -445,6 +445,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask, |
167 | * @func: callback function on filter match |
168 | * @data: returned parameter for callback function |
169 | * @ident: string for calling module identification |
170 | + * @sk: socket pointer (might be NULL) |
171 | * |
172 | * Description: |
173 | * Invokes the callback function with the received sk_buff and the given |
174 | @@ -468,7 +469,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask, |
175 | */ |
176 | int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask, |
177 | void (*func)(struct sk_buff *, void *), void *data, |
178 | - char *ident) |
179 | + char *ident, struct sock *sk) |
180 | { |
181 | struct receiver *r; |
182 | struct hlist_head *rl; |
183 | @@ -496,6 +497,7 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask, |
184 | r->func = func; |
185 | r->data = data; |
186 | r->ident = ident; |
187 | + r->sk = sk; |
188 | |
189 | hlist_add_head_rcu(&r->list, rl); |
190 | d->entries++; |
191 | @@ -520,8 +522,11 @@ EXPORT_SYMBOL(can_rx_register); |
192 | static void can_rx_delete_receiver(struct rcu_head *rp) |
193 | { |
194 | struct receiver *r = container_of(rp, struct receiver, rcu); |
195 | + struct sock *sk = r->sk; |
196 | |
197 | kmem_cache_free(rcv_cache, r); |
198 | + if (sk) |
199 | + sock_put(sk); |
200 | } |
201 | |
202 | /** |
203 | @@ -596,8 +601,11 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask, |
204 | spin_unlock(&can_rcvlists_lock); |
205 | |
206 | /* schedule the receiver item for deletion */ |
207 | - if (r) |
208 | + if (r) { |
209 | + if (r->sk) |
210 | + sock_hold(r->sk); |
211 | call_rcu(&r->rcu, can_rx_delete_receiver); |
212 | + } |
213 | } |
214 | EXPORT_SYMBOL(can_rx_unregister); |
215 | |
216 | diff --git a/net/can/af_can.h b/net/can/af_can.h |
217 | index fca0fe9fc45a..b86f5129e838 100644 |
218 | --- a/net/can/af_can.h |
219 | +++ b/net/can/af_can.h |
220 | @@ -50,13 +50,14 @@ |
221 | |
222 | struct receiver { |
223 | struct hlist_node list; |
224 | - struct rcu_head rcu; |
225 | canid_t can_id; |
226 | canid_t mask; |
227 | unsigned long matches; |
228 | void (*func)(struct sk_buff *, void *); |
229 | void *data; |
230 | char *ident; |
231 | + struct sock *sk; |
232 | + struct rcu_head rcu; |
233 | }; |
234 | |
235 | #define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS) |
236 | diff --git a/net/can/bcm.c b/net/can/bcm.c |
237 | index 24d66c1cc0cd..4ccfd356baed 100644 |
238 | --- a/net/can/bcm.c |
239 | +++ b/net/can/bcm.c |
240 | @@ -1179,7 +1179,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, |
241 | err = can_rx_register(dev, op->can_id, |
242 | REGMASK(op->can_id), |
243 | bcm_rx_handler, op, |
244 | - "bcm"); |
245 | + "bcm", sk); |
246 | |
247 | op->rx_reg_dev = dev; |
248 | dev_put(dev); |
249 | @@ -1188,7 +1188,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, |
250 | } else |
251 | err = can_rx_register(NULL, op->can_id, |
252 | REGMASK(op->can_id), |
253 | - bcm_rx_handler, op, "bcm"); |
254 | + bcm_rx_handler, op, "bcm", sk); |
255 | if (err) { |
256 | /* this bcm rx op is broken -> remove it */ |
257 | list_del(&op->list); |
258 | diff --git a/net/can/gw.c b/net/can/gw.c |
259 | index 455168718c2e..77c8af4047ef 100644 |
260 | --- a/net/can/gw.c |
261 | +++ b/net/can/gw.c |
262 | @@ -442,7 +442,7 @@ static inline int cgw_register_filter(struct cgw_job *gwj) |
263 | { |
264 | return can_rx_register(gwj->src.dev, gwj->ccgw.filter.can_id, |
265 | gwj->ccgw.filter.can_mask, can_can_gw_rcv, |
266 | - gwj, "gw"); |
267 | + gwj, "gw", NULL); |
268 | } |
269 | |
270 | static inline void cgw_unregister_filter(struct cgw_job *gwj) |
271 | diff --git a/net/can/raw.c b/net/can/raw.c |
272 | index 56af689ca999..e9403a26a1d5 100644 |
273 | --- a/net/can/raw.c |
274 | +++ b/net/can/raw.c |
275 | @@ -190,7 +190,7 @@ static int raw_enable_filters(struct net_device *dev, struct sock *sk, |
276 | for (i = 0; i < count; i++) { |
277 | err = can_rx_register(dev, filter[i].can_id, |
278 | filter[i].can_mask, |
279 | - raw_rcv, sk, "raw"); |
280 | + raw_rcv, sk, "raw", sk); |
281 | if (err) { |
282 | /* clean up successfully registered filters */ |
283 | while (--i >= 0) |
284 | @@ -211,7 +211,7 @@ static int raw_enable_errfilter(struct net_device *dev, struct sock *sk, |
285 | |
286 | if (err_mask) |
287 | err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG, |
288 | - raw_rcv, sk, "raw"); |
289 | + raw_rcv, sk, "raw", sk); |
290 | |
291 | return err; |
292 | } |
293 | diff --git a/net/core/dev.c b/net/core/dev.c |
294 | index 0798a0f1b395..08215a85c742 100644 |
295 | --- a/net/core/dev.c |
296 | +++ b/net/core/dev.c |
297 | @@ -1676,24 +1676,19 @@ EXPORT_SYMBOL_GPL(net_dec_ingress_queue); |
298 | |
299 | static struct static_key netstamp_needed __read_mostly; |
300 | #ifdef HAVE_JUMP_LABEL |
301 | -/* We are not allowed to call static_key_slow_dec() from irq context |
302 | - * If net_disable_timestamp() is called from irq context, defer the |
303 | - * static_key_slow_dec() calls. |
304 | - */ |
305 | static atomic_t netstamp_needed_deferred; |
306 | -#endif |
307 | - |
308 | -void net_enable_timestamp(void) |
309 | +static void netstamp_clear(struct work_struct *work) |
310 | { |
311 | -#ifdef HAVE_JUMP_LABEL |
312 | int deferred = atomic_xchg(&netstamp_needed_deferred, 0); |
313 | |
314 | - if (deferred) { |
315 | - while (--deferred) |
316 | - static_key_slow_dec(&netstamp_needed); |
317 | - return; |
318 | - } |
319 | + while (deferred--) |
320 | + static_key_slow_dec(&netstamp_needed); |
321 | +} |
322 | +static DECLARE_WORK(netstamp_work, netstamp_clear); |
323 | #endif |
324 | + |
325 | +void net_enable_timestamp(void) |
326 | +{ |
327 | static_key_slow_inc(&netstamp_needed); |
328 | } |
329 | EXPORT_SYMBOL(net_enable_timestamp); |
330 | @@ -1701,12 +1696,12 @@ EXPORT_SYMBOL(net_enable_timestamp); |
331 | void net_disable_timestamp(void) |
332 | { |
333 | #ifdef HAVE_JUMP_LABEL |
334 | - if (in_interrupt()) { |
335 | - atomic_inc(&netstamp_needed_deferred); |
336 | - return; |
337 | - } |
338 | -#endif |
339 | + /* net_disable_timestamp() can be called from non process context */ |
340 | + atomic_inc(&netstamp_needed_deferred); |
341 | + schedule_work(&netstamp_work); |
342 | +#else |
343 | static_key_slow_dec(&netstamp_needed); |
344 | +#endif |
345 | } |
346 | EXPORT_SYMBOL(net_disable_timestamp); |
347 | |
348 | diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c |
349 | index de85d4e1cf43..52dcd414c2af 100644 |
350 | --- a/net/ethernet/eth.c |
351 | +++ b/net/ethernet/eth.c |
352 | @@ -353,6 +353,7 @@ void ether_setup(struct net_device *dev) |
353 | dev->header_ops = ð_header_ops; |
354 | dev->type = ARPHRD_ETHER; |
355 | dev->hard_header_len = ETH_HLEN; |
356 | + dev->min_header_len = ETH_HLEN; |
357 | dev->mtu = ETH_DATA_LEN; |
358 | dev->addr_len = ETH_ALEN; |
359 | dev->tx_queue_len = 1000; /* Ethernet wants good queues */ |
360 | diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c |
361 | index bdb2a07ec363..6cc3e1d602fb 100644 |
362 | --- a/net/ipv4/cipso_ipv4.c |
363 | +++ b/net/ipv4/cipso_ipv4.c |
364 | @@ -1657,6 +1657,10 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option) |
365 | goto validate_return_locked; |
366 | } |
367 | |
368 | + if (opt_iter + 1 == opt_len) { |
369 | + err_offset = opt_iter; |
370 | + goto validate_return_locked; |
371 | + } |
372 | tag_len = tag[1]; |
373 | if (tag_len > (opt_len - opt_iter)) { |
374 | err_offset = opt_iter + 1; |
375 | diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c |
376 | index 9ce202549e7a..bc14c5bb124b 100644 |
377 | --- a/net/ipv4/ip_sockglue.c |
378 | +++ b/net/ipv4/ip_sockglue.c |
379 | @@ -1192,7 +1192,14 @@ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb) |
380 | pktinfo->ipi_ifindex = 0; |
381 | pktinfo->ipi_spec_dst.s_addr = 0; |
382 | } |
383 | - skb_dst_drop(skb); |
384 | + /* We need to keep the dst for __ip_options_echo() |
385 | + * We could restrict the test to opt.ts_needtime || opt.srr, |
386 | + * but the following is good enough as IP options are not often used. |
387 | + */ |
388 | + if (unlikely(IPCB(skb)->opt.optlen)) |
389 | + skb_dst_force(skb); |
390 | + else |
391 | + skb_dst_drop(skb); |
392 | } |
393 | |
394 | int ip_setsockopt(struct sock *sk, int level, |
395 | diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c |
396 | index 23160d2b3f71..3a00512addbc 100644 |
397 | --- a/net/ipv4/ping.c |
398 | +++ b/net/ipv4/ping.c |
399 | @@ -645,6 +645,8 @@ static int ping_v4_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh, |
400 | { |
401 | struct sk_buff *skb = skb_peek(&sk->sk_write_queue); |
402 | |
403 | + if (!skb) |
404 | + return 0; |
405 | pfh->wcheck = csum_partial((char *)&pfh->icmph, |
406 | sizeof(struct icmphdr), pfh->wcheck); |
407 | pfh->icmph.checksum = csum_fold(pfh->wcheck); |
408 | diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c |
409 | index 69daa81736f6..600dcda840d1 100644 |
410 | --- a/net/ipv4/tcp.c |
411 | +++ b/net/ipv4/tcp.c |
412 | @@ -783,6 +783,12 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos, |
413 | ret = -EAGAIN; |
414 | break; |
415 | } |
416 | + /* if __tcp_splice_read() got nothing while we have |
417 | + * an skb in receive queue, we do not want to loop. |
418 | + * This might happen with URG data. |
419 | + */ |
420 | + if (!skb_queue_empty(&sk->sk_receive_queue)) |
421 | + break; |
422 | sk_wait_data(sk, &timeo, NULL); |
423 | if (signal_pending(current)) { |
424 | ret = sock_intr_errno(timeo); |
425 | diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c |
426 | index 0795647e94c6..de95714d021c 100644 |
427 | --- a/net/ipv4/tcp_output.c |
428 | +++ b/net/ipv4/tcp_output.c |
429 | @@ -2383,9 +2383,11 @@ u32 __tcp_select_window(struct sock *sk) |
430 | int full_space = min_t(int, tp->window_clamp, allowed_space); |
431 | int window; |
432 | |
433 | - if (mss > full_space) |
434 | + if (unlikely(mss > full_space)) { |
435 | mss = full_space; |
436 | - |
437 | + if (mss <= 0) |
438 | + return 0; |
439 | + } |
440 | if (free_space < (full_space >> 1)) { |
441 | icsk->icsk_ack.quick = 0; |
442 | |
443 | diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c |
444 | index 17430f341073..e89135828c3d 100644 |
445 | --- a/net/ipv6/ip6_gre.c |
446 | +++ b/net/ipv6/ip6_gre.c |
447 | @@ -55,6 +55,7 @@ |
448 | #include <net/ip6_fib.h> |
449 | #include <net/ip6_route.h> |
450 | #include <net/ip6_tunnel.h> |
451 | +#include <net/gre.h> |
452 | |
453 | |
454 | static bool log_ecn_error = true; |
455 | @@ -367,35 +368,37 @@ static void ip6gre_tunnel_uninit(struct net_device *dev) |
456 | |
457 | |
458 | static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt, |
459 | - u8 type, u8 code, int offset, __be32 info) |
460 | + u8 type, u8 code, int offset, __be32 info) |
461 | { |
462 | - const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data; |
463 | - __be16 *p = (__be16 *)(skb->data + offset); |
464 | - int grehlen = offset + 4; |
465 | + const struct gre_base_hdr *greh; |
466 | + const struct ipv6hdr *ipv6h; |
467 | + int grehlen = sizeof(*greh); |
468 | struct ip6_tnl *t; |
469 | + int key_off = 0; |
470 | __be16 flags; |
471 | + __be32 key; |
472 | |
473 | - flags = p[0]; |
474 | - if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) { |
475 | - if (flags&(GRE_VERSION|GRE_ROUTING)) |
476 | - return; |
477 | - if (flags&GRE_KEY) { |
478 | - grehlen += 4; |
479 | - if (flags&GRE_CSUM) |
480 | - grehlen += 4; |
481 | - } |
482 | + if (!pskb_may_pull(skb, offset + grehlen)) |
483 | + return; |
484 | + greh = (const struct gre_base_hdr *)(skb->data + offset); |
485 | + flags = greh->flags; |
486 | + if (flags & (GRE_VERSION | GRE_ROUTING)) |
487 | + return; |
488 | + if (flags & GRE_CSUM) |
489 | + grehlen += 4; |
490 | + if (flags & GRE_KEY) { |
491 | + key_off = grehlen + offset; |
492 | + grehlen += 4; |
493 | } |
494 | |
495 | - /* If only 8 bytes returned, keyed message will be dropped here */ |
496 | - if (!pskb_may_pull(skb, grehlen)) |
497 | + if (!pskb_may_pull(skb, offset + grehlen)) |
498 | return; |
499 | ipv6h = (const struct ipv6hdr *)skb->data; |
500 | - p = (__be16 *)(skb->data + offset); |
501 | + greh = (const struct gre_base_hdr *)(skb->data + offset); |
502 | + key = key_off ? *(__be32 *)(skb->data + key_off) : 0; |
503 | |
504 | t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr, |
505 | - flags & GRE_KEY ? |
506 | - *(((__be32 *)p) + (grehlen / 4) - 1) : 0, |
507 | - p[1]); |
508 | + key, greh->protocol); |
509 | if (!t) |
510 | return; |
511 | |
512 | diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c |
513 | index 2994d1f1a661..6c6161763c2f 100644 |
514 | --- a/net/ipv6/ip6_tunnel.c |
515 | +++ b/net/ipv6/ip6_tunnel.c |
516 | @@ -479,18 +479,19 @@ ip6_tnl_dev_uninit(struct net_device *dev) |
517 | |
518 | __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw) |
519 | { |
520 | - const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw; |
521 | - __u8 nexthdr = ipv6h->nexthdr; |
522 | - __u16 off = sizeof(*ipv6h); |
523 | + const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw; |
524 | + unsigned int nhoff = raw - skb->data; |
525 | + unsigned int off = nhoff + sizeof(*ipv6h); |
526 | + u8 next, nexthdr = ipv6h->nexthdr; |
527 | |
528 | while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) { |
529 | - __u16 optlen = 0; |
530 | struct ipv6_opt_hdr *hdr; |
531 | - if (raw + off + sizeof(*hdr) > skb->data && |
532 | - !pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr))) |
533 | + u16 optlen; |
534 | + |
535 | + if (!pskb_may_pull(skb, off + sizeof(*hdr))) |
536 | break; |
537 | |
538 | - hdr = (struct ipv6_opt_hdr *) (raw + off); |
539 | + hdr = (struct ipv6_opt_hdr *)(skb->data + off); |
540 | if (nexthdr == NEXTHDR_FRAGMENT) { |
541 | struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr; |
542 | if (frag_hdr->frag_off) |
543 | @@ -501,20 +502,29 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw) |
544 | } else { |
545 | optlen = ipv6_optlen(hdr); |
546 | } |
547 | + /* cache hdr->nexthdr, since pskb_may_pull() might |
548 | + * invalidate hdr |
549 | + */ |
550 | + next = hdr->nexthdr; |
551 | if (nexthdr == NEXTHDR_DEST) { |
552 | - __u16 i = off + 2; |
553 | + u16 i = 2; |
554 | + |
555 | + /* Remember : hdr is no longer valid at this point. */ |
556 | + if (!pskb_may_pull(skb, off + optlen)) |
557 | + break; |
558 | + |
559 | while (1) { |
560 | struct ipv6_tlv_tnl_enc_lim *tel; |
561 | |
562 | /* No more room for encapsulation limit */ |
563 | - if (i + sizeof (*tel) > off + optlen) |
564 | + if (i + sizeof(*tel) > optlen) |
565 | break; |
566 | |
567 | - tel = (struct ipv6_tlv_tnl_enc_lim *) &raw[i]; |
568 | + tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i); |
569 | /* return index of option if found and valid */ |
570 | if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT && |
571 | tel->length == 1) |
572 | - return i; |
573 | + return i + off - nhoff; |
574 | /* else jump to next option */ |
575 | if (tel->type) |
576 | i += tel->length + 2; |
577 | @@ -522,7 +532,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw) |
578 | i++; |
579 | } |
580 | } |
581 | - nexthdr = hdr->nexthdr; |
582 | + nexthdr = next; |
583 | off += optlen; |
584 | } |
585 | return 0; |
586 | diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c |
587 | index 3da2b16356eb..184f0fe35dc6 100644 |
588 | --- a/net/ipv6/sit.c |
589 | +++ b/net/ipv6/sit.c |
590 | @@ -1389,6 +1389,7 @@ static int ipip6_tunnel_init(struct net_device *dev) |
591 | tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst); |
592 | if (!tunnel->dst_cache) { |
593 | free_percpu(dev->tstats); |
594 | + dev->tstats = NULL; |
595 | return -ENOMEM; |
596 | } |
597 | |
598 | diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c |
599 | index 5f581616bf6a..76a8c8057a23 100644 |
600 | --- a/net/ipv6/tcp_ipv6.c |
601 | +++ b/net/ipv6/tcp_ipv6.c |
602 | @@ -974,6 +974,16 @@ drop: |
603 | return 0; /* don't send reset */ |
604 | } |
605 | |
606 | +static void tcp_v6_restore_cb(struct sk_buff *skb) |
607 | +{ |
608 | + /* We need to move header back to the beginning if xfrm6_policy_check() |
609 | + * and tcp_v6_fill_cb() are going to be called again. |
610 | + * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there. |
611 | + */ |
612 | + memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6, |
613 | + sizeof(struct inet6_skb_parm)); |
614 | +} |
615 | + |
616 | static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, |
617 | struct request_sock *req, |
618 | struct dst_entry *dst, |
619 | @@ -1163,8 +1173,10 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff * |
620 | sk_gfp_atomic(sk, GFP_ATOMIC)); |
621 | consume_skb(ireq->pktopts); |
622 | ireq->pktopts = NULL; |
623 | - if (newnp->pktoptions) |
624 | + if (newnp->pktoptions) { |
625 | + tcp_v6_restore_cb(newnp->pktoptions); |
626 | skb_set_owner_r(newnp->pktoptions, newsk); |
627 | + } |
628 | } |
629 | } |
630 | |
631 | @@ -1179,16 +1191,6 @@ out: |
632 | return NULL; |
633 | } |
634 | |
635 | -static void tcp_v6_restore_cb(struct sk_buff *skb) |
636 | -{ |
637 | - /* We need to move header back to the beginning if xfrm6_policy_check() |
638 | - * and tcp_v6_fill_cb() are going to be called again. |
639 | - * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there. |
640 | - */ |
641 | - memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6, |
642 | - sizeof(struct inet6_skb_parm)); |
643 | -} |
644 | - |
645 | /* The socket must have it's spinlock held when we get |
646 | * here, unless it is a TCP_LISTEN socket. |
647 | * |
648 | diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h |
649 | index 5871537af387..763e8e241ce3 100644 |
650 | --- a/net/l2tp/l2tp_core.h |
651 | +++ b/net/l2tp/l2tp_core.h |
652 | @@ -273,6 +273,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, |
653 | int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, |
654 | const struct l2tp_nl_cmd_ops *ops); |
655 | void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type); |
656 | +int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg); |
657 | |
658 | /* Session reference counts. Incremented when code obtains a reference |
659 | * to a session. |
660 | diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c |
661 | index d0e906d39642..445b7cd0826a 100644 |
662 | --- a/net/l2tp/l2tp_ip.c |
663 | +++ b/net/l2tp/l2tp_ip.c |
664 | @@ -11,6 +11,7 @@ |
665 | |
666 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
667 | |
668 | +#include <asm/ioctls.h> |
669 | #include <linux/icmp.h> |
670 | #include <linux/module.h> |
671 | #include <linux/skbuff.h> |
672 | @@ -555,6 +556,30 @@ out: |
673 | return err ? err : copied; |
674 | } |
675 | |
676 | +int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg) |
677 | +{ |
678 | + struct sk_buff *skb; |
679 | + int amount; |
680 | + |
681 | + switch (cmd) { |
682 | + case SIOCOUTQ: |
683 | + amount = sk_wmem_alloc_get(sk); |
684 | + break; |
685 | + case SIOCINQ: |
686 | + spin_lock_bh(&sk->sk_receive_queue.lock); |
687 | + skb = skb_peek(&sk->sk_receive_queue); |
688 | + amount = skb ? skb->len : 0; |
689 | + spin_unlock_bh(&sk->sk_receive_queue.lock); |
690 | + break; |
691 | + |
692 | + default: |
693 | + return -ENOIOCTLCMD; |
694 | + } |
695 | + |
696 | + return put_user(amount, (int __user *)arg); |
697 | +} |
698 | +EXPORT_SYMBOL(l2tp_ioctl); |
699 | + |
700 | static struct proto l2tp_ip_prot = { |
701 | .name = "L2TP/IP", |
702 | .owner = THIS_MODULE, |
703 | @@ -563,7 +588,7 @@ static struct proto l2tp_ip_prot = { |
704 | .bind = l2tp_ip_bind, |
705 | .connect = l2tp_ip_connect, |
706 | .disconnect = l2tp_ip_disconnect, |
707 | - .ioctl = udp_ioctl, |
708 | + .ioctl = l2tp_ioctl, |
709 | .destroy = l2tp_ip_destroy_sock, |
710 | .setsockopt = ip_setsockopt, |
711 | .getsockopt = ip_getsockopt, |
712 | diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c |
713 | index 3c4f867d3633..bcdab1cba773 100644 |
714 | --- a/net/l2tp/l2tp_ip6.c |
715 | +++ b/net/l2tp/l2tp_ip6.c |
716 | @@ -714,7 +714,7 @@ static struct proto l2tp_ip6_prot = { |
717 | .bind = l2tp_ip6_bind, |
718 | .connect = l2tp_ip6_connect, |
719 | .disconnect = l2tp_ip6_disconnect, |
720 | - .ioctl = udp_ioctl, |
721 | + .ioctl = l2tp_ioctl, |
722 | .destroy = l2tp_ip6_destroy_sock, |
723 | .setsockopt = ipv6_setsockopt, |
724 | .getsockopt = ipv6_getsockopt, |
725 | diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c |
726 | index f223d1c80ccf..f2d28ed74a0a 100644 |
727 | --- a/net/packet/af_packet.c |
728 | +++ b/net/packet/af_packet.c |
729 | @@ -2637,7 +2637,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) |
730 | int vnet_hdr_len; |
731 | struct packet_sock *po = pkt_sk(sk); |
732 | unsigned short gso_type = 0; |
733 | - int hlen, tlen; |
734 | + int hlen, tlen, linear; |
735 | int extra_len = 0; |
736 | ssize_t n; |
737 | |
738 | @@ -2741,8 +2741,9 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) |
739 | err = -ENOBUFS; |
740 | hlen = LL_RESERVED_SPACE(dev); |
741 | tlen = dev->needed_tailroom; |
742 | - skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, |
743 | - __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len), |
744 | + linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len); |
745 | + linear = max(linear, min_t(int, len, dev->hard_header_len)); |
746 | + skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear, |
747 | msg->msg_flags & MSG_DONTWAIT, &err); |
748 | if (skb == NULL) |
749 | goto out_unlock; |
750 | diff --git a/net/sctp/socket.c b/net/sctp/socket.c |
751 | index b5fd4ab56156..138f2d667212 100644 |
752 | --- a/net/sctp/socket.c |
753 | +++ b/net/sctp/socket.c |
754 | @@ -6960,7 +6960,8 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, |
755 | */ |
756 | release_sock(sk); |
757 | current_timeo = schedule_timeout(current_timeo); |
758 | - BUG_ON(sk != asoc->base.sk); |
759 | + if (sk != asoc->base.sk) |
760 | + goto do_error; |
761 | lock_sock(sk); |
762 | |
763 | *timeo_p = current_timeo; |