Magellan Linux

Contents of /trunk/kernel-magellan/patches-4.8/0107-4.8.8-all-fixes.patch

Revision 2847
Tue Nov 22 13:19:43 2016 UTC by niro
File size: 62584 bytes
-linux-4.8.8

diff --git a/Makefile b/Makefile
index 4d0f28cb481d..8f18daa2c76a 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 8
-SUBLEVEL = 7
+SUBLEVEL = 8
 EXTRAVERSION =
 NAME = Psychotic Stoned Sheep
 
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index ee655ed1ff1b..1e8fceb308a5 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -53,10 +53,8 @@ static inline __sum16 csum_fold(__wsum sum)
 	return (__force __sum16)(~((__force u32)sum + tmp) >> 16);
 }
 
-static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
-					unsigned short len,
-					unsigned short proto,
-					__wsum sum)
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
+					__u8 proto, __wsum sum)
 {
 #ifdef __powerpc64__
 	unsigned long s = (__force u32)sum;
@@ -83,10 +81,8 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 16-bit checksum, already complemented
  */
-static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
-					unsigned short len,
-					unsigned short proto,
-					__wsum sum)
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
+					__u8 proto, __wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
 }
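
These helpers compute the TCP/UDP pseudo-header checksum; the change widens len to a full __u32 and narrows proto to the single byte it really is, so larger lengths can no longer truncate. A stand-alone sketch of the underlying one's-complement arithmetic (illustrative code with made-up addresses, byte-order details omitted; not the kernel's implementation):

#include <stdint.h>
#include <stdio.h>

/* Fold a wide one's-complement accumulator to 16 bits and invert,
 * mirroring csum_fold() applied to csum_tcpudp_nofold()'s result. */
static uint16_t csum_finish(uint64_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);	/* end-around carry */
	return (uint16_t)~sum;
}

int main(void)
{
	/* saddr + daddr + proto + len, the pseudo-header fields; len may
	 * now be any 32-bit value, proto is a single byte */
	uint64_t sum = 0xc0a80001ull	/* 192.168.0.1 */
		     + 0xc0a80002ull	/* 192.168.0.2 */
		     + 6		/* IPPROTO_TCP */
		     + 40;		/* TCP length */
	printf("pseudo-header checksum: 0x%04x\n", csum_finish(sum));
	return 0;
}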
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 9dbfcc0ab577..5ff64afd69f9 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -63,6 +63,8 @@ enum ipoib_flush_level {
 
 enum {
 	IPOIB_ENCAP_LEN = 4,
+	IPOIB_PSEUDO_LEN = 20,
+	IPOIB_HARD_LEN = IPOIB_ENCAP_LEN + IPOIB_PSEUDO_LEN,
 
 	IPOIB_UD_HEAD_SIZE = IB_GRH_BYTES + IPOIB_ENCAP_LEN,
 	IPOIB_UD_RX_SG = 2, /* max buffer needed for 4K mtu */
@@ -134,15 +136,21 @@ struct ipoib_header {
 	u16 reserved;
 };
 
-struct ipoib_cb {
-	struct qdisc_skb_cb qdisc_cb;
-	u8 hwaddr[INFINIBAND_ALEN];
+struct ipoib_pseudo_header {
+	u8 hwaddr[INFINIBAND_ALEN];
 };
 
-static inline struct ipoib_cb *ipoib_skb_cb(const struct sk_buff *skb)
+static inline void skb_add_pseudo_hdr(struct sk_buff *skb)
 {
-	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct ipoib_cb));
-	return (struct ipoib_cb *)skb->cb;
+	char *data = skb_push(skb, IPOIB_PSEUDO_LEN);
+
+	/*
+	 * only the ipoib header is present now, make room for a dummy
+	 * pseudo header and set skb field accordingly
+	 */
+	memset(data, 0, IPOIB_PSEUDO_LEN);
+	skb_reset_mac_header(skb);
+	skb_pull(skb, IPOIB_HARD_LEN);
 }
 
 /* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
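
The rework above replaces the old ipoib_cb stash in skb->cb with a 20-byte pseudo header carried in real skb headroom, giving IPOIB_HARD_LEN = 4 + 20 = 24 bytes of hard header. A stand-alone sketch of the alignment arithmetic the receive paths below rely on (ALIGN_UP is an assumed helper mirroring the kernel's ALIGN; constants come from the patch):

#include <stdio.h>

#define IPOIB_ENCAP_LEN   4
#define IPOIB_PSEUDO_LEN  20
#define IPOIB_HARD_LEN    (IPOIB_ENCAP_LEN + IPOIB_PSEUDO_LEN)
#define ALIGN_UP(x, a)    (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* the connected-mode RX reserve defined in ipoib_cm.c below */
	int reserve = ALIGN_UP(IPOIB_HARD_LEN, 16) - IPOIB_ENCAP_LEN;

	printf("hard header length: %d\n", IPOIB_HARD_LEN);	/* 24 */
	printf("CM RX reserve:      %d\n", reserve);		/* 28 */
	/* the 4-byte IPoIB header lands at offset 28, so the IP header
	 * starts at 32 -- still a multiple of 16 */
	return 0;
}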
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 4ad297d3de89..339a1eecdfe3 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -63,6 +63,8 @@ MODULE_PARM_DESC(cm_data_debug_level,
 #define IPOIB_CM_RX_DELAY       (3 * 256 * HZ)
 #define IPOIB_CM_RX_UPDATE_MASK (0x3)
 
+#define IPOIB_CM_RX_RESERVE     (ALIGN(IPOIB_HARD_LEN, 16) - IPOIB_ENCAP_LEN)
+
 static struct ib_qp_attr ipoib_cm_err_attr = {
 	.qp_state = IB_QPS_ERR
 };
@@ -146,15 +148,15 @@ static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
 	struct sk_buff *skb;
 	int i;
 
-	skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
+	skb = dev_alloc_skb(ALIGN(IPOIB_CM_HEAD_SIZE + IPOIB_PSEUDO_LEN, 16));
 	if (unlikely(!skb))
 		return NULL;
 
 	/*
-	 * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
+	 * IPoIB adds a IPOIB_ENCAP_LEN byte header, this will align the
 	 * IP header to a multiple of 16.
 	 */
-	skb_reserve(skb, 12);
+	skb_reserve(skb, IPOIB_CM_RX_RESERVE);
 
 	mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
 				       DMA_FROM_DEVICE);
@@ -624,9 +626,9 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 	if (wc->byte_len < IPOIB_CM_COPYBREAK) {
 		int dlen = wc->byte_len;
 
-		small_skb = dev_alloc_skb(dlen + 12);
+		small_skb = dev_alloc_skb(dlen + IPOIB_CM_RX_RESERVE);
 		if (small_skb) {
-			skb_reserve(small_skb, 12);
+			skb_reserve(small_skb, IPOIB_CM_RX_RESERVE);
 			ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
 						   dlen, DMA_FROM_DEVICE);
 			skb_copy_from_linear_data(skb, small_skb->data, dlen);
@@ -663,8 +665,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 
 copied:
 	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
-	skb_reset_mac_header(skb);
-	skb_pull(skb, IPOIB_ENCAP_LEN);
+	skb_add_pseudo_hdr(skb);
 
 	++dev->stats.rx_packets;
 	dev->stats.rx_bytes += skb->len;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index be11d5d5b8c1..830fecb6934c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -128,16 +128,15 @@ static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
 
 	buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
 
-	skb = dev_alloc_skb(buf_size + IPOIB_ENCAP_LEN);
+	skb = dev_alloc_skb(buf_size + IPOIB_HARD_LEN);
 	if (unlikely(!skb))
 		return NULL;
 
 	/*
-	 * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
-	 * header. So we need 4 more bytes to get to 48 and align the
-	 * IP header to a multiple of 16.
+	 * the IP header will be at IPOIP_HARD_LEN + IB_GRH_BYTES, that is
+	 * 64 bytes aligned
 	 */
-	skb_reserve(skb, 4);
+	skb_reserve(skb, sizeof(struct ipoib_pseudo_header));
 
 	mapping = priv->rx_ring[id].mapping;
 	mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
@@ -253,8 +252,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 	skb_pull(skb, IB_GRH_BYTES);
 
 	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
-	skb_reset_mac_header(skb);
-	skb_pull(skb, IPOIB_ENCAP_LEN);
+	skb_add_pseudo_hdr(skb);
 
 	++dev->stats.rx_packets;
 	dev->stats.rx_bytes += skb->len;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index cc1c1b062ea5..823a528ef4eb 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -925,9 +925,12 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
 			ipoib_neigh_free(neigh);
 			goto err_drop;
 		}
-		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
+		if (skb_queue_len(&neigh->queue) <
+		    IPOIB_MAX_PATH_REC_QUEUE) {
+			/* put pseudoheader back on for next time */
+			skb_push(skb, IPOIB_PSEUDO_LEN);
 			__skb_queue_tail(&neigh->queue, skb);
-		else {
+		} else {
 			ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
 				   skb_queue_len(&neigh->queue));
 			goto err_drop;
@@ -964,7 +967,7 @@ err_drop:
 }
 
 static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
-			     struct ipoib_cb *cb)
+			     struct ipoib_pseudo_header *phdr)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_path *path;
@@ -972,16 +975,18 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
 
 	spin_lock_irqsave(&priv->lock, flags);
 
-	path = __path_find(dev, cb->hwaddr + 4);
+	path = __path_find(dev, phdr->hwaddr + 4);
 	if (!path || !path->valid) {
 		int new_path = 0;
 
 		if (!path) {
-			path = path_rec_create(dev, cb->hwaddr + 4);
+			path = path_rec_create(dev, phdr->hwaddr + 4);
 			new_path = 1;
 		}
 		if (path) {
 			if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+				/* put pseudoheader back on for next time */
+				skb_push(skb, IPOIB_PSEUDO_LEN);
 				__skb_queue_tail(&path->queue, skb);
 			} else {
 				++dev->stats.tx_dropped;
@@ -1009,10 +1014,12 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
 			  be16_to_cpu(path->pathrec.dlid));
 
 		spin_unlock_irqrestore(&priv->lock, flags);
-		ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr));
+		ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
 		return;
 	} else if ((path->query || !path_rec_start(dev, path)) &&
 		   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+		/* put pseudoheader back on for next time */
+		skb_push(skb, IPOIB_PSEUDO_LEN);
 		__skb_queue_tail(&path->queue, skb);
 	} else {
 		++dev->stats.tx_dropped;
@@ -1026,13 +1033,15 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_neigh *neigh;
-	struct ipoib_cb *cb = ipoib_skb_cb(skb);
+	struct ipoib_pseudo_header *phdr;
 	struct ipoib_header *header;
 	unsigned long flags;
 
+	phdr = (struct ipoib_pseudo_header *) skb->data;
+	skb_pull(skb, sizeof(*phdr));
 	header = (struct ipoib_header *) skb->data;
 
-	if (unlikely(cb->hwaddr[4] == 0xff)) {
+	if (unlikely(phdr->hwaddr[4] == 0xff)) {
 		/* multicast, arrange "if" according to probability */
 		if ((header->proto != htons(ETH_P_IP)) &&
 		    (header->proto != htons(ETH_P_IPV6)) &&
@@ -1045,13 +1054,13 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			return NETDEV_TX_OK;
 		}
 		/* Add in the P_Key for multicast*/
-		cb->hwaddr[8] = (priv->pkey >> 8) & 0xff;
-		cb->hwaddr[9] = priv->pkey & 0xff;
+		phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
+		phdr->hwaddr[9] = priv->pkey & 0xff;
 
-		neigh = ipoib_neigh_get(dev, cb->hwaddr);
+		neigh = ipoib_neigh_get(dev, phdr->hwaddr);
 		if (likely(neigh))
 			goto send_using_neigh;
-		ipoib_mcast_send(dev, cb->hwaddr, skb);
+		ipoib_mcast_send(dev, phdr->hwaddr, skb);
 		return NETDEV_TX_OK;
 	}
 
@@ -1060,16 +1069,16 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	case htons(ETH_P_IP):
 	case htons(ETH_P_IPV6):
 	case htons(ETH_P_TIPC):
-		neigh = ipoib_neigh_get(dev, cb->hwaddr);
+		neigh = ipoib_neigh_get(dev, phdr->hwaddr);
 		if (unlikely(!neigh)) {
-			neigh_add_path(skb, cb->hwaddr, dev);
+			neigh_add_path(skb, phdr->hwaddr, dev);
 			return NETDEV_TX_OK;
 		}
 		break;
 	case htons(ETH_P_ARP):
 	case htons(ETH_P_RARP):
 		/* for unicast ARP and RARP should always perform path find */
-		unicast_arp_send(skb, dev, cb);
+		unicast_arp_send(skb, dev, phdr);
 		return NETDEV_TX_OK;
 	default:
 		/* ethertype not supported by IPoIB */
@@ -1086,11 +1095,13 @@ send_using_neigh:
 			goto unref;
 		}
 	} else if (neigh->ah) {
-		ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(cb->hwaddr));
+		ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(phdr->hwaddr));
 		goto unref;
 	}
 
 	if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+		/* put pseudoheader back on for next time */
+		skb_push(skb, sizeof(*phdr));
 		spin_lock_irqsave(&priv->lock, flags);
 		__skb_queue_tail(&neigh->queue, skb);
 		spin_unlock_irqrestore(&priv->lock, flags);
@@ -1122,8 +1133,8 @@ static int ipoib_hard_header(struct sk_buff *skb,
 			     unsigned short type,
 			     const void *daddr, const void *saddr, unsigned len)
 {
+	struct ipoib_pseudo_header *phdr;
 	struct ipoib_header *header;
-	struct ipoib_cb *cb = ipoib_skb_cb(skb);
 
 	header = (struct ipoib_header *) skb_push(skb, sizeof *header);
 
@@ -1132,12 +1143,13 @@ static int ipoib_hard_header(struct sk_buff *skb,
 
 	/*
 	 * we don't rely on dst_entry structure, always stuff the
-	 * destination address into skb->cb so we can figure out where
+	 * destination address into skb hard header so we can figure out where
 	 * to send the packet later.
 	 */
-	memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);
+	phdr = (struct ipoib_pseudo_header *) skb_push(skb, sizeof(*phdr));
+	memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
 
-	return sizeof *header;
+	return IPOIB_HARD_LEN;
 }
 
 static void ipoib_set_mcast_list(struct net_device *dev)
@@ -1759,7 +1771,7 @@ void ipoib_setup(struct net_device *dev)
 
 	dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
 
-	dev->hard_header_len = IPOIB_ENCAP_LEN;
+	dev->hard_header_len = IPOIB_HARD_LEN;
 	dev->addr_len = INFINIBAND_ALEN;
 	dev->type = ARPHRD_INFINIBAND;
 	dev->tx_queue_len = ipoib_sendq_size * 2;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index d3394b6add24..1909dd252c94 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -796,9 +796,11 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
 			__ipoib_mcast_add(dev, mcast);
 			list_add_tail(&mcast->list, &priv->multicast_list);
 		}
-		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
+		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE) {
+			/* put pseudoheader back on for next time */
+			skb_push(skb, sizeof(struct ipoib_pseudo_header));
 			skb_queue_tail(&mcast->pkt_queue, skb);
-		else {
+		} else {
 			++dev->stats.tx_dropped;
 			dev_kfree_skb_any(skb);
 		}
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 692ee248e486..3474de576dde 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -913,13 +913,11 @@ fec_restart(struct net_device *ndev)
 	 * enet-mac reset will reset mac address registers too,
 	 * so need to reconfigure it.
 	 */
-	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
-		memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
-		writel((__force u32)cpu_to_be32(temp_mac[0]),
-		       fep->hwp + FEC_ADDR_LOW);
-		writel((__force u32)cpu_to_be32(temp_mac[1]),
-		       fep->hwp + FEC_ADDR_HIGH);
-	}
+	memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
+	writel((__force u32)cpu_to_be32(temp_mac[0]),
+	       fep->hwp + FEC_ADDR_LOW);
+	writel((__force u32)cpu_to_be32(temp_mac[1]),
+	       fep->hwp + FEC_ADDR_HIGH);
 
 	/* Clear any outstanding interrupt. */
 	writel(0xffffffff, fep->hwp + FEC_IEVENT);
@@ -1432,14 +1430,14 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 	skb_put(skb, pkt_len - 4);
 	data = skb->data;
 
+	if (!is_copybreak && need_swap)
+		swap_buffer(data, pkt_len);
+
 #if !defined(CONFIG_M5272)
 	if (fep->quirks & FEC_QUIRK_HAS_RACC)
 		data = skb_pull_inline(skb, 2);
 #endif
 
-	if (!is_copybreak && need_swap)
-		swap_buffer(data, pkt_len);
-
 	/* Extract the enhanced buffer descriptor */
 	ebdp = NULL;
 	if (fep->bufdesc_ex)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 132cea655920..e3be7e44ff51 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -127,7 +127,15 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
 			/* For TX we use the same irq per
 			ring we assigned for the RX    */
 			struct mlx4_en_cq *rx_cq;
-
+			int xdp_index;
+
+			/* The xdp tx irq must align with the rx ring that forwards to
+			 * it, so reindex these from 0. This should only happen when
+			 * tx_ring_num is not a multiple of rx_ring_num.
+			 */
+			xdp_index = (priv->xdp_ring_num - priv->tx_ring_num) + cq_idx;
+			if (xdp_index >= 0)
+				cq_idx = xdp_index;
 			cq_idx = cq_idx % priv->rx_ring_num;
 			rx_cq = priv->rx_cq[cq_idx];
 			cq->vector = rx_cq->vector;
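
The en_cq.c hunk re-bases completion-queue indices for XDP transmit rings, which sit at the tail of the TX range, so each one shares the IRQ of the RX ring that feeds it. A small model of the index math under assumed ring counts (the counts are illustrative; the two-step remap is the patch's):

#include <stdio.h>

int main(void)
{
	int tx_ring_num  = 10;	/* all TX rings, XDP rings at the tail */
	int xdp_ring_num = 4;
	int rx_ring_num  = 4;

	for (int cq = 0; cq < tx_ring_num; cq++) {
		int idx = cq;
		/* re-index XDP TX rings from 0, as the patch does */
		int xdp_index = (xdp_ring_num - tx_ring_num) + cq;
		if (xdp_index >= 0)
			idx = xdp_index;
		idx %= rx_ring_num;
		printf("tx cq %2d -> rx cq %d%s\n", cq, idx,
		       xdp_index >= 0 ? "  (xdp)" : "");
	}
	return 0;
}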
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 3c20e87bb761..16af1ce99233 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -453,7 +453,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk,
 
 	skb_gro_pull(skb, gh_len);
 	skb_gro_postpull_rcsum(skb, gh, gh_len);
-	pp = ptype->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 	flush = 0;
 
 out_unlock:
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 3ba29fc80d05..c4d9653cae66 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -624,15 +624,18 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
 		       packet->total_data_buflen);
 
 	skb->protocol = eth_type_trans(skb, net);
-	if (csum_info) {
-		/* We only look at the IP checksum here.
-		 * Should we be dropping the packet if checksum
-		 * failed? How do we deal with other checksums - TCP/UDP?
-		 */
-		if (csum_info->receive.ip_checksum_succeeded)
+
+	/* skb is already created with CHECKSUM_NONE */
+	skb_checksum_none_assert(skb);
+
+	/*
+	 * In Linux, the IP checksum is always checked.
+	 * Do L4 checksum offload if enabled and present.
+	 */
+	if (csum_info && (net->features & NETIF_F_RXCSUM)) {
+		if (csum_info->receive.tcp_checksum_succeeded ||
+		    csum_info->receive.udp_checksum_succeeded)
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
-		else
-			skb->ip_summed = CHECKSUM_NONE;
 	}
 
 	if (vlan_tci & VLAN_TAG_PRESENT)
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 351e701eb043..b72ddc61eff8 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -397,6 +397,14 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
 #define DEFAULT_ENCRYPT false
 #define DEFAULT_ENCODING_SA 0
 
+static bool send_sci(const struct macsec_secy *secy)
+{
+	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
+
+	return tx_sc->send_sci ||
+		(secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
+}
+
 static sci_t make_sci(u8 *addr, __be16 port)
 {
 	sci_t sci;
@@ -437,15 +445,15 @@ static unsigned int macsec_extra_len(bool sci_present)
 
 /* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
 static void macsec_fill_sectag(struct macsec_eth_header *h,
-			       const struct macsec_secy *secy, u32 pn)
+			       const struct macsec_secy *secy, u32 pn,
+			       bool sci_present)
 {
 	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
 
-	memset(&h->tci_an, 0, macsec_sectag_len(tx_sc->send_sci));
+	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
 	h->eth.h_proto = htons(ETH_P_MACSEC);
 
-	if (tx_sc->send_sci ||
-	    (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb)) {
+	if (sci_present) {
 		h->tci_an |= MACSEC_TCI_SC;
 		memcpy(&h->secure_channel_id, &secy->sci,
 		       sizeof(h->secure_channel_id));
@@ -650,6 +658,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
 	struct macsec_tx_sc *tx_sc;
 	struct macsec_tx_sa *tx_sa;
 	struct macsec_dev *macsec = macsec_priv(dev);
+	bool sci_present;
 	u32 pn;
 
 	secy = &macsec->secy;
@@ -687,7 +696,8 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
 
 	unprotected_len = skb->len;
 	eth = eth_hdr(skb);
-	hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(tx_sc->send_sci));
+	sci_present = send_sci(secy);
+	hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(sci_present));
 	memmove(hh, eth, 2 * ETH_ALEN);
 
 	pn = tx_sa_update_pn(tx_sa, secy);
@@ -696,7 +706,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
 		kfree_skb(skb);
 		return ERR_PTR(-ENOLINK);
 	}
-	macsec_fill_sectag(hh, secy, pn);
+	macsec_fill_sectag(hh, secy, pn, sci_present);
 	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
 
 	skb_put(skb, secy->icv_len);
@@ -726,10 +736,10 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
 	skb_to_sgvec(skb, sg, 0, skb->len);
 
 	if (tx_sc->encrypt) {
-		int len = skb->len - macsec_hdr_len(tx_sc->send_sci) -
+		int len = skb->len - macsec_hdr_len(sci_present) -
 			  secy->icv_len;
 		aead_request_set_crypt(req, sg, sg, len, iv);
-		aead_request_set_ad(req, macsec_hdr_len(tx_sc->send_sci));
+		aead_request_set_ad(req, macsec_hdr_len(sci_present));
 	} else {
 		aead_request_set_crypt(req, sg, sg, 0, iv);
 		aead_request_set_ad(req, skb->len - secy->icv_len);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index c6f66832a1a6..f424b867f73e 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -608,6 +608,21 @@ void phy_start_machine(struct phy_device *phydev)
 }
 
 /**
+ * phy_trigger_machine - trigger the state machine to run
+ *
+ * @phydev: the phy_device struct
+ *
+ * Description: There has been a change in state which requires that the
+ *   state machine runs.
+ */
+
+static void phy_trigger_machine(struct phy_device *phydev)
+{
+	cancel_delayed_work_sync(&phydev->state_queue);
+	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
+}
+
+/**
  * phy_stop_machine - stop the PHY state machine tracking
  * @phydev: target phy_device struct
 *
@@ -639,6 +654,8 @@ static void phy_error(struct phy_device *phydev)
 	mutex_lock(&phydev->lock);
 	phydev->state = PHY_HALTED;
 	mutex_unlock(&phydev->lock);
+
+	phy_trigger_machine(phydev);
 }
 
 /**
@@ -800,8 +817,7 @@ void phy_change(struct work_struct *work)
 	}
 
 	/* reschedule state queue work to run as soon as possible */
-	cancel_delayed_work_sync(&phydev->state_queue);
-	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
+	phy_trigger_machine(phydev);
 	return;
 
 ignore:
@@ -890,6 +906,8 @@ void phy_start(struct phy_device *phydev)
 	/* if phy was suspended, bring the physical link up again */
 	if (do_resume)
 		phy_resume(phydev);
+
+	phy_trigger_machine(phydev);
 }
 EXPORT_SYMBOL(phy_start);
 
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 6e65832051d6..5ae664c02528 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -584,7 +584,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk,
 		}
 	}
 
-	pp = eth_gro_receive(head, skb);
+	pp = call_gro_receive(eth_gro_receive, head, skb);
 	flush = 0;
 
 out:
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index d637c933c8a9..58a97d420572 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -193,6 +193,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
 		if (err)
 			break;
 
+		memset(&precise_offset, 0, sizeof(precise_offset));
 		ts = ktime_to_timespec64(xtstamp.device);
 		precise_offset.device.sec = ts.tv_sec;
 		precise_offset.device.nsec = ts.tv_nsec;
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index ca86c885dfaa..3aaea713bf37 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -2233,7 +2233,7 @@ struct megasas_instance_template {
 };
 
 #define MEGASAS_IS_LOGICAL(scp) \
-	(scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1
+	((scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1)
 
 #define MEGASAS_DEV_INDEX(scp) \
 	(((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \
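
The MEGASAS_IS_LOGICAL change adds outer parentheses around the conditional expression; without them the macro mis-parses when combined with &&, because ?: binds looser than &&. A compile-and-run illustration with simplified stand-ins (not the driver's types):

#include <stdio.h>

#define MAX_PD_CHANNELS 2

#define IS_LOGICAL_OLD(ch)	(ch < MAX_PD_CHANNELS) ? 0 : 1		/* unparenthesized */
#define IS_LOGICAL_NEW(ch)	((ch < MAX_PD_CHANNELS) ? 0 : 1)	/* the fix */

int main(void)
{
	int is_sync_cache = 0;	/* some other SCSI opcode */
	int channel = 3;	/* a logical-device channel */

	/* "a && (cond) ? 0 : 1" parses as "(a && cond) ? 0 : 1", so a
	 * false left-hand side wrongly yields 1 with the old macro */
	printf("old: %d\n", is_sync_cache && IS_LOGICAL_OLD(channel));	/* 1 */
	printf("new: %d\n", is_sync_cache && IS_LOGICAL_NEW(channel));	/* 0 */
	return 0;
}

With the old form, the SYNCHRONIZE_CACHE test added in megaraid_sas_base.c below would have evaluated true for every non-flush command and completed it without ever sending it to the firmware.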
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index c1ed25adb17e..71e489937c6f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1713,16 +1713,13 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
 		goto out_done;
 	}
 
-	switch (scmd->cmnd[0]) {
-	case SYNCHRONIZE_CACHE:
-		/*
-		 * FW takes care of flush cache on its own
-		 * No need to send it down
-		 */
+	/*
+	 * FW takes care of flush cache on its own for Virtual Disk.
+	 * No need to send it down for VD. For JBOD send SYNCHRONIZE_CACHE to FW.
+	 */
+	if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd)) {
 		scmd->result = DID_OK << 16;
 		goto out_done;
-	default:
-		break;
 	}
 
 	return instance->instancet->build_and_issue_cmd(instance, scmd);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 6443cfba7b55..dc3b5962d087 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -789,6 +789,7 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
 		req->trb = trb;
 		req->trb_dma = dwc3_trb_dma_offset(dep, trb);
 		req->first_trb_index = dep->trb_enqueue;
+		dep->queued_requests++;
 	}
 
 	dwc3_ep_inc_enq(dep);
@@ -841,8 +842,6 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
 
 	trb->ctrl |= DWC3_TRB_CTRL_HWO;
 
-	dep->queued_requests++;
-
 	trace_dwc3_prepare_trb(dep, trb);
 }
 
@@ -1963,7 +1962,9 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
 	unsigned int s_pkt = 0;
 	unsigned int trb_status;
 
-	dep->queued_requests--;
+	if (req->trb == trb)
+		dep->queued_requests--;
+
 	trace_dwc3_complete_trb(dep, trb);
 
 	/*
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e8d79d4ebcfe..e942c67ea230 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2154,7 +2154,10 @@ struct napi_gro_cb {
 	/* Used to determine if flush_id can be ignored */
 	u8 is_atomic:1;
 
-	/* 5 bit hole */
+	/* Number of gro_receive callbacks this packet already went through */
+	u8 recursion_counter:4;
+
+	/* 1 bit hole */
 
 	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
 	__wsum csum;
@@ -2165,6 +2168,40 @@ struct napi_gro_cb {
 
 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
 
+#define GRO_RECURSION_LIMIT 15
+static inline int gro_recursion_inc_test(struct sk_buff *skb)
+{
+	return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
+}
+
+typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
+static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
+						struct sk_buff **head,
+						struct sk_buff *skb)
+{
+	if (unlikely(gro_recursion_inc_test(skb))) {
+		NAPI_GRO_CB(skb)->flush |= 1;
+		return NULL;
+	}
+
+	return cb(head, skb);
+}
+
+typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **,
+					     struct sk_buff *);
+static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb,
+						   struct sock *sk,
+						   struct sk_buff **head,
+						   struct sk_buff *skb)
+{
+	if (unlikely(gro_recursion_inc_test(skb))) {
+		NAPI_GRO_CB(skb)->flush |= 1;
+		return NULL;
+	}
+
+	return cb(sk, head, skb);
+}
+
 struct packet_type {
 	__be16 type;	/* This is really htons(ether_type). */
 	struct net_device *dev;	/* NULL is wildcarded here */
@@ -3862,7 +3899,7 @@ struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
 	     ldev = netdev_all_lower_get_next(dev, &(iter)))
 
 #define netdev_for_each_all_lower_dev_rcu(dev, ldev, iter) \
-	for (iter = (dev)->all_adj_list.lower.next, \
+	for (iter = &(dev)->all_adj_list.lower, \
 	     ldev = netdev_all_lower_get_next_rcu(dev, &(iter)); \
 	     ldev; \
 	     ldev = netdev_all_lower_get_next_rcu(dev, &(iter)))
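
call_gro_receive() and its socket variant cap how many nested gro_receive callbacks one packet may traverse, defeating crafted tunnel-in-tunnel encapsulations that could otherwise recurse without bound. A minimal userspace model of the guard (illustrative types and callback; only the counter/limit pattern is taken from the patch):

#include <stdio.h>

#define GRO_RECURSION_LIMIT 15

struct pkt {
	int recursion_counter;
	int flush;
};

typedef void (*receive_cb)(struct pkt *);

/* mirrors gro_recursion_inc_test() wrapped by call_gro_receive() */
static void call_receive(receive_cb cb, struct pkt *p)
{
	if (++p->recursion_counter == GRO_RECURSION_LIMIT) {
		p->flush = 1;	/* give up on aggregation, flush instead */
		return;
	}
	cb(p);
}

static void tunnel_receive(struct pkt *p)
{
	/* a packet that keeps claiming to contain another tunnel layer */
	call_receive(tunnel_receive, p);
}

int main(void)
{
	struct pkt p = { 0, 0 };

	tunnel_receive(&p);	/* stops after GRO_RECURSION_LIMIT levels */
	printf("levels: %d, flushed: %d\n", p.recursion_counter, p.flush);
	return 0;
}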
diff --git a/include/net/ip.h b/include/net/ip.h
index 9742b92dc933..156b0c11b524 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -549,7 +549,7 @@ int ip_options_rcv_srr(struct sk_buff *skb);
 */
 
 void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
-void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int offset);
+void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int tlen, int offset);
 int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
 		 struct ipcm_cookie *ipc, bool allow_ipv6);
 int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
@@ -571,7 +571,7 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
 
 static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
 {
-	ip_cmsg_recv_offset(msg, skb, 0);
+	ip_cmsg_recv_offset(msg, skb, 0, 0);
 }
 
 bool icmp_global_allow(void);
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index d97305d0e71f..0a2d2701285d 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -32,6 +32,7 @@ struct route_info {
 #define RT6_LOOKUP_F_SRCPREF_TMP	0x00000008
 #define RT6_LOOKUP_F_SRCPREF_PUBLIC	0x00000010
 #define RT6_LOOKUP_F_SRCPREF_COA	0x00000020
+#define RT6_LOOKUP_F_IGNORE_LINKSTATE	0x00000040
 
 /* We do not (yet ?) support IPv6 jumbograms (RFC 2675)
 * Unlike IPv4, hdr->seg_len doesn't include the IPv6 header
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 262f0379d83a..5a78be518101 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -350,7 +350,7 @@ struct rtnexthop {
 #define RTNH_F_OFFLOAD		8	/* offloaded route */
 #define RTNH_F_LINKDOWN		16	/* carrier-down on nexthop */
 
-#define RTNH_COMPARE_MASK	(RTNH_F_DEAD | RTNH_F_LINKDOWN)
+#define RTNH_COMPARE_MASK	(RTNH_F_DEAD | RTNH_F_LINKDOWN | RTNH_F_OFFLOAD)
 
 /* Macros to handle hexthops */
 
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 8de138d3306b..f2531ad66b68 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -664,7 +664,7 @@ static struct sk_buff **vlan_gro_receive(struct sk_buff **head,
 
 	skb_gro_pull(skb, sizeof(*vhdr));
 	skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
-	pp = ptype->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index c5fea9393946..2136e45f5277 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -972,13 +972,12 @@ static void br_multicast_enable(struct bridge_mcast_own_query *query)
 	mod_timer(&query->timer, jiffies);
 }
 
-void br_multicast_enable_port(struct net_bridge_port *port)
+static void __br_multicast_enable_port(struct net_bridge_port *port)
 {
 	struct net_bridge *br = port->br;
 
-	spin_lock(&br->multicast_lock);
 	if (br->multicast_disabled || !netif_running(br->dev))
-		goto out;
+		return;
 
 	br_multicast_enable(&port->ip4_own_query);
 #if IS_ENABLED(CONFIG_IPV6)
@@ -987,8 +986,14 @@ void br_multicast_enable_port(struct net_bridge_port *port)
 	if (port->multicast_router == MDB_RTR_TYPE_PERM &&
 	    hlist_unhashed(&port->rlist))
 		br_multicast_add_router(br, port);
+}
 
-out:
+void br_multicast_enable_port(struct net_bridge_port *port)
+{
+	struct net_bridge *br = port->br;
+
+	spin_lock(&br->multicast_lock);
+	__br_multicast_enable_port(port);
 	spin_unlock(&br->multicast_lock);
 }
 
@@ -1994,8 +1999,9 @@ static void br_multicast_start_querier(struct net_bridge *br,
 
 int br_multicast_toggle(struct net_bridge *br, unsigned long val)
 {
-	int err = 0;
 	struct net_bridge_mdb_htable *mdb;
+	struct net_bridge_port *port;
+	int err = 0;
 
 	spin_lock_bh(&br->multicast_lock);
 	if (br->multicast_disabled == !val)
@@ -2023,10 +2029,9 @@ rollback:
 			goto rollback;
 	}
 
-	br_multicast_start_querier(br, &br->ip4_own_query);
-#if IS_ENABLED(CONFIG_IPV6)
-	br_multicast_start_querier(br, &br->ip6_own_query);
-#endif
+	br_multicast_open(br);
+	list_for_each_entry(port, &br->port_list, list)
+		__br_multicast_enable_port(port);
 
 unlock:
 	spin_unlock_bh(&br->multicast_lock);
diff --git a/net/core/dev.c b/net/core/dev.c
index ea6312057a71..44b3ba462ba1 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3035,6 +3035,7 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d
 	}
 	return head;
 }
+EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
 
 static void qdisc_pkt_len_init(struct sk_buff *skb)
 {
@@ -4496,6 +4497,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 		NAPI_GRO_CB(skb)->flush = 0;
 		NAPI_GRO_CB(skb)->free = 0;
 		NAPI_GRO_CB(skb)->encap_mark = 0;
+		NAPI_GRO_CB(skb)->recursion_counter = 0;
 		NAPI_GRO_CB(skb)->is_fou = 0;
 		NAPI_GRO_CB(skb)->is_atomic = 1;
 		NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
@@ -5500,10 +5502,14 @@ struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
 {
 	struct netdev_adjacent *lower;
 
-	lower = list_first_or_null_rcu(&dev->all_adj_list.lower,
-				       struct netdev_adjacent, list);
+	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
+
+	if (&lower->list == &dev->all_adj_list.lower)
+		return NULL;
+
+	*iter = &lower->list;
 
-	return lower ? lower->dev : NULL;
+	return lower->dev;
 }
 EXPORT_SYMBOL(netdev_all_lower_get_next_rcu);
 
@@ -5578,6 +5584,7 @@ static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
 
 static int __netdev_adjacent_dev_insert(struct net_device *dev,
 					struct net_device *adj_dev,
+					u16 ref_nr,
 					struct list_head *dev_list,
 					void *private, bool master)
 {
@@ -5587,7 +5594,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
 	adj = __netdev_find_adj(adj_dev, dev_list);
 
 	if (adj) {
-		adj->ref_nr++;
+		adj->ref_nr += ref_nr;
 		return 0;
 	}
 
@@ -5597,7 +5604,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
 
 	adj->dev = adj_dev;
 	adj->master = master;
-	adj->ref_nr = 1;
+	adj->ref_nr = ref_nr;
 	adj->private = private;
 	dev_hold(adj_dev);
 
@@ -5636,6 +5643,7 @@ free_adj:
 
 static void __netdev_adjacent_dev_remove(struct net_device *dev,
 					 struct net_device *adj_dev,
+					 u16 ref_nr,
 					 struct list_head *dev_list)
 {
 	struct netdev_adjacent *adj;
@@ -5648,10 +5656,10 @@ static void __netdev_adjacent_dev_remove(struct net_device *dev,
 		BUG();
 	}
 
-	if (adj->ref_nr > 1) {
-		pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
-			 adj->ref_nr-1);
-		adj->ref_nr--;
+	if (adj->ref_nr > ref_nr) {
+		pr_debug("%s to %s ref_nr-%d = %d\n", dev->name, adj_dev->name,
+			 ref_nr, adj->ref_nr-ref_nr);
+		adj->ref_nr -= ref_nr;
 		return;
 	}
 
@@ -5670,21 +5678,22 @@ static void __netdev_adjacent_dev_remove(struct net_device *dev,
 
 static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
 					    struct net_device *upper_dev,
+					    u16 ref_nr,
 					    struct list_head *up_list,
 					    struct list_head *down_list,
 					    void *private, bool master)
 {
 	int ret;
 
-	ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
-					   master);
+	ret = __netdev_adjacent_dev_insert(dev, upper_dev, ref_nr, up_list,
+					   private, master);
 	if (ret)
 		return ret;
 
-	ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
-					   false);
+	ret = __netdev_adjacent_dev_insert(upper_dev, dev, ref_nr, down_list,
+					   private, false);
 	if (ret) {
-		__netdev_adjacent_dev_remove(dev, upper_dev, up_list);
+		__netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
 		return ret;
 	}
 
@@ -5692,9 +5701,10 @@ static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
 }
 
 static int __netdev_adjacent_dev_link(struct net_device *dev,
-				      struct net_device *upper_dev)
+				      struct net_device *upper_dev,
+				      u16 ref_nr)
 {
-	return __netdev_adjacent_dev_link_lists(dev, upper_dev,
+	return __netdev_adjacent_dev_link_lists(dev, upper_dev, ref_nr,
 						&dev->all_adj_list.upper,
 						&upper_dev->all_adj_list.lower,
 						NULL, false);
@@ -5702,17 +5712,19 @@ static int __netdev_adjacent_dev_link(struct net_device *dev,
 
 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
 					       struct net_device *upper_dev,
+					       u16 ref_nr,
 					       struct list_head *up_list,
 					       struct list_head *down_list)
 {
-	__netdev_adjacent_dev_remove(dev, upper_dev, up_list);
-	__netdev_adjacent_dev_remove(upper_dev, dev, down_list);
+	__netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
+	__netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
 }
 
 static void __netdev_adjacent_dev_unlink(struct net_device *dev,
-					 struct net_device *upper_dev)
+					 struct net_device *upper_dev,
+					 u16 ref_nr)
 {
-	__netdev_adjacent_dev_unlink_lists(dev, upper_dev,
+	__netdev_adjacent_dev_unlink_lists(dev, upper_dev, ref_nr,
 					   &dev->all_adj_list.upper,
 					   &upper_dev->all_adj_list.lower);
 }
@@ -5721,17 +5733,17 @@ static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
 						struct net_device *upper_dev,
 						void *private, bool master)
 {
-	int ret = __netdev_adjacent_dev_link(dev, upper_dev);
+	int ret = __netdev_adjacent_dev_link(dev, upper_dev, 1);
 
 	if (ret)
 		return ret;
 
-	ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
+	ret = __netdev_adjacent_dev_link_lists(dev, upper_dev, 1,
 					       &dev->adj_list.upper,
 					       &upper_dev->adj_list.lower,
 					       private, master);
 	if (ret) {
-		__netdev_adjacent_dev_unlink(dev, upper_dev);
+		__netdev_adjacent_dev_unlink(dev, upper_dev, 1);
 		return ret;
 	}
 
@@ -5741,8 +5753,8 @@ static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
 						   struct net_device *upper_dev)
 {
-	__netdev_adjacent_dev_unlink(dev, upper_dev);
-	__netdev_adjacent_dev_unlink_lists(dev, upper_dev,
+	__netdev_adjacent_dev_unlink(dev, upper_dev, 1);
+	__netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
 					   &dev->adj_list.upper,
 					   &upper_dev->adj_list.lower);
 }
@@ -5795,7 +5807,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
 		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
 			pr_debug("Interlinking %s with %s, non-neighbour\n",
 				 i->dev->name, j->dev->name);
-			ret = __netdev_adjacent_dev_link(i->dev, j->dev);
+			ret = __netdev_adjacent_dev_link(i->dev, j->dev, i->ref_nr);
 			if (ret)
 				goto rollback_mesh;
 		}
@@ -5805,7 +5817,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
 	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
 		pr_debug("linking %s's upper device %s with %s\n",
 			 upper_dev->name, i->dev->name, dev->name);
-		ret = __netdev_adjacent_dev_link(dev, i->dev);
+		ret = __netdev_adjacent_dev_link(dev, i->dev, i->ref_nr);
 		if (ret)
 			goto rollback_upper_mesh;
 	}
@@ -5814,7 +5826,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
 	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
 		pr_debug("linking %s's lower device %s with %s\n", dev->name,
 			 i->dev->name, upper_dev->name);
-		ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
+		ret = __netdev_adjacent_dev_link(i->dev, upper_dev, i->ref_nr);
 		if (ret)
 			goto rollback_lower_mesh;
 	}
@@ -5832,7 +5844,7 @@ rollback_lower_mesh:
 	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
 		if (i == to_i)
 			break;
-		__netdev_adjacent_dev_unlink(i->dev, upper_dev);
+		__netdev_adjacent_dev_unlink(i->dev, upper_dev, i->ref_nr);
 	}
 
 	i = NULL;
@@ -5842,7 +5854,7 @@ rollback_upper_mesh:
 	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
 		if (i == to_i)
 			break;
-		__netdev_adjacent_dev_unlink(dev, i->dev);
+		__netdev_adjacent_dev_unlink(dev, i->dev, i->ref_nr);
 	}
 
 	i = j = NULL;
@@ -5854,7 +5866,7 @@ rollback_mesh:
 		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
 			if (i == to_i && j == to_j)
 				break;
-			__netdev_adjacent_dev_unlink(i->dev, j->dev);
+			__netdev_adjacent_dev_unlink(i->dev, j->dev, i->ref_nr);
 		}
 		if (i == to_i)
 			break;
@@ -5934,16 +5946,16 @@ void netdev_upper_dev_unlink(struct net_device *dev,
 	 */
 	list_for_each_entry(i, &dev->all_adj_list.lower, list)
 		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
-			__netdev_adjacent_dev_unlink(i->dev, j->dev);
+			__netdev_adjacent_dev_unlink(i->dev, j->dev, i->ref_nr);
 
 	/* remove also the devices itself from lower/upper device
 	 * list
 	 */
 	list_for_each_entry(i, &dev->all_adj_list.lower, list)
-		__netdev_adjacent_dev_unlink(i->dev, upper_dev);
+		__netdev_adjacent_dev_unlink(i->dev, upper_dev, i->ref_nr);
 
 	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
-		__netdev_adjacent_dev_unlink(dev, i->dev);
+		__netdev_adjacent_dev_unlink(dev, i->dev, i->ref_nr);
 
 	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
 				      &changeupper_info.info);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index bbd118b19aef..306b8f0e03c1 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -216,8 +216,8 @@
 #define M_QUEUE_XMIT		2	/* Inject packet into qdisc */
 
 /* If lock -- protects updating of if_list */
-#define   if_lock(t)           spin_lock(&(t->if_lock));
-#define   if_unlock(t)           spin_unlock(&(t->if_lock));
+#define   if_lock(t)           mutex_lock(&(t->if_lock));
+#define   if_unlock(t)           mutex_unlock(&(t->if_lock));
 
 /* Used to help with determining the pkts on receive */
 #define PKTGEN_MAGIC 0xbe9be955
@@ -423,7 +423,7 @@ struct pktgen_net {
 };
 
 struct pktgen_thread {
-	spinlock_t if_lock;		/* for list of devices */
+	struct mutex if_lock;		/* for list of devices */
 	struct list_head if_list;	/* All device here */
 	struct list_head th_list;
 	struct task_struct *tsk;
@@ -2010,11 +2010,13 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d
 {
 	struct pktgen_thread *t;
 
+	mutex_lock(&pktgen_thread_lock);
+
 	list_for_each_entry(t, &pn->pktgen_threads, th_list) {
 		struct pktgen_dev *pkt_dev;
 
-		rcu_read_lock();
-		list_for_each_entry_rcu(pkt_dev, &t->if_list, list) {
+		if_lock(t);
+		list_for_each_entry(pkt_dev, &t->if_list, list) {
 			if (pkt_dev->odev != dev)
 				continue;
 
@@ -2029,8 +2031,9 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d
 					dev->name);
 			break;
 		}
-		rcu_read_unlock();
+		if_unlock(t);
 	}
+	mutex_unlock(&pktgen_thread_lock);
 }
 
 static int pktgen_device_event(struct notifier_block *unused,
@@ -2286,7 +2289,7 @@ out:
 
 static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
 {
-	pkt_dev->pkt_overhead = LL_RESERVED_SPACE(pkt_dev->odev);
+	pkt_dev->pkt_overhead = 0;
 	pkt_dev->pkt_overhead += pkt_dev->nr_labels*sizeof(u32);
 	pkt_dev->pkt_overhead += VLAN_TAG_SIZE(pkt_dev);
 	pkt_dev->pkt_overhead += SVLAN_TAG_SIZE(pkt_dev);
@@ -2777,13 +2780,13 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
 }
 
 static struct sk_buff *pktgen_alloc_skb(struct net_device *dev,
-					struct pktgen_dev *pkt_dev,
-					unsigned int extralen)
+					struct pktgen_dev *pkt_dev)
 {
+	unsigned int extralen = LL_RESERVED_SPACE(dev);
 	struct sk_buff *skb = NULL;
-	unsigned int size = pkt_dev->cur_pkt_size + 64 + extralen +
-			    pkt_dev->pkt_overhead;
+	unsigned int size;
 
+	size = pkt_dev->cur_pkt_size + 64 + extralen + pkt_dev->pkt_overhead;
 	if (pkt_dev->flags & F_NODE) {
 		int node = pkt_dev->node >= 0 ? pkt_dev->node : numa_node_id();
 
@@ -2796,8 +2799,9 @@ static struct sk_buff *pktgen_alloc_skb(struct net_device *dev,
 		skb = __netdev_alloc_skb(dev, size, GFP_NOWAIT);
 	}
 
+	/* the caller pre-fetches from skb->data and reserves for the mac hdr */
 	if (likely(skb))
-		skb_reserve(skb, LL_RESERVED_SPACE(dev));
+		skb_reserve(skb, extralen - 16);
 
 	return skb;
 }
@@ -2830,16 +2834,14 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
 	mod_cur_headers(pkt_dev);
 	queue_map = pkt_dev->cur_queue_map;
 
-	datalen = (odev->hard_header_len + 16) & ~0xf;
-
-	skb = pktgen_alloc_skb(odev, pkt_dev, datalen);
+	skb = pktgen_alloc_skb(odev, pkt_dev);
 	if (!skb) {
 		sprintf(pkt_dev->result, "No memory");
 		return NULL;
 	}
 
 	prefetchw(skb->data);
-	skb_reserve(skb, datalen);
+	skb_reserve(skb, 16);
 
 	/* Reserve for ethernet and IP header */
 	eth = (__u8 *) skb_push(skb, 14);
@@ -2959,7 +2961,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
 	mod_cur_headers(pkt_dev);
 	queue_map = pkt_dev->cur_queue_map;
 
-	skb = pktgen_alloc_skb(odev, pkt_dev, 16);
+	skb = pktgen_alloc_skb(odev, pkt_dev);
 	if (!skb) {
 		sprintf(pkt_dev->result, "No memory");
 		return NULL;
@@ -3763,7 +3765,7 @@ static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn)
 		return -ENOMEM;
 	}
 
-	spin_lock_init(&t->if_lock);
+	mutex_init(&t->if_lock);
 	t->cpu = cpu;
 
 	INIT_LIST_HEAD(&t->if_list);
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 66dff5e3d772..02acfff36028 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -439,7 +439,7 @@ struct sk_buff **eth_gro_receive(struct sk_buff **head,
 
 	skb_gro_pull(skb, sizeof(*eh));
 	skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
-	pp = ptype->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 55513e654d79..eebbc0f2baa8 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1388,7 +1388,7 @@ struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 	skb_gro_pull(skb, sizeof(*iph));
 	skb_set_transport_header(skb, skb_gro_offset(skb));
 
-	pp = ops->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 321d57f825ce..5351b61ab8d3 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -249,7 +249,7 @@ static struct sk_buff **fou_gro_receive(struct sock *sk,
 	if (!ops || !ops->callbacks.gro_receive)
 		goto out_unlock;
 
-	pp = ops->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
@@ -441,7 +441,7 @@ next_proto:
 	if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
 		goto out_unlock;
 
-	pp = ops->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 	flush = 0;
 
 out_unlock:
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index ecd1e09dbbf1..6871f59cd0c0 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -227,7 +227,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
 	/* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/
 	skb_gro_postpull_rcsum(skb, greh, grehlen);
 
-	pp = ptype->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 	flush = 0;
 
 out_unlock:
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 71a52f4d4cff..11ef96e2147a 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -98,7 +98,7 @@ static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
 }
 
 static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
-				  int offset)
+				  int tlen, int offset)
 {
 	__wsum csum = skb->csum;
 
@@ -106,8 +106,9 @@ static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
 		return;
 
 	if (offset != 0)
-		csum = csum_sub(csum, csum_partial(skb_transport_header(skb),
-						   offset, 0));
+		csum = csum_sub(csum,
+				csum_partial(skb_transport_header(skb) + tlen,
+					     offset, 0));
 
 	put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
 }
@@ -153,7 +154,7 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
 }
 
 void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb,
-			 int offset)
+			 int tlen, int offset)
 {
 	struct inet_sock *inet = inet_sk(skb->sk);
 	unsigned int flags = inet->cmsg_flags;
@@ -216,7 +217,7 @@ void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb,
 	}
 
 	if (flags & IP_CMSG_CHECKSUM)
-		ip_cmsg_recv_checksum(msg, skb, offset);
+		ip_cmsg_recv_checksum(msg, skb, tlen, offset);
 }
 EXPORT_SYMBOL(ip_cmsg_recv_offset);
 
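ip_cmsg_recv_checksum() reports skb->csum to userspace minus the checksum of the bytes the caller already consumed; with the new tlen argument that subtraction starts past the transport header (the UDP header, for the udp.c callers below) instead of at it. A stand-alone model of the one's-complement subtract (naive reimplementations of csum_partial()/csum_sub() semantics with illustrative data, not the kernel's optimized versions):

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* naive one's-complement sum over a byte range, like csum_partial() */
static uint32_t csum_words(const uint8_t *buf, size_t len, uint32_t sum)
{
	for (size_t i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)(buf[i] << 8 | buf[i + 1]);
	if (len & 1)
		sum += (uint32_t)buf[len - 1] << 8;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

/* one's-complement subtraction: a - b == a + ~b, with end-around carry */
static uint32_t csum_sub16(uint32_t csum, uint32_t addend)
{
	uint32_t res = csum + (~addend & 0xffff);
	while (res >> 16)
		res = (res & 0xffff) + (res >> 16);
	return res;
}

int main(void)
{
	uint8_t pkt[] = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc };
	size_t tlen = 2, offset = 2;	/* a 2-byte "header", 2 consumed bytes */

	uint32_t whole   = csum_words(pkt, sizeof(pkt), 0);
	uint32_t skipped = csum_words(pkt + tlen, offset, 0);
	/* equals the sum of the words outside [tlen, tlen + offset) */
	printf("remainder csum: 0x%04x\n", csum_sub16(whole, skipped));
	return 0;
}
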
1371 diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
1372 index 1cb67de106fe..80bc36b25de2 100644
1373 --- a/net/ipv4/sysctl_net_ipv4.c
1374 +++ b/net/ipv4/sysctl_net_ipv4.c
1375 @@ -96,11 +96,11 @@ static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low
1376 container_of(table->data, struct net, ipv4.ping_group_range.range);
1377 unsigned int seq;
1378 do {
1379 - seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
1380 + seq = read_seqbegin(&net->ipv4.ping_group_range.lock);
1381
1382 *low = data[0];
1383 *high = data[1];
1384 - } while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
1385 + } while (read_seqretry(&net->ipv4.ping_group_range.lock, seq));
1386 }
1387
1388 /* Update system visible IP port range */
1389 @@ -109,10 +109,10 @@ static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t hig
1390 kgid_t *data = table->data;
1391 struct net *net =
1392 container_of(table->data, struct net, ipv4.ping_group_range.range);
1393 - write_seqlock(&net->ipv4.ip_local_ports.lock);
1394 + write_seqlock(&net->ipv4.ping_group_range.lock);
1395 data[0] = low;
1396 data[1] = high;
1397 - write_sequnlock(&net->ipv4.ip_local_ports.lock);
1398 + write_sequnlock(&net->ipv4.ping_group_range.lock);
1399 }
1400
1401 /* Validate changes from /proc interface. */
1402 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
1403 index 5fdcb8d108d4..c0d71e7d663e 100644
1404 --- a/net/ipv4/udp.c
1405 +++ b/net/ipv4/udp.c
1406 @@ -1327,7 +1327,7 @@ try_again:
1407 *addr_len = sizeof(*sin);
1408 }
1409 if (inet->cmsg_flags)
1410 - ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr) + off);
1411 + ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr), off);
1412
1413 err = copied;
1414 if (flags & MSG_TRUNC)
1415 diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
1416 index 81f253b6ff36..6de9f977356e 100644
1417 --- a/net/ipv4/udp_offload.c
1418 +++ b/net/ipv4/udp_offload.c
1419 @@ -293,7 +293,7 @@ unflush:
1420
1421 skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
1422 skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
1423 - pp = udp_sk(sk)->gro_receive(sk, head, skb);
1424 + pp = call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);
1425
1426 out_unlock:
1427 rcu_read_unlock();
1428 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
1429 index 2f1f5d439788..f5432d65e6bf 100644
1430 --- a/net/ipv6/addrconf.c
1431 +++ b/net/ipv6/addrconf.c
1432 @@ -2995,7 +2995,7 @@ static void init_loopback(struct net_device *dev)
1433 * lo device down, release this obsolete dst and
1434 * reallocate a new router for ifa.
1435 */
1436 - if (sp_ifa->rt->dst.obsolete > 0) {
1437 + if (!atomic_read(&sp_ifa->rt->rt6i_ref)) {
1438 ip6_rt_put(sp_ifa->rt);
1439 sp_ifa->rt = NULL;
1440 } else {
1441 diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
1442 index 22e90e56b5a9..a09418bda1f8 100644
1443 --- a/net/ipv6/ip6_offload.c
1444 +++ b/net/ipv6/ip6_offload.c
1445 @@ -243,7 +243,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
1446
1447 skb_gro_postpull_rcsum(skb, iph, nlen);
1448
1449 - pp = ops->callbacks.gro_receive(head, skb);
1450 + pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
1451
1452 out_unlock:
1453 rcu_read_unlock();
1454 diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
1455 index 888543debe4e..41489f39c456 100644
1456 --- a/net/ipv6/ip6_tunnel.c
1457 +++ b/net/ipv6/ip6_tunnel.c
1458 @@ -155,6 +155,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
1459 hash = HASH(&any, local);
1460 for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
1461 if (ipv6_addr_equal(local, &t->parms.laddr) &&
1462 + ipv6_addr_any(&t->parms.raddr) &&
1463 (t->dev->flags & IFF_UP))
1464 return t;
1465 }
1466 @@ -162,6 +163,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
1467 hash = HASH(remote, &any);
1468 for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
1469 if (ipv6_addr_equal(remote, &t->parms.raddr) &&
1470 + ipv6_addr_any(&t->parms.laddr) &&
1471 (t->dev->flags & IFF_UP))
1472 return t;
1473 }
1474 @@ -1132,6 +1134,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
1475 if (err)
1476 return err;
1477
1478 + skb->protocol = htons(ETH_P_IPV6);
1479 skb_push(skb, sizeof(struct ipv6hdr));
1480 skb_reset_network_header(skb);
1481 ipv6h = ipv6_hdr(skb);
1482 diff --git a/net/ipv6/route.c b/net/ipv6/route.c
1483 index 269218aacbea..23153ac6c9b9 100644
1484 --- a/net/ipv6/route.c
1485 +++ b/net/ipv6/route.c
1486 @@ -656,7 +656,8 @@ static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
1487 struct net_device *dev = rt->dst.dev;
1488
1489 if (dev && !netif_carrier_ok(dev) &&
1490 - idev->cnf.ignore_routes_with_linkdown)
1491 + idev->cnf.ignore_routes_with_linkdown &&
1492 + !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
1493 goto out;
1494
1495 if (rt6_check_expired(rt))
1496 @@ -1050,6 +1051,7 @@ struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
1497 int strict = 0;
1498
1499 strict |= flags & RT6_LOOKUP_F_IFACE;
1500 + strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
1501 if (net->ipv6.devconf_all->forwarding == 0)
1502 strict |= RT6_LOOKUP_F_REACHABLE;
1503
1504 @@ -1783,7 +1785,7 @@ static struct rt6_info *ip6_nh_lookup_table(struct net *net,
1505 };
1506 struct fib6_table *table;
1507 struct rt6_info *rt;
1508 - int flags = RT6_LOOKUP_F_IFACE;
1509 + int flags = RT6_LOOKUP_F_IFACE | RT6_LOOKUP_F_IGNORE_LINKSTATE;
1510
1511 table = fib6_get_table(net, cfg->fc_table);
1512 if (!table)
1513 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
1514 index 94f4f89d73e7..fc67822c42e0 100644
1515 --- a/net/ipv6/tcp_ipv6.c
1516 +++ b/net/ipv6/tcp_ipv6.c
1517 @@ -1193,6 +1193,16 @@ out:
1518 return NULL;
1519 }
1520
1521 +static void tcp_v6_restore_cb(struct sk_buff *skb)
1522 +{
1523 + /* We need to move header back to the beginning if xfrm6_policy_check()
1524 + * and tcp_v6_fill_cb() are going to be called again.
1525 + * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
1526 + */
1527 + memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1528 + sizeof(struct inet6_skb_parm));
1529 +}
1530 +
1531 /* The socket must have it's spinlock held when we get
1532 * here, unless it is a TCP_LISTEN socket.
1533 *
1534 @@ -1322,6 +1332,7 @@ ipv6_pktoptions:
1535 np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1536 if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1537 skb_set_owner_r(opt_skb, sk);
1538 + tcp_v6_restore_cb(opt_skb);
1539 opt_skb = xchg(&np->pktoptions, opt_skb);
1540 } else {
1541 __kfree_skb(opt_skb);
1542 @@ -1355,15 +1366,6 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1543 TCP_SKB_CB(skb)->sacked = 0;
1544 }
1545
1546 -static void tcp_v6_restore_cb(struct sk_buff *skb)
1547 -{
1548 - /* We need to move header back to the beginning if xfrm6_policy_check()
1549 - * and tcp_v6_fill_cb() are going to be called again.
1550 - */
1551 - memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1552 - sizeof(struct inet6_skb_parm));
1553 -}
1554 -
1555 static int tcp_v6_rcv(struct sk_buff *skb)
1556 {
1557 const struct tcphdr *th;
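
IPv6 and TCP share the 48-byte skb->cb scratch area: tcp_v6_fill_cb() copies the IPv6 control block into the tail of TCP's view and then reuses the front, so an skb parked on np->pktoptions for later ancillary-data parsing must first have the IPv6 block moved back to offset zero, which is what the relocated tcp_v6_restore_cb() now also does on the skb_set_owner_r() path. A userspace sketch of that save/restore over an overlaid scratch buffer (layouts and names are illustrative, not the kernel's exact structs):

#include <stdio.h>
#include <string.h>

#define CB_SIZE 48

struct ip6cb { int iif; int flags; };                    /* lives at cb[0] */
struct tcpcb { int seq; int sacked; struct ip6cb h6; };  /* h6 kept at tail */

struct skbuff { char cb[CB_SIZE]; };

/* fill_cb(): TCP takes over cb, stashing the IPv6 block inside its view. */
static void fill_cb(struct skbuff *skb)
{
	struct ip6cb saved = *(struct ip6cb *)skb->cb;  /* read before clobber */
	struct tcpcb *tcb = (struct tcpcb *)skb->cb;

	tcb->seq = 12345;
	tcb->sacked = 0;
	tcb->h6 = saved;
}

/* restore_cb(): put the IPv6 block back at cb[0], as the patch does with
 * memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6, ...). */
static void restore_cb(struct skbuff *skb)
{
	memmove(skb->cb, &((struct tcpcb *)skb->cb)->h6,
		sizeof(struct ip6cb));
}

int main(void)
{
	struct skbuff skb;

	memset(&skb, 0, sizeof(skb));
	((struct ip6cb *)skb.cb)->iif = 7;

	fill_cb(&skb);
	restore_cb(&skb);
	printf("iif after restore: %d\n", ((struct ip6cb *)skb.cb)->iif);
	return 0;
}
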
1558 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
1559 index 19ac3a1c308d..c2a8656c22eb 100644
1560 --- a/net/ipv6/udp.c
1561 +++ b/net/ipv6/udp.c
1562 @@ -427,7 +427,8 @@ try_again:
1563
1564 if (is_udp4) {
1565 if (inet->cmsg_flags)
1566 - ip_cmsg_recv(msg, skb);
1567 + ip_cmsg_recv_offset(msg, skb,
1568 + sizeof(struct udphdr), off);
1569 } else {
1570 if (np->rxopt.all)
1571 ip6_datagram_recv_specific_ctl(sk, msg, skb);
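
For an IPv4 packet received on a dual-stack UDP socket, checksum-related ancillary data must now be computed relative to the UDP payload, so the helper is told the transport-header size plus how much of the datagram a partial read has already consumed. A sketch of that offset arithmetic, with a textbook RFC 1071 ones'-complement sum standing in for the kernel's checksum machinery (all names illustrative):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* RFC 1071-style ones'-complement sum over buf[off..len). */
static uint16_t csum_from(const uint8_t *buf, size_t len, size_t off)
{
	uint32_t sum = 0;

	for (size_t i = off; i < len; i++)
		sum += (i - off) % 2 == 0 ? (uint32_t)buf[i] << 8 : buf[i];
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* 8-byte "UDP header" followed by payload; a partial read already
	 * consumed 3 payload bytes, as the patch's 'off' argument models. */
	uint8_t dgram[] = { 0,1,2,3,4,5,6,7,  'p','a','y','l','o','a','d' };
	size_t hdr = 8, off = 3;

	printf("csum over remaining payload: 0x%04x\n",
	       csum_from(dgram, sizeof(dgram), hdr + off));
	return 0;
}
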
1572 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
1573 index 627f898c05b9..62bea4591054 100644
1574 --- a/net/netlink/af_netlink.c
1575 +++ b/net/netlink/af_netlink.c
1576 @@ -1832,7 +1832,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
1577 /* Record the max length of recvmsg() calls for future allocations */
1578 nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
1579 nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
1580 - 16384);
1581 + SKB_WITH_OVERHEAD(32768));
1582
1583 copied = data_skb->len;
1584 if (len < copied) {
1585 @@ -2083,8 +2083,9 @@ static int netlink_dump(struct sock *sk)
1586
1587 if (alloc_min_size < nlk->max_recvmsg_len) {
1588 alloc_size = nlk->max_recvmsg_len;
1589 - skb = alloc_skb(alloc_size, GFP_KERNEL |
1590 - __GFP_NOWARN | __GFP_NORETRY);
1591 + skb = alloc_skb(alloc_size,
1592 + (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) |
1593 + __GFP_NOWARN | __GFP_NORETRY);
1594 }
1595 if (!skb) {
1596 alloc_size = alloc_min_size;
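
Two allocation hardenings in netlink_recvmsg()/netlink_dump(): the cached max_recvmsg_len is clamped so payload plus skb_shared_info overhead still fits a 32 KiB allocation, and the speculative large allocation drops __GFP_DIRECT_RECLAIM so a fragmented heap falls back to the minimum size instead of stalling in reclaim. A sketch of the overhead clamp; the overhead constant here is illustrative, while the in-kernel SKB_WITH_OVERHEAD() subtracts the aligned size of struct skb_shared_info:

#include <stdio.h>
#include <stddef.h>

#define ALLOC_BUDGET 32768
#define SHARED_INFO_OVERHEAD 320   /* illustrative; arch-dependent in-kernel */

/* Usable payload when the whole buffer must fit in ALLOC_BUDGET bytes. */
#define WITH_OVERHEAD(x) ((x) - SHARED_INFO_OVERHEAD)

int main(void)
{
	size_t want = 100000;                 /* huge recvmsg() buffer seen */
	size_t cap = WITH_OVERHEAD(ALLOC_BUDGET);
	size_t granted = want < cap ? want : cap;

	printf("requested %zu, capped to %zu\n", want, granted);
	return 0;
}
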
1597 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
1598 index 33a4697d5539..d2238b204691 100644
1599 --- a/net/packet/af_packet.c
1600 +++ b/net/packet/af_packet.c
1601 @@ -250,7 +250,7 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po);
1602 static int packet_direct_xmit(struct sk_buff *skb)
1603 {
1604 struct net_device *dev = skb->dev;
1605 - netdev_features_t features;
1606 + struct sk_buff *orig_skb = skb;
1607 struct netdev_queue *txq;
1608 int ret = NETDEV_TX_BUSY;
1609
1610 @@ -258,9 +258,8 @@ static int packet_direct_xmit(struct sk_buff *skb)
1611 !netif_carrier_ok(dev)))
1612 goto drop;
1613
1614 - features = netif_skb_features(skb);
1615 - if (skb_needs_linearize(skb, features) &&
1616 - __skb_linearize(skb))
1617 + skb = validate_xmit_skb_list(skb, dev);
1618 + if (skb != orig_skb)
1619 goto drop;
1620
1621 txq = skb_get_tx_queue(dev, skb);
1622 @@ -280,7 +279,7 @@ static int packet_direct_xmit(struct sk_buff *skb)
1623 return ret;
1624 drop:
1625 atomic_long_inc(&dev->tx_dropped);
1626 - kfree_skb(skb);
1627 + kfree_skb_list(skb);
1628 return NET_XMIT_DROP;
1629 }
1630
1631 @@ -3952,6 +3951,7 @@ static int packet_notifier(struct notifier_block *this,
1632 }
1633 if (msg == NETDEV_UNREGISTER) {
1634 packet_cached_dev_reset(po);
1635 + fanout_release(sk);
1636 po->ifindex = -1;
1637 if (po->prot_hook.dev)
1638 dev_put(po->prot_hook.dev);
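
packet_direct_xmit() previously only linearized before handing the skb to the driver; validate_xmit_skb_list() now applies the full software fallbacks (GSO segmentation, checksum completion, VLAN insertion), and since segmentation can replace one skb with a list, the drop path frees with kfree_skb_list() and the orig_skb comparison detects that validation substituted the buffer. The notifier hunk additionally detaches the socket from its fanout group on NETDEV_UNREGISTER so the group cannot keep referencing a vanished device. A sketch of the validate-then-compare pattern on a singly linked list, every name hypothetical:

#include <stdio.h>
#include <stdlib.h>

struct buf { struct buf *next; int needs_split; };

/* Stand-in for validate_xmit_skb_list(): may hand back a different head
 * when the buffer had to be segmented (one calloc holds both segments,
 * so free(head) releases this sketch's whole list). */
static struct buf *validate_list(struct buf *b)
{
	if (!b->needs_split)
		return b;

	struct buf *seg = calloc(2, sizeof(*seg));
	if (!seg)
		return NULL;
	seg[0].next = &seg[1];
	return seg;
}

int main(void)
{
	struct buf one = { NULL, 1 };
	struct buf *out = validate_list(&one);

	if (out != &one) {             /* mirrors 'if (skb != orig_skb)' */
		printf("drop: validation replaced the buffer list\n");
		free(out);             /* the kfree_skb_list() analogue */
		return 1;
	}
	printf("transmit the original buffer\n");
	return 0;
}
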
1639 diff --git a/net/sched/act_api.c b/net/sched/act_api.c
1640 index d09d0687594b..027ddf412c40 100644
1641 --- a/net/sched/act_api.c
1642 +++ b/net/sched/act_api.c
1643 @@ -341,22 +341,25 @@ int tcf_register_action(struct tc_action_ops *act,
1644 if (!act->act || !act->dump || !act->init || !act->walk || !act->lookup)
1645 return -EINVAL;
1646
1647 + /* We have to register pernet ops before making the action ops visible,
1648 + * otherwise tcf_action_init_1() could get a partially initialized
1649 + * netns.
1650 + */
1651 + ret = register_pernet_subsys(ops);
1652 + if (ret)
1653 + return ret;
1654 +
1655 write_lock(&act_mod_lock);
1656 list_for_each_entry(a, &act_base, head) {
1657 if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
1658 write_unlock(&act_mod_lock);
1659 + unregister_pernet_subsys(ops);
1660 return -EEXIST;
1661 }
1662 }
1663 list_add_tail(&act->head, &act_base);
1664 write_unlock(&act_mod_lock);
1665
1666 - ret = register_pernet_subsys(ops);
1667 - if (ret) {
1668 - tcf_unregister_action(act, ops);
1669 - return ret;
1670 - }
1671 -
1672 return 0;
1673 }
1674 EXPORT_SYMBOL(tcf_register_action);
1675 @@ -367,8 +370,6 @@ int tcf_unregister_action(struct tc_action_ops *act,
1676 struct tc_action_ops *a;
1677 int err = -ENOENT;
1678
1679 - unregister_pernet_subsys(ops);
1680 -
1681 write_lock(&act_mod_lock);
1682 list_for_each_entry(a, &act_base, head) {
1683 if (a == act) {
1684 @@ -378,6 +379,8 @@ int tcf_unregister_action(struct tc_action_ops *act,
1685 }
1686 }
1687 write_unlock(&act_mod_lock);
1688 + if (!err)
1689 + unregister_pernet_subsys(ops);
1690 return err;
1691 }
1692 EXPORT_SYMBOL(tcf_unregister_action);
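
The registration-order fix: per-netns state must be fully set up before the action ops appear on act_base, or a concurrent tcf_action_init_1() can look the ops up and touch a half-initialized netns; symmetrically, the ops are unlinked before the pernet state is torn down, and only when the action was actually found. A userspace sketch of the publish-after-init discipline, with an atomic pointer standing in for the list, its lock, and the RCU readers:

#include <stdatomic.h>
#include <stdio.h>

struct ops { int pernet_ready; };

static _Atomic(struct ops *) published;    /* plays the role of act_base */

static int register_action(struct ops *o)
{
	o->pernet_ready = 1;           /* register_pernet_subsys() first */
	atomic_store(&published, o);   /* only now make the ops visible */
	return 0;
}

static void unregister_action(struct ops *o)
{
	atomic_store(&published, NULL);  /* unlink first */
	o->pernet_ready = 0;             /* then tear down pernet state */
}

int main(void)
{
	struct ops o = { 0 };

	register_action(&o);
	struct ops *seen = atomic_load(&published);
	printf("reader sees ready=%d\n", seen ? seen->pernet_ready : -1);
	unregister_action(&o);
	return 0;
}
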
1693 diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
1694 index 691409de3e1a..4ffc6c13a566 100644
1695 --- a/net/sched/act_vlan.c
1696 +++ b/net/sched/act_vlan.c
1697 @@ -36,6 +36,12 @@ static int tcf_vlan(struct sk_buff *skb, const struct tc_action *a,
1698 bstats_update(&v->tcf_bstats, skb);
1699 action = v->tcf_action;
1700
1701 + /* Ensure 'data' points at mac_header prior to calling vlan manipulating
1702 + * functions.
1703 + */
1704 + if (skb_at_tc_ingress(skb))
1705 + skb_push_rcsum(skb, skb->mac_len);
1706 +
1707 switch (v->tcfv_action) {
1708 case TCA_VLAN_ACT_POP:
1709 err = skb_vlan_pop(skb);
1710 @@ -57,6 +63,9 @@ drop:
1711 action = TC_ACT_SHOT;
1712 v->tcf_qstats.drops++;
1713 unlock:
1714 + if (skb_at_tc_ingress(skb))
1715 + skb_pull_rcsum(skb, skb->mac_len);
1716 +
1717 spin_unlock(&v->tcf_lock);
1718 return action;
1719 }
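
On ingress, skb->data sits just past the MAC header while skb_vlan_pop()/skb_vlan_push() expect it at the start of the frame, so the action brackets the VLAN operation with skb_push_rcsum()/skb_pull_rcsum(), which move the data pointer and keep a CHECKSUM_COMPLETE value consistent by folding the covered bytes in and out. A pointer-arithmetic sketch (the checksum bookkeeping is noted in comments but omitted):

#include <stdio.h>
#include <string.h>

struct pkt {
	unsigned char frame[64];
	unsigned char *data;   /* current parse position */
	unsigned int mac_len;
};

/* Like skb_push_rcsum(): step back over the MAC header so VLAN code sees
 * the whole frame.  (The kernel also folds the bytes into skb->csum.) */
static void push_mac(struct pkt *p)  { p->data -= p->mac_len; }

/* Like skb_pull_rcsum(): restore the post-MAC position afterwards. */
static void pull_mac(struct pkt *p)  { p->data += p->mac_len; }

int main(void)
{
	struct pkt p = { .mac_len = 14 };

	memcpy(p.frame, "\xff\xff\xff\xff\xff\xff", 6); /* dst MAC ... */
	p.data = p.frame + p.mac_len;                   /* ingress position */

	push_mac(&p);
	printf("vlan ops see offset %td\n", p.data - p.frame); /* 0 */
	pull_mac(&p);
	printf("back at offset %td\n", p.data - p.frame);      /* 14 */
	return 0;
}
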
1720 diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
1721 index a7c5645373af..74bed5e9bb89 100644
1722 --- a/net/sched/cls_api.c
1723 +++ b/net/sched/cls_api.c
1724 @@ -344,7 +344,8 @@ replay:
1725 if (err == 0) {
1726 struct tcf_proto *next = rtnl_dereference(tp->next);
1727
1728 - tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER);
1729 + tfilter_notify(net, skb, n, tp,
1730 + t->tcm_handle, RTM_DELTFILTER);
1731 if (tcf_destroy(tp, false))
1732 RCU_INIT_POINTER(*back, next);
1733 }
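
Once tcf_destroy() has run, the filter behind fh is freed, so building the delete notification from fh dumped freed memory; the fix reports the scalar handle taken from the request itself. The underlying rule, capture what the notification needs before tearing the object down, in a tiny sketch (names hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct filter { unsigned int handle; };

static void notify(unsigned int handle)
{
	printf("deleted filter 0x%x\n", handle);
}

int main(void)
{
	struct filter *f = malloc(sizeof(*f));

	if (!f)
		return 1;
	f->handle = 0x8001;

	unsigned int handle = f->handle; /* capture before teardown ... */
	free(f);
	notify(handle);                  /* ... and never touch f again */
	return 0;
}
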
1734 diff --git a/net/sctp/output.c b/net/sctp/output.c
1735 index 31b7bc35895d..81929907a365 100644
1736 --- a/net/sctp/output.c
1737 +++ b/net/sctp/output.c
1738 @@ -417,6 +417,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
1739 __u8 has_data = 0;
1740 int gso = 0;
1741 int pktcount = 0;
1742 + int auth_len = 0;
1743 struct dst_entry *dst;
1744 unsigned char *auth = NULL; /* pointer to auth in skb data */
1745
1746 @@ -505,7 +506,12 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
1747 list_for_each_entry(chunk, &packet->chunk_list, list) {
1748 int padded = WORD_ROUND(chunk->skb->len);
1749
1750 - if (pkt_size + padded > tp->pathmtu)
1751 + if (chunk == packet->auth)
1752 + auth_len = padded;
1753 + else if (auth_len + padded + packet->overhead >
1754 + tp->pathmtu)
1755 + goto nomem;
1756 + else if (pkt_size + padded > tp->pathmtu)
1757 break;
1758 pkt_size += padded;
1759 }
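
When an AUTH chunk is bundled, its padded length was not charged against the path MTU, so AUTH plus the next chunk plus packet overhead could exceed it and produce an oversized packet; the patch tracks auth_len and refuses to bundle in that case. A worked instance of the arithmetic, with WORD_ROUND() as SCTP's 4-byte padding and illustrative sizes:

#include <stdio.h>

#define WORD_ROUND(n) (((n) + 3) & ~3u)

int main(void)
{
	unsigned pmtu = 1500, overhead = 48;   /* IP+SCTP headers, illustrative */
	unsigned auth_len = WORD_ROUND(278);   /* bundled AUTH chunk -> 280 */
	unsigned chunk = WORD_ROUND(1200);     /* next DATA chunk */

	/* The patch's new test: AUTH + chunk + packet overhead must still
	 * fit in the path MTU, otherwise bundling cannot proceed. */
	if (auth_len + chunk + overhead > pmtu)
		printf("won't fit: %u > %u\n",
		       auth_len + chunk + overhead, pmtu);
	else
		printf("bundle ok\n");
	return 0;
}
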
1760 diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
1761 index d88bb2b0b699..920469e7b0ef 100644
1762 --- a/net/sctp/sm_statefuns.c
1763 +++ b/net/sctp/sm_statefuns.c
1764 @@ -3422,6 +3422,12 @@ sctp_disposition_t sctp_sf_ootb(struct net *net,
1765 return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
1766 commands);
1767
1768 + /* Report violation if chunk len overflows */
1769 + ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
1770 + if (ch_end > skb_tail_pointer(skb))
1771 + return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
1772 + commands);
1773 +
1774 /* Now that we know we at least have a chunk header,
1775 * do things that are type appropriate.
1776 */
1777 @@ -3453,12 +3459,6 @@ sctp_disposition_t sctp_sf_ootb(struct net *net,
1778 }
1779 }
1780
1781 - /* Report violation if chunk len overflows */
1782 - ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
1783 - if (ch_end > skb_tail_pointer(skb))
1784 - return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
1785 - commands);
1786 -
1787 ch = (sctp_chunkhdr_t *) ch_end;
1788 } while (ch_end < skb_tail_pointer(skb));
1789
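
This is the fix tracked as CVE-2016-9555: sctp_sf_ootb() dispatched on the chunk type before checking that the advertised chunk length stays inside the skb, so a crafted out-of-the-blue chunk could make the ABORT/ERROR handling read past the tail. Hoisting the check means ch->length is validated before anything touches the chunk body (the minimal-header-length check above it still runs first). A standalone bounds-check sketch of the hoisted test:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>   /* ntohs */

#define WORD_ROUND(n) (((n) + 3) & ~3u)

struct chunkhdr { uint8_t type, flags; uint16_t length; /* network order */ };

/* Validate the advertised length against the buffer tail BEFORE looking
 * at the chunk body, mirroring the check this patch hoists. */
static int chunk_in_bounds(const uint8_t *ch, const uint8_t *tail)
{
	const struct chunkhdr *h = (const struct chunkhdr *)ch;
	const uint8_t *end = ch + WORD_ROUND(ntohs(h->length));

	return end <= tail && end > ch;
}

int main(void)
{
	uint8_t buf[32] = { 6 /* ABORT */, 0, 0xff, 0xff /* bogus length */ };

	printf("chunk %s\n",
	       chunk_in_bounds(buf, buf + sizeof(buf)) ? "ok" : "overflows");
	return 0;
}
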
1790 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
1791 index 8ed2d99bde6d..baccbf3c1c60 100644
1792 --- a/net/sctp/socket.c
1793 +++ b/net/sctp/socket.c
1794 @@ -4683,7 +4683,7 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
1795 static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
1796 int __user *optlen)
1797 {
1798 - if (len <= 0)
1799 + if (len == 0)
1800 return -EINVAL;
1801 if (len > sizeof(struct sctp_event_subscribe))
1802 len = sizeof(struct sctp_event_subscribe);
1803 @@ -6426,6 +6426,9 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
1804 if (get_user(len, optlen))
1805 return -EFAULT;
1806
1807 + if (len < 0)
1808 + return -EINVAL;
1809 +
1810 lock_sock(sk);
1811
1812 switch (optname) {
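
The per-option handlers guard with comparisons such as len < sizeof(...), and the usual arithmetic conversions turn a negative int into a huge size_t for that comparison, so a negative optlen sailed past every "too short" check and reached copy_to_user() as an enormous length. Rejecting len < 0 once at the top covers all handlers, after which the events handler only needs to reject zero. A two-branch demonstration of the conversion pitfall (the struct is a stand-in sized like sctp_event_subscribe):

#include <stdio.h>

struct subscribe_like { char bytes[13]; };   /* only the size matters */

int main(void)
{
	int len = -1;

	/* Signed comparison behaves as expected. */
	if (len < (int)sizeof(struct subscribe_like))
		printf("caught by a signed comparison\n");

	/* But against an unmodified sizeof, len converts to size_t, so
	 * -1 becomes SIZE_MAX and the "too short" check passes. */
	if ((size_t)len < sizeof(struct subscribe_like))
		printf("unreachable\n");
	else
		printf("unsigned comparison let -1 through as %zu\n",
		       (size_t)len);
	return 0;
}
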
1813 diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
1814 index a5fc9dd24aa9..a56c5e6f4498 100644
1815 --- a/net/switchdev/switchdev.c
1816 +++ b/net/switchdev/switchdev.c
1817 @@ -774,6 +774,9 @@ int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
1818 u32 mask = BR_LEARNING | BR_LEARNING_SYNC | BR_FLOOD;
1819 int err;
1820
1821 + if (!netif_is_bridge_port(dev))
1822 + return -EOPNOTSUPP;
1823 +
1824 err = switchdev_port_attr_get(dev, &attr);
1825 if (err && err != -EOPNOTSUPP)
1826 return err;
1827 @@ -929,6 +932,9 @@ int switchdev_port_bridge_setlink(struct net_device *dev,
1828 struct nlattr *afspec;
1829 int err = 0;
1830
1831 + if (!netif_is_bridge_port(dev))
1832 + return -EOPNOTSUPP;
1833 +
1834 protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
1835 IFLA_PROTINFO);
1836 if (protinfo) {
1837 @@ -962,6 +968,9 @@ int switchdev_port_bridge_dellink(struct net_device *dev,
1838 {
1839 struct nlattr *afspec;
1840
1841 + if (!netif_is_bridge_port(dev))
1842 + return -EOPNOTSUPP;
1843 +
1844 afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
1845 IFLA_AF_SPEC);
1846 if (afspec)
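
All three bridge ndo entry points now bail out early with -EOPNOTSUPP when the device is not enslaved to a bridge, instead of walking attributes and recursing through lower devices that have no bridge-port semantics. A guard-clause sketch of the flag test behind netif_is_bridge_port(); the flag bit and struct layout here are illustrative:

#include <stdio.h>

#define IFF_BRIDGE_PORT 0x20     /* illustrative bit; kernel value differs */
#define EOPNOTSUPP 95

struct netdev { const char *name; unsigned priv_flags; };

static int is_bridge_port(const struct netdev *d)
{
	return d->priv_flags & IFF_BRIDGE_PORT;
}

static int bridge_getlink(const struct netdev *d)
{
	if (!is_bridge_port(d))
		return -EOPNOTSUPP;   /* the guard all three hunks add */
	/* ... walk attributes, build the netlink reply ... */
	return 0;
}

int main(void)
{
	struct netdev eth0 = { "eth0", 0 };
	struct netdev sw0p1 = { "sw0p1", IFF_BRIDGE_PORT };

	printf("%s -> %d\n", eth0.name, bridge_getlink(&eth0));
	printf("%s -> %d\n", sw0p1.name, bridge_getlink(&sw0p1));
	return 0;
}
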