Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.4/0131-4.4.32-all-fixes.patch



Revision 2867
Mon Mar 27 13:49:11 2017 UTC by niro
File size: 47123 bytes
linux-4.4.32
diff --git a/Makefile b/Makefile
index 7c6f28e7a2f6..fba9b09a1330 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 31
+SUBLEVEL = 32
 EXTRAVERSION =
 NAME = Blurry Fish Butt
 
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index bbe56871245c..4298aeb1e20f 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -822,7 +822,7 @@ static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
 	bool user;
 
 	/* No need to flush for entries which are already invalid */
-	if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V))
+	if (!((tlb->tlb_lo0 | tlb->tlb_lo1) & MIPS3_PG_V))
 		return;
 	/* User address space doesn't need flushing for KSeg2/3 changes */
 	user = tlb->tlb_hi < KVM_GUEST_KSEG0;
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index 21aacc1f45c1..7f85c2c1d681 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -265,15 +265,27 @@ static int amdgpu_atombios_dp_get_dp_link_config(struct drm_connector *connector
 	unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
 	unsigned lane_num, i, max_pix_clock;
 
-	for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
-		for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
-			max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+	if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) ==
+	    ENCODER_OBJECT_ID_NUTMEG) {
+		for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+			max_pix_clock = (lane_num * 270000 * 8) / bpp;
 			if (max_pix_clock >= pix_clock) {
 				*dp_lanes = lane_num;
-				*dp_rate = link_rates[i];
+				*dp_rate = 270000;
 				return 0;
 			}
 		}
+	} else {
+		for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+			for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+				max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+				if (max_pix_clock >= pix_clock) {
+					*dp_lanes = lane_num;
+					*dp_rate = link_rates[i];
+					return 0;
+				}
+			}
+		}
 	}
 
 	return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 44ee72e04df9..b5760851195c 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -315,15 +315,27 @@ int radeon_dp_get_dp_link_config(struct drm_connector *connector,
 	unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
 	unsigned lane_num, i, max_pix_clock;
 
-	for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
-		for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
-			max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+	if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
+	    ENCODER_OBJECT_ID_NUTMEG) {
+		for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+			max_pix_clock = (lane_num * 270000 * 8) / bpp;
 			if (max_pix_clock >= pix_clock) {
 				*dp_lanes = lane_num;
-				*dp_rate = link_rates[i];
+				*dp_rate = 270000;
 				return 0;
 			}
 		}
+	} else {
+		for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+			for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+				max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+				if (max_pix_clock >= pix_clock) {
+					*dp_lanes = lane_num;
+					*dp_rate = link_rates[i];
+					return 0;
+				}
+			}
+		}
 	}
 
	return -EINVAL;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index ca5ac5d6f4e6..49056c33be74 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -18142,14 +18142,14 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
 
 	rtnl_lock();
 
-	/* We needn't recover from permanent error */
-	if (state == pci_channel_io_frozen)
-		tp->pcierr_recovery = true;
-
 	/* We probably don't have netdev yet */
 	if (!netdev || !netif_running(netdev))
 		goto done;
 
+	/* We needn't recover from permanent error */
+	if (state == pci_channel_io_frozen)
+		tp->pcierr_recovery = true;
+
 	tg3_phy_stop(tp);
 
 	tg3_netif_stop(tp);
@@ -18246,7 +18246,7 @@ static void tg3_io_resume(struct pci_dev *pdev)
 
 	rtnl_lock();
 
-	if (!netif_running(netdev))
+	if (!netdev || !netif_running(netdev))
 		goto done;
 
 	tg3_full_lock(tp, 0);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index f6147ffc7fbc..ab716042bdd2 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -944,11 +944,11 @@ fec_restart(struct net_device *ndev)
 	 * enet-mac reset will reset mac address registers too,
 	 * so need to reconfigure it.
 	 */
-	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
-		memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
-		writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
-		writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
-	}
+	memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
+	writel((__force u32)cpu_to_be32(temp_mac[0]),
+	       fep->hwp + FEC_ADDR_LOW);
+	writel((__force u32)cpu_to_be32(temp_mac[1]),
+	       fep->hwp + FEC_ADDR_HIGH);
 
 	/* Clear any outstanding interrupt. */
 	writel(0xffffffff, fep->hwp + FEC_IEVENT);
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 69e31e2a68fc..4827c6987ac3 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -440,7 +440,7 @@ static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
 
 	skb_gro_pull(skb, gh_len);
 	skb_gro_postpull_rcsum(skb, gh, gh_len);
-	pp = ptype->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 003780901628..6fa8e165878e 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -593,7 +593,7 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
 		}
 	}
 
-	pp = eth_gro_receive(head, skb);
+	pp = call_gro_receive(eth_gro_receive, head, skb);
 
 out:
 	skb_gro_remcsum_cleanup(skb, &grc);
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index ed01c0172e4a..07dd81586c52 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -127,8 +127,12 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
 	}
 
 	/* Need adjust the alignment to satisfy the CMA requirement */
-	if (IS_ENABLED(CONFIG_CMA) && of_flat_dt_is_compatible(node, "shared-dma-pool"))
-		align = max(align, (phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
+	if (IS_ENABLED(CONFIG_CMA) && of_flat_dt_is_compatible(node, "shared-dma-pool")) {
+		unsigned long order =
+			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);
+
+		align = max(align, (phys_addr_t)PAGE_SIZE << order);
+	}
 
 	prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
 	if (prop) {
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index ef4ff03242ea..aaf7da07a358 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -1923,7 +1923,7 @@ struct megasas_instance_template {
 };
 
 #define MEGASAS_IS_LOGICAL(scp) \
-	(scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1
+	((scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1)
 
 #define MEGASAS_DEV_INDEX(scp) \
 	(((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index 79aaa9fc1a15..d5277fc3ce2e 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -103,5 +103,5 @@ struct mfc_cache {
 struct rtmsg;
 extern int ipmr_get_route(struct net *net, struct sk_buff *skb,
 			  __be32 saddr, __be32 daddr,
-			  struct rtmsg *rtm, int nowait);
+			  struct rtmsg *rtm, int nowait, u32 portid);
 #endif
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
index 66982e764051..f831155dc7d1 100644
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -115,7 +115,7 @@ struct mfc6_cache {
 
 struct rtmsg;
 extern int ip6mr_get_route(struct net *net, struct sk_buff *skb,
-			   struct rtmsg *rtm, int nowait);
+			   struct rtmsg *rtm, int nowait, u32 portid);
 
 #ifdef CONFIG_IPV6_MROUTE
 extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 12b4d54a8ffa..9d6025703f73 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2003,7 +2003,10 @@ struct napi_gro_cb {
 	/* Used in foo-over-udp, set in udp[46]_gro_receive */
 	u8 is_ipv6:1;
 
-	/* 7 bit hole */
+	/* Number of gro_receive callbacks this packet already went through */
+	u8 recursion_counter:4;
+
+	/* 3 bit hole */
 
 	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
 	__wsum csum;
@@ -2014,6 +2017,25 @@ struct napi_gro_cb {
 
 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
 
+#define GRO_RECURSION_LIMIT 15
+static inline int gro_recursion_inc_test(struct sk_buff *skb)
+{
+	return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
+}
+
+typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
+static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
+						struct sk_buff **head,
+						struct sk_buff *skb)
+{
+	if (unlikely(gro_recursion_inc_test(skb))) {
+		NAPI_GRO_CB(skb)->flush |= 1;
+		return NULL;
+	}
+
+	return cb(head, skb);
+}
+
 struct packet_type {
 	__be16 type;	/* This is really htons(ether_type). */
 	struct net_device *dev;	/* NULL is wildcarded here */
@@ -2059,6 +2081,22 @@ struct udp_offload {
 	struct udp_offload_callbacks callbacks;
 };
 
+typedef struct sk_buff **(*gro_receive_udp_t)(struct sk_buff **,
+					      struct sk_buff *,
+					      struct udp_offload *);
+static inline struct sk_buff **call_gro_receive_udp(gro_receive_udp_t cb,
+						    struct sk_buff **head,
+						    struct sk_buff *skb,
+						    struct udp_offload *uoff)
+{
+	if (unlikely(gro_recursion_inc_test(skb))) {
+		NAPI_GRO_CB(skb)->flush |= 1;
+		return NULL;
+	}
+
+	return cb(head, skb, uoff);
+}
+
 /* often modified stats are per cpu, other are shared (netdev->stats) */
 struct pcpu_sw_netstats {
 	u64 rx_packets;
diff --git a/include/net/ip.h b/include/net/ip.h
index 1a98f1ca1638..b450d8653b30 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -553,7 +553,7 @@ int ip_options_rcv_srr(struct sk_buff *skb);
  */
 
 void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
-void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int offset);
+void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int tlen, int offset);
 int ip_cmsg_send(struct net *net, struct msghdr *msg,
 		 struct ipcm_cookie *ipc, bool allow_ipv6);
 int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
@@ -575,7 +575,7 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
 
 static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
 {
-	ip_cmsg_recv_offset(msg, skb, 0);
+	ip_cmsg_recv_offset(msg, skb, 0, 0);
 }
 
 bool icmp_global_allow(void);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 86df0835f6b5..e5bba897d206 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -408,6 +408,15 @@ bool tcf_destroy(struct tcf_proto *tp, bool force);
 void tcf_destroy_chain(struct tcf_proto __rcu **fl);
 int skb_do_redirect(struct sk_buff *);
 
+static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_CLS_ACT
+	return G_TC_AT(skb->tc_verd) & AT_INGRESS;
+#else
+	return false;
+#endif
+}
+
 /* Reset all TX qdiscs greater then index of a device. */
 static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
 {
diff --git a/include/net/sock.h b/include/net/sock.h
index 14d3c0734007..3d5ff7436f41 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1425,6 +1425,16 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
 	if (!sk_has_account(sk))
 		return;
 	sk->sk_forward_alloc += size;
+
+	/* Avoid a possible overflow.
+	 * TCP send queues can make this happen, if sk_mem_reclaim()
+	 * is not called and more than 2 GBytes are released at once.
+	 *
+	 * If we reach 2 MBytes, reclaim 1 MBytes right now, there is
+	 * no need to hold that much forward allocation anyway.
+	 */
+	if (unlikely(sk->sk_forward_alloc >= 1 << 21))
+		__sk_mem_reclaim(sk, 1 << 20);
 }
 
 static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 123a5af4e8bb..fa3b34365560 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -343,7 +343,7 @@ struct rtnexthop {
 #define RTNH_F_OFFLOAD 8	/* offloaded route */
 #define RTNH_F_LINKDOWN 16	/* carrier-down on nexthop */
 
-#define RTNH_COMPARE_MASK (RTNH_F_DEAD | RTNH_F_LINKDOWN)
+#define RTNH_COMPARE_MASK (RTNH_F_DEAD | RTNH_F_LINKDOWN | RTNH_F_OFFLOAD)
 
 /* Macros to handle hexthops */
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index d2cd9de4b724..ad8d6e6b87ca 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -659,7 +659,7 @@ static struct sk_buff **vlan_gro_receive(struct sk_buff **head,
 
 	skb_gro_pull(skb, sizeof(*vhdr));
 	skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
-	pp = ptype->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 9542e84a9455..d80c15d028fe 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -951,13 +951,12 @@ static void br_multicast_enable(struct bridge_mcast_own_query *query)
 	mod_timer(&query->timer, jiffies);
 }
 
-void br_multicast_enable_port(struct net_bridge_port *port)
+static void __br_multicast_enable_port(struct net_bridge_port *port)
 {
 	struct net_bridge *br = port->br;
 
-	spin_lock(&br->multicast_lock);
 	if (br->multicast_disabled || !netif_running(br->dev))
-		goto out;
+		return;
 
 	br_multicast_enable(&port->ip4_own_query);
 #if IS_ENABLED(CONFIG_IPV6)
@@ -965,8 +964,14 @@ void br_multicast_enable_port(struct net_bridge_port *port)
 #endif
 	if (port->multicast_router == 2 && hlist_unhashed(&port->rlist))
 		br_multicast_add_router(br, port);
+}
 
-out:
+void br_multicast_enable_port(struct net_bridge_port *port)
+{
+	struct net_bridge *br = port->br;
+
+	spin_lock(&br->multicast_lock);
+	__br_multicast_enable_port(port);
 	spin_unlock(&br->multicast_lock);
 }
 
@@ -1905,8 +1910,9 @@ static void br_multicast_start_querier(struct net_bridge *br,
 
 int br_multicast_toggle(struct net_bridge *br, unsigned long val)
 {
-	int err = 0;
 	struct net_bridge_mdb_htable *mdb;
+	struct net_bridge_port *port;
+	int err = 0;
 
 	spin_lock_bh(&br->multicast_lock);
 	if (br->multicast_disabled == !val)
@@ -1934,10 +1940,9 @@ rollback:
 		goto rollback;
 	}
 
-	br_multicast_start_querier(br, &br->ip4_own_query);
-#if IS_ENABLED(CONFIG_IPV6)
-	br_multicast_start_querier(br, &br->ip6_own_query);
-#endif
+	br_multicast_open(br);
+	list_for_each_entry(port, &br->port_list, list)
+		__br_multicast_enable_port(port);
 
 unlock:
 	spin_unlock_bh(&br->multicast_lock);
diff --git a/net/core/dev.c b/net/core/dev.c
index 0989fea88c44..b3fa4b86ab4c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2836,6 +2836,7 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d
 	}
 	return head;
 }
+EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
 
 static void qdisc_pkt_len_init(struct sk_buff *skb)
 {
@@ -4240,6 +4241,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 		NAPI_GRO_CB(skb)->flush = 0;
 		NAPI_GRO_CB(skb)->free = 0;
 		NAPI_GRO_CB(skb)->encap_mark = 0;
+		NAPI_GRO_CB(skb)->recursion_counter = 0;
 		NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
 
 		/* Setup for GRO checksum validation */
@@ -5204,6 +5206,7 @@ static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
 
 static int __netdev_adjacent_dev_insert(struct net_device *dev,
 					struct net_device *adj_dev,
+					u16 ref_nr,
 					struct list_head *dev_list,
 					void *private, bool master)
 {
@@ -5213,7 +5216,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
 	adj = __netdev_find_adj(adj_dev, dev_list);
 
 	if (adj) {
-		adj->ref_nr++;
+		adj->ref_nr += ref_nr;
 		return 0;
 	}
 
@@ -5223,7 +5226,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
 
 	adj->dev = adj_dev;
 	adj->master = master;
-	adj->ref_nr = 1;
+	adj->ref_nr = ref_nr;
 	adj->private = private;
 	dev_hold(adj_dev);
 
@@ -5262,6 +5265,7 @@ free_adj:
 
 static void __netdev_adjacent_dev_remove(struct net_device *dev,
 					 struct net_device *adj_dev,
+					 u16 ref_nr,
 					 struct list_head *dev_list)
 {
 	struct netdev_adjacent *adj;
@@ -5274,10 +5278,10 @@ static void __netdev_adjacent_dev_remove(struct net_device *dev,
 		BUG();
 	}
 
-	if (adj->ref_nr > 1) {
-		pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
-			 adj->ref_nr-1);
-		adj->ref_nr--;
+	if (adj->ref_nr > ref_nr) {
+		pr_debug("%s to %s ref_nr-%d = %d\n", dev->name, adj_dev->name,
+			 ref_nr, adj->ref_nr-ref_nr);
+		adj->ref_nr -= ref_nr;
 		return;
 	}
 
@@ -5296,21 +5300,22 @@ static void __netdev_adjacent_dev_remove(struct net_device *dev,
 
 static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
 					    struct net_device *upper_dev,
+					    u16 ref_nr,
 					    struct list_head *up_list,
 					    struct list_head *down_list,
 					    void *private, bool master)
 {
 	int ret;
 
-	ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
-					   master);
+	ret = __netdev_adjacent_dev_insert(dev, upper_dev, ref_nr, up_list,
+					   private, master);
 	if (ret)
 		return ret;
 
-	ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
-					   false);
+	ret = __netdev_adjacent_dev_insert(upper_dev, dev, ref_nr, down_list,
+					   private, false);
 	if (ret) {
-		__netdev_adjacent_dev_remove(dev, upper_dev, up_list);
+		__netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
 		return ret;
 	}
 
@@ -5318,9 +5323,10 @@ static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
 }
 
 static int __netdev_adjacent_dev_link(struct net_device *dev,
-				      struct net_device *upper_dev)
+				      struct net_device *upper_dev,
+				      u16 ref_nr)
 {
-	return __netdev_adjacent_dev_link_lists(dev, upper_dev,
+	return __netdev_adjacent_dev_link_lists(dev, upper_dev, ref_nr,
						&dev->all_adj_list.upper,
						&upper_dev->all_adj_list.lower,
						NULL, false);
@@ -5328,17 +5334,19 @@ static int __netdev_adjacent_dev_link(struct net_device *dev,
 
 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
 					       struct net_device *upper_dev,
+					       u16 ref_nr,
 					       struct list_head *up_list,
 					       struct list_head *down_list)
 {
-	__netdev_adjacent_dev_remove(dev, upper_dev, up_list);
-	__netdev_adjacent_dev_remove(upper_dev, dev, down_list);
+	__netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
+	__netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
 }
 
 static void __netdev_adjacent_dev_unlink(struct net_device *dev,
-					 struct net_device *upper_dev)
+					 struct net_device *upper_dev,
+					 u16 ref_nr)
 {
-	__netdev_adjacent_dev_unlink_lists(dev, upper_dev,
+	__netdev_adjacent_dev_unlink_lists(dev, upper_dev, ref_nr,
					   &dev->all_adj_list.upper,
					   &upper_dev->all_adj_list.lower);
 }
@@ -5347,17 +5355,17 @@ static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
						struct net_device *upper_dev,
						void *private, bool master)
 {
-	int ret = __netdev_adjacent_dev_link(dev, upper_dev);
+	int ret = __netdev_adjacent_dev_link(dev, upper_dev, 1);
 
 	if (ret)
 		return ret;
 
-	ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
+	ret = __netdev_adjacent_dev_link_lists(dev, upper_dev, 1,
					       &dev->adj_list.upper,
					       &upper_dev->adj_list.lower,
					       private, master);
 	if (ret) {
-		__netdev_adjacent_dev_unlink(dev, upper_dev);
+		__netdev_adjacent_dev_unlink(dev, upper_dev, 1);
 		return ret;
 	}
 
@@ -5367,8 +5375,8 @@ static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
						   struct net_device *upper_dev)
 {
-	__netdev_adjacent_dev_unlink(dev, upper_dev);
-	__netdev_adjacent_dev_unlink_lists(dev, upper_dev,
+	__netdev_adjacent_dev_unlink(dev, upper_dev, 1);
+	__netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
					   &dev->adj_list.upper,
					   &upper_dev->adj_list.lower);
 }
@@ -5420,7 +5428,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
 		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
 			pr_debug("Interlinking %s with %s, non-neighbour\n",
				 i->dev->name, j->dev->name);
-			ret = __netdev_adjacent_dev_link(i->dev, j->dev);
+			ret = __netdev_adjacent_dev_link(i->dev, j->dev, i->ref_nr);
 			if (ret)
				goto rollback_mesh;
 		}
@@ -5430,7 +5438,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
 	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
 		pr_debug("linking %s's upper device %s with %s\n",
			 upper_dev->name, i->dev->name, dev->name);
-		ret = __netdev_adjacent_dev_link(dev, i->dev);
+		ret = __netdev_adjacent_dev_link(dev, i->dev, i->ref_nr);
 		if (ret)
			goto rollback_upper_mesh;
 	}
@@ -5439,7 +5447,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
 	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
 		pr_debug("linking %s's lower device %s with %s\n", dev->name,
			 i->dev->name, upper_dev->name);
-		ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
+		ret = __netdev_adjacent_dev_link(i->dev, upper_dev, i->ref_nr);
 		if (ret)
			goto rollback_lower_mesh;
 	}
@@ -5453,7 +5461,7 @@ rollback_lower_mesh:
 	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
 		if (i == to_i)
			break;
-		__netdev_adjacent_dev_unlink(i->dev, upper_dev);
+		__netdev_adjacent_dev_unlink(i->dev, upper_dev, i->ref_nr);
 	}
 
 	i = NULL;
@@ -5463,7 +5471,7 @@ rollback_upper_mesh:
 	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
 		if (i == to_i)
			break;
-		__netdev_adjacent_dev_unlink(dev, i->dev);
+		__netdev_adjacent_dev_unlink(dev, i->dev, i->ref_nr);
 	}
 
 	i = j = NULL;
@@ -5475,7 +5483,7 @@ rollback_mesh:
 		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
 			if (i == to_i && j == to_j)
				break;
-			__netdev_adjacent_dev_unlink(i->dev, j->dev);
+			__netdev_adjacent_dev_unlink(i->dev, j->dev, i->ref_nr);
 		}
 		if (i == to_i)
			break;
@@ -5559,16 +5567,16 @@ void netdev_upper_dev_unlink(struct net_device *dev,
 	 */
 	list_for_each_entry(i, &dev->all_adj_list.lower, list)
		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
-			__netdev_adjacent_dev_unlink(i->dev, j->dev);
+			__netdev_adjacent_dev_unlink(i->dev, j->dev, i->ref_nr);
 
 	/* remove also the devices itself from lower/upper device
	 * list
	 */
 	list_for_each_entry(i, &dev->all_adj_list.lower, list)
-		__netdev_adjacent_dev_unlink(i->dev, upper_dev);
+		__netdev_adjacent_dev_unlink(i->dev, upper_dev, i->ref_nr);
 
 	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
-		__netdev_adjacent_dev_unlink(dev, i->dev);
+		__netdev_adjacent_dev_unlink(dev, i->dev, i->ref_nr);
 
 	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
				      &changeupper_info.info);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 4da4d51a2ccf..b6327601f979 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -215,8 +215,8 @@
 #define M_NETIF_RECEIVE 1	/* Inject packets into stack */
 
 /* If lock -- protects updating of if_list */
-#define if_lock(t) spin_lock(&(t->if_lock));
-#define if_unlock(t) spin_unlock(&(t->if_lock));
+#define if_lock(t) mutex_lock(&(t->if_lock));
+#define if_unlock(t) mutex_unlock(&(t->if_lock));
 
 /* Used to help with determining the pkts on receive */
 #define PKTGEN_MAGIC 0xbe9be955
@@ -422,7 +422,7 @@ struct pktgen_net {
 };
 
 struct pktgen_thread {
-	spinlock_t if_lock;	/* for list of devices */
+	struct mutex if_lock;	/* for list of devices */
 	struct list_head if_list;	/* All device here */
 	struct list_head th_list;
 	struct task_struct *tsk;
@@ -2002,11 +2002,13 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d
 {
 	struct pktgen_thread *t;
 
+	mutex_lock(&pktgen_thread_lock);
+
 	list_for_each_entry(t, &pn->pktgen_threads, th_list) {
 		struct pktgen_dev *pkt_dev;
 
-		rcu_read_lock();
-		list_for_each_entry_rcu(pkt_dev, &t->if_list, list) {
+		if_lock(t);
+		list_for_each_entry(pkt_dev, &t->if_list, list) {
 			if (pkt_dev->odev != dev)
				continue;
 
@@ -2021,8 +2023,9 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d
			       dev->name);
			break;
		}
-		rcu_read_unlock();
+		if_unlock(t);
	}
+	mutex_unlock(&pktgen_thread_lock);
 }
 
 static int pktgen_device_event(struct notifier_block *unused,
@@ -2278,7 +2281,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
 
 static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
 {
-	pkt_dev->pkt_overhead = LL_RESERVED_SPACE(pkt_dev->odev);
+	pkt_dev->pkt_overhead = 0;
 	pkt_dev->pkt_overhead += pkt_dev->nr_labels*sizeof(u32);
 	pkt_dev->pkt_overhead += VLAN_TAG_SIZE(pkt_dev);
 	pkt_dev->pkt_overhead += SVLAN_TAG_SIZE(pkt_dev);
@@ -2769,13 +2772,13 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
 }
 
 static struct sk_buff *pktgen_alloc_skb(struct net_device *dev,
-					struct pktgen_dev *pkt_dev,
-					unsigned int extralen)
+					struct pktgen_dev *pkt_dev)
 {
+	unsigned int extralen = LL_RESERVED_SPACE(dev);
 	struct sk_buff *skb = NULL;
-	unsigned int size = pkt_dev->cur_pkt_size + 64 + extralen +
-			    pkt_dev->pkt_overhead;
+	unsigned int size;
 
+	size = pkt_dev->cur_pkt_size + 64 + extralen + pkt_dev->pkt_overhead;
 	if (pkt_dev->flags & F_NODE) {
 		int node = pkt_dev->node >= 0 ? pkt_dev->node : numa_node_id();
 
@@ -2788,8 +2791,9 @@ static struct sk_buff *pktgen_alloc_skb(struct net_device *dev,
 		skb = __netdev_alloc_skb(dev, size, GFP_NOWAIT);
 	}
 
+	/* the caller pre-fetches from skb->data and reserves for the mac hdr */
 	if (likely(skb))
-		skb_reserve(skb, LL_RESERVED_SPACE(dev));
+		skb_reserve(skb, extralen - 16);
 
 	return skb;
 }
@@ -2822,16 +2826,14 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
 	mod_cur_headers(pkt_dev);
 	queue_map = pkt_dev->cur_queue_map;
 
-	datalen = (odev->hard_header_len + 16) & ~0xf;
-
-	skb = pktgen_alloc_skb(odev, pkt_dev, datalen);
+	skb = pktgen_alloc_skb(odev, pkt_dev);
 	if (!skb) {
 		sprintf(pkt_dev->result, "No memory");
 		return NULL;
 	}
 
 	prefetchw(skb->data);
-	skb_reserve(skb, datalen);
+	skb_reserve(skb, 16);
 
 	/* Reserve for ethernet and IP header */
 	eth = (__u8 *) skb_push(skb, 14);
@@ -2951,7 +2953,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
 	mod_cur_headers(pkt_dev);
 	queue_map = pkt_dev->cur_queue_map;
 
-	skb = pktgen_alloc_skb(odev, pkt_dev, 16);
+	skb = pktgen_alloc_skb(odev, pkt_dev);
 	if (!skb) {
 		sprintf(pkt_dev->result, "No memory");
 		return NULL;
@@ -3727,7 +3729,7 @@ static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn)
 		return -ENOMEM;
 	}
 
-	spin_lock_init(&t->if_lock);
+	mutex_init(&t->if_lock);
 	t->cpu = cpu;
 
 	INIT_LIST_HEAD(&t->if_list);
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 9e63f252a89e..de85d4e1cf43 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -436,7 +436,7 @@ struct sk_buff **eth_gro_receive(struct sk_buff **head,
 
 	skb_gro_pull(skb, sizeof(*eh));
 	skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
-	pp = ptype->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 1a5c1ca3ad3c..afc18e9ca94a 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1372,7 +1372,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
 	skb_gro_pull(skb, sizeof(*iph));
 	skb_set_transport_header(skb, skb_gro_offset(skb));
 
-	pp = ops->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 08d7de55e57e..08d8ee124538 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -201,7 +201,7 @@ static struct sk_buff **fou_gro_receive(struct sk_buff **head,
 	if (!ops || !ops->callbacks.gro_receive)
 		goto out_unlock;
 
-	pp = ops->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
@@ -360,7 +360,7 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
 	if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
 		goto out_unlock;
 
-	pp = ops->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index e603004c1af8..79ae0d7becbf 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -219,7 +219,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
 	/* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/
 	skb_gro_postpull_rcsum(skb, greh, grehlen);
 
-	pp = ptype->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index a50124260f5a..9ce202549e7a 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -98,7 +98,7 @@ static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
 }
 
 static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
-				  int offset)
+				  int tlen, int offset)
 {
 	__wsum csum = skb->csum;
 
@@ -106,7 +106,9 @@ static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
 		return;
 
 	if (offset != 0)
-		csum = csum_sub(csum, csum_partial(skb->data, offset, 0));
+		csum = csum_sub(csum,
+				csum_partial(skb->data + tlen,
					     offset, 0));
 
 	put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
 }
@@ -152,7 +154,7 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
 }
 
 void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb,
-			 int offset)
+			 int tlen, int offset)
 {
 	struct inet_sock *inet = inet_sk(skb->sk);
 	unsigned int flags = inet->cmsg_flags;
@@ -215,7 +217,7 @@ void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb,
 	}
 
 	if (flags & IP_CMSG_CHECKSUM)
-		ip_cmsg_recv_checksum(msg, skb, offset);
+		ip_cmsg_recv_checksum(msg, skb, tlen, offset);
 }
 EXPORT_SYMBOL(ip_cmsg_recv_offset);
 
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 9d1e555496e3..8e77786549c6 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -2192,7 +2192,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
 
 int ipmr_get_route(struct net *net, struct sk_buff *skb,
 		   __be32 saddr, __be32 daddr,
-		   struct rtmsg *rtm, int nowait)
+		   struct rtmsg *rtm, int nowait, u32 portid)
 {
 	struct mfc_cache *cache;
 	struct mr_table *mrt;
@@ -2237,6 +2237,7 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
 		return -ENOMEM;
 	}
 
+	NETLINK_CB(skb2).portid = portid;
 	skb_push(skb2, sizeof(struct iphdr));
 	skb_reset_network_header(skb2);
 	iph = ip_hdr(skb2);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index b050cf980a57..8533a75a9328 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2492,7 +2492,8 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id,
 		    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
 			int err = ipmr_get_route(net, skb,
						 fl4->saddr, fl4->daddr,
-						 r, nowait);
+						 r, nowait, portid);
+
 			if (err <= 0) {
 				if (!nowait) {
					if (err == 0)
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index a0bd7a55193e..70fb352e317f 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -97,11 +97,11 @@ static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low
 		container_of(table->data, struct net, ipv4.ping_group_range.range);
 	unsigned int seq;
 	do {
-		seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
+		seq = read_seqbegin(&net->ipv4.ping_group_range.lock);
 
 		*low = data[0];
 		*high = data[1];
-	} while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
+	} while (read_seqretry(&net->ipv4.ping_group_range.lock, seq));
 }
 
 /* Update system visible IP port range */
@@ -110,10 +110,10 @@ static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t hig
 	kgid_t *data = table->data;
 	struct net *net =
 		container_of(table->data, struct net, ipv4.ping_group_range.range);
-	write_seqlock(&net->ipv4.ip_local_ports.lock);
+	write_seqlock(&net->ipv4.ping_group_range.lock);
 	data[0] = low;
 	data[1] = high;
-	write_sequnlock(&net->ipv4.ip_local_ports.lock);
+	write_sequnlock(&net->ipv4.ping_group_range.lock);
 }
 
 /* Validate changes from /proc interface. */
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 12b98e257c5f..7cc0f8aac28f 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2324,10 +2324,9 @@ static void DBGUNDO(struct sock *sk, const char *msg)
 	}
 #if IS_ENABLED(CONFIG_IPV6)
 	else if (sk->sk_family == AF_INET6) {
-		struct ipv6_pinfo *np = inet6_sk(sk);
 		pr_debug("Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
			 msg,
-			 &np->daddr, ntohs(inet->inet_dport),
+			 &sk->sk_v6_daddr, ntohs(inet->inet_dport),
			 tp->snd_cwnd, tcp_left_out(tp),
			 tp->snd_ssthresh, tp->prior_ssthresh,
			 tp->packets_out);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 660c967ba84a..0795647e94c6 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1950,12 +1950,14 @@ static int tcp_mtu_probe(struct sock *sk)
 	len = 0;
 	tcp_for_write_queue_from_safe(skb, next, sk) {
 		copy = min_t(int, skb->len, probe_size - len);
-		if (nskb->ip_summed)
+		if (nskb->ip_summed) {
 			skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
-		else
-			nskb->csum = skb_copy_and_csum_bits(skb, 0,
-							    skb_put(nskb, copy),
-							    copy, nskb->csum);
+		} else {
+			__wsum csum = skb_copy_and_csum_bits(skb, 0,
+							     skb_put(nskb, copy),
+							     copy, 0);
+			nskb->csum = csum_block_add(nskb->csum, csum, len);
+		}
 
 		if (skb->len <= copy) {
 			/* We've eaten all the data from this skb.
@@ -2569,7 +2571,8 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 	 * copying overhead: fragmentation, tunneling, mangling etc.
 	 */
 	if (atomic_read(&sk->sk_wmem_alloc) >
-	    min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
+	    min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2),
+		  sk->sk_sndbuf))
 		return -EAGAIN;
 
 	if (skb_still_in_host_queue(sk, skb))
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 0b1ea5abcc04..e9513e397c4f 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1342,7 +1342,7 @@ try_again:
 		*addr_len = sizeof(*sin);
 	}
 	if (inet->cmsg_flags)
-		ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr));
+		ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr), off);
 
 	err = copied;
 	if (flags & MSG_TRUNC)
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 0e36e56dfd22..6396f1c80ae9 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -339,8 +339,8 @@ unflush:
 	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
 	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
 	NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
-	pp = uo_priv->offload->callbacks.gro_receive(head, skb,
-						     uo_priv->offload);
+	pp = call_gro_receive_udp(uo_priv->offload->callbacks.gro_receive,
+				  head, skb, uo_priv->offload);
 
 out_unlock:
 	rcu_read_unlock();
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 036b39eb1220..cb8bb5988c03 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2916,7 +2916,7 @@ static void init_loopback(struct net_device *dev)
			 * lo device down, release this obsolete dst and
			 * reallocate a new router for ifa.
			 */
-			if (sp_ifa->rt->dst.obsolete > 0) {
+			if (!atomic_read(&sp_ifa->rt->rt6i_ref)) {
				ip6_rt_put(sp_ifa->rt);
				sp_ifa->rt = NULL;
			} else {
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 4650c6824783..17430f341073 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -886,7 +886,6 @@ static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
 		encap_limit = t->parms.encap_limit;
 
 	memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
-	fl6.flowi6_proto = skb->protocol;
 
 	err = ip6gre_xmit2(skb, dev, 0, &fl6, encap_limit, &mtu);
 
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 82e9f3076028..efe6268b8bc3 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -247,7 +247,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
 
 	skb_gro_postpull_rcsum(skb, iph, nlen);
 
-	pp = ops->callbacks.gro_receive(head, skb);
+	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 3991b21e24ad..e8878886eba4 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -246,6 +246,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
 	hash = HASH(&any, local);
 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
+		    ipv6_addr_any(&t->parms.raddr) &&
 		    (t->dev->flags & IFF_UP))
 			return t;
 	}
@@ -253,6 +254,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
 	hash = HASH(remote, &any);
 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
 		if (ipv6_addr_equal(remote, &t->parms.raddr) &&
+		    ipv6_addr_any(&t->parms.laddr) &&
 		    (t->dev->flags & IFF_UP))
 			return t;
 	}
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index e207cb2468da..d9843e5a667f 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -2276,8 +2276,8 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
 	return 1;
 }
 
-int ip6mr_get_route(struct net *net,
-		    struct sk_buff *skb, struct rtmsg *rtm, int nowait)
+int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
+		    int nowait, u32 portid)
 {
 	int err;
 	struct mr6_table *mrt;
@@ -2322,6 +2322,7 @@ int ip6mr_get_route(struct net *net,
 		return -ENOMEM;
 	}
 
+	NETLINK_CB(skb2).portid = portid;
 	skb_reset_transport_header(skb2);
 
 	skb_put(skb2, sizeof(struct ipv6hdr));
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 5af2cca0a46d..dbffc9de184b 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3140,7 +3140,9 @@ static int rt6_fill_node(struct net *net,
 	if (iif) {
 #ifdef CONFIG_IPV6_MROUTE
 		if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
-			int err = ip6mr_get_route(net, skb, rtm, nowait);
+			int err = ip6mr_get_route(net, skb, rtm, nowait,
+						  portid);
+
 			if (err <= 0) {
 				if (!nowait) {
					if (err == 0)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 2d81e2f33ef2..fbd521fdae53 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1179,6 +1179,16 @@ out:
 	return NULL;
 }
 
+static void tcp_v6_restore_cb(struct sk_buff *skb)
+{
+	/* We need to move header back to the beginning if xfrm6_policy_check()
+	 * and tcp_v6_fill_cb() are going to be called again.
+	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
+	 */
+	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
+		sizeof(struct inet6_skb_parm));
+}
+
 /* The socket must have it's spinlock held when we get
  * here, unless it is a TCP_LISTEN socket.
  *
@@ -1308,6 +1318,7 @@ ipv6_pktoptions:
 			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
 		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
 			skb_set_owner_r(opt_skb, sk);
+			tcp_v6_restore_cb(opt_skb);
 			opt_skb = xchg(&np->pktoptions, opt_skb);
 		} else {
 			__kfree_skb(opt_skb);
@@ -1341,15 +1352,6 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
 	TCP_SKB_CB(skb)->sacked = 0;
 }
 
-static void tcp_v6_restore_cb(struct sk_buff *skb)
-{
-	/* We need to move header back to the beginning if xfrm6_policy_check()
-	 * and tcp_v6_fill_cb() are going to be called again.
-	 */
-	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
-		sizeof(struct inet6_skb_parm));
-}
-
 static int tcp_v6_rcv(struct sk_buff *skb)
 {
 	const struct tcphdr *th;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index e6092bd72ee2..dfa85e7264df 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -498,7 +498,8 @@ try_again:
 
 	if (is_udp4) {
 		if (inet->cmsg_flags)
-			ip_cmsg_recv(msg, skb);
+			ip_cmsg_recv_offset(msg, skb,
+					    sizeof(struct udphdr), off);
 	} else {
 		if (np->rxopt.all)
 			ip6_datagram_recv_specific_ctl(sk, msg, skb);
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 7a5fa0c98377..28fc283c1ec1 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2557,7 +2557,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 	/* Record the max length of recvmsg() calls for future allocations */
 	nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
 	nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
-				     16384);
+				     SKB_WITH_OVERHEAD(32768));
 
 	copied = data_skb->len;
 	if (len < copied) {
@@ -2810,14 +2810,13 @@ static int netlink_dump(struct sock *sk)
 	if (alloc_min_size < nlk->max_recvmsg_len) {
 		alloc_size = nlk->max_recvmsg_len;
 		skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
-					GFP_KERNEL |
-					__GFP_NOWARN |
-					__GFP_NORETRY);
+					(GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) |
+					__GFP_NOWARN | __GFP_NORETRY);
 	}
 	if (!skb) {
 		alloc_size = alloc_min_size;
 		skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
-					GFP_KERNEL);
+					(GFP_KERNEL & ~__GFP_DIRECT_RECLAIM));
 	}
 	if (!skb)
 		goto errout_skb;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index a86f26d05bc2..34e4fcfd240b 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -249,7 +249,7 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po);
 static int packet_direct_xmit(struct sk_buff *skb)
 {
 	struct net_device *dev = skb->dev;
-	netdev_features_t features;
+	struct sk_buff *orig_skb = skb;
 	struct netdev_queue *txq;
 	int ret = NETDEV_TX_BUSY;
 
@@ -257,9 +257,8 @@ static int packet_direct_xmit(struct sk_buff *skb)
 		     !netif_carrier_ok(dev)))
 		goto drop;
 
-	features = netif_skb_features(skb);
-	if (skb_needs_linearize(skb, features) &&
-	    __skb_linearize(skb))
+	skb = validate_xmit_skb_list(skb, dev);
+	if (skb != orig_skb)
 		goto drop;
 
 	txq = skb_get_tx_queue(dev, skb);
@@ -279,7 +278,7 @@ static int packet_direct_xmit(struct sk_buff *skb)
 	return ret;
 drop:
 	atomic_long_inc(&dev->tx_dropped);
-	kfree_skb(skb);
+	kfree_skb_list(skb);
 	return NET_XMIT_DROP;
 }
 
@@ -3855,6 +3854,7 @@ static int packet_notifier(struct notifier_block *this,
 		}
 		if (msg == NETDEV_UNREGISTER) {
 			packet_cached_dev_reset(po);
+			fanout_release(sk);
 			po->ifindex = -1;
 			if (po->prot_hook.dev)
				dev_put(po->prot_hook.dev);
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index 796785e0bf96..d7edba4536bd 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -33,6 +33,12 @@ static int tcf_vlan(struct sk_buff *skb, const struct tc_action *a,
 	bstats_update(&v->tcf_bstats, skb);
 	action = v->tcf_action;
 
+	/* Ensure 'data' points at mac_header prior calling vlan manipulating
+	 * functions.
+	 */
+	if (skb_at_tc_ingress(skb))
+		skb_push_rcsum(skb, skb->mac_len);
+
 	switch (v->tcfv_action) {
 	case TCA_VLAN_ACT_POP:
 		err = skb_vlan_pop(skb);
@@ -54,6 +60,9 @@ drop:
 	action = TC_ACT_SHOT;
 	v->tcf_qstats.drops++;
 unlock:
+	if (skb_at_tc_ingress(skb))
+		skb_pull_rcsum(skb, skb->mac_len);
+
 	spin_unlock(&v->tcf_lock);
 	return action;
 }
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index a75864d93142..ecc1904e454f 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -315,7 +315,8 @@ replay:
 			if (err == 0) {
 				struct tcf_proto *next = rtnl_dereference(tp->next);
 
-				tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER);
+				tfilter_notify(net, skb, n, tp,
+					       t->tcm_handle, RTM_DELTFILTER);
 				if (tcf_destroy(tp, false))
					RCU_INIT_POINTER(*back, next);
 			}
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 22c2bf367d7e..29c7c43de108 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3426,6 +3426,12 @@ sctp_disposition_t sctp_sf_ootb(struct net *net,
 			return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
						  commands);
 
+		/* Report violation if chunk len overflows */
+		ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
+		if (ch_end > skb_tail_pointer(skb))
+			return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+						  commands);
+
 		/* Now that we know we at least have a chunk header,
 		 * do things that are type appropriate.
 		 */
@@ -3457,12 +3463,6 @@ sctp_disposition_t sctp_sf_ootb(struct net *net,
 			}
 		}
 
-		/* Report violation if chunk len overflows */
-		ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
-		if (ch_end > skb_tail_pointer(skb))
-			return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
-						  commands);
-
 		ch = (sctp_chunkhdr_t *) ch_end;
 	} while (ch_end < skb_tail_pointer(skb));
 
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index be1489fc3234..402817be3873 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4371,7 +4371,7 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
 static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
				  int __user *optlen)
 {
-	if (len <= 0)
+	if (len == 0)
 		return -EINVAL;
 	if (len > sizeof(struct sctp_event_subscribe))
 		len = sizeof(struct sctp_event_subscribe);
@@ -5972,6 +5972,9 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
 	if (get_user(len, optlen))
 		return -EFAULT;
 
+	if (len < 0)
+		return -EINVAL;
+
 	lock_sock(sk);
 
 	switch (optname) {