Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.9/0110-4.9.11-all-fixes.patch

Revision 2956 - Mon Jul 24 12:03:46 2017 UTC by niro
File size: 60460 bytes
-added patches-4.9
diff --git a/Makefile b/Makefile
index d2fe757a979d..18b0c5adad3b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 9
-SUBLEVEL = 10
+SUBLEVEL = 11
EXTRAVERSION =
NAME = Roaring Lionus

diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index ebb4e95fbd74..96d80dfac383 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -236,7 +236,8 @@ void fpstate_init(union fpregs_state *state)
* it will #GP. Make sure it is replaced after the memset().
*/
if (static_cpu_has(X86_FEATURE_XSAVES))
- state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT;
+ state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT |
+ xfeatures_mask;

if (static_cpu_has(X86_FEATURE_FXSR))
fpstate_init_fxstate(&state->fxsave);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index f2e8beddcf44..4d3ddc2f7e43 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -507,8 +507,11 @@ void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
return;

for (ring = 0; ring < priv->rx_ring_num; ring++) {
- if (mlx4_en_is_ring_empty(priv->rx_ring[ring]))
+ if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) {
+ local_bh_disable();
napi_reschedule(&priv->rx_cq[ring]->napi);
+ local_bh_enable();
+ }
}
}

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 71382df59fc0..81d8e3bd01b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -765,7 +765,8 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd);

int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix);
-void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv);
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
+ enum mlx5e_traffic_types tt);

int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 51c6a57ca873..126cfeb7e0ec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -975,15 +975,18 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,

static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
{
- struct mlx5_core_dev *mdev = priv->mdev;
void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
- int i;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int ctxlen = MLX5_ST_SZ_BYTES(tirc);
+ int tt;

MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
- mlx5e_build_tir_ctx_hash(tirc, priv);

- for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
- mlx5_core_modify_tir(mdev, priv->indir_tir[i].tirn, in, inlen);
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+ memset(tirc, 0, ctxlen);
+ mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
+ mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
+ }
}

static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 5dc3e2453ff5..b30671376a3d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1978,8 +1978,23 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout);
}

-void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
+ enum mlx5e_traffic_types tt)
{
+ void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP)
+
+#define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP |\
+ MLX5_HASH_FIELD_SEL_L4_SPORT |\
+ MLX5_HASH_FIELD_SEL_L4_DPORT)
+
+#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP |\
+ MLX5_HASH_FIELD_SEL_IPSEC_SPI)
+
MLX5_SET(tirc, tirc, rx_hash_fn,
mlx5e_rx_hash_fn(priv->params.rss_hfunc));
if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
@@ -1991,6 +2006,88 @@ void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
memcpy(rss_key, priv->params.toeplitz_hash_key, len);
}
+
+ switch (tt) {
+ case MLX5E_TT_IPV4_TCP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_TCP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_L4PORTS);
+ break;
+
+ case MLX5E_TT_IPV6_TCP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_TCP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_L4PORTS);
+ break;
+
+ case MLX5E_TT_IPV4_UDP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_UDP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_L4PORTS);
+ break;
+
+ case MLX5E_TT_IPV6_UDP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_UDP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_L4PORTS);
+ break;
+
+ case MLX5E_TT_IPV4_IPSEC_AH:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_IPSEC_SPI);
+ break;
+
+ case MLX5E_TT_IPV6_IPSEC_AH:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_IPSEC_SPI);
+ break;
+
+ case MLX5E_TT_IPV4_IPSEC_ESP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_IPSEC_SPI);
+ break;
+
+ case MLX5E_TT_IPV6_IPSEC_ESP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_IPSEC_SPI);
+ break;
+
+ case MLX5E_TT_IPV4:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP);
+ break;
+
+ case MLX5E_TT_IPV6:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP);
+ break;
+ default:
+ WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
+ }
}

static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
@@ -2360,110 +2457,13 @@ void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
enum mlx5e_traffic_types tt)
{
- void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
-
MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

-#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
- MLX5_HASH_FIELD_SEL_DST_IP)
-
-#define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\
- MLX5_HASH_FIELD_SEL_DST_IP |\
- MLX5_HASH_FIELD_SEL_L4_SPORT |\
- MLX5_HASH_FIELD_SEL_L4_DPORT)
-
-#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\
- MLX5_HASH_FIELD_SEL_DST_IP |\
- MLX5_HASH_FIELD_SEL_IPSEC_SPI)
-
mlx5e_build_tir_ctx_lro(tirc, priv);

MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
- mlx5e_build_tir_ctx_hash(tirc, priv);
-
- switch (tt) {
- case MLX5E_TT_IPV4_TCP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_TCP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_L4PORTS);
- break;
-
- case MLX5E_TT_IPV6_TCP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_TCP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_L4PORTS);
- break;
-
- case MLX5E_TT_IPV4_UDP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_UDP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_L4PORTS);
- break;
-
- case MLX5E_TT_IPV6_UDP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_UDP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_L4PORTS);
- break;
-
- case MLX5E_TT_IPV4_IPSEC_AH:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_IPSEC_SPI);
- break;
-
- case MLX5E_TT_IPV6_IPSEC_AH:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_IPSEC_SPI);
- break;
-
- case MLX5E_TT_IPV4_IPSEC_ESP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_IPSEC_SPI);
- break;
-
- case MLX5E_TT_IPV6_IPSEC_ESP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_IPSEC_SPI);
- break;
-
- case MLX5E_TT_IPV4:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP);
- break;
-
- case MLX5E_TT_IPV6:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP);
- break;
- default:
- WARN_ONCE(true,
- "mlx5e_build_indir_tir_ctx: bad traffic type!\n");
- }
+ mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
}

static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 914e5466f729..7e20e4bc4cc7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1110,9 +1110,8 @@ static struct mlx5_flow_rule *add_rule_fg(struct mlx5_flow_group *fg,
return rule;
}
rule = add_rule_fte(fte, fg, dest);
- unlock_ref_node(&fte->node);
if (IS_ERR(rule))
- goto unlock_fg;
+ goto unlock_fte;
else
goto add_rule;
}
@@ -1130,6 +1129,7 @@ static struct mlx5_flow_rule *add_rule_fg(struct mlx5_flow_group *fg,
goto unlock_fg;
}
tree_init_node(&fte->node, 0, del_fte);
+ nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
rule = add_rule_fte(fte, fg, dest);
if (IS_ERR(rule)) {
kfree(fte);
@@ -1142,6 +1142,8 @@ static struct mlx5_flow_rule *add_rule_fg(struct mlx5_flow_group *fg,
list_add(&fte->node.list, prev);
add_rule:
tree_add_node(&rule->node, &fte->node);
+unlock_fte:
+ unlock_ref_node(&fte->node);
unlock_fg:
unlock_ref_node(&fg->node);
return rule;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 7df4ff158f3d..7d19029e2564 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -305,8 +305,12 @@ static int dwmac1000_irq_status(struct mac_device_info *hw,
{
void __iomem *ioaddr = hw->pcsr;
u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
+ u32 intr_mask = readl(ioaddr + GMAC_INT_MASK);
int ret = 0;

+ /* Discard masked bits */
+ intr_status &= ~intr_mask;
+
/* Not used events (e.g. MMC interrupts) are not handled. */
if ((intr_status & GMAC_INT_STATUS_MMCTIS))
x->mmc_tx_irq_n++;
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 6255973e3dda..1b65f0f975cf 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -164,6 +164,7 @@ static void loopback_setup(struct net_device *dev)
{
dev->mtu = 64 * 1024;
dev->hard_header_len = ETH_HLEN; /* 14 */
+ dev->min_header_len = ETH_HLEN; /* 14 */
dev->addr_len = ETH_ALEN; /* 6 */
dev->type = ARPHRD_LOOPBACK; /* 0x0001*/
dev->flags = IFF_LOOPBACK;
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 6f38daf2d978..adea6f5a4d71 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -682,7 +682,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
ssize_t n;

if (q->flags & IFF_VNET_HDR) {
- vnet_hdr_len = q->vnet_hdr_sz;
+ vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);

err = -EINVAL;
if (len < vnet_hdr_len)
@@ -822,7 +822,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,

if (q->flags & IFF_VNET_HDR) {
struct virtio_net_hdr vnet_hdr;
- vnet_hdr_len = q->vnet_hdr_sz;
+ vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
if (iov_iter_count(iter) < vnet_hdr_len)
return -EINVAL;

diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 18402d79539e..b31aca8146bb 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1187,9 +1187,11 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
}

if (tun->flags & IFF_VNET_HDR) {
- if (len < tun->vnet_hdr_sz)
+ int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
+
+ if (len < vnet_hdr_sz)
return -EINVAL;
- len -= tun->vnet_hdr_sz;
+ len -= vnet_hdr_sz;

n = copy_from_iter(&gso, sizeof(gso), from);
if (n != sizeof(gso))
@@ -1201,7 +1203,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,

if (tun16_to_cpu(tun, gso.hdr_len) > len)
return -EINVAL;
- iov_iter_advance(from, tun->vnet_hdr_sz - sizeof(gso));
+ iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
}

if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
@@ -1348,7 +1350,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
vlan_hlen = VLAN_HLEN;

if (tun->flags & IFF_VNET_HDR)
- vnet_hdr_sz = tun->vnet_hdr_sz;
+ vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);

total = skb->len + vlan_hlen + vnet_hdr_sz;

diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index d9ca05d3ac8e..40864159021d 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -777,7 +777,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
struct net_device *netdev;
struct catc *catc;
u8 broadcast[ETH_ALEN];
- int i, pktsz;
+ int pktsz, ret;

if (usb_set_interface(usbdev,
intf->altsetting->desc.bInterfaceNumber, 1)) {
@@ -812,12 +812,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
if ((!catc->ctrl_urb) || (!catc->tx_urb) ||
(!catc->rx_urb) || (!catc->irq_urb)) {
dev_err(&intf->dev, "No free urbs available.\n");
- usb_free_urb(catc->ctrl_urb);
- usb_free_urb(catc->tx_urb);
- usb_free_urb(catc->rx_urb);
- usb_free_urb(catc->irq_urb);
- free_netdev(netdev);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto fail_free;
}

/* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */
@@ -845,15 +841,24 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
catc->irq_buf, 2, catc_irq_done, catc, 1);

if (!catc->is_f5u011) {
+ u32 *buf;
+ int i;
+
dev_dbg(dev, "Checking memory size\n");

- i = 0x12345678;
- catc_write_mem(catc, 0x7a80, &i, 4);
- i = 0x87654321;
- catc_write_mem(catc, 0xfa80, &i, 4);
- catc_read_mem(catc, 0x7a80, &i, 4);
+ buf = kmalloc(4, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto fail_free;
+ }
+
+ *buf = 0x12345678;
+ catc_write_mem(catc, 0x7a80, buf, 4);
+ *buf = 0x87654321;
+ catc_write_mem(catc, 0xfa80, buf, 4);
+ catc_read_mem(catc, 0x7a80, buf, 4);

- switch (i) {
+ switch (*buf) {
case 0x12345678:
catc_set_reg(catc, TxBufCount, 8);
catc_set_reg(catc, RxBufCount, 32);
@@ -868,6 +873,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
dev_dbg(dev, "32k Memory\n");
break;
}
+
+ kfree(buf);

dev_dbg(dev, "Getting MAC from SEEROM.\n");

@@ -914,16 +921,21 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
usb_set_intfdata(intf, catc);

SET_NETDEV_DEV(netdev, &intf->dev);
- if (register_netdev(netdev) != 0) {
- usb_set_intfdata(intf, NULL);
- usb_free_urb(catc->ctrl_urb);
- usb_free_urb(catc->tx_urb);
- usb_free_urb(catc->rx_urb);
- usb_free_urb(catc->irq_urb);
- free_netdev(netdev);
- return -EIO;
- }
+ ret = register_netdev(netdev);
+ if (ret)
+ goto fail_clear_intfdata;
+
return 0;
+
+fail_clear_intfdata:
+ usb_set_intfdata(intf, NULL);
+fail_free:
+ usb_free_urb(catc->ctrl_urb);
+ usb_free_urb(catc->tx_urb);
+ usb_free_urb(catc->rx_urb);
+ usb_free_urb(catc->irq_urb);
+ free_netdev(netdev);
+ return ret;
}

static void catc_disconnect(struct usb_interface *intf)
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 1434e5dd5f9c..ee40ac23507a 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -126,40 +126,61 @@ static void async_ctrl_callback(struct urb *urb)

static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
{
+ u8 *buf;
int ret;

+ buf = kmalloc(size, GFP_NOIO);
+ if (!buf)
+ return -ENOMEM;
+
ret = usb_control_msg(pegasus->usb, usb_rcvctrlpipe(pegasus->usb, 0),
PEGASUS_REQ_GET_REGS, PEGASUS_REQT_READ, 0,
- indx, data, size, 1000);
+ indx, buf, size, 1000);
if (ret < 0)
netif_dbg(pegasus, drv, pegasus->net,
"%s returned %d\n", __func__, ret);
+ else if (ret <= size)
+ memcpy(data, buf, ret);
+ kfree(buf);
return ret;
}

-static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
+static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
+ const void *data)
{
+ u8 *buf;
int ret;

+ buf = kmemdup(data, size, GFP_NOIO);
+ if (!buf)
+ return -ENOMEM;
+
ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0),
PEGASUS_REQ_SET_REGS, PEGASUS_REQT_WRITE, 0,
- indx, data, size, 100);
+ indx, buf, size, 100);
if (ret < 0)
netif_dbg(pegasus, drv, pegasus->net,
"%s returned %d\n", __func__, ret);
+ kfree(buf);
return ret;
}

static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data)
{
+ u8 *buf;
int ret;

+ buf = kmemdup(&data, 1, GFP_NOIO);
+ if (!buf)
+ return -ENOMEM;
+
ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0),
PEGASUS_REQ_SET_REG, PEGASUS_REQT_WRITE, data,
- indx, &data, 1, 1000);
+ indx, buf, 1, 1000);
if (ret < 0)
netif_dbg(pegasus, drv, pegasus->net,
"%s returned %d\n", __func__, ret);
+ kfree(buf);
return ret;
}

diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 7c72bfac89d0..dc4f7ea95c9b 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -155,16 +155,36 @@ static const char driver_name [] = "rtl8150";
*/
static int get_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)
{
- return usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
- RTL8150_REQ_GET_REGS, RTL8150_REQT_READ,
- indx, 0, data, size, 500);
+ void *buf;
+ int ret;
+
+ buf = kmalloc(size, GFP_NOIO);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
+ RTL8150_REQ_GET_REGS, RTL8150_REQT_READ,
+ indx, 0, buf, size, 500);
+ if (ret > 0 && ret <= size)
+ memcpy(data, buf, ret);
+ kfree(buf);
+ return ret;
}

-static int set_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)
+static int set_registers(rtl8150_t * dev, u16 indx, u16 size, const void *data)
{
- return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
- RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE,
- indx, 0, data, size, 500);
+ void *buf;
+ int ret;
+
+ buf = kmemdup(data, size, GFP_NOIO);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+ RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE,
+ indx, 0, buf, size, 500);
+ kfree(buf);
+ return ret;
}

static void async_set_reg_cb(struct urb *urb)
diff --git a/include/linux/can/core.h b/include/linux/can/core.h
index a0875001b13c..df08a41d5be5 100644
--- a/include/linux/can/core.h
+++ b/include/linux/can/core.h
@@ -45,10 +45,9 @@ struct can_proto {
extern int can_proto_register(const struct can_proto *cp);
extern void can_proto_unregister(const struct can_proto *cp);

-extern int can_rx_register(struct net_device *dev, canid_t can_id,
- canid_t mask,
- void (*func)(struct sk_buff *, void *),
- void *data, char *ident);
+int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
+ void (*func)(struct sk_buff *, void *),
+ void *data, char *ident, struct sock *sk);

extern void can_rx_unregister(struct net_device *dev, canid_t can_id,
canid_t mask,
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d83590ef74a1..bb9b102c15cd 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1508,6 +1508,7 @@ enum netdev_priv_flags {
* @mtu: Interface MTU value
* @type: Interface hardware type
* @hard_header_len: Maximum hardware header length.
+ * @min_header_len: Minimum hardware header length
*
* @needed_headroom: Extra headroom the hardware may need, but not in all
* cases can this be guaranteed
@@ -1728,6 +1729,7 @@ struct net_device {
unsigned int mtu;
unsigned short type;
unsigned short hard_header_len;
+ unsigned short min_header_len;

unsigned short needed_headroom;
unsigned short needed_tailroom;
@@ -2783,6 +2785,8 @@ static inline bool dev_validate_header(const struct net_device *dev,
{
if (likely(len >= dev->hard_header_len))
return true;
+ if (len < dev->min_header_len)
+ return false;

if (capable(CAP_SYS_RAWIO)) {
memset(ll_header + len, 0, dev->hard_header_len - len);
diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h
index 3ebb168b9afc..a34b141f125f 100644
--- a/include/net/cipso_ipv4.h
+++ b/include/net/cipso_ipv4.h
@@ -309,6 +309,10 @@ static inline int cipso_v4_validate(const struct sk_buff *skb,
}

for (opt_iter = 6; opt_iter < opt_len;) {
+ if (opt_iter + 1 == opt_len) {
+ err_offset = opt_iter;
+ goto out;
+ }
tag_len = opt[opt_iter + 1];
if ((tag_len == 0) || (tag_len > (opt_len - opt_iter))) {
err_offset = opt_iter + 1;
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index f11ca837361b..7f15f95625e7 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -871,7 +871,7 @@ int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
* upper-layer output functions
*/
int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
- struct ipv6_txoptions *opt, int tclass);
+ __u32 mark, struct ipv6_txoptions *opt, int tclass);

int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);

diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
index fc7c0dbdd1ff..3f40132e2129 100644
--- a/include/net/lwtunnel.h
+++ b/include/net/lwtunnel.h
@@ -176,7 +176,10 @@ static inline int lwtunnel_valid_encap_type(u16 encap_type)
}
static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len)
{
- return -EOPNOTSUPP;
+ /* return 0 since we are not walking attr looking for
+ * RTA_ENCAP_TYPE attribute on nexthops.
+ */
+ return 0;
}

static inline int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 1108079d934f..5488e4a6ccd0 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -445,6 +445,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
* @func: callback function on filter match
* @data: returned parameter for callback function
* @ident: string for calling module identification
+ * @sk: socket pointer (might be NULL)
*
* Description:
* Invokes the callback function with the received sk_buff and the given
@@ -468,7 +469,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
*/
int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
void (*func)(struct sk_buff *, void *), void *data,
- char *ident)
+ char *ident, struct sock *sk)
{
struct receiver *r;
struct hlist_head *rl;
@@ -496,6 +497,7 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
r->func = func;
r->data = data;
r->ident = ident;
+ r->sk = sk;

hlist_add_head_rcu(&r->list, rl);
d->entries++;
@@ -520,8 +522,11 @@ EXPORT_SYMBOL(can_rx_register);
static void can_rx_delete_receiver(struct rcu_head *rp)
{
struct receiver *r = container_of(rp, struct receiver, rcu);
+ struct sock *sk = r->sk;

kmem_cache_free(rcv_cache, r);
+ if (sk)
+ sock_put(sk);
}

/**
@@ -596,8 +601,11 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
spin_unlock(&can_rcvlists_lock);

/* schedule the receiver item for deletion */
- if (r)
+ if (r) {
+ if (r->sk)
+ sock_hold(r->sk);
call_rcu(&r->rcu, can_rx_delete_receiver);
+ }
}
EXPORT_SYMBOL(can_rx_unregister);

diff --git a/net/can/af_can.h b/net/can/af_can.h
index fca0fe9fc45a..b86f5129e838 100644
--- a/net/can/af_can.h
+++ b/net/can/af_can.h
@@ -50,13 +50,14 @@

struct receiver {
struct hlist_node list;
- struct rcu_head rcu;
canid_t can_id;
canid_t mask;
unsigned long matches;
void (*func)(struct sk_buff *, void *);
void *data;
char *ident;
+ struct sock *sk;
+ struct rcu_head rcu;
};

#define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 5e9ed5ec2860..e4f694dfcf83 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -1225,7 +1225,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
err = can_rx_register(dev, op->can_id,
REGMASK(op->can_id),
bcm_rx_handler, op,
- "bcm");
+ "bcm", sk);

op->rx_reg_dev = dev;
dev_put(dev);
@@ -1234,7 +1234,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
} else
err = can_rx_register(NULL, op->can_id,
REGMASK(op->can_id),
- bcm_rx_handler, op, "bcm");
+ bcm_rx_handler, op, "bcm", sk);
if (err) {
/* this bcm rx op is broken -> remove it */
list_del(&op->list);
diff --git a/net/can/gw.c b/net/can/gw.c
index 455168718c2e..77c8af4047ef 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -442,7 +442,7 @@ static inline int cgw_register_filter(struct cgw_job *gwj)
{
return can_rx_register(gwj->src.dev, gwj->ccgw.filter.can_id,
gwj->ccgw.filter.can_mask, can_can_gw_rcv,
- gwj, "gw");
+ gwj, "gw", NULL);
}

static inline void cgw_unregister_filter(struct cgw_job *gwj)
diff --git a/net/can/raw.c b/net/can/raw.c
index b075f028d7e2..6dc546a06673 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -190,7 +190,7 @@ static int raw_enable_filters(struct net_device *dev, struct sock *sk,
for (i = 0; i < count; i++) {
err = can_rx_register(dev, filter[i].can_id,
filter[i].can_mask,
- raw_rcv, sk, "raw");
+ raw_rcv, sk, "raw", sk);
if (err) {
/* clean up successfully registered filters */
while (--i >= 0)
@@ -211,7 +211,7 @@ static int raw_enable_errfilter(struct net_device *dev, struct sock *sk,

if (err_mask)
err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG,
- raw_rcv, sk, "raw");
+ raw_rcv, sk, "raw", sk);

return err;
}
diff --git a/net/core/dev.c b/net/core/dev.c
index df51c50927ab..60b0a6049e72 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1696,24 +1696,19 @@ EXPORT_SYMBOL_GPL(net_dec_egress_queue);

static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
-/* We are not allowed to call static_key_slow_dec() from irq context
- * If net_disable_timestamp() is called from irq context, defer the
- * static_key_slow_dec() calls.
- */
static atomic_t netstamp_needed_deferred;
-#endif
-
-void net_enable_timestamp(void)
+static void netstamp_clear(struct work_struct *work)
{
-#ifdef HAVE_JUMP_LABEL
int deferred = atomic_xchg(&netstamp_needed_deferred, 0);

- if (deferred) {
- while (--deferred)
- static_key_slow_dec(&netstamp_needed);
- return;
- }
+ while (deferred--)
+ static_key_slow_dec(&netstamp_needed);
+}
+static DECLARE_WORK(netstamp_work, netstamp_clear);
#endif
+
+void net_enable_timestamp(void)
+{
static_key_slow_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);
@@ -1721,12 +1716,12 @@ EXPORT_SYMBOL(net_enable_timestamp);
void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
- if (in_interrupt()) {
- atomic_inc(&netstamp_needed_deferred);
- return;
- }
-#endif
+ /* net_disable_timestamp() can be called from non process context */
+ atomic_inc(&netstamp_needed_deferred);
+ schedule_work(&netstamp_work);
+#else
static_key_slow_dec(&netstamp_needed);
+#endif
}
EXPORT_SYMBOL(net_disable_timestamp);

diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 715e5d1dc107..7506c03a7db9 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -227,7 +227,7 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req
opt = ireq->ipv6_opt;
if (!opt)
opt = rcu_dereference(np->opt);
- err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
+ err = ip6_xmit(sk, skb, &fl6, sk->sk_mark, opt, np->tclass);
rcu_read_unlock();
err = net_xmit_eval(err);
}
@@ -281,7 +281,7 @@ static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
if (!IS_ERR(dst)) {
skb_dst_set(skb, dst);
- ip6_xmit(ctl_sk, skb, &fl6, NULL, 0);
+ ip6_xmit(ctl_sk, skb, &fl6, 0, NULL, 0);
DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
return;
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index da3862124545..0f99297b2fb3 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -273,6 +273,7 @@ static int dsa_user_port_apply(struct device_node *port, u32 index,
if (err) {
dev_warn(ds->dev, "Failed to create slave %d: %d\n",
index, err);
+ ds->ports[index].netdev = NULL;
return err;
}

diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 02acfff36028..24d7aff8db1a 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -356,6 +356,7 @@ void ether_setup(struct net_device *dev)
dev->header_ops = &eth_header_ops;
dev->type = ARPHRD_ETHER;
dev->hard_header_len = ETH_HLEN;
+ dev->min_header_len = ETH_HLEN;
dev->mtu = ETH_DATA_LEN;
dev->addr_len = ETH_ALEN;
dev->tx_queue_len = 1000; /* Ethernet wants good queues */
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 72d6f056d863..ae206163c273 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -1587,6 +1587,10 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
goto validate_return_locked;
}

+ if (opt_iter + 1 == opt_len) {
+ err_offset = opt_iter;
+ goto validate_return_locked;
+ }
tag_len = tag[1];
if (tag_len > (opt_len - opt_iter)) {
err_offset = opt_iter + 1;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 32a08bc010bf..1bc623d7f754 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1172,6 +1172,7 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
psf->sf_crcount = im->crcount;
}
in_dev_put(pmc->interface);
+ kfree(pmc);
}
spin_unlock_bh(&im->lock);
}
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 877bdb02e887..e5c1dbef3626 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1606,6 +1606,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
sk->sk_protocol = ip_hdr(skb)->protocol;
sk->sk_bound_dev_if = arg->bound_dev_if;
sk->sk_sndbuf = sysctl_wmem_default;
+ sk->sk_mark = fl4.flowi4_mark;
err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
len, 0, &ipc, &rt, MSG_DONTWAIT);
if (unlikely(err)) {
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index f226f4086e05..65336f38a5d8 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -1215,7 +1215,14 @@ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
pktinfo->ipi_ifindex = 0;
pktinfo->ipi_spec_dst.s_addr = 0;
}
- skb_dst_drop(skb);
+ /* We need to keep the dst for __ip_options_echo()
+ * We could restrict the test to opt.ts_needtime || opt.srr,
+ * but the following is good enough as IP options are not often used.
+ */
+ if (unlikely(IPCB(skb)->opt.optlen))
+ skb_dst_force(skb);
+ else
+ skb_dst_drop(skb);
}

int ip_setsockopt(struct sock *sk, int level,
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 96b8e2b95731..105c0748c52f 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -642,6 +642,8 @@ static int ping_v4_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh,
{
struct sk_buff *skb = skb_peek(&sk->sk_write_queue);

+ if (!skb)
+ return 0;
pfh->wcheck = csum_partial((char *)&pfh->icmph,
sizeof(struct icmphdr), pfh->wcheck);
pfh->icmph.checksum = csum_fold(pfh->wcheck);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 814af89c1bd3..6a90a0e130dc 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -772,6 +772,12 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
ret = -EAGAIN;
break;
}
+ /* if __tcp_splice_read() got nothing while we have
+ * an skb in receive queue, we do not want to loop.
+ * This might happen with URG data.
+ */
+ if (!skb_queue_empty(&sk->sk_receive_queue))
+ break;
sk_wait_data(sk, &timeo, NULL);
if (signal_pending(current)) {
ret = sock_intr_errno(timeo);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 896e9dfbdb5c..65d6189140bc 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2436,9 +2436,11 @@ u32 __tcp_select_window(struct sock *sk)
int full_space = min_t(int, tp->window_clamp, allowed_space);
int window;

- if (mss > full_space)
+ if (unlikely(mss > full_space)) {
mss = full_space;
-
+ if (mss <= 0)
+ return 0;
+ }
if (free_space < (full_space >> 1)) {
icsk->icsk_ack.quick = 0;

diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 532c3ef282c5..798a0950e9a6 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -173,7 +173,7 @@ int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused
/* Restore final destination back after routing done */
fl6.daddr = sk->sk_v6_daddr;

- res = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
+ res = ip6_xmit(sk, skb, &fl6, sk->sk_mark, rcu_dereference(np->opt),
np->tclass);
rcu_read_unlock();
return res;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index d7d6d3ae0b3b..0a5922055da2 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -367,35 +367,37 @@ static void ip6gre_tunnel_uninit(struct net_device *dev)


static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
- u8 type, u8 code, int offset, __be32 info)
+ u8 type, u8 code, int offset, __be32 info)
{
- const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
- __be16 *p = (__be16 *)(skb->data + offset);
- int grehlen = offset + 4;
+ const struct gre_base_hdr *greh;
+ const struct ipv6hdr *ipv6h;
+ int grehlen = sizeof(*greh);
struct ip6_tnl *t;
+ int key_off = 0;
__be16 flags;
+ __be32 key;

- flags = p[0];
- if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
- if (flags&(GRE_VERSION|GRE_ROUTING))
- return;
- if (flags&GRE_KEY) {
- grehlen += 4;
- if (flags&GRE_CSUM)
- grehlen += 4;
- }
+ if (!pskb_may_pull(skb, offset + grehlen))
+ return;
+ greh = (const struct gre_base_hdr *)(skb->data + offset);
+ flags = greh->flags;
+ if (flags & (GRE_VERSION | GRE_ROUTING))
+ return;
+ if (flags & GRE_CSUM)
+ grehlen += 4;
+ if (flags & GRE_KEY) {
+ key_off = grehlen + offset;
+ grehlen += 4;
}

- /* If only 8 bytes returned, keyed message will be dropped here */
- if (!pskb_may_pull(skb, grehlen))
+ if (!pskb_may_pull(skb, offset + grehlen))
return;
ipv6h = (const struct ipv6hdr *)skb->data;
- p = (__be16 *)(skb->data + offset);
+ greh = (const struct gre_base_hdr *)(skb->data + offset);
+ key = key_off ? *(__be32 *)(skb->data + key_off) : 0;

t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
- flags & GRE_KEY ?
- *(((__be32 *)p) + (grehlen / 4) - 1) : 0,
- p[1]);
+ key, greh->protocol);
if (!t)
return;

diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 59eb4ed99ce8..9a87bfb2ec16 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -163,7 +163,7 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
* which are using proper atomic operations or spinlocks.
*/
int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
- struct ipv6_txoptions *opt, int tclass)
+ __u32 mark, struct ipv6_txoptions *opt, int tclass)
{
struct net *net = sock_net(sk);
const struct ipv6_pinfo *np = inet6_sk(sk);
@@ -230,7 +230,7 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,

skb->protocol = htons(ETH_P_IPV6);
skb->priority = sk->sk_priority;
- skb->mark = sk->sk_mark;
+ skb->mark = mark;

mtu = dst_mtu(dst);
if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index f95437f1087c..f6ba45242851 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -400,18 +400,19 @@ ip6_tnl_dev_uninit(struct net_device *dev)

__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
{
- const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw;
- __u8 nexthdr = ipv6h->nexthdr;
- __u16 off = sizeof(*ipv6h);
+ const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
+ unsigned int nhoff = raw - skb->data;
+ unsigned int off = nhoff + sizeof(*ipv6h);
+ u8 next, nexthdr = ipv6h->nexthdr;

while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
- __u16 optlen = 0;
struct ipv6_opt_hdr *hdr;
- if (raw + off + sizeof(*hdr) > skb->data &&
- !pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr)))
+ u16 optlen;
+
+ if (!pskb_may_pull(skb, off + sizeof(*hdr)))
break;

- hdr = (struct ipv6_opt_hdr *) (raw + off);
+ hdr = (struct ipv6_opt_hdr *)(skb->data + off);
if (nexthdr == NEXTHDR_FRAGMENT) {
struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
if (frag_hdr->frag_off)
@@ -422,20 +423,29 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
} else {
optlen = ipv6_optlen(hdr);
}
+ /* cache hdr->nexthdr, since pskb_may_pull() might
+ * invalidate hdr
+ */
+ next = hdr->nexthdr;
if (nexthdr == NEXTHDR_DEST) {
- __u16 i = off + 2;
+ u16 i = 2;
+
+ /* Remember : hdr is no longer valid at this point. */
+ if (!pskb_may_pull(skb, off + optlen))
+ break;
+
while (1) {
struct ipv6_tlv_tnl_enc_lim *tel;

/* No more room for encapsulation limit */
- if (i + sizeof (*tel) > off + optlen)
+ if (i + sizeof(*tel) > optlen)
break;

- tel = (struct ipv6_tlv_tnl_enc_lim *) &raw[i];
+ tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
/* return index of option if found and valid */
if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
tel->length == 1)
- return i;
+ return i + off - nhoff;
/* else jump to next option */
if (tel->type)
i += tel->length + 2;
@@ -443,7 +453,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
i++;
}
}
- nexthdr = hdr->nexthdr;
+ nexthdr = next;
off += optlen;
}
return 0;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 14a3903f1c82..1bdc703cb966 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -81,7 +81,7 @@ static void mld_gq_timer_expire(unsigned long data);
static void mld_ifc_timer_expire(unsigned long data);
static void mld_ifc_event(struct inet6_dev *idev);
static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
-static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *addr);
+static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
static void mld_clear_delrec(struct inet6_dev *idev);
static bool mld_in_v1_mode(const struct inet6_dev *idev);
static int sf_setstate(struct ifmcaddr6 *pmc);
@@ -692,9 +692,9 @@ static void igmp6_group_dropped(struct ifmcaddr6 *mc)
dev_mc_del(dev, buf);
}

- if (mc->mca_flags & MAF_NOREPORT)
- goto done;
spin_unlock_bh(&mc->mca_lock);
+ if (mc->mca_flags & MAF_NOREPORT)
+ return;

if (!mc->idev->dead)
igmp6_leave_group(mc);
@@ -702,8 +702,6 @@ static void igmp6_group_dropped(struct ifmcaddr6 *mc)
spin_lock_bh(&mc->mca_lock);
if (del_timer(&mc->mca_timer))
atomic_dec(&mc->mca_refcnt);
-done:
- ip6_mc_clear_src(mc);
spin_unlock_bh(&mc->mca_lock);
}

@@ -748,10 +746,11 @@ static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
spin_unlock_bh(&idev->mc_lock);
}

-static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca)
+static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
{
struct ifmcaddr6 *pmc, *pmc_prev;
- struct ip6_sf_list *psf, *psf_next;
+ struct ip6_sf_list *psf;
+ struct in6_addr *pmca = &im->mca_addr;

spin_lock_bh(&idev->mc_lock);
pmc_prev = NULL;
@@ -768,14 +767,21 @@ static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca)
}
spin_unlock_bh(&idev->mc_lock);

+ spin_lock_bh(&im->mca_lock);
if (pmc) {
- for (psf = pmc->mca_tomb; psf; psf = psf_next) {
- psf_next = psf->sf_next;
- kfree(psf);
+ im->idev = pmc->idev;
+ im->mca_crcount = idev->mc_qrv;
+ im->mca_sfmode = pmc->mca_sfmode;
+ if (pmc->mca_sfmode == MCAST_INCLUDE) {
+ im->mca_tomb = pmc->mca_tomb;
+ im->mca_sources = pmc->mca_sources;
+ for (psf = im->mca_sources; psf; psf = psf->sf_next)
+ psf->sf_crcount = im->mca_crcount;
}
in6_dev_put(pmc->idev);
kfree(pmc);
}
+ spin_unlock_bh(&im->mca_lock);
}

static void mld_clear_delrec(struct inet6_dev *idev)
@@ -904,7 +910,7 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
mca_get(mc);
write_unlock_bh(&idev->lock);

- mld_del_delrec(idev, &mc->mca_addr);
+ mld_del_delrec(idev, mc);
igmp6_group_added(mc);
ma_put(mc);
return 0;
@@ -927,6 +933,7 @@ int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
write_unlock_bh(&idev->lock);

igmp6_group_dropped(ma);
+ ip6_mc_clear_src(ma);

ma_put(ma);
return 0;
@@ -2501,15 +2508,17 @@ void ipv6_mc_down(struct inet6_dev *idev)
/* Withdraw multicast list */

read_lock_bh(&idev->lock);
- mld_ifc_stop_timer(idev);
- mld_gq_stop_timer(idev);
- mld_dad_stop_timer(idev);

for (i = idev->mc_list; i; i = i->next)
igmp6_group_dropped(i);
- read_unlock_bh(&idev->lock);

- mld_clear_delrec(idev);
+ /* Should stop timer after group drop. or we will
+ * start timer again in mld_ifc_event()
+ */
+ mld_ifc_stop_timer(idev);
+ mld_gq_stop_timer(idev);
+ mld_dad_stop_timer(idev);
+ read_unlock_bh(&idev->lock);
}

static void ipv6_mc_reset(struct inet6_dev *idev)
@@ -2531,8 +2540,10 @@ void ipv6_mc_up(struct inet6_dev *idev)

read_lock_bh(&idev->lock);
ipv6_mc_reset(idev);
- for (i = idev->mc_list; i; i = i->next)
+ for (i = idev->mc_list; i; i = i->next) {
+ mld_del_delrec(idev, i);
igmp6_group_added(i);
+ }
read_unlock_bh(&idev->lock);
}

@@ -2565,6 +2576,7 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev)

/* Deactivate timers */
ipv6_mc_down(idev);
+ mld_clear_delrec(idev);

/* Delete all-nodes address. */
/* We cannot call ipv6_dev_mc_dec() directly, our caller in
@@ -2579,11 +2591,9 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev)
write_lock_bh(&idev->lock);
while ((i = idev->mc_list) != NULL) {
idev->mc_list = i->next;
- write_unlock_bh(&idev->lock);

- igmp6_group_dropped(i);
+ write_unlock_bh(&idev->lock);
ma_put(i);
-
write_lock_bh(&idev->lock);
}
write_unlock_bh(&idev->lock);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index b1cdf8009d29..40d740572354 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1390,6 +1390,7 @@ static int ipip6_tunnel_init(struct net_device *dev)
err = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
if (err) {
free_percpu(dev->tstats);
+ dev->tstats = NULL;
return err;
}

1410     diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
1411     index b9f1fee9a886..667396536feb 100644
1412     --- a/net/ipv6/tcp_ipv6.c
1413     +++ b/net/ipv6/tcp_ipv6.c
1414     @@ -467,7 +467,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
1415     opt = ireq->ipv6_opt;
1416     if (!opt)
1417     opt = rcu_dereference(np->opt);
1418     - err = ip6_xmit(sk, skb, fl6, opt, np->tclass);
1419     + err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
1420     rcu_read_unlock();
1421     err = net_xmit_eval(err);
1422     }
1423     @@ -837,7 +837,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
1424     dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
1425     if (!IS_ERR(dst)) {
1426     skb_dst_set(buff, dst);
1427     - ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
1428     + ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
1429     TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
1430     if (rst)
1431     TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
1432     @@ -987,6 +987,16 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1433     return 0; /* don't send reset */
1434     }
1435    
1436     +static void tcp_v6_restore_cb(struct sk_buff *skb)
1437     +{
1438     + /* We need to move header back to the beginning if xfrm6_policy_check()
1439     + * and tcp_v6_fill_cb() are going to be called again.
1440     + * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
1441     + */
1442     + memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1443     + sizeof(struct inet6_skb_parm));
1444     +}
1445     +
1446     static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1447     struct request_sock *req,
1448     struct dst_entry *dst,
1449     @@ -1178,8 +1188,10 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
1450     sk_gfp_mask(sk, GFP_ATOMIC));
1451     consume_skb(ireq->pktopts);
1452     ireq->pktopts = NULL;
1453     - if (newnp->pktoptions)
1454     + if (newnp->pktoptions) {
1455     + tcp_v6_restore_cb(newnp->pktoptions);
1456     skb_set_owner_r(newnp->pktoptions, newsk);
1457     + }
1458     }
1459     }
1460    
1461     @@ -1194,16 +1206,6 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
1462     return NULL;
1463     }
1464    
1465     -static void tcp_v6_restore_cb(struct sk_buff *skb)
1466     -{
1467     - /* We need to move header back to the beginning if xfrm6_policy_check()
1468     - * and tcp_v6_fill_cb() are going to be called again.
1469     - * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
1470     - */
1471     - memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1472     - sizeof(struct inet6_skb_parm));
1473     -}
1474     -
1475     /* The socket must have its spinlock held when we get
1476     * here, unless it is a TCP_LISTEN socket.
1477     *
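
Two independent fixes land in tcp_ipv6.c. First, ip6_xmit() now takes the
mark explicitly (sk->sk_mark for SYN-ACKs, fl6.flowi6_mark for resets and
ACKs built on the control socket), so policy routing sees the right mark on
TCP control packets. Second, tcp_v6_restore_cb() moves above
tcp_v6_syn_recv_sock() so the saved IPv6 control block can be restored on a
pktoptions skb before it is queued on the child socket: TCP_SKB_CB() overlays
the same skb->cb[] bytes that IP6CB() uses, so the saved copy must be moved
back to the front before IPv6 code reads it again. A sketch of that
overlay-and-restore with hypothetical types (not the kernel's layout):

    #include <stdio.h>
    #include <string.h>

    struct ip6_cb { int iif; int flags; };          /* IPv6's view of cb[] */
    struct tcp_cb { long seq; struct ip6_cb h6; };  /* TCP's view, keeping
                                                     * a saved IPv6 copy   */
    struct buf {                                    /* skb->cb[] analogue  */
        union {
            struct ip6_cb ip6;
            struct tcp_cb tcp;
            char raw[48];
        } cb;
    };

    /* tcp_v6_restore_cb() analogue: move the saved IPv6 control block
     * back to offset 0, where the IPv6 code expects to find it. */
    static void restore_cb(struct buf *b)
    {
        memmove(&b->cb.ip6, &b->cb.tcp.h6, sizeof(struct ip6_cb));
    }

    int main(void)
    {
        struct buf b = { .cb.tcp = { .seq = 1,
                                     .h6 = { .iif = 2, .flags = 4 } } };
        restore_cb(&b);
        printf("iif=%d flags=%d\n", b.cb.ip6.iif, b.cb.ip6.flags); /* 2 4 */
        return 0;
    }
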
1478     diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
1479     index 2599af6378e4..181e755c2fc4 100644
1480     --- a/net/l2tp/l2tp_core.h
1481     +++ b/net/l2tp/l2tp_core.h
1482     @@ -273,6 +273,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb,
1483     int l2tp_nl_register_ops(enum l2tp_pwtype pw_type,
1484     const struct l2tp_nl_cmd_ops *ops);
1485     void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
1486     +int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg);
1487    
1488     /* Session reference counts. Incremented when code obtains a reference
1489     * to a session.
1490     diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
1491     index 8938b6ba57a0..c0f0750639bd 100644
1492     --- a/net/l2tp/l2tp_ip.c
1493     +++ b/net/l2tp/l2tp_ip.c
1494     @@ -11,6 +11,7 @@
1495    
1496     #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
1497    
1498     +#include <asm/ioctls.h>
1499     #include <linux/icmp.h>
1500     #include <linux/module.h>
1501     #include <linux/skbuff.h>
1502     @@ -560,6 +561,30 @@ static int l2tp_ip_recvmsg(struct sock *sk, struct msghdr *msg,
1503     return err ? err : copied;
1504     }
1505    
1506     +int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg)
1507     +{
1508     + struct sk_buff *skb;
1509     + int amount;
1510     +
1511     + switch (cmd) {
1512     + case SIOCOUTQ:
1513     + amount = sk_wmem_alloc_get(sk);
1514     + break;
1515     + case SIOCINQ:
1516     + spin_lock_bh(&sk->sk_receive_queue.lock);
1517     + skb = skb_peek(&sk->sk_receive_queue);
1518     + amount = skb ? skb->len : 0;
1519     + spin_unlock_bh(&sk->sk_receive_queue.lock);
1520     + break;
1521     +
1522     + default:
1523     + return -ENOIOCTLCMD;
1524     + }
1525     +
1526     + return put_user(amount, (int __user *)arg);
1527     +}
1528     +EXPORT_SYMBOL(l2tp_ioctl);
1529     +
1530     static struct proto l2tp_ip_prot = {
1531     .name = "L2TP/IP",
1532     .owner = THIS_MODULE,
1533     @@ -568,7 +593,7 @@ static struct proto l2tp_ip_prot = {
1534     .bind = l2tp_ip_bind,
1535     .connect = l2tp_ip_connect,
1536     .disconnect = l2tp_ip_disconnect,
1537     - .ioctl = udp_ioctl,
1538     + .ioctl = l2tp_ioctl,
1539     .destroy = l2tp_ip_destroy_sock,
1540     .setsockopt = ip_setsockopt,
1541     .getsockopt = ip_getsockopt,
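
udp_ioctl()'s SIOCINQ path assumes the queued skbs carry UDP headers, which
does not hold for L2TP/IP sockets whose receive queue holds raw L2TP packets.
The new l2tp_ioctl() answers SIOCOUTQ with the unsent byte count and SIOCINQ
with the length of the first queued packet, and is shared by the IPv4 and
IPv6 variants (hence the declaration added to l2tp_core.h). From userspace it
behaves like the usual queue-size ioctls; a sketch, assuming fd is an
already-open L2TP/IP socket:

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/sockios.h>   /* SIOCINQ, SIOCOUTQ */

    static void print_queue_sizes(int fd)
    {
        int inq = 0, outq = 0;

        if (ioctl(fd, SIOCINQ, &inq) == 0)    /* first queued packet, bytes */
            printf("SIOCINQ:  %d\n", inq);
        if (ioctl(fd, SIOCOUTQ, &outq) == 0)  /* unsent bytes in send queue */
            printf("SIOCOUTQ: %d\n", outq);
    }
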
1542     diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
1543     index aa821cb639e5..1a65c9a517b6 100644
1544     --- a/net/l2tp/l2tp_ip6.c
1545     +++ b/net/l2tp/l2tp_ip6.c
1546     @@ -729,7 +729,7 @@ static struct proto l2tp_ip6_prot = {
1547     .bind = l2tp_ip6_bind,
1548     .connect = l2tp_ip6_connect,
1549     .disconnect = l2tp_ip6_disconnect,
1550     - .ioctl = udp_ioctl,
1551     + .ioctl = l2tp_ioctl,
1552     .destroy = l2tp_ip6_destroy_sock,
1553     .setsockopt = ipv6_setsockopt,
1554     .getsockopt = ipv6_getsockopt,
1555     diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
1556     index 94e4a5941d89..458722b938c7 100644
1557     --- a/net/packet/af_packet.c
1558     +++ b/net/packet/af_packet.c
1559     @@ -2813,7 +2813,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
1560     struct virtio_net_hdr vnet_hdr = { 0 };
1561     int offset = 0;
1562     struct packet_sock *po = pkt_sk(sk);
1563     - int hlen, tlen;
1564     + int hlen, tlen, linear;
1565     int extra_len = 0;
1566    
1567     /*
1568     @@ -2874,8 +2874,9 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
1569     err = -ENOBUFS;
1570     hlen = LL_RESERVED_SPACE(dev);
1571     tlen = dev->needed_tailroom;
1572     - skb = packet_alloc_skb(sk, hlen + tlen, hlen, len,
1573     - __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len),
1574     + linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
1575     + linear = max(linear, min_t(int, len, dev->hard_header_len));
1576     + skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
1577     msg->msg_flags & MSG_DONTWAIT, &err);
1578     if (skb == NULL)
1579     goto out_unlock;
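
packet_snd() previously sized the skb's linear area from the untrusted
vnet_hdr.hdr_len alone; with hdr_len of 0 the link-layer header could land
entirely in paged fragments, which parts of the stack assume is linear. The
fix clamps the linear size to at least min(len, dev->hard_header_len). For
example, len = 1400 on Ethernet (hard_header_len = 14) with hdr_len = 0 now
yields linear = max(0, min(1400, 14)) = 14. The same arithmetic as a
standalone check (illustrative values):

    #include <stdio.h>

    static int min_int(int a, int b) { return a < b ? a : b; }
    static int max_int(int a, int b) { return a > b ? a : b; }

    int main(void)
    {
        int len = 1400;            /* message length                 */
        int hard_header_len = 14;  /* Ethernet link-layer header     */
        int vnet_hdr_len = 0;      /* untrusted value from userspace */

        int linear = max_int(vnet_hdr_len, min_int(len, hard_header_len));
        printf("linear = %d\n", linear);  /* 14: header stays linear */
        return 0;
    }
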
1580     diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
1581     index f935429bd5ef..b12bc2abea93 100644
1582     --- a/net/sched/cls_matchall.c
1583     +++ b/net/sched/cls_matchall.c
1584     @@ -16,16 +16,11 @@
1585     #include <net/sch_generic.h>
1586     #include <net/pkt_cls.h>
1587    
1588     -struct cls_mall_filter {
1589     +struct cls_mall_head {
1590     struct tcf_exts exts;
1591     struct tcf_result res;
1592     u32 handle;
1593     - struct rcu_head rcu;
1594     u32 flags;
1595     -};
1596     -
1597     -struct cls_mall_head {
1598     - struct cls_mall_filter *filter;
1599     struct rcu_head rcu;
1600     };
1601    
1602     @@ -33,38 +28,29 @@ static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
1603     struct tcf_result *res)
1604     {
1605     struct cls_mall_head *head = rcu_dereference_bh(tp->root);
1606     - struct cls_mall_filter *f = head->filter;
1607    
1608     - if (tc_skip_sw(f->flags))
1609     + if (tc_skip_sw(head->flags))
1610     return -1;
1611    
1612     - return tcf_exts_exec(skb, &f->exts, res);
1613     + return tcf_exts_exec(skb, &head->exts, res);
1614     }
1615    
1616     static int mall_init(struct tcf_proto *tp)
1617     {
1618     - struct cls_mall_head *head;
1619     -
1620     - head = kzalloc(sizeof(*head), GFP_KERNEL);
1621     - if (!head)
1622     - return -ENOBUFS;
1623     -
1624     - rcu_assign_pointer(tp->root, head);
1625     -
1626     return 0;
1627     }
1628    
1629     -static void mall_destroy_filter(struct rcu_head *head)
1630     +static void mall_destroy_rcu(struct rcu_head *rcu)
1631     {
1632     - struct cls_mall_filter *f = container_of(head, struct cls_mall_filter, rcu);
1633     + struct cls_mall_head *head = container_of(rcu, struct cls_mall_head,
1634     + rcu);
1635    
1636     - tcf_exts_destroy(&f->exts);
1637     -
1638     - kfree(f);
1639     + tcf_exts_destroy(&head->exts);
1640     + kfree(head);
1641     }
1642    
1643     static int mall_replace_hw_filter(struct tcf_proto *tp,
1644     - struct cls_mall_filter *f,
1645     + struct cls_mall_head *head,
1646     unsigned long cookie)
1647     {
1648     struct net_device *dev = tp->q->dev_queue->dev;
1649     @@ -74,7 +60,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
1650     offload.type = TC_SETUP_MATCHALL;
1651     offload.cls_mall = &mall_offload;
1652     offload.cls_mall->command = TC_CLSMATCHALL_REPLACE;
1653     - offload.cls_mall->exts = &f->exts;
1654     + offload.cls_mall->exts = &head->exts;
1655     offload.cls_mall->cookie = cookie;
1656    
1657     return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
1658     @@ -82,7 +68,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
1659     }
1660    
1661     static void mall_destroy_hw_filter(struct tcf_proto *tp,
1662     - struct cls_mall_filter *f,
1663     + struct cls_mall_head *head,
1664     unsigned long cookie)
1665     {
1666     struct net_device *dev = tp->q->dev_queue->dev;
1667     @@ -103,29 +89,20 @@ static bool mall_destroy(struct tcf_proto *tp, bool force)
1668     {
1669     struct cls_mall_head *head = rtnl_dereference(tp->root);
1670     struct net_device *dev = tp->q->dev_queue->dev;
1671     - struct cls_mall_filter *f = head->filter;
1672    
1673     - if (!force && f)
1674     - return false;
1675     + if (!head)
1676     + return true;
1677    
1678     - if (f) {
1679     - if (tc_should_offload(dev, tp, f->flags))
1680     - mall_destroy_hw_filter(tp, f, (unsigned long) f);
1681     + if (tc_should_offload(dev, tp, head->flags))
1682     + mall_destroy_hw_filter(tp, head, (unsigned long) head);
1683    
1684     - call_rcu(&f->rcu, mall_destroy_filter);
1685     - }
1686     - kfree_rcu(head, rcu);
1687     + call_rcu(&head->rcu, mall_destroy_rcu);
1688     return true;
1689     }
1690    
1691     static unsigned long mall_get(struct tcf_proto *tp, u32 handle)
1692     {
1693     - struct cls_mall_head *head = rtnl_dereference(tp->root);
1694     - struct cls_mall_filter *f = head->filter;
1695     -
1696     - if (f && f->handle == handle)
1697     - return (unsigned long) f;
1698     - return 0;
1699     + return 0UL;
1700     }
1701    
1702     static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
1703     @@ -134,7 +111,7 @@ static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
1704     };
1705    
1706     static int mall_set_parms(struct net *net, struct tcf_proto *tp,
1707     - struct cls_mall_filter *f,
1708     + struct cls_mall_head *head,
1709     unsigned long base, struct nlattr **tb,
1710     struct nlattr *est, bool ovr)
1711     {
1712     @@ -147,11 +124,11 @@ static int mall_set_parms(struct net *net, struct tcf_proto *tp,
1713     return err;
1714    
1715     if (tb[TCA_MATCHALL_CLASSID]) {
1716     - f->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
1717     - tcf_bind_filter(tp, &f->res, base);
1718     + head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
1719     + tcf_bind_filter(tp, &head->res, base);
1720     }
1721    
1722     - tcf_exts_change(tp, &f->exts, &e);
1723     + tcf_exts_change(tp, &head->exts, &e);
1724    
1725     return 0;
1726     }
1727     @@ -162,21 +139,17 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
1728     unsigned long *arg, bool ovr)
1729     {
1730     struct cls_mall_head *head = rtnl_dereference(tp->root);
1731     - struct cls_mall_filter *fold = (struct cls_mall_filter *) *arg;
1732     struct net_device *dev = tp->q->dev_queue->dev;
1733     - struct cls_mall_filter *f;
1734     struct nlattr *tb[TCA_MATCHALL_MAX + 1];
1735     + struct cls_mall_head *new;
1736     u32 flags = 0;
1737     int err;
1738    
1739     if (!tca[TCA_OPTIONS])
1740     return -EINVAL;
1741    
1742     - if (head->filter)
1743     - return -EBUSY;
1744     -
1745     - if (fold)
1746     - return -EINVAL;
1747     + if (head)
1748     + return -EEXIST;
1749    
1750     err = nla_parse_nested(tb, TCA_MATCHALL_MAX,
1751     tca[TCA_OPTIONS], mall_policy);
1752     @@ -189,23 +162,23 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
1753     return -EINVAL;
1754     }
1755    
1756     - f = kzalloc(sizeof(*f), GFP_KERNEL);
1757     - if (!f)
1758     + new = kzalloc(sizeof(*new), GFP_KERNEL);
1759     + if (!new)
1760     return -ENOBUFS;
1761    
1762     - tcf_exts_init(&f->exts, TCA_MATCHALL_ACT, 0);
1763     + tcf_exts_init(&new->exts, TCA_MATCHALL_ACT, 0);
1764    
1765     if (!handle)
1766     handle = 1;
1767     - f->handle = handle;
1768     - f->flags = flags;
1769     + new->handle = handle;
1770     + new->flags = flags;
1771    
1772     - err = mall_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr);
1773     + err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr);
1774     if (err)
1775     goto errout;
1776    
1777     if (tc_should_offload(dev, tp, flags)) {
1778     - err = mall_replace_hw_filter(tp, f, (unsigned long) f);
1779     + err = mall_replace_hw_filter(tp, new, (unsigned long) new);
1780     if (err) {
1781     if (tc_skip_sw(flags))
1782     goto errout;
1783     @@ -214,39 +187,29 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
1784     }
1785     }
1786    
1787     - *arg = (unsigned long) f;
1788     - rcu_assign_pointer(head->filter, f);
1789     -
1790     + *arg = (unsigned long) head;
1791     + rcu_assign_pointer(tp->root, new);
1792     + if (head)
1793     + call_rcu(&head->rcu, mall_destroy_rcu);
1794     return 0;
1795    
1796     errout:
1797     - kfree(f);
1798     + kfree(new);
1799     return err;
1800     }
1801    
1802     static int mall_delete(struct tcf_proto *tp, unsigned long arg)
1803     {
1804     - struct cls_mall_head *head = rtnl_dereference(tp->root);
1805     - struct cls_mall_filter *f = (struct cls_mall_filter *) arg;
1806     - struct net_device *dev = tp->q->dev_queue->dev;
1807     -
1808     - if (tc_should_offload(dev, tp, f->flags))
1809     - mall_destroy_hw_filter(tp, f, (unsigned long) f);
1810     -
1811     - RCU_INIT_POINTER(head->filter, NULL);
1812     - tcf_unbind_filter(tp, &f->res);
1813     - call_rcu(&f->rcu, mall_destroy_filter);
1814     - return 0;
1815     + return -EOPNOTSUPP;
1816     }
1817    
1818     static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg)
1819     {
1820     struct cls_mall_head *head = rtnl_dereference(tp->root);
1821     - struct cls_mall_filter *f = head->filter;
1822    
1823     if (arg->count < arg->skip)
1824     goto skip;
1825     - if (arg->fn(tp, (unsigned long) f, arg) < 0)
1826     + if (arg->fn(tp, (unsigned long) head, arg) < 0)
1827     arg->stop = 1;
1828     skip:
1829     arg->count++;
1830     @@ -255,28 +218,28 @@ static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg)
1831     static int mall_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
1832     struct sk_buff *skb, struct tcmsg *t)
1833     {
1834     - struct cls_mall_filter *f = (struct cls_mall_filter *) fh;
1835     + struct cls_mall_head *head = (struct cls_mall_head *) fh;
1836     struct nlattr *nest;
1837    
1838     - if (!f)
1839     + if (!head)
1840     return skb->len;
1841    
1842     - t->tcm_handle = f->handle;
1843     + t->tcm_handle = head->handle;
1844    
1845     nest = nla_nest_start(skb, TCA_OPTIONS);
1846     if (!nest)
1847     goto nla_put_failure;
1848    
1849     - if (f->res.classid &&
1850     - nla_put_u32(skb, TCA_MATCHALL_CLASSID, f->res.classid))
1851     + if (head->res.classid &&
1852     + nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
1853     goto nla_put_failure;
1854    
1855     - if (tcf_exts_dump(skb, &f->exts))
1856     + if (tcf_exts_dump(skb, &head->exts))
1857     goto nla_put_failure;
1858    
1859     nla_nest_end(skb, nest);
1860    
1861     - if (tcf_exts_dump_stats(skb, &f->exts) < 0)
1862     + if (tcf_exts_dump_stats(skb, &head->exts) < 0)
1863     goto nla_put_failure;
1864    
1865     return skb->len;
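
The matchall rewrite removes a one-element indirection: since this classifier
holds at most one filter, cls_mall_filter is folded into cls_mall_head and
tp->root points directly at it (NULL until first configured, so mall_init()
allocates nothing). Replacement then follows the usual RCU publish/reclaim
shape: allocate and fill the new head, rcu_assign_pointer() it into tp->root,
and call_rcu() the old head so concurrent mall_classify() readers finish
before it is freed. A userspace analogue with C11 atomics (hypothetical
names; the grace period is elided where the kernel would use call_rcu()):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct mall_head { int classid; };

    static _Atomic(struct mall_head *) root;

    static int replace(int classid)
    {
        struct mall_head *new_head = calloc(1, sizeof(*new_head));
        struct mall_head *old;

        if (!new_head)
            return -1;
        new_head->classid = classid;

        old = atomic_load_explicit(&root, memory_order_acquire);
        /* publish: readers now see either the old head or the fully
         * initialized new one (rcu_assign_pointer() analogue) */
        atomic_store_explicit(&root, new_head, memory_order_release);

        /* the kernel defers this via call_rcu(); the grace period
         * that lets readers drain is elided in this sketch */
        free(old);               /* free(NULL) on first install is fine */
        return 0;
    }

    int main(void)
    {
        replace(1);              /* first install: root was NULL      */
        replace(2);              /* replacement: old head reclaimed   */
        return 0;
    }
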
1866     diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
1867     index 176af3080a2b..6a2532dd31c4 100644
1868     --- a/net/sctp/ipv6.c
1869     +++ b/net/sctp/ipv6.c
1870     @@ -222,7 +222,8 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
1871     SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
1872    
1873     rcu_read_lock();
1874     - res = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt), np->tclass);
1875     + res = ip6_xmit(sk, skb, fl6, sk->sk_mark, rcu_dereference(np->opt),
1876     + np->tclass);
1877     rcu_read_unlock();
1878     return res;
1879     }
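
sctp_v6_xmit() gets the same treatment as the TCP paths above: the socket's
mark is passed down to ip6_xmit(), so SCTP-over-IPv6 traffic is classified
consistently with IPv4. The mark itself is what userspace sets with SO_MARK
(CAP_NET_ADMIN required); a minimal sketch:

    #include <sys/socket.h>
    #ifndef SO_MARK
    #define SO_MARK 36           /* value on most architectures; comes
                                  * from <asm/socket.h> on older libcs */
    #endif

    /* Set the fwmark used by policy routing / netfilter for this socket;
     * after this series the mark also reaches IPv6 packets built via
     * ip6_xmit(). */
    static int set_mark(int fd, unsigned int mark)
    {
        return setsockopt(fd, SOL_SOCKET, SO_MARK, &mark, sizeof(mark));
    }
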
1880     diff --git a/net/sctp/socket.c b/net/sctp/socket.c
1881     index ca12aa346c0d..6cbe5bdf2b15 100644
1882     --- a/net/sctp/socket.c
1883     +++ b/net/sctp/socket.c
1884     @@ -7427,7 +7427,8 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
1885     */
1886     release_sock(sk);
1887     current_timeo = schedule_timeout(current_timeo);
1888     - BUG_ON(sk != asoc->base.sk);
1889     + if (sk != asoc->base.sk)
1890     + goto do_error;
1891     lock_sock(sk);
1892    
1893     *timeo_p = current_timeo;
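
Finally, the sctp_wait_for_sndbuf() change downgrades a crash to an error:
the socket lock is dropped around schedule_timeout(), and a peel-off in that
window can move the association to a new socket, so sk != asoc->base.sk is a
reachable state rather than an invariant violation; the wait now bails out
through do_error instead of hitting BUG_ON. The general shape of such a
conversion, with hypothetical names:

    #include <errno.h>

    struct sock  { int unused; };
    struct assoc { struct sock *base_sk; };

    static int wait_for_sndbuf(struct sock *sk, struct assoc *asoc)
    {
        /* ... the socket lock is dropped here while sleeping; another
         * thread may peel the association off to a new socket ... */

        if (sk != asoc->base_sk)   /* was: BUG_ON(sk != asoc->base.sk) */
            return -EPIPE;         /* reachable state: fail the wait
                                    * instead of crashing the kernel  */

        /* ... retake the lock and keep waiting ... */
        return 0;
    }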