Magellan Linux

Annotation of /trunk/kernel-alx/patches-5.4/0104-5.4.5-all-fixes.patch

Parent Directory | Revision Log


Revision 3485
Mon May 11 14:35:59 2020 UTC by niro
File size: 87037 byte(s)
-linux-5.4.5
1 niro 3485 diff --git a/Makefile b/Makefile
2     index 144daf02c78a..0f6e72d5e4f1 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,7 +1,7 @@
6     # SPDX-License-Identifier: GPL-2.0
7     VERSION = 5
8     PATCHLEVEL = 4
9     -SUBLEVEL = 4
10     +SUBLEVEL = 5
11     EXTRAVERSION =
12     NAME = Kleptomaniac Octopus
13    
14     diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
15     index 6d7ec371e7b2..606fa6d86685 100644
16     --- a/drivers/infiniband/core/addr.c
17     +++ b/drivers/infiniband/core/addr.c
18     @@ -421,16 +421,15 @@ static int addr6_resolve(struct sockaddr *src_sock,
19     (const struct sockaddr_in6 *)dst_sock;
20     struct flowi6 fl6;
21     struct dst_entry *dst;
22     - int ret;
23    
24     memset(&fl6, 0, sizeof fl6);
25     fl6.daddr = dst_in->sin6_addr;
26     fl6.saddr = src_in->sin6_addr;
27     fl6.flowi6_oif = addr->bound_dev_if;
28    
29     - ret = ipv6_stub->ipv6_dst_lookup(addr->net, NULL, &dst, &fl6);
30     - if (ret < 0)
31     - return ret;
32     + dst = ipv6_stub->ipv6_dst_lookup_flow(addr->net, NULL, &fl6, NULL);
33     + if (IS_ERR(dst))
34     + return PTR_ERR(dst);
35    
36     if (ipv6_addr_any(&src_in->sin6_addr))
37     src_in->sin6_addr = fl6.saddr;
38     diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
39     index 5a3474f9351b..312c2fc961c0 100644
40     --- a/drivers/infiniband/sw/rxe/rxe_net.c
41     +++ b/drivers/infiniband/sw/rxe/rxe_net.c
42     @@ -117,10 +117,12 @@ static struct dst_entry *rxe_find_route6(struct net_device *ndev,
43     memcpy(&fl6.daddr, daddr, sizeof(*daddr));
44     fl6.flowi6_proto = IPPROTO_UDP;
45    
46     - if (unlikely(ipv6_stub->ipv6_dst_lookup(sock_net(recv_sockets.sk6->sk),
47     - recv_sockets.sk6->sk, &ndst, &fl6))) {
48     + ndst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(recv_sockets.sk6->sk),
49     + recv_sockets.sk6->sk, &fl6,
50     + NULL);
51     + if (unlikely(IS_ERR(ndst))) {
52     pr_err_ratelimited("no route to %pI6\n", daddr);
53     - goto put;
54     + return NULL;
55     }
56    
57     if (unlikely(ndst->error)) {
58     diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
59     index acb016834f04..6cc100e7d5c0 100644
60     --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
61     +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
62     @@ -1115,7 +1115,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
63     phy_interface_mode(lmac->lmac_type)))
64     return -ENODEV;
65    
66     - phy_start_aneg(lmac->phydev);
67     + phy_start(lmac->phydev);
68     return 0;
69     }
70    
71     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
72     index f1a7bc46f1c0..2c16add0b642 100644
73     --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
74     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
75     @@ -816,7 +816,7 @@ struct mlx5e_xsk {
76     struct mlx5e_priv {
77     /* priv data path fields - start */
78     struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC];
79     - int channel_tc2txq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
80     + int channel_tc2realtxq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
81     #ifdef CONFIG_MLX5_CORE_EN_DCB
82     struct mlx5e_dcbx_dp dcbx_dp;
83     #endif
84     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
85     index f777994f3005..fce6eccdcf8b 100644
86     --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
87     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
88     @@ -73,6 +73,7 @@ static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = {
89     [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2] = 50000,
90     [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR] = 50000,
91     [MLX5E_CAUI_4_100GBASE_CR4_KR4] = 100000,
92     + [MLX5E_100GAUI_2_100GBASE_CR2_KR2] = 100000,
93     [MLX5E_200GAUI_4_200GBASE_CR4_KR4] = 200000,
94     [MLX5E_400GAUI_8] = 400000,
95     };
96     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
97     index 633b117eb13e..99c7cdd0404a 100644
98     --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
99     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
100     @@ -155,8 +155,11 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
101     }
102    
103     if (port_buffer->buffer[i].size <
104     - (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
105     + (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) {
106     + pr_err("buffer_size[%d]=%d is not enough for lossless buffer\n",
107     + i, port_buffer->buffer[i].size);
108     return -ENOMEM;
109     + }
110    
111     port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff;
112     port_buffer->buffer[i].xon =
113     @@ -232,6 +235,26 @@ static int update_buffer_lossy(unsigned int max_mtu,
114     return 0;
115     }
116    
117     +static int fill_pfc_en(struct mlx5_core_dev *mdev, u8 *pfc_en)
118     +{
119     + u32 g_rx_pause, g_tx_pause;
120     + int err;
121     +
122     + err = mlx5_query_port_pause(mdev, &g_rx_pause, &g_tx_pause);
123     + if (err)
124     + return err;
125     +
126     + /* If global pause enabled, set all active buffers to lossless.
127     + * Otherwise, check PFC setting.
128     + */
129     + if (g_rx_pause || g_tx_pause)
130     + *pfc_en = 0xff;
131     + else
132     + err = mlx5_query_port_pfc(mdev, pfc_en, NULL);
133     +
134     + return err;
135     +}
136     +
137     #define MINIMUM_MAX_MTU 9216
138     int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
139     u32 change, unsigned int mtu,
140     @@ -277,7 +300,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
141    
142     if (change & MLX5E_PORT_BUFFER_PRIO2BUFFER) {
143     update_prio2buffer = true;
144     - err = mlx5_query_port_pfc(priv->mdev, &curr_pfc_en, NULL);
145     + err = fill_pfc_en(priv->mdev, &curr_pfc_en);
146     if (err)
147     return err;
148    
149     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
150     index 745ab6cd7c30..362f01bc8372 100644
151     --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
152     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
153     @@ -144,10 +144,10 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
154     #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
155     int ret;
156    
157     - ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
158     - fl6);
159     - if (ret < 0)
160     - return ret;
161     + dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), NULL, fl6,
162     + NULL);
163     + if (IS_ERR(dst))
164     + return PTR_ERR(dst);
165    
166     if (!(*out_ttl))
167     *out_ttl = ip6_dst_hoplimit(dst);
168     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
169     index 95601269fa2e..c6776f308d5e 100644
170     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
171     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
172     @@ -1027,18 +1027,11 @@ static bool ext_link_mode_requested(const unsigned long *adver)
173     return bitmap_intersects(modes, adver, __ETHTOOL_LINK_MODE_MASK_NBITS);
174     }
175    
176     -static bool ext_speed_requested(u32 speed)
177     -{
178     -#define MLX5E_MAX_PTYS_LEGACY_SPEED 100000
179     - return !!(speed > MLX5E_MAX_PTYS_LEGACY_SPEED);
180     -}
181     -
182     -static bool ext_requested(u8 autoneg, const unsigned long *adver, u32 speed)
183     +static bool ext_requested(u8 autoneg, const unsigned long *adver, bool ext_supported)
184     {
185     bool ext_link_mode = ext_link_mode_requested(adver);
186     - bool ext_speed = ext_speed_requested(speed);
187    
188     - return autoneg == AUTONEG_ENABLE ? ext_link_mode : ext_speed;
189     + return autoneg == AUTONEG_ENABLE ? ext_link_mode : ext_supported;
190     }
191    
192     int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
193     @@ -1065,8 +1058,8 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
194     autoneg = link_ksettings->base.autoneg;
195     speed = link_ksettings->base.speed;
196    
197     - ext = ext_requested(autoneg, adver, speed),
198     ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
199     + ext = ext_requested(autoneg, adver, ext_supported);
200     if (!ext_supported && ext)
201     return -EOPNOTSUPP;
202    
203     @@ -1643,7 +1636,7 @@ static int mlx5e_get_module_info(struct net_device *netdev,
204     break;
205     case MLX5_MODULE_ID_SFP:
206     modinfo->type = ETH_MODULE_SFF_8472;
207     - modinfo->eeprom_len = MLX5_EEPROM_PAGE_LENGTH;
208     + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
209     break;
210     default:
211     netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n",
212     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
213     index 2a56e66f58d8..6abd4ed5b69b 100644
214     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
215     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
216     @@ -1693,11 +1693,10 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
217     struct mlx5e_params *params,
218     struct mlx5e_channel_param *cparam)
219     {
220     - struct mlx5e_priv *priv = c->priv;
221     int err, tc;
222    
223     for (tc = 0; tc < params->num_tc; tc++) {
224     - int txq_ix = c->ix + tc * priv->max_nch;
225     + int txq_ix = c->ix + tc * params->num_channels;
226    
227     err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
228     params, &cparam->sq, &c->sq[tc], tc);
229     @@ -2878,26 +2877,21 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)
230     netdev_set_tc_queue(netdev, tc, nch, 0);
231     }
232    
233     -static void mlx5e_build_tc2txq_maps(struct mlx5e_priv *priv)
234     +static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
235     {
236     - int i, tc;
237     + int i, ch;
238    
239     - for (i = 0; i < priv->max_nch; i++)
240     - for (tc = 0; tc < priv->profile->max_tc; tc++)
241     - priv->channel_tc2txq[i][tc] = i + tc * priv->max_nch;
242     -}
243     + ch = priv->channels.num;
244    
245     -static void mlx5e_build_tx2sq_maps(struct mlx5e_priv *priv)
246     -{
247     - struct mlx5e_channel *c;
248     - struct mlx5e_txqsq *sq;
249     - int i, tc;
250     + for (i = 0; i < ch; i++) {
251     + int tc;
252     +
253     + for (tc = 0; tc < priv->channels.params.num_tc; tc++) {
254     + struct mlx5e_channel *c = priv->channels.c[i];
255     + struct mlx5e_txqsq *sq = &c->sq[tc];
256    
257     - for (i = 0; i < priv->channels.num; i++) {
258     - c = priv->channels.c[i];
259     - for (tc = 0; tc < c->num_tc; tc++) {
260     - sq = &c->sq[tc];
261     priv->txq2sq[sq->txq_ix] = sq;
262     + priv->channel_tc2realtxq[i][tc] = i + tc * ch;
263     }
264     }
265     }
266     @@ -2912,7 +2906,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
267     netif_set_real_num_tx_queues(netdev, num_txqs);
268     netif_set_real_num_rx_queues(netdev, num_rxqs);
269    
270     - mlx5e_build_tx2sq_maps(priv);
271     + mlx5e_build_txq_maps(priv);
272     mlx5e_activate_channels(&priv->channels);
273     mlx5e_xdp_tx_enable(priv);
274     netif_tx_start_all_queues(priv->netdev);
275     @@ -5028,7 +5022,6 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
276     if (err)
277     mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
278     mlx5e_build_nic_netdev(netdev);
279     - mlx5e_build_tc2txq_maps(priv);
280     mlx5e_health_create_reporters(priv);
281    
282     return 0;
283     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
284     index 7e6ebd0505cc..9f09253f9f46 100644
285     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
286     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
287     @@ -1601,7 +1601,7 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
288     for (j = 0; j < NUM_SQ_STATS; j++)
289     sprintf(data + (idx++) * ETH_GSTRING_LEN,
290     sq_stats_desc[j].format,
291     - priv->channel_tc2txq[i][tc]);
292     + i + tc * max_nch);
293    
294     for (i = 0; i < max_nch; i++) {
295     for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
296     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
297     index f90a9f8e0fc6..c2c7f214a56a 100644
298     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
299     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
300     @@ -1616,7 +1616,7 @@ static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
301     flow_flag_clear(flow, DUP);
302    
303     mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
304     - kvfree(flow->peer_flow);
305     + kfree(flow->peer_flow);
306     flow->peer_flow = NULL;
307     }
308    
309     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
310     index 67dc4f0921b6..dee12f17f9c2 100644
311     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
312     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
313     @@ -93,7 +93,7 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
314     if (txq_ix >= num_channels)
315     txq_ix = priv->txq2sq[txq_ix]->ch_ix;
316    
317     - return priv->channel_tc2txq[txq_ix][up];
318     + return priv->channel_tc2realtxq[txq_ix][up];
319     }
320    
321     static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
322     diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
323     index 672ea1342add..da1fd0e08c36 100644
324     --- a/drivers/net/ethernet/mscc/ocelot.c
325     +++ b/drivers/net/ethernet/mscc/ocelot.c
326     @@ -1979,14 +1979,18 @@ static struct ptp_clock_info ocelot_ptp_clock_info = {
327    
328     static int ocelot_init_timestamp(struct ocelot *ocelot)
329     {
330     + struct ptp_clock *ptp_clock;
331     +
332     ocelot->ptp_info = ocelot_ptp_clock_info;
333     - ocelot->ptp_clock = ptp_clock_register(&ocelot->ptp_info, ocelot->dev);
334     - if (IS_ERR(ocelot->ptp_clock))
335     - return PTR_ERR(ocelot->ptp_clock);
336     + ptp_clock = ptp_clock_register(&ocelot->ptp_info, ocelot->dev);
337     + if (IS_ERR(ptp_clock))
338     + return PTR_ERR(ptp_clock);
339     /* Check if PHC support is missing at the configuration level */
340     - if (!ocelot->ptp_clock)
341     + if (!ptp_clock)
342     return 0;
343    
344     + ocelot->ptp_clock = ptp_clock;
345     +
346     ocelot_write(ocelot, SYS_PTP_CFG_PTP_STAMP_WID(30), SYS_PTP_CFG);
347     ocelot_write(ocelot, 0xffffffff, ANA_TABLES_PTP_ID_LOW);
348     ocelot_write(ocelot, 0xffffffff, ANA_TABLES_PTP_ID_HIGH);
349     @@ -2213,6 +2217,8 @@ void ocelot_deinit(struct ocelot *ocelot)
350     destroy_workqueue(ocelot->stats_queue);
351     mutex_destroy(&ocelot->stats_lock);
352     ocelot_ace_deinit();
353     + if (ocelot->ptp_clock)
354     + ptp_clock_unregister(ocelot->ptp_clock);
355    
356     for (i = 0; i < ocelot->num_phys_ports; i++) {
357     port = ocelot->ports[i];
358     diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
359     index 20faa8d24c9f..134640412d7b 100644
360     --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
361     +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
362     @@ -1364,12 +1364,9 @@ int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
363    
364     static int ionic_lif_rss_init(struct ionic_lif *lif)
365     {
366     - u8 rss_key[IONIC_RSS_HASH_KEY_SIZE];
367     unsigned int tbl_sz;
368     unsigned int i;
369    
370     - netdev_rss_key_fill(rss_key, IONIC_RSS_HASH_KEY_SIZE);
371     -
372     lif->rss_types = IONIC_RSS_TYPE_IPV4 |
373     IONIC_RSS_TYPE_IPV4_TCP |
374     IONIC_RSS_TYPE_IPV4_UDP |
375     @@ -1382,12 +1379,18 @@ static int ionic_lif_rss_init(struct ionic_lif *lif)
376     for (i = 0; i < tbl_sz; i++)
377     lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);
378    
379     - return ionic_lif_rss_config(lif, lif->rss_types, rss_key, NULL);
380     + return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
381     }
382    
383     -static int ionic_lif_rss_deinit(struct ionic_lif *lif)
384     +static void ionic_lif_rss_deinit(struct ionic_lif *lif)
385     {
386     - return ionic_lif_rss_config(lif, 0x0, NULL, NULL);
387     + int tbl_sz;
388     +
389     + tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
390     + memset(lif->rss_ind_tbl, 0, tbl_sz);
391     + memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE);
392     +
393     + ionic_lif_rss_config(lif, 0x0, NULL, NULL);
394     }
395    
396     static void ionic_txrx_disable(struct ionic_lif *lif)
397     @@ -1710,6 +1713,7 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
398     dev_err(dev, "Failed to allocate rss indirection table, aborting\n");
399     goto err_out_free_qcqs;
400     }
401     + netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);
402    
403     list_add_tail(&lif->list, &ionic->lifs);
404    
405     diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
406     index 235d51ea4d39..4fe0977d01fa 100644
407     --- a/drivers/net/ethernet/realtek/r8169_main.c
408     +++ b/drivers/net/ethernet/realtek/r8169_main.c
409     @@ -3920,7 +3920,7 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
410     case RTL_GIGA_MAC_VER_32:
411     case RTL_GIGA_MAC_VER_33:
412     case RTL_GIGA_MAC_VER_34:
413     - case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_51:
414     + case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_61:
415     RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) |
416     AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
417     break;
418     diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
419     index f826365c979d..271a00f24f45 100644
420     --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
421     +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
422     @@ -1502,10 +1502,8 @@ static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
423     rx_q->dma_erx, rx_q->dma_rx_phy);
424    
425     kfree(rx_q->buf_pool);
426     - if (rx_q->page_pool) {
427     - page_pool_request_shutdown(rx_q->page_pool);
428     + if (rx_q->page_pool)
429     page_pool_destroy(rx_q->page_pool);
430     - }
431     }
432     }
433    
434     diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
435     index f298d714efd6..d7a953c647b4 100644
436     --- a/drivers/net/ethernet/ti/cpsw.c
437     +++ b/drivers/net/ethernet/ti/cpsw.c
438     @@ -890,8 +890,8 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
439     {
440     struct cpsw_common *cpsw = dev_id;
441    
442     - cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
443     writel(0, &cpsw->wr_regs->rx_en);
444     + cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
445    
446     if (cpsw->quirk_irq) {
447     disable_irq_nosync(cpsw->irqs_table[0]);
448     diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
449     index 3ab24fdccd3b..5c6b7fc04ea6 100644
450     --- a/drivers/net/geneve.c
451     +++ b/drivers/net/geneve.c
452     @@ -853,7 +853,9 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
453     if (dst)
454     return dst;
455     }
456     - if (ipv6_stub->ipv6_dst_lookup(geneve->net, gs6->sock->sk, &dst, fl6)) {
457     + dst = ipv6_stub->ipv6_dst_lookup_flow(geneve->net, gs6->sock->sk, fl6,
458     + NULL);
459     + if (IS_ERR(dst)) {
460     netdev_dbg(dev, "no route to %pI6\n", &fl6->daddr);
461     return ERR_PTR(-ENETUNREACH);
462     }
463     diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
464     index 8869154fad88..404ac3a0d1c3 100644
465     --- a/drivers/net/vxlan.c
466     +++ b/drivers/net/vxlan.c
467     @@ -2276,7 +2276,6 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
468     bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
469     struct dst_entry *ndst;
470     struct flowi6 fl6;
471     - int err;
472    
473     if (!sock6)
474     return ERR_PTR(-EIO);
475     @@ -2299,10 +2298,9 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
476     fl6.fl6_dport = dport;
477     fl6.fl6_sport = sport;
478    
479     - err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
480     - sock6->sock->sk,
481     - &ndst, &fl6);
482     - if (unlikely(err < 0)) {
483     + ndst = ipv6_stub->ipv6_dst_lookup_flow(vxlan->net, sock6->sock->sk,
484     + &fl6, NULL);
485     + if (unlikely(IS_ERR(ndst))) {
486     netdev_dbg(dev, "no route to %pI6\n", daddr);
487     return ERR_PTR(-ENETUNREACH);
488     }
489     diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
490     index c20f190b4c18..76d952aeb0fc 100644
491     --- a/include/linux/netdevice.h
492     +++ b/include/linux/netdevice.h
493     @@ -1867,6 +1867,11 @@ struct net_device {
494     unsigned char if_port;
495     unsigned char dma;
496    
497     + /* Note : dev->mtu is often read without holding a lock.
498     + * Writers usually hold RTNL.
499     + * It is recommended to use READ_ONCE() to annotate the reads,
500     + * and to use WRITE_ONCE() to annotate the writes.
501     + */
502     unsigned int mtu;
503     unsigned int min_mtu;
504     unsigned int max_mtu;
505     diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
506     index 8688f7adfda7..1ba6e2cc2725 100644
507     --- a/include/linux/skbuff.h
508     +++ b/include/linux/skbuff.h
509     @@ -3527,8 +3527,9 @@ int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
510     int skb_vlan_pop(struct sk_buff *skb);
511     int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
512     int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
513     - int mac_len);
514     -int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len);
515     + int mac_len, bool ethernet);
516     +int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
517     + bool ethernet);
518     int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse);
519     int skb_mpls_dec_ttl(struct sk_buff *skb);
520     struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
521     diff --git a/include/linux/time.h b/include/linux/time.h
522     index 27d83fd2ae61..5f3e49978837 100644
523     --- a/include/linux/time.h
524     +++ b/include/linux/time.h
525     @@ -96,4 +96,17 @@ static inline bool itimerspec64_valid(const struct itimerspec64 *its)
526     */
527     #define time_after32(a, b) ((s32)((u32)(b) - (u32)(a)) < 0)
528     #define time_before32(b, a) time_after32(a, b)
529     +
530     +/**
531     + * time_between32 - check if a 32-bit timestamp is within a given time range
532     + * @t: the time which may be within [l,h]
533     + * @l: the lower bound of the range
534     + * @h: the higher bound of the range
535     + *
536     + * time_between32(t, l, h) returns true if @l <= @t <= @h. All operands are
537     + * treated as 32-bit integers.
538     + *
539     + * Equivalent to !(time_before32(@t, @l) || time_after32(@t, @h)).
540     + */
541     +#define time_between32(t, l, h) ((u32)(h) - (u32)(l) >= (u32)(t) - (u32)(l))
542     #endif
543     diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
544     index 5cd12276ae21..e5fc8db1f783 100644
545     --- a/include/net/flow_dissector.h
546     +++ b/include/net/flow_dissector.h
547     @@ -229,6 +229,7 @@ enum flow_dissector_key_id {
548     FLOW_DISSECTOR_KEY_IPV4_ADDRS, /* struct flow_dissector_key_ipv4_addrs */
549     FLOW_DISSECTOR_KEY_IPV6_ADDRS, /* struct flow_dissector_key_ipv6_addrs */
550     FLOW_DISSECTOR_KEY_PORTS, /* struct flow_dissector_key_ports */
551     + FLOW_DISSECTOR_KEY_PORTS_RANGE, /* struct flow_dissector_key_ports */
552     FLOW_DISSECTOR_KEY_ICMP, /* struct flow_dissector_key_icmp */
553     FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */
554     FLOW_DISSECTOR_KEY_TIPC, /* struct flow_dissector_key_tipc */
555     diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
556     index 86c567f531f3..c6f7bd22db60 100644
557     --- a/include/net/flow_offload.h
558     +++ b/include/net/flow_offload.h
559     @@ -380,19 +380,18 @@ static inline void flow_block_init(struct flow_block *flow_block)
560     typedef int flow_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
561     enum tc_setup_type type, void *type_data);
562    
563     -typedef void flow_indr_block_ing_cmd_t(struct net_device *dev,
564     - flow_indr_block_bind_cb_t *cb,
565     - void *cb_priv,
566     - enum flow_block_command command);
567     +typedef void flow_indr_block_cmd_t(struct net_device *dev,
568     + flow_indr_block_bind_cb_t *cb, void *cb_priv,
569     + enum flow_block_command command);
570    
571     -struct flow_indr_block_ing_entry {
572     - flow_indr_block_ing_cmd_t *cb;
573     +struct flow_indr_block_entry {
574     + flow_indr_block_cmd_t *cb;
575     struct list_head list;
576     };
577    
578     -void flow_indr_add_block_ing_cb(struct flow_indr_block_ing_entry *entry);
579     +void flow_indr_add_block_cb(struct flow_indr_block_entry *entry);
580    
581     -void flow_indr_del_block_ing_cb(struct flow_indr_block_ing_entry *entry);
582     +void flow_indr_del_block_cb(struct flow_indr_block_entry *entry);
583    
584     int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
585     flow_indr_block_bind_cb_t *cb,
586     diff --git a/include/net/ip.h b/include/net/ip.h
587     index a2c61c36dc4a..4b15cc1c224c 100644
588     --- a/include/net/ip.h
589     +++ b/include/net/ip.h
590     @@ -760,4 +760,9 @@ int ip_misc_proc_init(void);
591     int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
592     struct netlink_ext_ack *extack);
593    
594     +static inline bool inetdev_valid_mtu(unsigned int mtu)
595     +{
596     + return likely(mtu >= IPV4_MIN_MTU);
597     +}
598     +
599     #endif /* _IP_H */
600     diff --git a/include/net/ipv6.h b/include/net/ipv6.h
601     index 009605c56f20..b59b3dae0f71 100644
602     --- a/include/net/ipv6.h
603     +++ b/include/net/ipv6.h
604     @@ -1017,7 +1017,7 @@ static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
605    
606     int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
607     struct flowi6 *fl6);
608     -struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
609     +struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6,
610     const struct in6_addr *final_dst);
611     struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
612     const struct in6_addr *final_dst,
613     diff --git a/include/net/ipv6_stubs.h b/include/net/ipv6_stubs.h
614     index 5c93e942c50b..3e7d2c0e79ca 100644
615     --- a/include/net/ipv6_stubs.h
616     +++ b/include/net/ipv6_stubs.h
617     @@ -24,8 +24,10 @@ struct ipv6_stub {
618     const struct in6_addr *addr);
619     int (*ipv6_sock_mc_drop)(struct sock *sk, int ifindex,
620     const struct in6_addr *addr);
621     - int (*ipv6_dst_lookup)(struct net *net, struct sock *sk,
622     - struct dst_entry **dst, struct flowi6 *fl6);
623     + struct dst_entry *(*ipv6_dst_lookup_flow)(struct net *net,
624     + const struct sock *sk,
625     + struct flowi6 *fl6,
626     + const struct in6_addr *final_dst);
627     int (*ipv6_route_input)(struct sk_buff *skb);
628    
629     struct fib6_table *(*fib6_get_table)(struct net *net, u32 id);
630     diff --git a/include/net/page_pool.h b/include/net/page_pool.h
631     index 2cbcdbdec254..1121faa99c12 100644
632     --- a/include/net/page_pool.h
633     +++ b/include/net/page_pool.h
634     @@ -70,7 +70,12 @@ struct page_pool_params {
635     struct page_pool {
636     struct page_pool_params p;
637    
638     - u32 pages_state_hold_cnt;
639     + struct delayed_work release_dw;
640     + void (*disconnect)(void *);
641     + unsigned long defer_start;
642     + unsigned long defer_warn;
643     +
644     + u32 pages_state_hold_cnt;
645    
646     /*
647     * Data structure for allocation side
648     @@ -129,25 +134,19 @@ inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
649    
650     struct page_pool *page_pool_create(const struct page_pool_params *params);
651    
652     -void __page_pool_free(struct page_pool *pool);
653     -static inline void page_pool_free(struct page_pool *pool)
654     -{
655     - /* When page_pool isn't compiled-in, net/core/xdp.c doesn't
656     - * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
657     - */
658     #ifdef CONFIG_PAGE_POOL
659     - __page_pool_free(pool);
660     -#endif
661     -}
662     -
663     -/* Drivers use this instead of page_pool_free */
664     +void page_pool_destroy(struct page_pool *pool);
665     +void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *));
666     +#else
667     static inline void page_pool_destroy(struct page_pool *pool)
668     {
669     - if (!pool)
670     - return;
671     +}
672    
673     - page_pool_free(pool);
674     +static inline void page_pool_use_xdp_mem(struct page_pool *pool,
675     + void (*disconnect)(void *))
676     +{
677     }
678     +#endif
679    
680     /* Never call this directly, use helpers below */
681     void __page_pool_put_page(struct page_pool *pool,
682     @@ -170,24 +169,6 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
683     __page_pool_put_page(pool, page, true);
684     }
685    
686     -/* API user MUST have disconnected alloc-side (not allowed to call
687     - * page_pool_alloc_pages()) before calling this. The free-side can
688     - * still run concurrently, to handle in-flight packet-pages.
689     - *
690     - * A request to shutdown can fail (with false) if there are still
691     - * in-flight packet-pages.
692     - */
693     -bool __page_pool_request_shutdown(struct page_pool *pool);
694     -static inline bool page_pool_request_shutdown(struct page_pool *pool)
695     -{
696     - bool safe_to_remove = false;
697     -
698     -#ifdef CONFIG_PAGE_POOL
699     - safe_to_remove = __page_pool_request_shutdown(pool);
700     -#endif
701     - return safe_to_remove;
702     -}
703     -
704     /* Disconnects a page (from a page_pool). API users can have a need
705     * to disconnect a page (from a page_pool), to allow it to be used as
706     * a regular page (that will eventually be returned to the normal
707     @@ -216,11 +197,6 @@ static inline bool is_page_pool_compiled_in(void)
708     #endif
709     }
710    
711     -static inline void page_pool_get(struct page_pool *pool)
712     -{
713     - refcount_inc(&pool->user_cnt);
714     -}
715     -
716     static inline bool page_pool_put(struct page_pool *pool)
717     {
718     return refcount_dec_and_test(&pool->user_cnt);
719     diff --git a/include/net/tcp.h b/include/net/tcp.h
720     index ab4eb5eb5d07..b2367cfe0bda 100644
721     --- a/include/net/tcp.h
722     +++ b/include/net/tcp.h
723     @@ -494,15 +494,16 @@ static inline void tcp_synq_overflow(const struct sock *sk)
724     reuse = rcu_dereference(sk->sk_reuseport_cb);
725     if (likely(reuse)) {
726     last_overflow = READ_ONCE(reuse->synq_overflow_ts);
727     - if (time_after32(now, last_overflow + HZ))
728     + if (!time_between32(now, last_overflow,
729     + last_overflow + HZ))
730     WRITE_ONCE(reuse->synq_overflow_ts, now);
731     return;
732     }
733     }
734    
735     - last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
736     - if (time_after32(now, last_overflow + HZ))
737     - tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
738     + last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
739     + if (!time_between32(now, last_overflow, last_overflow + HZ))
740     + WRITE_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp, now);
741     }
742    
743     /* syncookies: no recent synqueue overflow on this listening socket? */
744     @@ -517,13 +518,23 @@ static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
745     reuse = rcu_dereference(sk->sk_reuseport_cb);
746     if (likely(reuse)) {
747     last_overflow = READ_ONCE(reuse->synq_overflow_ts);
748     - return time_after32(now, last_overflow +
749     - TCP_SYNCOOKIE_VALID);
750     + return !time_between32(now, last_overflow - HZ,
751     + last_overflow +
752     + TCP_SYNCOOKIE_VALID);
753     }
754     }
755    
756     - last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
757     - return time_after32(now, last_overflow + TCP_SYNCOOKIE_VALID);
758     + last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
759     +
760     + /* If last_overflow <= jiffies <= last_overflow + TCP_SYNCOOKIE_VALID,
761     + * then we're under synflood. However, we have to use
762     + * 'last_overflow - HZ' as lower bound. That's because a concurrent
763     + * tcp_synq_overflow() could update .ts_recent_stamp after we read
764     + * jiffies but before we store .ts_recent_stamp into last_overflow,
765     + * which could lead to rejecting a valid syncookie.
766     + */
767     + return !time_between32(now, last_overflow - HZ,
768     + last_overflow + TCP_SYNCOOKIE_VALID);
769     }
770    
771     static inline u32 tcp_cookie_time(void)
772     diff --git a/include/net/xdp_priv.h b/include/net/xdp_priv.h
773     index 6a8cba6ea79a..a9d5b7603b89 100644
774     --- a/include/net/xdp_priv.h
775     +++ b/include/net/xdp_priv.h
776     @@ -12,12 +12,8 @@ struct xdp_mem_allocator {
777     struct page_pool *page_pool;
778     struct zero_copy_allocator *zc_alloc;
779     };
780     - int disconnect_cnt;
781     - unsigned long defer_start;
782     struct rhash_head node;
783     struct rcu_head rcu;
784     - struct delayed_work defer_wq;
785     - unsigned long defer_warn;
786     };
787    
788     #endif /* __LINUX_NET_XDP_PRIV_H__ */
789     diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h
790     index 8c8420230a10..c79943e82a54 100644
791     --- a/include/trace/events/xdp.h
792     +++ b/include/trace/events/xdp.h
793     @@ -317,19 +317,15 @@ __MEM_TYPE_MAP(__MEM_TYPE_TP_FN)
794    
795     TRACE_EVENT(mem_disconnect,
796    
797     - TP_PROTO(const struct xdp_mem_allocator *xa,
798     - bool safe_to_remove, bool force),
799     + TP_PROTO(const struct xdp_mem_allocator *xa),
800    
801     - TP_ARGS(xa, safe_to_remove, force),
802     + TP_ARGS(xa),
803    
804     TP_STRUCT__entry(
805     __field(const struct xdp_mem_allocator *, xa)
806     __field(u32, mem_id)
807     __field(u32, mem_type)
808     __field(const void *, allocator)
809     - __field(bool, safe_to_remove)
810     - __field(bool, force)
811     - __field(int, disconnect_cnt)
812     ),
813    
814     TP_fast_assign(
815     @@ -337,19 +333,12 @@ TRACE_EVENT(mem_disconnect,
816     __entry->mem_id = xa->mem.id;
817     __entry->mem_type = xa->mem.type;
818     __entry->allocator = xa->allocator;
819     - __entry->safe_to_remove = safe_to_remove;
820     - __entry->force = force;
821     - __entry->disconnect_cnt = xa->disconnect_cnt;
822     ),
823    
824     - TP_printk("mem_id=%d mem_type=%s allocator=%p"
825     - " safe_to_remove=%s force=%s disconnect_cnt=%d",
826     + TP_printk("mem_id=%d mem_type=%s allocator=%p",
827     __entry->mem_id,
828     __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
829     - __entry->allocator,
830     - __entry->safe_to_remove ? "true" : "false",
831     - __entry->force ? "true" : "false",
832     - __entry->disconnect_cnt
833     + __entry->allocator
834     )
835     );
836    
837     diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
838     index e804a3016902..022dc6e504c4 100644
839     --- a/net/bridge/br_device.c
840     +++ b/net/bridge/br_device.c
841     @@ -245,6 +245,12 @@ static int br_set_mac_address(struct net_device *dev, void *p)
842     if (!is_valid_ether_addr(addr->sa_data))
843     return -EADDRNOTAVAIL;
844    
845     + /* dev_set_mac_addr() can be called by a master device on bridge's
846     + * NETDEV_UNREGISTER, but since it's being destroyed do nothing
847     + */
848     + if (dev->reg_state != NETREG_REGISTERED)
849     + return -EBUSY;
850     +
851     spin_lock_bh(&br->lock);
852     if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) {
853     /* Mac address will be changed in br_stp_change_bridge_id(). */
854     diff --git a/net/core/dev.c b/net/core/dev.c
855     index 99ac84ff398f..046307445ece 100644
856     --- a/net/core/dev.c
857     +++ b/net/core/dev.c
858     @@ -7967,7 +7967,8 @@ int __dev_set_mtu(struct net_device *dev, int new_mtu)
859     if (ops->ndo_change_mtu)
860     return ops->ndo_change_mtu(dev, new_mtu);
861    
862     - dev->mtu = new_mtu;
863     + /* Pairs with all the lockless reads of dev->mtu in the stack */
864     + WRITE_ONCE(dev->mtu, new_mtu);
865     return 0;
866     }
867     EXPORT_SYMBOL(__dev_set_mtu);
868     diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
869     index 68eda10d0680..1292f3f0f93f 100644
870     --- a/net/core/flow_dissector.c
871     +++ b/net/core/flow_dissector.c
872     @@ -683,6 +683,31 @@ __skb_flow_dissect_tcp(const struct sk_buff *skb,
873     key_tcp->flags = (*(__be16 *) &tcp_flag_word(th) & htons(0x0FFF));
874     }
875    
876     +static void
877     +__skb_flow_dissect_ports(const struct sk_buff *skb,
878     + struct flow_dissector *flow_dissector,
879     + void *target_container, void *data, int nhoff,
880     + u8 ip_proto, int hlen)
881     +{
882     + enum flow_dissector_key_id dissector_ports = FLOW_DISSECTOR_KEY_MAX;
883     + struct flow_dissector_key_ports *key_ports;
884     +
885     + if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS))
886     + dissector_ports = FLOW_DISSECTOR_KEY_PORTS;
887     + else if (dissector_uses_key(flow_dissector,
888     + FLOW_DISSECTOR_KEY_PORTS_RANGE))
889     + dissector_ports = FLOW_DISSECTOR_KEY_PORTS_RANGE;
890     +
891     + if (dissector_ports == FLOW_DISSECTOR_KEY_MAX)
892     + return;
893     +
894     + key_ports = skb_flow_dissector_target(flow_dissector,
895     + dissector_ports,
896     + target_container);
897     + key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
898     + data, hlen);
899     +}
900     +
901     static void
902     __skb_flow_dissect_ipv4(const struct sk_buff *skb,
903     struct flow_dissector *flow_dissector,
904     @@ -852,7 +877,6 @@ bool __skb_flow_dissect(const struct net *net,
905     struct flow_dissector_key_control *key_control;
906     struct flow_dissector_key_basic *key_basic;
907     struct flow_dissector_key_addrs *key_addrs;
908     - struct flow_dissector_key_ports *key_ports;
909     struct flow_dissector_key_icmp *key_icmp;
910     struct flow_dissector_key_tags *key_tags;
911     struct flow_dissector_key_vlan *key_vlan;
912     @@ -870,9 +894,10 @@ bool __skb_flow_dissect(const struct net *net,
913     nhoff = skb_network_offset(skb);
914     hlen = skb_headlen(skb);
915     #if IS_ENABLED(CONFIG_NET_DSA)
916     - if (unlikely(skb->dev && netdev_uses_dsa(skb->dev))) {
917     + if (unlikely(skb->dev && netdev_uses_dsa(skb->dev) &&
918     + proto == htons(ETH_P_XDSA))) {
919     const struct dsa_device_ops *ops;
920     - int offset;
921     + int offset = 0;
922    
923     ops = skb->dev->dsa_ptr->tag_ops;
924     if (ops->flow_dissect &&
925     @@ -1299,14 +1324,9 @@ ip_proto_again:
926     break;
927     }
928    
929     - if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS) &&
930     - !(key_control->flags & FLOW_DIS_IS_FRAGMENT)) {
931     - key_ports = skb_flow_dissector_target(flow_dissector,
932     - FLOW_DISSECTOR_KEY_PORTS,
933     - target_container);
934     - key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
935     - data, hlen);
936     - }
937     + if (!(key_control->flags & FLOW_DIS_IS_FRAGMENT))
938     + __skb_flow_dissect_ports(skb, flow_dissector, target_container,
939     + data, nhoff, ip_proto, hlen);
940    
941     if (dissector_uses_key(flow_dissector,
942     FLOW_DISSECTOR_KEY_ICMP)) {
943     diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c
944     index cf52d9c422fa..45b6a59ac124 100644
945     --- a/net/core/flow_offload.c
946     +++ b/net/core/flow_offload.c
947     @@ -283,7 +283,7 @@ int flow_block_cb_setup_simple(struct flow_block_offload *f,
948     }
949     EXPORT_SYMBOL(flow_block_cb_setup_simple);
950    
951     -static LIST_HEAD(block_ing_cb_list);
952     +static LIST_HEAD(block_cb_list);
953    
954     static struct rhashtable indr_setup_block_ht;
955    
956     @@ -391,20 +391,19 @@ static void flow_indr_block_cb_del(struct flow_indr_block_cb *indr_block_cb)
957     kfree(indr_block_cb);
958     }
959    
960     -static DEFINE_MUTEX(flow_indr_block_ing_cb_lock);
961     +static DEFINE_MUTEX(flow_indr_block_cb_lock);
962    
963     -static void flow_block_ing_cmd(struct net_device *dev,
964     - flow_indr_block_bind_cb_t *cb,
965     - void *cb_priv,
966     - enum flow_block_command command)
967     +static void flow_block_cmd(struct net_device *dev,
968     + flow_indr_block_bind_cb_t *cb, void *cb_priv,
969     + enum flow_block_command command)
970     {
971     - struct flow_indr_block_ing_entry *entry;
972     + struct flow_indr_block_entry *entry;
973    
974     - mutex_lock(&flow_indr_block_ing_cb_lock);
975     - list_for_each_entry(entry, &block_ing_cb_list, list) {
976     + mutex_lock(&flow_indr_block_cb_lock);
977     + list_for_each_entry(entry, &block_cb_list, list) {
978     entry->cb(dev, cb, cb_priv, command);
979     }
980     - mutex_unlock(&flow_indr_block_ing_cb_lock);
981     + mutex_unlock(&flow_indr_block_cb_lock);
982     }
983    
984     int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
985     @@ -424,8 +423,8 @@ int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
986     if (err)
987     goto err_dev_put;
988    
989     - flow_block_ing_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv,
990     - FLOW_BLOCK_BIND);
991     + flow_block_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv,
992     + FLOW_BLOCK_BIND);
993    
994     return 0;
995    
996     @@ -464,8 +463,8 @@ void __flow_indr_block_cb_unregister(struct net_device *dev,
997     if (!indr_block_cb)
998     return;
999    
1000     - flow_block_ing_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv,
1001     - FLOW_BLOCK_UNBIND);
1002     + flow_block_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv,
1003     + FLOW_BLOCK_UNBIND);
1004    
1005     flow_indr_block_cb_del(indr_block_cb);
1006     flow_indr_block_dev_put(indr_dev);
1007     @@ -499,21 +498,21 @@ void flow_indr_block_call(struct net_device *dev,
1008     }
1009     EXPORT_SYMBOL_GPL(flow_indr_block_call);
1010    
1011     -void flow_indr_add_block_ing_cb(struct flow_indr_block_ing_entry *entry)
1012     +void flow_indr_add_block_cb(struct flow_indr_block_entry *entry)
1013     {
1014     - mutex_lock(&flow_indr_block_ing_cb_lock);
1015     - list_add_tail(&entry->list, &block_ing_cb_list);
1016     - mutex_unlock(&flow_indr_block_ing_cb_lock);
1017     + mutex_lock(&flow_indr_block_cb_lock);
1018     + list_add_tail(&entry->list, &block_cb_list);
1019     + mutex_unlock(&flow_indr_block_cb_lock);
1020     }
1021     -EXPORT_SYMBOL_GPL(flow_indr_add_block_ing_cb);
1022     +EXPORT_SYMBOL_GPL(flow_indr_add_block_cb);
1023    
1024     -void flow_indr_del_block_ing_cb(struct flow_indr_block_ing_entry *entry)
1025     +void flow_indr_del_block_cb(struct flow_indr_block_entry *entry)
1026     {
1027     - mutex_lock(&flow_indr_block_ing_cb_lock);
1028     + mutex_lock(&flow_indr_block_cb_lock);
1029     list_del(&entry->list);
1030     - mutex_unlock(&flow_indr_block_ing_cb_lock);
1031     + mutex_unlock(&flow_indr_block_cb_lock);
1032     }
1033     -EXPORT_SYMBOL_GPL(flow_indr_del_block_ing_cb);
1034     +EXPORT_SYMBOL_GPL(flow_indr_del_block_cb);
1035    
1036     static int __init init_flow_indr_rhashtable(void)
1037     {
1038     diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
1039     index 74cfb8b5ab33..99a6de52b21d 100644
1040     --- a/net/core/lwt_bpf.c
1041     +++ b/net/core/lwt_bpf.c
1042     @@ -230,9 +230,7 @@ static int bpf_lwt_xmit_reroute(struct sk_buff *skb)
1043     fl6.daddr = iph6->daddr;
1044     fl6.saddr = iph6->saddr;
1045    
1046     - err = ipv6_stub->ipv6_dst_lookup(net, skb->sk, &dst, &fl6);
1047     - if (unlikely(err))
1048     - goto err;
1049     + dst = ipv6_stub->ipv6_dst_lookup_flow(net, skb->sk, &fl6, NULL);
1050     if (IS_ERR(dst)) {
1051     err = PTR_ERR(dst);
1052     goto err;
1053     diff --git a/net/core/page_pool.c b/net/core/page_pool.c
1054     index 5bc65587f1c4..dfc2501c35d9 100644
1055     --- a/net/core/page_pool.c
1056     +++ b/net/core/page_pool.c
1057     @@ -18,6 +18,9 @@
1058    
1059     #include <trace/events/page_pool.h>
1060    
1061     +#define DEFER_TIME (msecs_to_jiffies(1000))
1062     +#define DEFER_WARN_INTERVAL (60 * HZ)
1063     +
1064     static int page_pool_init(struct page_pool *pool,
1065     const struct page_pool_params *params)
1066     {
1067     @@ -193,22 +196,14 @@ static s32 page_pool_inflight(struct page_pool *pool)
1068     {
1069     u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
1070     u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
1071     - s32 distance;
1072     -
1073     - distance = _distance(hold_cnt, release_cnt);
1074     -
1075     - trace_page_pool_inflight(pool, distance, hold_cnt, release_cnt);
1076     - return distance;
1077     -}
1078     + s32 inflight;
1079    
1080     -static bool __page_pool_safe_to_destroy(struct page_pool *pool)
1081     -{
1082     - s32 inflight = page_pool_inflight(pool);
1083     + inflight = _distance(hold_cnt, release_cnt);
1084    
1085     - /* The distance should not be able to become negative */
1086     + trace_page_pool_inflight(pool, inflight, hold_cnt, release_cnt);
1087     WARN(inflight < 0, "Negative(%d) inflight packet-pages", inflight);
1088    
1089     - return (inflight == 0);
1090     + return inflight;
1091     }
1092    
1093     /* Cleanup page_pool state from page */
1094     @@ -216,6 +211,7 @@ static void __page_pool_clean_page(struct page_pool *pool,
1095     struct page *page)
1096     {
1097     dma_addr_t dma;
1098     + int count;
1099    
1100     if (!(pool->p.flags & PP_FLAG_DMA_MAP))
1101     goto skip_dma_unmap;
1102     @@ -227,9 +223,11 @@ static void __page_pool_clean_page(struct page_pool *pool,
1103     DMA_ATTR_SKIP_CPU_SYNC);
1104     page->dma_addr = 0;
1105     skip_dma_unmap:
1106     - atomic_inc(&pool->pages_state_release_cnt);
1107     - trace_page_pool_state_release(pool, page,
1108     - atomic_read(&pool->pages_state_release_cnt));
1109     + /* This may be the last page returned, releasing the pool, so
1110     + * it is not safe to reference pool afterwards.
1111     + */
1112     + count = atomic_inc_return(&pool->pages_state_release_cnt);
1113     + trace_page_pool_state_release(pool, page, count);
1114     }
1115    
1116     /* unmap the page and clean our state */
1117     @@ -338,31 +336,10 @@ static void __page_pool_empty_ring(struct page_pool *pool)
1118     }
1119     }
1120    
1121     -static void __warn_in_flight(struct page_pool *pool)
1122     +static void page_pool_free(struct page_pool *pool)
1123     {
1124     - u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
1125     - u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
1126     - s32 distance;
1127     -
1128     - distance = _distance(hold_cnt, release_cnt);
1129     -
1130     - /* Drivers should fix this, but only problematic when DMA is used */
1131     - WARN(1, "Still in-flight pages:%d hold:%u released:%u",
1132     - distance, hold_cnt, release_cnt);
1133     -}
1134     -
1135     -void __page_pool_free(struct page_pool *pool)
1136     -{
1137     - /* Only last user actually free/release resources */
1138     - if (!page_pool_put(pool))
1139     - return;
1140     -
1141     - WARN(pool->alloc.count, "API usage violation");
1142     - WARN(!ptr_ring_empty(&pool->ring), "ptr_ring is not empty");
1143     -
1144     - /* Can happen due to forced shutdown */
1145     - if (!__page_pool_safe_to_destroy(pool))
1146     - __warn_in_flight(pool);
1147     + if (pool->disconnect)
1148     + pool->disconnect(pool);
1149    
1150     ptr_ring_cleanup(&pool->ring, NULL);
1151    
1152     @@ -371,12 +348,8 @@ void __page_pool_free(struct page_pool *pool)
1153    
1154     kfree(pool);
1155     }
1156     -EXPORT_SYMBOL(__page_pool_free);
1157    
1158     -/* Request to shutdown: release pages cached by page_pool, and check
1159     - * for in-flight pages
1160     - */
1161     -bool __page_pool_request_shutdown(struct page_pool *pool)
1162     +static void page_pool_scrub(struct page_pool *pool)
1163     {
1164     struct page *page;
1165    
1166     @@ -393,7 +366,64 @@ bool __page_pool_request_shutdown(struct page_pool *pool)
1167     * be in-flight.
1168     */
1169     __page_pool_empty_ring(pool);
1170     +}
1171     +
1172     +static int page_pool_release(struct page_pool *pool)
1173     +{
1174     + int inflight;
1175     +
1176     + page_pool_scrub(pool);
1177     + inflight = page_pool_inflight(pool);
1178     + if (!inflight)
1179     + page_pool_free(pool);
1180     +
1181     + return inflight;
1182     +}
1183     +
1184     +static void page_pool_release_retry(struct work_struct *wq)
1185     +{
1186     + struct delayed_work *dwq = to_delayed_work(wq);
1187     + struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw);
1188     + int inflight;
1189     +
1190     + inflight = page_pool_release(pool);
1191     + if (!inflight)
1192     + return;
1193     +
1194     + /* Periodic warning */
1195     + if (time_after_eq(jiffies, pool->defer_warn)) {
1196     + int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ;
1197     +
1198     + pr_warn("%s() stalled pool shutdown %d inflight %d sec\n",
1199     + __func__, inflight, sec);
1200     + pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
1201     + }
1202     +
1203     + /* Still not ready to be disconnected, retry later */
1204     + schedule_delayed_work(&pool->release_dw, DEFER_TIME);
1205     +}
1206     +
1207     +void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *))
1208     +{
1209     + refcount_inc(&pool->user_cnt);
1210     + pool->disconnect = disconnect;
1211     +}
1212     +
1213     +void page_pool_destroy(struct page_pool *pool)
1214     +{
1215     + if (!pool)
1216     + return;
1217     +
1218     + if (!page_pool_put(pool))
1219     + return;
1220     +
1221     + if (!page_pool_release(pool))
1222     + return;
1223     +
1224     + pool->defer_start = jiffies;
1225     + pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
1226    
1227     - return __page_pool_safe_to_destroy(pool);
1228     + INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry);
1229     + schedule_delayed_work(&pool->release_dw, DEFER_TIME);
1230     }
1231     -EXPORT_SYMBOL(__page_pool_request_shutdown);
1232     +EXPORT_SYMBOL(page_pool_destroy);
1233     diff --git a/net/core/skbuff.c b/net/core/skbuff.c
1234     index 867e61df00db..973a71f4bc89 100644
1235     --- a/net/core/skbuff.c
1236     +++ b/net/core/skbuff.c
1237     @@ -5484,7 +5484,7 @@ static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr,
1238     * Returns 0 on success, -errno otherwise.
1239     */
1240     int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
1241     - int mac_len)
1242     + int mac_len, bool ethernet)
1243     {
1244     struct mpls_shim_hdr *lse;
1245     int err;
1246     @@ -5515,7 +5515,7 @@ int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
1247     lse->label_stack_entry = mpls_lse;
1248     skb_postpush_rcsum(skb, lse, MPLS_HLEN);
1249    
1250     - if (skb->dev && skb->dev->type == ARPHRD_ETHER)
1251     + if (ethernet)
1252     skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto);
1253     skb->protocol = mpls_proto;
1254    
1255     @@ -5529,12 +5529,14 @@ EXPORT_SYMBOL_GPL(skb_mpls_push);
1256     * @skb: buffer
1257     * @next_proto: ethertype of header after popped MPLS header
1258     * @mac_len: length of the MAC header
1259     + * @ethernet: flag to indicate if ethernet header is present in packet
1260     *
1261     * Expects skb->data at mac header.
1262     *
1263     * Returns 0 on success, -errno otherwise.
1264     */
1265     -int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len)
1266     +int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
1267     + bool ethernet)
1268     {
1269     int err;
1270    
1271     @@ -5553,7 +5555,7 @@ int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len)
1272     skb_reset_mac_header(skb);
1273     skb_set_network_header(skb, mac_len);
1274    
1275     - if (skb->dev && skb->dev->type == ARPHRD_ETHER) {
1276     + if (ethernet) {
1277     struct ethhdr *hdr;
1278    
1279     /* use mpls_hdr() to get ethertype to account for VLANs. */
1280     diff --git a/net/core/xdp.c b/net/core/xdp.c
1281     index d7bf62ffbb5e..b3f463c6543f 100644
1282     --- a/net/core/xdp.c
1283     +++ b/net/core/xdp.c
1284     @@ -70,10 +70,6 @@ static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
1285    
1286     xa = container_of(rcu, struct xdp_mem_allocator, rcu);
1287    
1288     - /* Allocator have indicated safe to remove before this is called */
1289     - if (xa->mem.type == MEM_TYPE_PAGE_POOL)
1290     - page_pool_free(xa->page_pool);
1291     -
1292     /* Allow this ID to be reused */
1293     ida_simple_remove(&mem_id_pool, xa->mem.id);
1294    
1295     @@ -85,62 +81,57 @@ static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
1296     kfree(xa);
1297     }
1298    
1299     -static bool __mem_id_disconnect(int id, bool force)
1300     +static void mem_xa_remove(struct xdp_mem_allocator *xa)
1301     +{
1302     + trace_mem_disconnect(xa);
1303     +
1304     + if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
1305     + call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
1306     +}
1307     +
1308     +static void mem_allocator_disconnect(void *allocator)
1309     {
1310     struct xdp_mem_allocator *xa;
1311     - bool safe_to_remove = true;
1312     + struct rhashtable_iter iter;
1313    
1314     mutex_lock(&mem_id_lock);
1315    
1316     - xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params);
1317     - if (!xa) {
1318     - mutex_unlock(&mem_id_lock);
1319     - WARN(1, "Request remove non-existing id(%d), driver bug?", id);
1320     - return true;
1321     - }
1322     - xa->disconnect_cnt++;
1323     + rhashtable_walk_enter(mem_id_ht, &iter);
1324     + do {
1325     + rhashtable_walk_start(&iter);
1326    
1327     - /* Detects in-flight packet-pages for page_pool */
1328     - if (xa->mem.type == MEM_TYPE_PAGE_POOL)
1329     - safe_to_remove = page_pool_request_shutdown(xa->page_pool);
1330     + while ((xa = rhashtable_walk_next(&iter)) && !IS_ERR(xa)) {
1331     + if (xa->allocator == allocator)
1332     + mem_xa_remove(xa);
1333     + }
1334    
1335     - trace_mem_disconnect(xa, safe_to_remove, force);
1336     + rhashtable_walk_stop(&iter);
1337    
1338     - if ((safe_to_remove || force) &&
1339     - !rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
1340     - call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
1341     + } while (xa == ERR_PTR(-EAGAIN));
1342     + rhashtable_walk_exit(&iter);
1343    
1344     mutex_unlock(&mem_id_lock);
1345     - return (safe_to_remove|force);
1346     }
1347    
1348     -#define DEFER_TIME (msecs_to_jiffies(1000))
1349     -#define DEFER_WARN_INTERVAL (30 * HZ)
1350     -#define DEFER_MAX_RETRIES 120
1351     -
1352     -static void mem_id_disconnect_defer_retry(struct work_struct *wq)
1353     +static void mem_id_disconnect(int id)
1354     {
1355     - struct delayed_work *dwq = to_delayed_work(wq);
1356     - struct xdp_mem_allocator *xa = container_of(dwq, typeof(*xa), defer_wq);
1357     - bool force = false;
1358     + struct xdp_mem_allocator *xa;
1359    
1360     - if (xa->disconnect_cnt > DEFER_MAX_RETRIES)
1361     - force = true;
1362     + mutex_lock(&mem_id_lock);
1363    
1364     - if (__mem_id_disconnect(xa->mem.id, force))
1365     + xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params);
1366     + if (!xa) {
1367     + mutex_unlock(&mem_id_lock);
1368     + WARN(1, "Request remove non-existing id(%d), driver bug?", id);
1369     return;
1370     + }
1371    
1372     - /* Periodic warning */
1373     - if (time_after_eq(jiffies, xa->defer_warn)) {
1374     - int sec = (s32)((u32)jiffies - (u32)xa->defer_start) / HZ;
1375     + trace_mem_disconnect(xa);
1376    
1377     - pr_warn("%s() stalled mem.id=%u shutdown %d attempts %d sec\n",
1378     - __func__, xa->mem.id, xa->disconnect_cnt, sec);
1379     - xa->defer_warn = jiffies + DEFER_WARN_INTERVAL;
1380     - }
1381     + if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
1382     + call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
1383    
1384     - /* Still not ready to be disconnected, retry later */
1385     - schedule_delayed_work(&xa->defer_wq, DEFER_TIME);
1386     + mutex_unlock(&mem_id_lock);
1387     }
1388    
1389     void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
1390     @@ -153,38 +144,21 @@ void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
1391     return;
1392     }
1393    
1394     - if (xdp_rxq->mem.type != MEM_TYPE_PAGE_POOL &&
1395     - xdp_rxq->mem.type != MEM_TYPE_ZERO_COPY) {
1396     - return;
1397     - }
1398     -
1399     if (id == 0)
1400     return;
1401    
1402     - if (__mem_id_disconnect(id, false))
1403     - return;
1404     -
1405     - /* Could not disconnect, defer new disconnect attempt to later */
1406     - mutex_lock(&mem_id_lock);
1407     + if (xdp_rxq->mem.type == MEM_TYPE_ZERO_COPY)
1408     + return mem_id_disconnect(id);
1409    
1410     - xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params);
1411     - if (!xa) {
1412     - mutex_unlock(&mem_id_lock);
1413     - return;
1414     + if (xdp_rxq->mem.type == MEM_TYPE_PAGE_POOL) {
1415     + rcu_read_lock();
1416     + xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
1417     + page_pool_destroy(xa->page_pool);
1418     + rcu_read_unlock();
1419     }
1420     - xa->defer_start = jiffies;
1421     - xa->defer_warn = jiffies + DEFER_WARN_INTERVAL;
1422     -
1423     - INIT_DELAYED_WORK(&xa->defer_wq, mem_id_disconnect_defer_retry);
1424     - mutex_unlock(&mem_id_lock);
1425     - schedule_delayed_work(&xa->defer_wq, DEFER_TIME);
1426     }
1427     EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model);
1428    
1429     -/* This unregister operation will also cleanup and destroy the
1430     - * allocator. The page_pool_free() operation is first called when it's
1431     - * safe to remove, possibly deferred to a workqueue.
1432     - */
1433     void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
1434     {
1435     /* Simplify driver cleanup code paths, allow unreg "unused" */
1436     @@ -371,7 +345,7 @@ int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
1437     }
1438    
1439     if (type == MEM_TYPE_PAGE_POOL)
1440     - page_pool_get(xdp_alloc->page_pool);
1441     + page_pool_use_xdp_mem(allocator, mem_allocator_disconnect);
1442    
1443     mutex_unlock(&mem_id_lock);
1444    
1445     @@ -402,15 +376,8 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
1446     /* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
1447     xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
1448     page = virt_to_head_page(data);
1449     - if (likely(xa)) {
1450     - napi_direct &= !xdp_return_frame_no_direct();
1451     - page_pool_put_page(xa->page_pool, page, napi_direct);
1452     - } else {
1453     - /* Hopefully stack show who to blame for late return */
1454     - WARN_ONCE(1, "page_pool gone mem.id=%d", mem->id);
1455     - trace_mem_return_failed(mem, page);
1456     - put_page(page);
1457     - }
1458     + napi_direct &= !xdp_return_frame_no_direct();
1459     + page_pool_put_page(xa->page_pool, page, napi_direct);
1460     rcu_read_unlock();
1461     break;
1462     case MEM_TYPE_PAGE_SHARED:
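
Note on the net/core/xdp.c rework above: the deferred-shutdown machinery (delayed work, retry counters, periodic stall warnings) is removed. The page_pool is instead registered with a disconnect callback via page_pool_use_xdp_mem() and torn down with page_pool_destroy(), while mem_allocator_disconnect() walks the ID table and unhashes every entry bound to that allocator. The walk restarts if the rhashtable is resized underneath the iterator; a sketch of that idiom, restating the new code with comments:

	struct rhashtable_iter iter;
	struct xdp_mem_allocator *xa;

	rhashtable_walk_enter(mem_id_ht, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((xa = rhashtable_walk_next(&iter)) && !IS_ERR(xa)) {
			if (xa->allocator == allocator)
				mem_xa_remove(xa);	/* unhash + RCU-free */
		}

		rhashtable_walk_stop(&iter);
	} while (xa == ERR_PTR(-EAGAIN));	/* table resized, walk again */
	rhashtable_walk_exit(&iter);

A visible consequence in __xdp_return() above: the xa == NULL fallback disappears, because a pool can no longer vanish while its packets are still in flight.
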
1463     diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
1464     index 25aab672fc99..1e5e08cc0bfc 100644
1465     --- a/net/dccp/ipv6.c
1466     +++ b/net/dccp/ipv6.c
1467     @@ -210,7 +210,7 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req
1468     final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
1469     rcu_read_unlock();
1470    
1471     - dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
1472     + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
1473     if (IS_ERR(dst)) {
1474     err = PTR_ERR(dst);
1475     dst = NULL;
1476     @@ -282,7 +282,7 @@ static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
1477     security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6));
1478    
1479     /* sk = NULL, but it is safe for now. RST socket required. */
1480     - dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
1481     + dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL);
1482     if (!IS_ERR(dst)) {
1483     skb_dst_set(skb, dst);
1484     ip6_xmit(ctl_sk, skb, &fl6, 0, NULL, 0, 0);
1485     @@ -912,7 +912,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
1486     opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
1487     final_p = fl6_update_dst(&fl6, opt, &final);
1488    
1489     - dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
1490     + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
1491     if (IS_ERR(dst)) {
1492     err = PTR_ERR(dst);
1493     goto failure;
1494     diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
1495     index f509b495451a..b01e1bae4ddc 100644
1496     --- a/net/hsr/hsr_device.c
1497     +++ b/net/hsr/hsr_device.c
1498     @@ -227,8 +227,13 @@ static int hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
1499     struct hsr_port *master;
1500    
1501     master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
1502     - skb->dev = master->dev;
1503     - hsr_forward_skb(skb, master);
1504     + if (master) {
1505     + skb->dev = master->dev;
1506     + hsr_forward_skb(skb, master);
1507     + } else {
1508     + atomic_long_inc(&dev->tx_dropped);
1509     + dev_kfree_skb_any(skb);
1510     + }
1511     return NETDEV_TX_OK;
1512     }
1513    
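
Note on the hsr hunk above: hsr_port_get_hsr() can return NULL while the master port is being torn down. The shape matters for any ndo_start_xmit implementation: the skb must be consumed on every path and NETDEV_TX_OK returned, otherwise the core would requeue a packet the device can never send. Restating the fix with comments:

	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	if (master) {
		skb->dev = master->dev;
		hsr_forward_skb(skb, master);
	} else {
		atomic_long_inc(&dev->tx_dropped);	/* account the drop */
		dev_kfree_skb_any(skb);		/* safe in any context */
	}
	return NETDEV_TX_OK;	/* skb consumed either way */
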
1514     diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
1515     index a4b5bd4d2c89..e4632bd2026d 100644
1516     --- a/net/ipv4/devinet.c
1517     +++ b/net/ipv4/devinet.c
1518     @@ -1496,11 +1496,6 @@ skip:
1519     }
1520     }
1521    
1522     -static bool inetdev_valid_mtu(unsigned int mtu)
1523     -{
1524     - return mtu >= IPV4_MIN_MTU;
1525     -}
1526     -
1527     static void inetdev_send_gratuitous_arp(struct net_device *dev,
1528     struct in_device *in_dev)
1529    
1530     diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
1531     index 44bfeecac33e..5fd6e8ed02b5 100644
1532     --- a/net/ipv4/gre_demux.c
1533     +++ b/net/ipv4/gre_demux.c
1534     @@ -127,7 +127,7 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
1535     if (!pskb_may_pull(skb, nhs + hdr_len + sizeof(*ershdr)))
1536     return -EINVAL;
1537    
1538     - ershdr = (struct erspan_base_hdr *)options;
1539     + ershdr = (struct erspan_base_hdr *)(skb->data + nhs + hdr_len);
1540     tpi->key = cpu_to_be32(get_session_id(ershdr));
1541     }
1542    
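
Note on the gre_demux fix above: the ERSPAN base-header pointer is recomputed after pskb_may_pull(), because a successful pull may reallocate the skb head and leave any pointer derived from the old skb->data (the previous "options" pointer) dangling. The general rule, as a sketch:

	/* Any pointer into skb->data is invalidated by a pskb_may_pull()
	 * that had to expand the head; always re-derive after the call.
	 */
	if (!pskb_may_pull(skb, nhs + hdr_len + sizeof(*ershdr)))
		return -EINVAL;
	ershdr = (struct erspan_base_hdr *)(skb->data + nhs + hdr_len);
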
1543     diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
1544     index 3d8baaaf7086..b268ee1c1b44 100644
1545     --- a/net/ipv4/ip_output.c
1546     +++ b/net/ipv4/ip_output.c
1547     @@ -1258,15 +1258,18 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
1548     cork->addr = ipc->addr;
1549     }
1550    
1551     - /*
1552     - * We steal reference to this route, caller should not release it
1553     - */
1554     - *rtp = NULL;
1555     cork->fragsize = ip_sk_use_pmtu(sk) ?
1556     - dst_mtu(&rt->dst) : rt->dst.dev->mtu;
1557     + dst_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu);
1558     +
1559     + if (!inetdev_valid_mtu(cork->fragsize))
1560     + return -ENETUNREACH;
1561    
1562     cork->gso_size = ipc->gso_size;
1563     +
1564     cork->dst = &rt->dst;
1565     + /* We stole this route, caller should not release it. */
1566     + *rtp = NULL;
1567     +
1568     cork->length = 0;
1569     cork->ttl = ipc->ttl;
1570     cork->tos = ipc->tos;
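
Note on the ip_output.c hunk above, which does two things. It rejects routes whose device MTU has dropped below the IPv4 minimum before setting up the cork (inetdev_valid_mtu() is deleted from devinet.c above and presumably becomes a header inline earlier in this patch, outside this excerpt), and it moves the "*rtp = NULL" route-stealing past the last failure point, so on -ENETUNREACH the caller still owns and releases the route. A sketch of the helper as it existed in devinet.c:

	/* IPV4_MIN_MTU is 68: per RFC 791, every IP module must be able
	 * to forward a 68-octet datagram without further fragmentation.
	 */
	static inline bool inetdev_valid_mtu(unsigned int mtu)
	{
		return mtu >= IPV4_MIN_MTU;
	}
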
1571     diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
1572     index 0488607c5cd3..762edd800d78 100644
1573     --- a/net/ipv4/tcp_output.c
1574     +++ b/net/ipv4/tcp_output.c
1575     @@ -755,8 +755,9 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
1576     min_t(unsigned int, eff_sacks,
1577     (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
1578     TCPOLEN_SACK_PERBLOCK);
1579     - size += TCPOLEN_SACK_BASE_ALIGNED +
1580     - opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
1581     + if (likely(opts->num_sack_blocks))
1582     + size += TCPOLEN_SACK_BASE_ALIGNED +
1583     + opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
1584     }
1585    
1586     return size;
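
Note on the tcp_output.c guard above: when other options leave fewer than TCPOLEN_SACK_BASE_ALIGNED + TCPOLEN_SACK_PERBLOCK bytes free, the division yields num_sack_blocks == 0, yet the old code still added the 4-byte aligned SACK base, overestimating option space. Worked numbers, assuming the usual option sizes (SACK base aligned to 4, 8 bytes per block, MD5 aligned to 20, timestamps 12, 40 bytes total):

	remaining = 40 - 20 (MD5) - 12 (timestamps) = 8
	num_sack_blocks = (8 - 4) / 8 = 0
	old: size += 4 anyway, 4 bytes overestimated
	new: size unchanged when num_sack_blocks == 0
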
1587     diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
1588     index 2fc079284ca4..ea00ce3d4117 100644
1589     --- a/net/ipv6/addrconf_core.c
1590     +++ b/net/ipv6/addrconf_core.c
1591     @@ -129,11 +129,12 @@ int inet6addr_validator_notifier_call_chain(unsigned long val, void *v)
1592     }
1593     EXPORT_SYMBOL(inet6addr_validator_notifier_call_chain);
1594    
1595     -static int eafnosupport_ipv6_dst_lookup(struct net *net, struct sock *u1,
1596     - struct dst_entry **u2,
1597     - struct flowi6 *u3)
1598     +static struct dst_entry *eafnosupport_ipv6_dst_lookup_flow(struct net *net,
1599     + const struct sock *sk,
1600     + struct flowi6 *fl6,
1601     + const struct in6_addr *final_dst)
1602     {
1603     - return -EAFNOSUPPORT;
1604     + return ERR_PTR(-EAFNOSUPPORT);
1605     }
1606    
1607     static int eafnosupport_ipv6_route_input(struct sk_buff *skb)
1608     @@ -190,7 +191,7 @@ static int eafnosupport_ip6_del_rt(struct net *net, struct fib6_info *rt)
1609     }
1610    
1611     const struct ipv6_stub *ipv6_stub __read_mostly = &(struct ipv6_stub) {
1612     - .ipv6_dst_lookup = eafnosupport_ipv6_dst_lookup,
1613     + .ipv6_dst_lookup_flow = eafnosupport_ipv6_dst_lookup_flow,
1614     .ipv6_route_input = eafnosupport_ipv6_route_input,
1615     .fib6_get_table = eafnosupport_fib6_get_table,
1616     .fib6_table_lookup = eafnosupport_fib6_table_lookup,
1617     diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
1618     index ef37e0574f54..14ac1d911287 100644
1619     --- a/net/ipv6/af_inet6.c
1620     +++ b/net/ipv6/af_inet6.c
1621     @@ -765,7 +765,7 @@ int inet6_sk_rebuild_header(struct sock *sk)
1622     &final);
1623     rcu_read_unlock();
1624    
1625     - dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
1626     + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
1627     if (IS_ERR(dst)) {
1628     sk->sk_route_caps = 0;
1629     sk->sk_err_soft = -PTR_ERR(dst);
1630     @@ -946,7 +946,7 @@ static int ipv6_route_input(struct sk_buff *skb)
1631     static const struct ipv6_stub ipv6_stub_impl = {
1632     .ipv6_sock_mc_join = ipv6_sock_mc_join,
1633     .ipv6_sock_mc_drop = ipv6_sock_mc_drop,
1634     - .ipv6_dst_lookup = ip6_dst_lookup,
1635     + .ipv6_dst_lookup_flow = ip6_dst_lookup_flow,
1636     .ipv6_route_input = ipv6_route_input,
1637     .fib6_get_table = fib6_get_table,
1638     .fib6_table_lookup = fib6_table_lookup,
1639     diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
1640     index 96f939248d2f..390bedde21a5 100644
1641     --- a/net/ipv6/datagram.c
1642     +++ b/net/ipv6/datagram.c
1643     @@ -85,7 +85,7 @@ int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr)
1644     final_p = fl6_update_dst(&fl6, opt, &final);
1645     rcu_read_unlock();
1646    
1647     - dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
1648     + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
1649     if (IS_ERR(dst)) {
1650     err = PTR_ERR(dst);
1651     goto out;
1652     diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
1653     index 0a0945a5b30d..fe9cb8d1adca 100644
1654     --- a/net/ipv6/inet6_connection_sock.c
1655     +++ b/net/ipv6/inet6_connection_sock.c
1656     @@ -48,7 +48,7 @@ struct dst_entry *inet6_csk_route_req(const struct sock *sk,
1657     fl6->flowi6_uid = sk->sk_uid;
1658     security_req_classify_flow(req, flowi6_to_flowi(fl6));
1659    
1660     - dst = ip6_dst_lookup_flow(sk, fl6, final_p);
1661     + dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p);
1662     if (IS_ERR(dst))
1663     return NULL;
1664    
1665     @@ -103,7 +103,7 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
1666    
1667     dst = __inet6_csk_dst_check(sk, np->dst_cookie);
1668     if (!dst) {
1669     - dst = ip6_dst_lookup_flow(sk, fl6, final_p);
1670     + dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p);
1671    
1672     if (!IS_ERR(dst))
1673     ip6_dst_store(sk, dst, NULL, NULL);
1674     diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
1675     index 71827b56c006..78d495581d69 100644
1676     --- a/net/ipv6/ip6_output.c
1677     +++ b/net/ipv6/ip6_output.c
1678     @@ -1144,19 +1144,19 @@ EXPORT_SYMBOL_GPL(ip6_dst_lookup);
1679     * It returns a valid dst pointer on success, or a pointer encoded
1680     * error code.
1681     */
1682     -struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
1683     +struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6,
1684     const struct in6_addr *final_dst)
1685     {
1686     struct dst_entry *dst = NULL;
1687     int err;
1688    
1689     - err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
1690     + err = ip6_dst_lookup_tail(net, sk, &dst, fl6);
1691     if (err)
1692     return ERR_PTR(err);
1693     if (final_dst)
1694     fl6->daddr = *final_dst;
1695    
1696     - return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
1697     + return xfrm_lookup_route(net, dst, flowi6_to_flowi(fl6), sk, 0);
1698     }
1699     EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
1700    
1701     @@ -1188,7 +1188,7 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
1702     if (dst)
1703     return dst;
1704    
1705     - dst = ip6_dst_lookup_flow(sk, fl6, final_dst);
1706     + dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_dst);
1707     if (connected && !IS_ERR(dst))
1708     ip6_sk_dst_store_flow(sk, dst_clone(dst), fl6);
1709    
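
Note on the ip6_output.c hunk above, the core of a change threaded through most IPv6 call sites in this patch (dccp, af_inet6, datagram, inet6_connection_sock, raw, syncookies, tcp_ipv6, l2tp, sctp): ip6_dst_lookup_flow() now takes the struct net explicitly instead of deriving it via sock_net(sk), which lets callers pass a NULL socket. The ipv6 stub follows suit, replacing the int-returning ipv6_dst_lookup() (dst via out-parameter) with ipv6_dst_lookup_flow(), which returns the dst or an ERR_PTR. The conversion shape, as in the af_mpls and infiniband addr.c hunks:

	/* old stub API: status code plus out-parameter */
	err = ipv6_stub->ipv6_dst_lookup(net, NULL, &dst, &fl6);
	if (err)
		return ERR_PTR(err);

	/* new stub API: dst or ERR_PTR, NULL sk allowed */
	dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &fl6, NULL);
	if (IS_ERR(dst))
		return ERR_CAST(dst);
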
1710     diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
1711     index a77f6b7d3a7c..dfe5e603ffe1 100644
1712     --- a/net/ipv6/raw.c
1713     +++ b/net/ipv6/raw.c
1714     @@ -925,7 +925,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1715    
1716     fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
1717    
1718     - dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
1719     + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
1720     if (IS_ERR(dst)) {
1721     err = PTR_ERR(dst);
1722     goto out;
1723     diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
1724     index 16632e02e9b0..30915f6f31e3 100644
1725     --- a/net/ipv6/syncookies.c
1726     +++ b/net/ipv6/syncookies.c
1727     @@ -235,7 +235,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
1728     fl6.flowi6_uid = sk->sk_uid;
1729     security_req_classify_flow(req, flowi6_to_flowi(&fl6));
1730    
1731     - dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
1732     + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
1733     if (IS_ERR(dst))
1734     goto out_free;
1735     }
1736     diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
1737     index 4804b6dc5e65..b42fa41cfceb 100644
1738     --- a/net/ipv6/tcp_ipv6.c
1739     +++ b/net/ipv6/tcp_ipv6.c
1740     @@ -275,7 +275,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
1741    
1742     security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
1743    
1744     - dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
1745     + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
1746     if (IS_ERR(dst)) {
1747     err = PTR_ERR(dst);
1748     goto failure;
1749     @@ -906,7 +906,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
1750     * Underlying function will use this to retrieve the network
1751     * namespace
1752     */
1753     - dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
1754     + dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL);
1755     if (!IS_ERR(dst)) {
1756     skb_dst_set(buff, dst);
1757     ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass,
1758     diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
1759     index 802f19aba7e3..d148766f40d1 100644
1760     --- a/net/l2tp/l2tp_ip6.c
1761     +++ b/net/l2tp/l2tp_ip6.c
1762     @@ -615,7 +615,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1763    
1764     fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
1765    
1766     - dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
1767     + dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
1768     if (IS_ERR(dst)) {
1769     err = PTR_ERR(dst);
1770     goto out;
1771     diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
1772     index c312741df2ce..4701edffb1f7 100644
1773     --- a/net/mpls/af_mpls.c
1774     +++ b/net/mpls/af_mpls.c
1775     @@ -617,16 +617,15 @@ static struct net_device *inet6_fib_lookup_dev(struct net *net,
1776     struct net_device *dev;
1777     struct dst_entry *dst;
1778     struct flowi6 fl6;
1779     - int err;
1780    
1781     if (!ipv6_stub)
1782     return ERR_PTR(-EAFNOSUPPORT);
1783    
1784     memset(&fl6, 0, sizeof(fl6));
1785     memcpy(&fl6.daddr, addr, sizeof(struct in6_addr));
1786     - err = ipv6_stub->ipv6_dst_lookup(net, NULL, &dst, &fl6);
1787     - if (err)
1788     - return ERR_PTR(err);
1789     + dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &fl6, NULL);
1790     + if (IS_ERR(dst))
1791     + return ERR_CAST(dst);
1792    
1793     dev = dst->dev;
1794     dev_hold(dev);
1795     diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
1796     index e25dab8128db..5f6037695dee 100644
1797     --- a/net/netfilter/nf_tables_offload.c
1798     +++ b/net/netfilter/nf_tables_offload.c
1799     @@ -455,7 +455,7 @@ static int nft_offload_netdev_event(struct notifier_block *this,
1800     return NOTIFY_DONE;
1801     }
1802    
1803     -static struct flow_indr_block_ing_entry block_ing_entry = {
1804     +static struct flow_indr_block_entry block_ing_entry = {
1805     .cb = nft_indr_block_cb,
1806     .list = LIST_HEAD_INIT(block_ing_entry.list),
1807     };
1808     @@ -472,13 +472,13 @@ int nft_offload_init(void)
1809     if (err < 0)
1810     return err;
1811    
1812     - flow_indr_add_block_ing_cb(&block_ing_entry);
1813     + flow_indr_add_block_cb(&block_ing_entry);
1814    
1815     return 0;
1816     }
1817    
1818     void nft_offload_exit(void)
1819     {
1820     - flow_indr_del_block_ing_cb(&block_ing_entry);
1821     + flow_indr_del_block_cb(&block_ing_entry);
1822     unregister_netdevice_notifier(&nft_offload_netdev_notifier);
1823     }
1824     diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
1825     index 1c77f520f474..99352f09deaa 100644
1826     --- a/net/openvswitch/actions.c
1827     +++ b/net/openvswitch/actions.c
1828     @@ -166,7 +166,8 @@ static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
1829     int err;
1830    
1831     err = skb_mpls_push(skb, mpls->mpls_lse, mpls->mpls_ethertype,
1832     - skb->mac_len);
1833     + skb->mac_len,
1834     + ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET);
1835     if (err)
1836     return err;
1837    
1838     @@ -179,7 +180,8 @@ static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
1839     {
1840     int err;
1841    
1842     - err = skb_mpls_pop(skb, ethertype, skb->mac_len);
1843     + err = skb_mpls_pop(skb, ethertype, skb->mac_len,
1844     + ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET);
1845     if (err)
1846     return err;
1847    
1848     diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
1849     index 05249eb45082..283e8f9a5fd2 100644
1850     --- a/net/openvswitch/conntrack.c
1851     +++ b/net/openvswitch/conntrack.c
1852     @@ -903,6 +903,17 @@ static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
1853     }
1854     err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, maniptype);
1855    
1856     + if (err == NF_ACCEPT &&
1857     + ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) {
1858     + if (maniptype == NF_NAT_MANIP_SRC)
1859     + maniptype = NF_NAT_MANIP_DST;
1860     + else
1861     + maniptype = NF_NAT_MANIP_SRC;
1862     +
1863     + err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range,
1864     + maniptype);
1865     + }
1866     +
1867     /* Mark NAT done if successful and update the flow key. */
1868     if (err == NF_ACCEPT)
1869     ovs_nat_update_key(key, skb, maniptype);
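
Note: the openvswitch conntrack hunk above and the act_ct hunk below carry the same fix. When a conntrack entry has both an SRC and a DST NAT binding (for example DNAT port forwarding combined with masquerade), a single execute pass applies only the manip type it was called with; on NF_ACCEPT the code now flips the manip type and applies the second translation too. The common shape:

	err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
	if (err == NF_ACCEPT &&
	    ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) {
		/* both directions bound: apply the other manip as well */
		maniptype = (maniptype == NF_NAT_MANIP_SRC) ?
			    NF_NAT_MANIP_DST : NF_NAT_MANIP_SRC;
		err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
	}
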
1870     diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
1871     index fcc46025e790..f3232a00970f 100644
1872     --- a/net/sched/act_ct.c
1873     +++ b/net/sched/act_ct.c
1874     @@ -329,6 +329,7 @@ static int tcf_ct_act_nat(struct sk_buff *skb,
1875     bool commit)
1876     {
1877     #if IS_ENABLED(CONFIG_NF_NAT)
1878     + int err;
1879     enum nf_nat_manip_type maniptype;
1880    
1881     if (!(ct_action & TCA_CT_ACT_NAT))
1882     @@ -359,7 +360,17 @@ static int tcf_ct_act_nat(struct sk_buff *skb,
1883     return NF_ACCEPT;
1884     }
1885    
1886     - return ct_nat_execute(skb, ct, ctinfo, range, maniptype);
1887     + err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
1888     + if (err == NF_ACCEPT &&
1889     + ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) {
1890     + if (maniptype == NF_NAT_MANIP_SRC)
1891     + maniptype = NF_NAT_MANIP_DST;
1892     + else
1893     + maniptype = NF_NAT_MANIP_SRC;
1894     +
1895     + err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
1896     + }
1897     + return err;
1898     #else
1899     return NF_ACCEPT;
1900     #endif
1901     diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
1902     index 4cf6c553bb0b..db570d2bd0e0 100644
1903     --- a/net/sched/act_mpls.c
1904     +++ b/net/sched/act_mpls.c
1905     @@ -1,6 +1,7 @@
1906     // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
1907     /* Copyright (C) 2019 Netronome Systems, Inc. */
1908    
1909     +#include <linux/if_arp.h>
1910     #include <linux/init.h>
1911     #include <linux/kernel.h>
1912     #include <linux/module.h>
1913     @@ -76,12 +77,14 @@ static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a,
1914    
1915     switch (p->tcfm_action) {
1916     case TCA_MPLS_ACT_POP:
1917     - if (skb_mpls_pop(skb, p->tcfm_proto, mac_len))
1918     + if (skb_mpls_pop(skb, p->tcfm_proto, mac_len,
1919     + skb->dev && skb->dev->type == ARPHRD_ETHER))
1920     goto drop;
1921     break;
1922     case TCA_MPLS_ACT_PUSH:
1923     new_lse = tcf_mpls_get_lse(NULL, p, !eth_p_mpls(skb->protocol));
1924     - if (skb_mpls_push(skb, new_lse, p->tcfm_proto, mac_len))
1925     + if (skb_mpls_push(skb, new_lse, p->tcfm_proto, mac_len,
1926     + skb->dev && skb->dev->type == ARPHRD_ETHER))
1927     goto drop;
1928     break;
1929     case TCA_MPLS_ACT_MODIFY:
1930     diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
1931     index 20d60b8fcb70..6a0eacafdb19 100644
1932     --- a/net/sched/cls_api.c
1933     +++ b/net/sched/cls_api.c
1934     @@ -626,15 +626,15 @@ static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
1935     static int tcf_block_setup(struct tcf_block *block,
1936     struct flow_block_offload *bo);
1937    
1938     -static void tc_indr_block_ing_cmd(struct net_device *dev,
1939     - struct tcf_block *block,
1940     - flow_indr_block_bind_cb_t *cb,
1941     - void *cb_priv,
1942     - enum flow_block_command command)
1943     +static void tc_indr_block_cmd(struct net_device *dev, struct tcf_block *block,
1944     + flow_indr_block_bind_cb_t *cb, void *cb_priv,
1945     + enum flow_block_command command, bool ingress)
1946     {
1947     struct flow_block_offload bo = {
1948     .command = command,
1949     - .binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
1950     + .binder_type = ingress ?
1951     + FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS :
1952     + FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
1953     .net = dev_net(dev),
1954     .block_shared = tcf_block_non_null_shared(block),
1955     };
1956     @@ -652,9 +652,10 @@ static void tc_indr_block_ing_cmd(struct net_device *dev,
1957     up_write(&block->cb_lock);
1958     }
1959    
1960     -static struct tcf_block *tc_dev_ingress_block(struct net_device *dev)
1961     +static struct tcf_block *tc_dev_block(struct net_device *dev, bool ingress)
1962     {
1963     const struct Qdisc_class_ops *cops;
1964     + const struct Qdisc_ops *ops;
1965     struct Qdisc *qdisc;
1966    
1967     if (!dev_ingress_queue(dev))
1968     @@ -664,24 +665,37 @@ static struct tcf_block *tc_dev_ingress_block(struct net_device *dev)
1969     if (!qdisc)
1970     return NULL;
1971    
1972     - cops = qdisc->ops->cl_ops;
1973     + ops = qdisc->ops;
1974     + if (!ops)
1975     + return NULL;
1976     +
1977     + if (!ingress && !strcmp("ingress", ops->id))
1978     + return NULL;
1979     +
1980     + cops = ops->cl_ops;
1981     if (!cops)
1982     return NULL;
1983    
1984     if (!cops->tcf_block)
1985     return NULL;
1986    
1987     - return cops->tcf_block(qdisc, TC_H_MIN_INGRESS, NULL);
1988     + return cops->tcf_block(qdisc,
1989     + ingress ? TC_H_MIN_INGRESS : TC_H_MIN_EGRESS,
1990     + NULL);
1991     }
1992    
1993     -static void tc_indr_block_get_and_ing_cmd(struct net_device *dev,
1994     - flow_indr_block_bind_cb_t *cb,
1995     - void *cb_priv,
1996     - enum flow_block_command command)
1997     +static void tc_indr_block_get_and_cmd(struct net_device *dev,
1998     + flow_indr_block_bind_cb_t *cb,
1999     + void *cb_priv,
2000     + enum flow_block_command command)
2001     {
2002     - struct tcf_block *block = tc_dev_ingress_block(dev);
2003     + struct tcf_block *block;
2004     +
2005     + block = tc_dev_block(dev, true);
2006     + tc_indr_block_cmd(dev, block, cb, cb_priv, command, true);
2007    
2008     - tc_indr_block_ing_cmd(dev, block, cb, cb_priv, command);
2009     + block = tc_dev_block(dev, false);
2010     + tc_indr_block_cmd(dev, block, cb, cb_priv, command, false);
2011     }
2012    
2013     static void tc_indr_block_call(struct tcf_block *block,
2014     @@ -2721,13 +2735,19 @@ static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2015     struct netlink_ext_ack *extack)
2016     {
2017     const struct tcf_proto_ops *ops;
2018     + char name[IFNAMSIZ];
2019     void *tmplt_priv;
2020    
2021     /* If kind is not set, user did not specify template. */
2022     if (!tca[TCA_KIND])
2023     return 0;
2024    
2025     - ops = tcf_proto_lookup_ops(nla_data(tca[TCA_KIND]), true, extack);
2026     + if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2027     + NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
2028     + return -EINVAL;
2029     + }
2030     +
2031     + ops = tcf_proto_lookup_ops(name, true, extack);
2032     if (IS_ERR(ops))
2033     return PTR_ERR(ops);
2034     if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
2035     @@ -3626,9 +3646,9 @@ static struct pernet_operations tcf_net_ops = {
2036     .size = sizeof(struct tcf_net),
2037     };
2038    
2039     -static struct flow_indr_block_ing_entry block_ing_entry = {
2040     - .cb = tc_indr_block_get_and_ing_cmd,
2041     - .list = LIST_HEAD_INIT(block_ing_entry.list),
2042     +static struct flow_indr_block_entry block_entry = {
2043     + .cb = tc_indr_block_get_and_cmd,
2044     + .list = LIST_HEAD_INIT(block_entry.list),
2045     };
2046    
2047     static int __init tc_filter_init(void)
2048     @@ -3643,7 +3663,7 @@ static int __init tc_filter_init(void)
2049     if (err)
2050     goto err_register_pernet_subsys;
2051    
2052     - flow_indr_add_block_ing_cb(&block_ing_entry);
2053     + flow_indr_add_block_cb(&block_entry);
2054    
2055     rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
2056     RTNL_FLAG_DOIT_UNLOCKED);
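
Note: two independent fixes sit in the cls_api.c diff above. First, the indirect block plumbing is generalized from ingress-only to ingress plus egress: tc_dev_block() resolves the clsact block for either direction and tc_indr_block_get_and_cmd() issues the command for both, matching the flow_indr_block_entry rename in the nf_tables_offload hunk earlier. Second, tc_chain_tmplt_add() now copies TCA_KIND through tcf_proto_check_kind() into a fixed IFNAMSIZ buffer before the ops lookup, rejecting over-long template names instead of handing unbounded attribute data onward:

	char name[IFNAMSIZ];

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
		return -EINVAL;
	}
	ops = tcf_proto_lookup_ops(name, true, extack);
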
2057     diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
2058     index 74221e3351c3..4ac110bf19c5 100644
2059     --- a/net/sched/cls_flower.c
2060     +++ b/net/sched/cls_flower.c
2061     @@ -54,8 +54,13 @@ struct fl_flow_key {
2062     struct flow_dissector_key_ip ip;
2063     struct flow_dissector_key_ip enc_ip;
2064     struct flow_dissector_key_enc_opts enc_opts;
2065     - struct flow_dissector_key_ports tp_min;
2066     - struct flow_dissector_key_ports tp_max;
2067     + union {
2068     + struct flow_dissector_key_ports tp;
2069     + struct {
2070     + struct flow_dissector_key_ports tp_min;
2071     + struct flow_dissector_key_ports tp_max;
2072     + };
2073     + } tp_range;
2074     struct flow_dissector_key_ct ct;
2075     } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
2076    
2077     @@ -198,19 +203,19 @@ static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
2078     {
2079     __be16 min_mask, max_mask, min_val, max_val;
2080    
2081     - min_mask = htons(filter->mask->key.tp_min.dst);
2082     - max_mask = htons(filter->mask->key.tp_max.dst);
2083     - min_val = htons(filter->key.tp_min.dst);
2084     - max_val = htons(filter->key.tp_max.dst);
2085     + min_mask = htons(filter->mask->key.tp_range.tp_min.dst);
2086     + max_mask = htons(filter->mask->key.tp_range.tp_max.dst);
2087     + min_val = htons(filter->key.tp_range.tp_min.dst);
2088     + max_val = htons(filter->key.tp_range.tp_max.dst);
2089    
2090     if (min_mask && max_mask) {
2091     - if (htons(key->tp.dst) < min_val ||
2092     - htons(key->tp.dst) > max_val)
2093     + if (htons(key->tp_range.tp.dst) < min_val ||
2094     + htons(key->tp_range.tp.dst) > max_val)
2095     return false;
2096    
2097     /* skb does not have min and max values */
2098     - mkey->tp_min.dst = filter->mkey.tp_min.dst;
2099     - mkey->tp_max.dst = filter->mkey.tp_max.dst;
2100     + mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst;
2101     + mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst;
2102     }
2103     return true;
2104     }
2105     @@ -221,19 +226,19 @@ static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
2106     {
2107     __be16 min_mask, max_mask, min_val, max_val;
2108    
2109     - min_mask = htons(filter->mask->key.tp_min.src);
2110     - max_mask = htons(filter->mask->key.tp_max.src);
2111     - min_val = htons(filter->key.tp_min.src);
2112     - max_val = htons(filter->key.tp_max.src);
2113     + min_mask = htons(filter->mask->key.tp_range.tp_min.src);
2114     + max_mask = htons(filter->mask->key.tp_range.tp_max.src);
2115     + min_val = htons(filter->key.tp_range.tp_min.src);
2116     + max_val = htons(filter->key.tp_range.tp_max.src);
2117    
2118     if (min_mask && max_mask) {
2119     - if (htons(key->tp.src) < min_val ||
2120     - htons(key->tp.src) > max_val)
2121     + if (htons(key->tp_range.tp.src) < min_val ||
2122     + htons(key->tp_range.tp.src) > max_val)
2123     return false;
2124    
2125     /* skb does not have min and max values */
2126     - mkey->tp_min.src = filter->mkey.tp_min.src;
2127     - mkey->tp_max.src = filter->mkey.tp_max.src;
2128     + mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src;
2129     + mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src;
2130     }
2131     return true;
2132     }
2133     @@ -715,23 +720,25 @@ static void fl_set_key_val(struct nlattr **tb,
2134     static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
2135     struct fl_flow_key *mask)
2136     {
2137     - fl_set_key_val(tb, &key->tp_min.dst,
2138     - TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_min.dst,
2139     - TCA_FLOWER_UNSPEC, sizeof(key->tp_min.dst));
2140     - fl_set_key_val(tb, &key->tp_max.dst,
2141     - TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_max.dst,
2142     - TCA_FLOWER_UNSPEC, sizeof(key->tp_max.dst));
2143     - fl_set_key_val(tb, &key->tp_min.src,
2144     - TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_min.src,
2145     - TCA_FLOWER_UNSPEC, sizeof(key->tp_min.src));
2146     - fl_set_key_val(tb, &key->tp_max.src,
2147     - TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_max.src,
2148     - TCA_FLOWER_UNSPEC, sizeof(key->tp_max.src));
2149     -
2150     - if ((mask->tp_min.dst && mask->tp_max.dst &&
2151     - htons(key->tp_max.dst) <= htons(key->tp_min.dst)) ||
2152     - (mask->tp_min.src && mask->tp_max.src &&
2153     - htons(key->tp_max.src) <= htons(key->tp_min.src)))
2154     + fl_set_key_val(tb, &key->tp_range.tp_min.dst,
2155     + TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
2156     + TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
2157     + fl_set_key_val(tb, &key->tp_range.tp_max.dst,
2158     + TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
2159     + TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
2160     + fl_set_key_val(tb, &key->tp_range.tp_min.src,
2161     + TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
2162     + TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
2163     + fl_set_key_val(tb, &key->tp_range.tp_max.src,
2164     + TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
2165     + TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));
2166     +
2167     + if ((mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
2168     + htons(key->tp_range.tp_max.dst) <=
2169     + htons(key->tp_range.tp_min.dst)) ||
2170     + (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
2171     + htons(key->tp_range.tp_max.src) <=
2172     + htons(key->tp_range.tp_min.src)))
2173     return -EINVAL;
2174    
2175     return 0;
2176     @@ -1320,9 +1327,10 @@ static void fl_init_dissector(struct flow_dissector *dissector,
2177     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
2178     FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2179     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
2180     - if (FL_KEY_IS_MASKED(mask, tp) ||
2181     - FL_KEY_IS_MASKED(mask, tp_min) || FL_KEY_IS_MASKED(mask, tp_max))
2182     - FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_PORTS, tp);
2183     + FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2184     + FLOW_DISSECTOR_KEY_PORTS, tp);
2185     + FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2186     + FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range);
2187     FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2188     FLOW_DISSECTOR_KEY_IP, ip);
2189     FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2190     @@ -1371,8 +1379,10 @@ static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
2191    
2192     fl_mask_copy(newmask, mask);
2193    
2194     - if ((newmask->key.tp_min.dst && newmask->key.tp_max.dst) ||
2195     - (newmask->key.tp_min.src && newmask->key.tp_max.src))
2196     + if ((newmask->key.tp_range.tp_min.dst &&
2197     + newmask->key.tp_range.tp_max.dst) ||
2198     + (newmask->key.tp_range.tp_min.src &&
2199     + newmask->key.tp_range.tp_max.src))
2200     newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
2201    
2202     err = fl_init_mask_hashtable(newmask);
2203     @@ -1970,18 +1980,22 @@ static int fl_dump_key_val(struct sk_buff *skb,
2204     static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
2205     struct fl_flow_key *mask)
2206     {
2207     - if (fl_dump_key_val(skb, &key->tp_min.dst, TCA_FLOWER_KEY_PORT_DST_MIN,
2208     - &mask->tp_min.dst, TCA_FLOWER_UNSPEC,
2209     - sizeof(key->tp_min.dst)) ||
2210     - fl_dump_key_val(skb, &key->tp_max.dst, TCA_FLOWER_KEY_PORT_DST_MAX,
2211     - &mask->tp_max.dst, TCA_FLOWER_UNSPEC,
2212     - sizeof(key->tp_max.dst)) ||
2213     - fl_dump_key_val(skb, &key->tp_min.src, TCA_FLOWER_KEY_PORT_SRC_MIN,
2214     - &mask->tp_min.src, TCA_FLOWER_UNSPEC,
2215     - sizeof(key->tp_min.src)) ||
2216     - fl_dump_key_val(skb, &key->tp_max.src, TCA_FLOWER_KEY_PORT_SRC_MAX,
2217     - &mask->tp_max.src, TCA_FLOWER_UNSPEC,
2218     - sizeof(key->tp_max.src)))
2219     + if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst,
2220     + TCA_FLOWER_KEY_PORT_DST_MIN,
2221     + &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC,
2222     + sizeof(key->tp_range.tp_min.dst)) ||
2223     + fl_dump_key_val(skb, &key->tp_range.tp_max.dst,
2224     + TCA_FLOWER_KEY_PORT_DST_MAX,
2225     + &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC,
2226     + sizeof(key->tp_range.tp_max.dst)) ||
2227     + fl_dump_key_val(skb, &key->tp_range.tp_min.src,
2228     + TCA_FLOWER_KEY_PORT_SRC_MIN,
2229     + &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC,
2230     + sizeof(key->tp_range.tp_min.src)) ||
2231     + fl_dump_key_val(skb, &key->tp_range.tp_max.src,
2232     + TCA_FLOWER_KEY_PORT_SRC_MAX,
2233     + &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC,
2234     + sizeof(key->tp_range.tp_max.src)))
2235     return -1;
2236    
2237     return 0;
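
Note on the cls_flower rework above: the exact-port key and the min/max range keys are wrapped in one union, so the same bytes serve both match modes and the memcmp/mask machinery keeps working, while fl_init_dissector() can register FLOW_DISSECTOR_KEY_PORTS and FLOW_DISSECTOR_KEY_PORTS_RANGE independently instead of mapping range masks onto the plain ports key. The layout, as introduced above:

	union {
		struct flow_dissector_key_ports tp;	/* exact match */
		struct {
			struct flow_dissector_key_ports tp_min;	/* overlays tp */
			struct flow_dissector_key_ports tp_max;
		};
	} tp_range;
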
2238     diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
2239     index 278c0b2dc523..e79f1afe0cfd 100644
2240     --- a/net/sched/sch_mq.c
2241     +++ b/net/sched/sch_mq.c
2242     @@ -153,6 +153,7 @@ static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
2243     __gnet_stats_copy_queue(&sch->qstats,
2244     qdisc->cpu_qstats,
2245     &qdisc->qstats, qlen);
2246     + sch->q.qlen += qlen;
2247     } else {
2248     sch->q.qlen += qdisc->q.qlen;
2249     sch->bstats.bytes += qdisc->bstats.bytes;
2250     diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
2251     index 0d0113a24962..8766ab5b8788 100644
2252     --- a/net/sched/sch_mqprio.c
2253     +++ b/net/sched/sch_mqprio.c
2254     @@ -411,6 +411,7 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
2255     __gnet_stats_copy_queue(&sch->qstats,
2256     qdisc->cpu_qstats,
2257     &qdisc->qstats, qlen);
2258     + sch->q.qlen += qlen;
2259     } else {
2260     sch->q.qlen += qdisc->q.qlen;
2261     sch->bstats.bytes += qdisc->bstats.bytes;
2262     @@ -433,7 +434,7 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
2263     opt.offset[tc] = dev->tc_to_txq[tc].offset;
2264     }
2265    
2266     - if (nla_put(skb, TCA_OPTIONS, NLA_ALIGN(sizeof(opt)), &opt))
2267     + if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
2268     goto nla_put_failure;
2269    
2270     if ((priv->flags & TC_MQPRIO_F_MODE) &&
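
Note: the sch_mq and sch_mqprio hunks above fix the same dump bug. On the per-CPU stats branch, __gnet_stats_copy_queue() aggregates the child qdisc's counters, but the parent's sch->q.qlen was never incremented there, so queue length was only reported for non-per-CPU children; both schedulers now add qlen on that branch too. The separate mqprio change drops NLA_ALIGN() from the nla_put() length: nla_put() copies exactly the length it is given, so rounding sizeof(opt) up reads up to three bytes past the structure:

	/* pass the exact payload size; netlink pads the attribute itself */
	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;
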
2271     diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
2272     index dd860fea0148..bc734cfaa29e 100644
2273     --- a/net/sctp/ipv6.c
2274     +++ b/net/sctp/ipv6.c
2275     @@ -275,7 +275,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
2276     final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
2277     rcu_read_unlock();
2278    
2279     - dst = ip6_dst_lookup_flow(sk, fl6, final_p);
2280     + dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p);
2281     if (!asoc || saddr)
2282     goto out;
2283    
2284     @@ -328,7 +328,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
2285     fl6->saddr = laddr->a.v6.sin6_addr;
2286     fl6->fl6_sport = laddr->a.v6.sin6_port;
2287     final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
2288     - bdst = ip6_dst_lookup_flow(sk, fl6, final_p);
2289     + bdst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p);
2290    
2291     if (IS_ERR(bdst))
2292     continue;
2293     diff --git a/net/tipc/core.c b/net/tipc/core.c
2294     index 8f35060a24e1..12192e7f4050 100644
2295     --- a/net/tipc/core.c
2296     +++ b/net/tipc/core.c
2297     @@ -125,14 +125,6 @@ static int __init tipc_init(void)
2298     sysctl_tipc_rmem[1] = RCVBUF_DEF;
2299     sysctl_tipc_rmem[2] = RCVBUF_MAX;
2300    
2301     - err = tipc_netlink_start();
2302     - if (err)
2303     - goto out_netlink;
2304     -
2305     - err = tipc_netlink_compat_start();
2306     - if (err)
2307     - goto out_netlink_compat;
2308     -
2309     err = tipc_register_sysctl();
2310     if (err)
2311     goto out_sysctl;
2312     @@ -153,8 +145,21 @@ static int __init tipc_init(void)
2313     if (err)
2314     goto out_bearer;
2315    
2316     + err = tipc_netlink_start();
2317     + if (err)
2318     + goto out_netlink;
2319     +
2320     + err = tipc_netlink_compat_start();
2321     + if (err)
2322     + goto out_netlink_compat;
2323     +
2324     pr_info("Started in single node mode\n");
2325     return 0;
2326     +
2327     +out_netlink_compat:
2328     + tipc_netlink_stop();
2329     +out_netlink:
2330     + tipc_bearer_cleanup();
2331     out_bearer:
2332     unregister_pernet_device(&tipc_topsrv_net_ops);
2333     out_pernet_topsrv:
2334     @@ -164,22 +169,18 @@ out_socket:
2335     out_pernet:
2336     tipc_unregister_sysctl();
2337     out_sysctl:
2338     - tipc_netlink_compat_stop();
2339     -out_netlink_compat:
2340     - tipc_netlink_stop();
2341     -out_netlink:
2342     pr_err("Unable to start in single node mode\n");
2343     return err;
2344     }
2345    
2346     static void __exit tipc_exit(void)
2347     {
2348     + tipc_netlink_compat_stop();
2349     + tipc_netlink_stop();
2350     tipc_bearer_cleanup();
2351     unregister_pernet_device(&tipc_topsrv_net_ops);
2352     tipc_socket_stop();
2353     unregister_pernet_device(&tipc_net_ops);
2354     - tipc_netlink_stop();
2355     - tipc_netlink_compat_stop();
2356     tipc_unregister_sysctl();
2357    
2358     pr_info("Deactivated\n");
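
Note on the tipc_init() reordering above: the netlink and netlink-compat families now come up last, after sysctl, the pernet devices, sockets and bearers, so no netlink request can race module initialization; the unwind labels and tipc_exit() are reordered to mirror that. Shape of the new tail, with the earlier labels elided:

	err = tipc_netlink_start();
	if (err)
		goto out_netlink;
	err = tipc_netlink_compat_start();
	if (err)
		goto out_netlink_compat;
	pr_info("Started in single node mode\n");
	return 0;

out_netlink_compat:
	tipc_netlink_stop();
out_netlink:
	tipc_bearer_cleanup();
	/* out_bearer and the remaining labels unwind as before */
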
2359     diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
2360     index 287df68721df..186c78431217 100644
2361     --- a/net/tipc/udp_media.c
2362     +++ b/net/tipc/udp_media.c
2363     @@ -195,10 +195,13 @@ static int tipc_udp_xmit(struct net *net, struct sk_buff *skb,
2364     .saddr = src->ipv6,
2365     .flowi6_proto = IPPROTO_UDP
2366     };
2367     - err = ipv6_stub->ipv6_dst_lookup(net, ub->ubsock->sk,
2368     - &ndst, &fl6);
2369     - if (err)
2370     + ndst = ipv6_stub->ipv6_dst_lookup_flow(net,
2371     + ub->ubsock->sk,
2372     + &fl6, NULL);
2373     + if (IS_ERR(ndst)) {
2374     + err = PTR_ERR(ndst);
2375     goto tx_error;
2376     + }
2377     dst_cache_set_ip6(cache, ndst, &fl6.saddr);
2378     }
2379     ttl = ip6_dst_hoplimit(ndst);
2380     diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
2381     index 683d00837693..3f5209e2d4ee 100644
2382     --- a/net/tls/tls_device.c
2383     +++ b/net/tls/tls_device.c
2384     @@ -417,7 +417,7 @@ static int tls_push_data(struct sock *sk,
2385    
2386     if (flags &
2387     ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST))
2388     - return -ENOTSUPP;
2389     + return -EOPNOTSUPP;
2390    
2391     if (sk->sk_err)
2392     return -sk->sk_err;
2393     @@ -560,7 +560,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
2394     lock_sock(sk);
2395    
2396     if (flags & MSG_OOB) {
2397     - rc = -ENOTSUPP;
2398     + rc = -EOPNOTSUPP;
2399     goto out;
2400     }
2401    
2402     @@ -999,7 +999,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
2403     }
2404    
2405     if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
2406     - rc = -ENOTSUPP;
2407     + rc = -EOPNOTSUPP;
2408     goto release_netdev;
2409     }
2410    
2411     @@ -1071,7 +1071,7 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
2412     }
2413    
2414     if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
2415     - rc = -ENOTSUPP;
2416     + rc = -EOPNOTSUPP;
2417     goto release_netdev;
2418     }
2419    
2420     diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
2421     index eff444293594..82d0beed8f07 100644
2422     --- a/net/tls/tls_main.c
2423     +++ b/net/tls/tls_main.c
2424     @@ -482,7 +482,7 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
2425     /* check version */
2426     if (crypto_info->version != TLS_1_2_VERSION &&
2427     crypto_info->version != TLS_1_3_VERSION) {
2428     - rc = -ENOTSUPP;
2429     + rc = -EINVAL;
2430     goto err_crypto_info;
2431     }
2432    
2433     @@ -778,7 +778,7 @@ static int tls_init(struct sock *sk)
2434     * share the ulp context.
2435     */
2436     if (sk->sk_state != TCP_ESTABLISHED)
2437     - return -ENOTSUPP;
2438     + return -ENOTCONN;
2439    
2440     tls_build_proto(sk);
2441    
2442     diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
2443     index 5dd0f01913c0..c70cf30c5492 100644
2444     --- a/net/tls/tls_sw.c
2445     +++ b/net/tls/tls_sw.c
2446     @@ -900,7 +900,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
2447     int ret = 0;
2448    
2449     if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
2450     - return -ENOTSUPP;
2451     + return -EOPNOTSUPP;
2452    
2453     mutex_lock(&tls_ctx->tx_lock);
2454     lock_sock(sk);
2455     @@ -1215,7 +1215,7 @@ int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
2456     if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
2457     MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY |
2458     MSG_NO_SHARED_FRAGS))
2459     - return -ENOTSUPP;
2460     + return -EOPNOTSUPP;
2461    
2462     return tls_sw_do_sendpage(sk, page, offset, size, flags);
2463     }
2464     @@ -1228,7 +1228,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
2465    
2466     if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
2467     MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
2468     - return -ENOTSUPP;
2469     + return -EOPNOTSUPP;
2470    
2471     mutex_lock(&tls_ctx->tx_lock);
2472     lock_sock(sk);
2473     @@ -1927,7 +1927,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
2474    
2475     /* splice does not support reading control messages */
2476     if (ctx->control != TLS_RECORD_TYPE_DATA) {
2477     - err = -ENOTSUPP;
2478     + err = -EINVAL;
2479     goto splice_read_end;
2480     }
2481    
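
Note on the TLS hunks above: ENOTSUPP is a kernel-internal errno (524) with no userspace strerror() string, so each site is mapped to the closest UAPI code: EOPNOTSUPP for unsupported flags and missing NETIF_F_HW_TLS_* features, EINVAL for unknown protocol versions and for splicing control records, and ENOTCONN for attaching the ULP to a socket that is not yet established. The selftest diff below drops its local ENOTSUPP definition to match. What userspace sees, as a small illustrative program:

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		printf("%s\n", strerror(EOPNOTSUPP));	/* Operation not supported */
		printf("%s\n", strerror(ENOTCONN));	/* Transport endpoint is not connected */
		printf("%s\n", strerror(524));		/* Unknown error 524 */
		return 0;
	}
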
2482     diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
2483     index 46abcae47dee..13e5ef615026 100644
2484     --- a/tools/testing/selftests/net/tls.c
2485     +++ b/tools/testing/selftests/net/tls.c
2486     @@ -25,10 +25,6 @@
2487     #define TLS_PAYLOAD_MAX_LEN 16384
2488     #define SOL_TLS 282
2489    
2490     -#ifndef ENOTSUPP
2491     -#define ENOTSUPP 524
2492     -#endif
2493     -
2494     FIXTURE(tls_basic)
2495     {
2496     int fd, cfd;
2497     @@ -1205,11 +1201,11 @@ TEST(non_established) {
2498     /* TLS ULP not supported */
2499     if (errno == ENOENT)
2500     return;
2501     - EXPECT_EQ(errno, ENOTSUPP);
2502     + EXPECT_EQ(errno, ENOTCONN);
2503    
2504     ret = setsockopt(sfd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
2505     EXPECT_EQ(ret, -1);
2506     - EXPECT_EQ(errno, ENOTSUPP);
2507     + EXPECT_EQ(errno, ENOTCONN);
2508    
2509     ret = getsockname(sfd, &addr, &len);
2510     ASSERT_EQ(ret, 0);