Magellan Linux

Annotation of /trunk/kernel-magellan/patches-4.19/0103-4.19.4-all-fixes.patch

Revision 3256 - Tue Nov 27 10:33:04 2018 UTC, committed by niro
File size: 58074 bytes
Log message: -linux-4.19.4

diff --git a/Makefile b/Makefile
index e4064fa16f11..1f3c7adeea63 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
-SUBLEVEL = 3
+SUBLEVEL = 4
EXTRAVERSION =
NAME = "People's Front"

diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 53eb14a65610..40bdaea97fe7 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -35,10 +35,12 @@ static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);

-/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
-u64 x86_spec_ctrl_base;
+/*
+ * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
+ * writes to SPEC_CTRL contain whatever reserved bits have been set.
+ */
+u64 __ro_after_init x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
-static DEFINE_MUTEX(spec_ctrl_mutex);

/*
* The vendor and possibly platform specific bits which can be modified in
@@ -323,46 +325,6 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
return cmd;
}

-static bool stibp_needed(void)
-{
- if (spectre_v2_enabled == SPECTRE_V2_NONE)
- return false;
-
- if (!boot_cpu_has(X86_FEATURE_STIBP))
- return false;
-
- return true;
-}
-
-static void update_stibp_msr(void *info)
-{
- wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
-}
-
-void arch_smt_update(void)
-{
- u64 mask;
-
- if (!stibp_needed())
- return;
-
- mutex_lock(&spec_ctrl_mutex);
- mask = x86_spec_ctrl_base;
- if (cpu_smt_control == CPU_SMT_ENABLED)
- mask |= SPEC_CTRL_STIBP;
- else
- mask &= ~SPEC_CTRL_STIBP;
-
- if (mask != x86_spec_ctrl_base) {
- pr_info("Spectre v2 cross-process SMT mitigation: %s STIBP\n",
- cpu_smt_control == CPU_SMT_ENABLED ?
- "Enabling" : "Disabling");
- x86_spec_ctrl_base = mask;
- on_each_cpu(update_stibp_msr, NULL, 1);
- }
- mutex_unlock(&spec_ctrl_mutex);
-}
-
static void __init spectre_v2_select_mitigation(void)
{
enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@@ -462,9 +424,6 @@ specv2_set_mode:
setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
pr_info("Enabling Restricted Speculation for firmware calls\n");
}
-
- /* Enable STIBP if appropriate */
- arch_smt_update();
}

#undef pr_fmt
@@ -855,8 +814,6 @@ static ssize_t l1tf_show_state(char *buf)
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
char *buf, unsigned int bug)
{
- int ret;
-
if (!boot_cpu_has_bug(bug))
return sprintf(buf, "Not affected\n");

@@ -874,12 +831,10 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
return sprintf(buf, "Mitigation: __user pointer sanitization\n");

case X86_BUG_SPECTRE_V2:
- ret = sprintf(buf, "%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+ return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
- (x86_spec_ctrl_base & SPEC_CTRL_STIBP) ? ", STIBP" : "",
spectre_v2_module_string());
- return ret;

case X86_BUG_SPEC_STORE_BYPASS:
return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 54e0ca6ed730..86b6464b4525 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -1117,11 +1117,6 @@ static int ksz_switch_init(struct ksz_device *dev)
{
int i;

- mutex_init(&dev->reg_mutex);
- mutex_init(&dev->stats_mutex);
- mutex_init(&dev->alu_mutex);
- mutex_init(&dev->vlan_mutex);
-
dev->ds->ops = &ksz_switch_ops;

for (i = 0; i < ARRAY_SIZE(ksz_switch_chips); i++) {
@@ -1206,6 +1201,11 @@ int ksz_switch_register(struct ksz_device *dev)
if (dev->pdata)
dev->chip_id = dev->pdata->chip_id;

+ mutex_init(&dev->reg_mutex);
+ mutex_init(&dev->stats_mutex);
+ mutex_init(&dev->alu_mutex);
+ mutex_init(&dev->vlan_mutex);
+
if (ksz_switch_detect(dev))
return -EINVAL;

diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index d721ccf7d8be..38e399e0f30e 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -567,6 +567,8 @@ int mv88e6xxx_g1_stats_clear(struct mv88e6xxx_chip *chip)
if (err)
return err;

+ /* Keep the histogram mode bits */
+ val &= MV88E6XXX_G1_STATS_OP_HIST_RX_TX;
val |= MV88E6XXX_G1_STATS_OP_BUSY | MV88E6XXX_G1_STATS_OP_FLUSH_ALL;

err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP, val);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index c57238fce863..7b6859e4924e 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1897,9 +1897,6 @@ static void bcm_sysport_netif_start(struct net_device *dev)
intrl2_1_mask_clear(priv, 0xffffffff);
else
intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);
-
- /* Last call before we start the real business */
- netif_tx_start_all_queues(dev);
}

static void rbuf_init(struct bcm_sysport_priv *priv)
@@ -2045,6 +2042,8 @@ static int bcm_sysport_open(struct net_device *dev)

bcm_sysport_netif_start(dev);

+ netif_tx_start_all_queues(dev);
+
return 0;

out_clear_rx_int:
@@ -2068,7 +2067,7 @@ static void bcm_sysport_netif_stop(struct net_device *dev)
struct bcm_sysport_priv *priv = netdev_priv(dev);

/* stop all software from updating hardware */
- netif_tx_stop_all_queues(dev);
+ netif_tx_disable(dev);
napi_disable(&priv->napi);
cancel_work_sync(&priv->dim.dim.work);
phy_stop(dev->phydev);
@@ -2654,12 +2653,12 @@ static int __maybe_unused bcm_sysport_suspend(struct device *d)
if (!netif_running(dev))
return 0;

+ netif_device_detach(dev);
+
bcm_sysport_netif_stop(dev);

phy_suspend(dev->phydev);

- netif_device_detach(dev);
-
/* Disable UniMAC RX */
umac_enable_set(priv, CMD_RX_EN, 0);

@@ -2743,8 +2742,6 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
goto out_free_rx_ring;
}

- netif_device_attach(dev);
-
/* RX pipe enable */
topctrl_writel(priv, 0, RX_FLUSH_CNTL);

@@ -2789,6 +2786,8 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)

bcm_sysport_netif_start(dev);

+ netif_device_attach(dev);
+
return 0;

out_free_rx_ring:
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 20c1681bb1af..2d6f090bf644 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2855,7 +2855,6 @@ static void bcmgenet_netif_start(struct net_device *dev)

umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);

- netif_tx_start_all_queues(dev);
bcmgenet_enable_tx_napi(priv);

/* Monitor link interrupts now */
@@ -2937,6 +2936,8 @@ static int bcmgenet_open(struct net_device *dev)

bcmgenet_netif_start(dev);

+ netif_tx_start_all_queues(dev);
+
return 0;

err_irq1:
@@ -2958,7 +2959,7 @@ static void bcmgenet_netif_stop(struct net_device *dev)
struct bcmgenet_priv *priv = netdev_priv(dev);

bcmgenet_disable_tx_napi(priv);
- netif_tx_stop_all_queues(dev);
+ netif_tx_disable(dev);

/* Disable MAC receive */
umac_enable_set(priv, CMD_RX_EN, false);
@@ -3620,13 +3621,13 @@ static int bcmgenet_suspend(struct device *d)
if (!netif_running(dev))
return 0;

+ netif_device_detach(dev);
+
bcmgenet_netif_stop(dev);

if (!device_may_wakeup(d))
phy_suspend(dev->phydev);

- netif_device_detach(dev);
-
/* Prepare the device for Wake-on-LAN and switch to the slow clock */
if (device_may_wakeup(d) && priv->wolopts) {
ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
@@ -3700,8 +3701,6 @@ static int bcmgenet_resume(struct device *d)
/* Always enable ring 16 - descriptor ring */
bcmgenet_enable_dma(priv, dma_ctrl);

- netif_device_attach(dev);
-
if (!device_may_wakeup(d))
phy_resume(dev->phydev);

@@ -3710,6 +3709,8 @@ static int bcmgenet_resume(struct device *d)

bcmgenet_netif_start(dev);

+ netif_device_attach(dev);
+
return 0;

out_clk_disable:
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index e6f28c7942ab..a12962702611 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12426,6 +12426,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
{
struct tg3 *tp = netdev_priv(dev);
int i, irq_sync = 0, err = 0;
+ bool reset_phy = false;

if ((ering->rx_pending > tp->rx_std_ring_mask) ||
(ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
@@ -12457,7 +12458,13 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e

if (netif_running(dev)) {
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- err = tg3_restart_hw(tp, false);
+ /* Reset PHY to avoid PHY lock up */
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720)
+ reset_phy = true;
+
+ err = tg3_restart_hw(tp, reset_phy);
if (!err)
tg3_netif_start(tp);
}
@@ -12491,6 +12498,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
{
struct tg3 *tp = netdev_priv(dev);
int err = 0;
+ bool reset_phy = false;

if (tp->link_config.autoneg == AUTONEG_ENABLE)
tg3_warn_mgmt_link_flap(tp);
@@ -12581,7 +12589,13 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam

if (netif_running(dev)) {
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- err = tg3_restart_hw(tp, false);
+ /* Reset PHY to avoid PHY lock up */
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720)
+ reset_phy = true;
+
+ err = tg3_restart_hw(tp, reset_phy);
if (!err)
tg3_netif_start(tp);
}
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 699ef942b615..7661064c815b 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1545,7 +1545,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_crq.v1.sge_len = cpu_to_be32(skb->len);
tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

- if (adapter->vlan_header_insertion) {
+ if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 0f189f873859..16ceeb1b2c9d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -566,6 +566,7 @@ struct mlx5e_rq {

unsigned long state;
int ix;
+ unsigned int hw_mtu;

struct net_dim dim; /* Dynamic Interrupt Moderation */

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index 24e3b564964f..12e1682f940b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -88,10 +88,8 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)

eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
*speed = mlx5e_port_ptys2speed(eth_proto_oper);
- if (!(*speed)) {
- mlx5_core_warn(mdev, "cannot get port speed\n");
+ if (!(*speed))
err = -EINVAL;
- }

return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index c047da8752da..eac245a93f91 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -130,8 +130,10 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
int err;

err = mlx5e_port_linkspeed(priv->mdev, &speed);
- if (err)
+ if (err) {
+ mlx5_core_warn(priv->mdev, "cannot get port speed\n");
return 0;
+ }

xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index f291d1bf1558..faa84b45e20a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -492,6 +492,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->channel = c;
rq->ix = c->ix;
rq->mdev = mdev;
+ rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->stats = &c->priv->channel_stats[c->ix].rq;

rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
@@ -1610,13 +1611,15 @@ static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
int err;
u32 i;

+ err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
+ if (err)
+ return err;
+
err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
&cq->wq_ctrl);
if (err)
return err;

- mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
-
mcq->cqe_sz = 64;
mcq->set_ci_db = cq->wq_ctrl.db.db;
mcq->arm_db = cq->wq_ctrl.db.db + 1;
@@ -1674,6 +1677,10 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
int eqn;
int err;

+ err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
+ if (err)
+ return err;
+
inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
sizeof(u64) * cq->wq_ctrl.buf.npages;
in = kvzalloc(inlen, GFP_KERNEL);
@@ -1687,8 +1694,6 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
(__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

- mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
-
MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
MLX5_SET(cqc, cqc, c_eqn, eqn);
MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
@@ -1908,6 +1913,10 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
int err;
int eqn;

+ err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
+ if (err)
+ return err;
+
c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
if (!c)
return -ENOMEM;
@@ -1924,7 +1933,6 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->xdp = !!params->xdp_prog;
c->stats = &priv->channel_stats[ix].ch;

- mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
c->irq_desc = irq_to_desc(irq);

netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
@@ -3566,6 +3574,7 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
return 0;
}

+#ifdef CONFIG_MLX5_ESWITCH
static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -3578,6 +3587,7 @@ static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)

return 0;
}
+#endif

static int set_feature_rx_all(struct net_device *netdev, bool enable)
{
@@ -3676,7 +3686,9 @@ static int mlx5e_set_features(struct net_device *netdev,
err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
set_feature_cvlan_filter);
+#ifdef CONFIG_MLX5_ESWITCH
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters);
+#endif
err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
@@ -3747,10 +3759,11 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
}

if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
+ bool is_linear = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, &new_channels.params);
u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params);
u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params);

- reset = reset && (ppw_old != ppw_new);
+ reset = reset && (is_linear || (ppw_old != ppw_new));
}

if (!reset) {
@@ -4685,7 +4698,9 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
FT_CAP(modify_root) &&
FT_CAP(identified_miss_table_mode) &&
FT_CAP(flow_table_modify)) {
+#ifdef CONFIG_MLX5_ESWITCH
netdev->hw_features |= NETIF_F_HW_TC;
+#endif
#ifdef CONFIG_MLX5_EN_ARFS
netdev->hw_features |= NETIF_F_NTUPLE;
#endif
@@ -4958,11 +4973,21 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
const struct mlx5e_profile *profile;
+ int max_nch;
int err;

profile = priv->profile;
clear_bit(MLX5E_STATE_DESTROYING, &priv->state);

+ /* max number of channels may have changed */
+ max_nch = mlx5e_get_max_num_channels(priv->mdev);
+ if (priv->channels.params.num_channels > max_nch) {
+ mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
+ priv->channels.params.num_channels = max_nch;
+ mlx5e_build_default_indir_rqt(priv->channels.params.indirection_rqt,
+ MLX5E_INDIR_RQT_SIZE, max_nch);
+ }
+
err = profile->init_tx(priv);
if (err)
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index a144146b769c..d543a5cff049 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1064,6 +1064,12 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u32 frag_size;
bool consumed;

+ /* Check packet size. Note LRO doesn't use linear SKB */
+ if (unlikely(cqe_bcnt > rq->hw_mtu)) {
+ rq->stats->oversize_pkts_sw_drop++;
+ return NULL;
+ }
+
va = page_address(di->page) + head_offset;
data = va + rx_headroom;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 35ded91203f5..4382ef85488c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -98,18 +98,17 @@ static int mlx5e_test_link_speed(struct mlx5e_priv *priv)
return 1;
}

-#ifdef CONFIG_INET
-/* loopback test */
-#define MLX5E_TEST_PKT_SIZE (MLX5E_RX_MAX_HEAD - NET_IP_ALIGN)
-static const char mlx5e_test_text[ETH_GSTRING_LEN] = "MLX5E SELF TEST";
-#define MLX5E_TEST_MAGIC 0x5AEED15C001ULL
-
struct mlx5ehdr {
__be32 version;
__be64 magic;
- char text[ETH_GSTRING_LEN];
};

+#ifdef CONFIG_INET
+/* loopback test */
+#define MLX5E_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) +\
+ sizeof(struct udphdr) + sizeof(struct mlx5ehdr))
+#define MLX5E_TEST_MAGIC 0x5AEED15C001ULL
+
static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
{
struct sk_buff *skb = NULL;
@@ -117,10 +116,7 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
struct ethhdr *ethh;
struct udphdr *udph;
struct iphdr *iph;
- int datalen, iplen;
-
- datalen = MLX5E_TEST_PKT_SIZE -
- (sizeof(*ethh) + sizeof(*iph) + sizeof(*udph));
+ int iplen;

skb = netdev_alloc_skb(priv->netdev, MLX5E_TEST_PKT_SIZE);
if (!skb) {
@@ -149,7 +145,7 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
/* Fill UDP header */
udph->source = htons(9);
udph->dest = htons(9); /* Discard Protocol */
- udph->len = htons(datalen + sizeof(struct udphdr));
+ udph->len = htons(sizeof(struct mlx5ehdr) + sizeof(struct udphdr));
udph->check = 0;

/* Fill IP header */
@@ -157,7 +153,8 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
iph->ttl = 32;
iph->version = 4;
iph->protocol = IPPROTO_UDP;
- iplen = sizeof(struct iphdr) + sizeof(struct udphdr) + datalen;
+ iplen = sizeof(struct iphdr) + sizeof(struct udphdr) +
+ sizeof(struct mlx5ehdr);
iph->tot_len = htons(iplen);
iph->frag_off = 0;
iph->saddr = 0;
@@ -170,9 +167,6 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
mlxh = skb_put(skb, sizeof(*mlxh));
mlxh->version = 0;
mlxh->magic = cpu_to_be64(MLX5E_TEST_MAGIC);
- strlcpy(mlxh->text, mlx5e_test_text, sizeof(mlxh->text));
- datalen -= sizeof(*mlxh);
- skb_put_zero(skb, datalen);

skb->csum = 0;
skb->ip_summed = CHECKSUM_PARTIAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 6839481f7697..d57d51c4e658 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -82,6 +82,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
@@ -158,6 +159,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->rx_wqe_err += rq_stats->wqe_err;
s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes;
s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
+ s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop;
s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
@@ -1148,6 +1150,7 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index a4c035aedd46..c1064af9d54c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -95,6 +95,7 @@ struct mlx5e_sw_stats {
u64 rx_wqe_err;
u64 rx_mpwqe_filler_cqes;
u64 rx_mpwqe_filler_strides;
+ u64 rx_oversize_pkts_sw_drop;
u64 rx_buff_alloc_err;
u64 rx_cqe_compress_blks;
u64 rx_cqe_compress_pkts;
@@ -190,6 +191,7 @@ struct mlx5e_rq_stats {
u64 wqe_err;
u64 mpwqe_filler_cqes;
u64 mpwqe_filler_strides;
+ u64 oversize_pkts_sw_drop;
u64 buff_alloc_err;
u64 cqe_compress_blks;
u64 cqe_compress_pkts;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 85796727093e..3092c59c0dc7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1310,31 +1310,21 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
inner_headers);
}

- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
- struct flow_dissector_key_eth_addrs *key =
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_dissector_key_basic *key =
skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ETH_ADDRS,
+ FLOW_DISSECTOR_KEY_BASIC,
f->key);
- struct flow_dissector_key_eth_addrs *mask =
+ struct flow_dissector_key_basic *mask =
skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ETH_ADDRS,
+ FLOW_DISSECTOR_KEY_BASIC,
f->mask);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
+ ntohs(mask->n_proto));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
+ ntohs(key->n_proto));

- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
- dmac_47_16),
- mask->dst);
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
- dmac_47_16),
- key->dst);
-
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
- smac_47_16),
- mask->src);
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
- smac_47_16),
- key->src);
-
- if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
+ if (mask->n_proto)
*match_level = MLX5_MATCH_L2;
}

@@ -1368,9 +1358,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,

*match_level = MLX5_MATCH_L2;
}
- } else {
+ } else if (*match_level != MLX5_MATCH_NONE) {
MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
+ *match_level = MLX5_MATCH_L2;
}

if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
@@ -1408,21 +1399,31 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
}
}

- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
- struct flow_dissector_key_basic *key =
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_dissector_key_eth_addrs *key =
skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
+ FLOW_DISSECTOR_KEY_ETH_ADDRS,
f->key);
- struct flow_dissector_key_basic *mask =
+ struct flow_dissector_key_eth_addrs *mask =
skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
+ FLOW_DISSECTOR_KEY_ETH_ADDRS,
f->mask);
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
- ntohs(mask->n_proto));
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
- ntohs(key->n_proto));

- if (mask->n_proto)
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ dmac_47_16),
+ mask->dst);
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ dmac_47_16),
+ key->dst);
+
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ smac_47_16),
+ mask->src);
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ smac_47_16),
+ key->src);
+
+ if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
*match_level = MLX5_MATCH_L2;
}

@@ -1449,10 +1450,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,

/* the HW doesn't need L3 inline to match on frag=no */
if (!(key->flags & FLOW_DIS_IS_FRAGMENT))
- *match_level = MLX5_INLINE_MODE_L2;
+ *match_level = MLX5_MATCH_L2;
/* *** L2 attributes parsing up to here *** */
else
- *match_level = MLX5_INLINE_MODE_IP;
+ *match_level = MLX5_MATCH_L3;
}
}

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index b8ee9101c506..b5a8769a5bfd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -83,8 +83,14 @@ struct mlx5_fpga_ipsec_rule {
};

static const struct rhashtable_params rhash_sa = {
- .key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa),
- .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa),
+ /* Keep out "cmd" field from the key as it's
+ * value is not constant during the lifetime
+ * of the key object.
+ */
+ .key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) -
+ FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
+ .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) +
+ FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
.head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash),
.automatic_shrinking = true,
.min_size = 1,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index e3797a44e074..5b7fe8264144 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -502,9 +502,9 @@ static int mlx5i_close(struct net_device *netdev)

netif_carrier_off(epriv->netdev);
mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
- mlx5i_uninit_underlay_qp(epriv);
mlx5e_deactivate_priv_channels(epriv);
mlx5e_close_channels(&epriv->channels);
+ mlx5i_uninit_underlay_qp(epriv);
unlock:
mutex_unlock(&epriv->state_lock);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 30bb2c533cec..ada644d90029 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -3519,7 +3519,6 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
burst_size = 7;
break;
case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
- is_bytes = true;
rate = 4 * 1024;
burst_size = 4;
break;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 0afc3d335d56..d11c16aeb19a 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -234,7 +234,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
struct net_device *real_dev,
struct rmnet_endpoint *ep)
{
- struct rmnet_priv *priv;
+ struct rmnet_priv *priv = netdev_priv(rmnet_dev);
int rc;

if (ep->egress_dev)
@@ -247,6 +247,8 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
rmnet_dev->hw_features |= NETIF_F_SG;

+ priv->real_dev = real_dev;
+
rc = register_netdevice(rmnet_dev);
if (!rc) {
ep->egress_dev = rmnet_dev;
@@ -255,9 +257,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,

rmnet_dev->rtnl_link_ops = &rmnet_link_ops;

- priv = netdev_priv(rmnet_dev);
priv->mux_id = id;
- priv->real_dev = real_dev;

netdev_dbg(rmnet_dev, "rmnet dev created\n");
}
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 33265747bf39..0fbcedcdf6e2 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -63,7 +63,7 @@ static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
* assume the pin serves as pull-up. If direction is
* output, the default value is high.
*/
- gpiod_set_value(bitbang->mdo, 1);
+ gpiod_set_value_cansleep(bitbang->mdo, 1);
return;
}

@@ -78,7 +78,7 @@ static int mdio_get(struct mdiobb_ctrl *ctrl)
struct mdio_gpio_info *bitbang =
container_of(ctrl, struct mdio_gpio_info, ctrl);

- return gpiod_get_value(bitbang->mdio);
+ return gpiod_get_value_cansleep(bitbang->mdio);
}

static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
@@ -87,9 +87,9 @@ static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
container_of(ctrl, struct mdio_gpio_info, ctrl);

if (bitbang->mdo)
- gpiod_set_value(bitbang->mdo, what);
+ gpiod_set_value_cansleep(bitbang->mdo, what);
else
- gpiod_set_value(bitbang->mdio, what);
+ gpiod_set_value_cansleep(bitbang->mdio, what);
}

static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
@@ -97,7 +97,7 @@ static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
struct mdio_gpio_info *bitbang =
container_of(ctrl, struct mdio_gpio_info, ctrl);

- gpiod_set_value(bitbang->mdc, what);
+ gpiod_set_value_cansleep(bitbang->mdc, what);
}

static const struct mdiobb_ops mdio_gpio_ops = {
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 7fc8508b5231..271e8adc39f1 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -220,7 +220,7 @@ static struct phy_driver realtek_drvs[] = {
.flags = PHY_HAS_INTERRUPT,
}, {
.phy_id = 0x001cc816,
- .name = "RTL8201F 10/100Mbps Ethernet",
+ .name = "RTL8201F Fast Ethernet",
.phy_id_mask = 0x001fffff,
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index c52207beef88..573620771154 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1527,6 +1527,7 @@ static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,

if (!rx_batched || (!more && skb_queue_empty(queue))) {
local_bh_disable();
+ skb_record_rx_queue(skb, tfile->queue_index);
netif_receive_skb(skb);
local_bh_enable();
return;
@@ -1546,8 +1547,11 @@ static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
struct sk_buff *nskb;

local_bh_disable();
- while ((nskb = __skb_dequeue(&process_queue)))
+ while ((nskb = __skb_dequeue(&process_queue))) {
+ skb_record_rx_queue(nskb, tfile->queue_index);
netif_receive_skb(nskb);
+ }
+ skb_record_rx_queue(skb, tfile->queue_index);
netif_receive_skb(skb);
local_bh_enable();
}
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 262e7a3c23cb..f2d01cb6f958 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1321,6 +1321,8 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->ethtool_ops = &smsc95xx_ethtool_ops;
dev->net->flags |= IFF_MULTICAST;
dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM;
+ dev->net->min_mtu = ETH_MIN_MTU;
+ dev->net->max_mtu = ETH_DATA_LEN;
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;

pdata->dev = dev;
@@ -1598,6 +1600,8 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
return ret;
}

+ cancel_delayed_work_sync(&pdata->carrier_check);
+
if (pdata->suspend_flags) {
netdev_warn(dev->net, "error during last resume\n");
pdata->suspend_flags = 0;
@@ -1840,6 +1844,11 @@ done:
*/
if (ret && PMSG_IS_AUTO(message))
usbnet_resume(intf);
+
+ if (ret)
+ schedule_delayed_work(&pdata->carrier_check,
+ CARRIER_CHECK_DELAY);
+
return ret;
}

diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 8c2caa370e0f..ab9242e51d9e 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -608,4 +608,16 @@ static inline __u32 sctp_dst_mtu(const struct dst_entry *dst)
SCTP_DEFAULT_MINSEGMENT));
}

+static inline bool sctp_transport_pmtu_check(struct sctp_transport *t)
+{
+ __u32 pmtu = sctp_dst_mtu(t->dst);
+
+ if (t->pathmtu == pmtu)
+ return true;
+
+ t->pathmtu = pmtu;
+
+ return false;
+}
+
#endif /* __net_sctp_h__ */
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index 34dd3d497f2c..c81feb373d3e 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -568,6 +568,8 @@ struct sctp_assoc_reset_event {

#define SCTP_ASSOC_CHANGE_DENIED 0x0004
#define SCTP_ASSOC_CHANGE_FAILED 0x0008
+#define SCTP_STREAM_CHANGE_DENIED SCTP_ASSOC_CHANGE_DENIED
+#define SCTP_STREAM_CHANGE_FAILED SCTP_ASSOC_CHANGE_FAILED
struct sctp_stream_change_event {
__u16 strchange_type;
__u16 strchange_flags;
@@ -1151,6 +1153,7 @@ struct sctp_add_streams {
/* SCTP Stream schedulers */
enum sctp_sched_type {
SCTP_SS_FCFS,
+ SCTP_SS_DEFAULT = SCTP_SS_FCFS,
SCTP_SS_PRIO,
SCTP_SS_RR,
SCTP_SS_MAX = SCTP_SS_RR
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 3adecda21444..0097acec1c71 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -2026,12 +2026,6 @@ static void cpuhp_online_cpu_device(unsigned int cpu)
kobject_uevent(&dev->kobj, KOBJ_ONLINE);
}

-/*
- * Architectures that need SMT-specific errata handling during SMT hotplug
- * should override this.
- */
-void __weak arch_smt_update(void) { };
-
static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
{
int cpu, ret = 0;
@@ -2058,10 +2052,8 @@ static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
*/
cpuhp_offline_cpu_device(cpu);
}
- if (!ret) {
+ if (!ret)
cpu_smt_control = ctrlval;
- arch_smt_update();
- }
cpu_maps_update_done();
return ret;
}
@@ -2072,7 +2064,6 @@ static int cpuhp_smt_enable(void)

cpu_maps_update_begin();
cpu_smt_control = CPU_SMT_ENABLED;
- arch_smt_update();
for_each_present_cpu(cpu) {
/* Skip online CPUs and CPUs on offline nodes */
if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
diff --git a/net/core/dev.c b/net/core/dev.c
index e16ba3625400..097c02101450 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5630,6 +5630,10 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
skb->vlan_tci = 0;
skb->dev = napi->dev;
skb->skb_iif = 0;
+
+ /* eth_type_trans() assumes pkt_type is PACKET_HOST */
+ skb->pkt_type = PACKET_HOST;
+
skb->encapsulation = 0;
skb_shinfo(skb)->gso_type = 0;
skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index ce9eeeb7c024..415b95f76b66 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -1026,8 +1026,8 @@ ip_proto_again:
break;
}

- if (dissector_uses_key(flow_dissector,
- FLOW_DISSECTOR_KEY_PORTS)) {
+ if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS) &&
+ !(key_control->flags & FLOW_DIS_IS_FRAGMENT)) {
key_ports = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_PORTS,
target_container);
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index bcb11f3a27c0..760a9e52e02b 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -178,21 +178,22 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
}

static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
- void *arg)
+ void *arg,
+ struct inet_frag_queue **prev)
{
struct inet_frags *f = nf->f;
struct inet_frag_queue *q;
- int err;

q = inet_frag_alloc(nf, f, arg);
- if (!q)
+ if (!q) {
+ *prev = ERR_PTR(-ENOMEM);
return NULL;
-
+ }
mod_timer(&q->timer, jiffies + nf->timeout);

- err = rhashtable_insert_fast(&nf->rhashtable, &q->node,
- f->rhash_params);
- if (err < 0) {
+ *prev = rhashtable_lookup_get_insert_key(&nf->rhashtable, &q->key,
+ &q->node, f->rhash_params);
+ if (*prev) {
q->flags |= INET_FRAG_COMPLETE;
inet_frag_kill(q);
inet_frag_destroy(q);
@@ -204,22 +205,22 @@ static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
/* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
{
- struct inet_frag_queue *fq;
+ struct inet_frag_queue *fq = NULL, *prev;

if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh)
return NULL;

rcu_read_lock();

- fq = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
- if (fq) {
+ prev = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
+ if (!prev)
+ fq = inet_frag_create(nf, key, &prev);
+ if (prev && !IS_ERR(prev)) {
+ fq = prev;
if (!refcount_inc_not_zero(&fq->refcnt))
fq = NULL;
- rcu_read_unlock();
- return fq;
}
rcu_read_unlock();
-
- return inet_frag_create(nf, key);
+ return fq;
}
EXPORT_SYMBOL(inet_frag_find);
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index dde671e97829..c248e0dccbe1 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -80,7 +80,7 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,

iph->version = 4;
iph->ihl = sizeof(struct iphdr) >> 2;
- iph->frag_off = df;
+ iph->frag_off = ip_mtu_locked(&rt->dst) ? 0 : df;
iph->protocol = proto;
iph->tos = tos;
iph->daddr = dst;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 47e08c1b5bc3..72898cbef43d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4371,6 +4371,7 @@ static bool tcp_try_coalesce(struct sock *sk,
if (TCP_SKB_CB(from)->has_rxtstamp) {
TCP_SKB_CB(to)->has_rxtstamp = true;
to->tstamp = from->tstamp;
+ skb_hwtstamps(to)->hwtstamp = skb_hwtstamps(from)->hwtstamp;
}

return true;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 1cf00d857fc1..a33681dc4796 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2263,8 +2263,7 @@ static void ip6_link_failure(struct sk_buff *skb)
if (rt) {
rcu_read_lock();
if (rt->rt6i_flags & RTF_CACHE) {
- if (dst_hold_safe(&rt->dst))
- rt6_remove_exception_rt(rt);
+ rt6_remove_exception_rt(rt);
} else {
struct fib6_info *from;
struct fib6_node *fn;
@@ -2392,10 +2391,13 @@ EXPORT_SYMBOL_GPL(ip6_update_pmtu);

void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
{
+ int oif = sk->sk_bound_dev_if;
struct dst_entry *dst;

- ip6_update_pmtu(skb, sock_net(sk), mtu,
- sk->sk_bound_dev_if, sk->sk_mark, sk->sk_uid);
+ if (!oif && skb->dev)
+ oif = l3mdev_master_ifindex(skb->dev);
+
+ ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark, sk->sk_uid);

dst = __sk_dst_get(sk);
if (!dst || !dst->obsolete ||
@@ -3266,8 +3268,8 @@ static int ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg)
if (cfg->fc_flags & RTF_GATEWAY &&
!ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
goto out;
-
- if (dst_hold_safe(&rt->dst))
- rc = rt6_remove_exception_rt(rt);
+
+ rc = rt6_remove_exception_rt(rt);
out:
return rc;
}
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 82cdf9020b53..26f1d435696a 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1490,12 +1490,7 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
goto err_sock;
}

- sk = sock->sk;
-
- sock_hold(sk);
- tunnel->sock = sk;
tunnel->l2tp_net = net;
-
pn = l2tp_pernet(net);

spin_lock_bh(&pn->l2tp_tunnel_list_lock);
@@ -1510,6 +1505,10 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
spin_unlock_bh(&pn->l2tp_tunnel_list_lock);

+ sk = sock->sk;
+ sock_hold(sk);
+ tunnel->sock = sk;
+
if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
struct udp_tunnel_sock_cfg udp_cfg = {
.sk_user_data = tunnel,
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index a6e6cae82c30..03e0fc8c183f 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -611,6 +611,7 @@ struct rxrpc_call {
* not hard-ACK'd packet follows this.
*/
rxrpc_seq_t tx_top; /* Highest Tx slot allocated. */
+ u16 tx_backoff; /* Delay to insert due to Tx failure */

/* TCP-style slow-start congestion control [RFC5681]. Since the SMSS
* is fixed, we keep these numbers in terms of segments (ie. DATA
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index 8e7434e92097..468efc3660c0 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -123,6 +123,7 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
else
ack_at = expiry;

+ ack_at += READ_ONCE(call->tx_backoff);
ack_at += now;
if (time_before(ack_at, call->ack_at)) {
WRITE_ONCE(call->ack_at, ack_at);
@@ -311,6 +312,7 @@ void rxrpc_process_call(struct work_struct *work)
container_of(work, struct rxrpc_call, processor);
rxrpc_serial_t *send_ack;
unsigned long now, next, t;
+ unsigned int iterations = 0;

rxrpc_see_call(call);

@@ -319,6 +321,11 @@ void rxrpc_process_call(struct work_struct *work)
call->debug_id, rxrpc_call_states[call->state], call->events);

recheck_state:
+ /* Limit the number of times we do this before returning to the manager */
+ iterations++;
+ if (iterations > 5)
+ goto requeue;
+
if (test_and_clear_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
rxrpc_send_abort_packet(call);
goto recheck_state;
@@ -447,13 +454,16 @@ recheck_state:
rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart);

/* other events may have been raised since we started checking */
- if (call->events && call->state < RXRPC_CALL_COMPLETE) {
- __rxrpc_queue_call(call);
- goto out;
- }
+ if (call->events && call->state < RXRPC_CALL_COMPLETE)
+ goto requeue;

out_put:
rxrpc_put_call(call, rxrpc_call_put);
out:
_leave("");
+ return;
+
+requeue:
+ __rxrpc_queue_call(call);
+ goto out;
}
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index a141ee3ab812..345dc1c5fe72 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -34,6 +34,21 @@ struct rxrpc_abort_buffer {

static const char rxrpc_keepalive_string[] = "";

+/*
+ * Increase Tx backoff on transmission failure and clear it on success.
+ */
+static void rxrpc_tx_backoff(struct rxrpc_call *call, int ret)
+{
+ if (ret < 0) {
+ u16 tx_backoff = READ_ONCE(call->tx_backoff);
+
+ if (tx_backoff < HZ)
+ WRITE_ONCE(call->tx_backoff, tx_backoff + 1);
+ } else {
+ WRITE_ONCE(call->tx_backoff, 0);
+ }
+}
+
/*
* Arrange for a keepalive ping a certain time after we last transmitted. This
* lets the far side know we're still interested in this call and helps keep
@@ -210,6 +225,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
else
trace_rxrpc_tx_packet(call->debug_id, &pkt->whdr,
rxrpc_tx_point_call_ack);
+ rxrpc_tx_backoff(call, ret);

if (call->state < RXRPC_CALL_COMPLETE) {
if (ret < 0) {
@@ -218,7 +234,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
rxrpc_propose_ACK(call, pkt->ack.reason,
ntohs(pkt->ack.maxSkew),
ntohl(pkt->ack.serial),
- true, true,
+ false, true,
rxrpc_propose_ack_retry_tx);
} else {
spin_lock_bh(&call->lock);
@@ -300,7 +316,7 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
else
trace_rxrpc_tx_packet(call->debug_id, &pkt.whdr,
rxrpc_tx_point_call_abort);
-
+ rxrpc_tx_backoff(call, ret);

rxrpc_put_connection(conn);
return ret;
@@ -411,6 +427,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
else
trace_rxrpc_tx_packet(call->debug_id, &whdr,
rxrpc_tx_point_call_data_nofrag);
+ rxrpc_tx_backoff(call, ret);
if (ret == -EMSGSIZE)
goto send_fragmentable;

@@ -445,9 +462,18 @@ done:
rxrpc_reduce_call_timer(call, expect_rx_by, nowj,
rxrpc_timer_set_for_normal);
}
- }

- rxrpc_set_keepalive(call);
+ rxrpc_set_keepalive(call);
+ } else {
+ /* Cancel the call if the initial transmission fails,
+ * particularly if that's due to network routing issues that
+ * aren't going away anytime soon. The layer above can arrange
+ * the retransmission.
+ */
+ if (!test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER, &call->flags))
+ rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
+ RX_USER_ABORT, ret);
+ }

_leave(" = %d [%u]", ret, call->peer->maxdata);
return ret;
@@ -506,6 +532,7 @@ send_fragmentable:
else
trace_rxrpc_tx_packet(call->debug_id, &whdr,
rxrpc_tx_point_call_data_frag);
+ rxrpc_tx_backoff(call, ret);

up_write(&conn->params.local->defrag_sem);
goto done;
1395     diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
1396     index ad99a99f11f6..ca535a8585bc 100644
1397     --- a/net/sched/act_pedit.c
1398     +++ b/net/sched/act_pedit.c
1399     @@ -201,7 +201,8 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
1400     goto out_release;
1401     }
1402     } else {
1403     - return err;
1404     + ret = err;
1405     + goto out_free;
1406     }
1407    
1408     p = to_pedit(*a);
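
The act_pedit change is a textbook error-unwind fix: one failure branch returned err directly, skipping the cleanup that the already-acquired action resources need, and the patch routes it through the function's existing out_free label instead. The idiom as a small runnable sketch, with all names invented:

#include <errno.h>
#include <stdlib.h>

struct action {
        char *keys;
};

static int action_init(struct action *a, int lookup_ok)
{
        int ret;

        a->keys = malloc(64);                   /* first held resource */
        if (!a->keys)
                return -ENOMEM;                 /* nothing to unwind yet */

        if (!lookup_ok) {                       /* mirrors the failing branch */
                ret = -EINVAL;
                goto out_free;                  /* the fix: unwind, don't return */
        }
        return 0;

out_free:
        free(a->keys);
        a->keys = NULL;
        return ret;
}

int main(void)
{
        struct action a;

        return action_init(&a, 0) == -EINVAL ? 0 : 1;
}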
1409     diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
1410     index 6fd9bdd93796..7fade7107f95 100644
1411     --- a/net/sched/cls_flower.c
1412     +++ b/net/sched/cls_flower.c
1413     @@ -709,11 +709,23 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
1414     struct netlink_ext_ack *extack)
1415     {
1416     const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
1417     - int option_len, key_depth, msk_depth = 0;
1418     + int err, option_len, key_depth, msk_depth = 0;
1419     +
1420     + err = nla_validate_nested(tb[TCA_FLOWER_KEY_ENC_OPTS],
1421     + TCA_FLOWER_KEY_ENC_OPTS_MAX,
1422     + enc_opts_policy, extack);
1423     + if (err)
1424     + return err;
1425    
1426     nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
1427    
1428     if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
1429     + err = nla_validate_nested(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
1430     + TCA_FLOWER_KEY_ENC_OPTS_MAX,
1431     + enc_opts_policy, extack);
1432     + if (err)
1433     + return err;
1434     +
1435     nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
1436     msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
1437     }
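
fl_set_enc_opt() had been walking the nested encapsulation options, and optionally their mask, without ever checking them against enc_opts_policy; the fix runs nla_validate_nested() over both attributes before any parsing starts. The same validate-before-use shape transplanted to a toy TLV walker, with the record layout and policy table invented for illustration (the test vectors assume a little-endian host):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define OPT_MAX 2
/* policy: expected payload length per attribute type (index 0 unused) */
static const uint16_t policy_len[OPT_MAX + 1] = { 0, 4, 2 };

struct tlv_hdr {
        uint16_t type;
        uint16_t len;                           /* payload bytes that follow */
};

/* reject unknown types, bad lengths and truncation before any use */
static int validate_nested(const uint8_t *buf, size_t buflen)
{
        size_t off = 0;

        while (off + sizeof(struct tlv_hdr) <= buflen) {
                struct tlv_hdr h;

                memcpy(&h, buf + off, sizeof(h));
                if (h.type == 0 || h.type > OPT_MAX)
                        return -1;              /* unknown attribute */
                if (h.len != policy_len[h.type])
                        return -1;              /* malformed length */
                off += sizeof(h) + h.len;
                if (off > buflen)
                        return -1;              /* truncated payload */
        }
        return off == buflen ? 0 : -1;
}

int main(void)
{
        uint8_t key[8] = { 1, 0, 4, 0 };        /* type 1, len 4, zero payload */
        uint8_t msk[8] = { 9, 0, 4, 0 };        /* unknown type 9 */

        printf("key: %d\n", validate_nested(key, sizeof(key)));  /* 0 */
        printf("msk: %d\n", validate_nested(msk, sizeof(msk)));  /* -1 */
        return 0;
}

Screening the mask with the same policy as the key is the point of the second hunk: both buffers feed the same parsing loop, so both need the same validation.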
1438     diff --git a/net/sctp/output.c b/net/sctp/output.c
1439     index 67939ad99c01..08601223b0bf 100644
1440     --- a/net/sctp/output.c
1441     +++ b/net/sctp/output.c
1442     @@ -118,6 +118,9 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
1443     sctp_transport_route(tp, NULL, sp);
1444     if (asoc->param_flags & SPP_PMTUD_ENABLE)
1445     sctp_assoc_sync_pmtu(asoc);
1446     + } else if (!sctp_transport_pmtu_check(tp)) {
1447     + if (asoc->param_flags & SPP_PMTUD_ENABLE)
1448     + sctp_assoc_sync_pmtu(asoc);
1449     }
1450    
1451     if (asoc->pmtu_pending) {
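
The sctp/output.c hunk adds a second trigger for PMTU resync: even when the socket's route is unchanged, the transport's cached pathmtu can drift from what the dst currently reports, and sctp_transport_pmtu_check() catches that case. The cache-revalidation shape in miniature, with route_mtu() as a made-up stand-in for querying the dst entry:

#include <stdbool.h>
#include <stdio.h>

struct transport {
        unsigned int pathmtu;                   /* cached path MTU */
};

static unsigned int route_mtu(void)
{
        return 1400;                            /* pretend the path shrank */
}

/* sctp_transport_pmtu_check() analog: does the cache still match? */
static bool transport_pmtu_ok(const struct transport *t)
{
        return t->pathmtu == route_mtu();
}

int main(void)
{
        struct transport t = { .pathmtu = 1500 };

        if (!transport_pmtu_ok(&t)) {
                t.pathmtu = route_mtu();        /* sctp_assoc_sync_pmtu() analog */
                printf("resynced PMTU to %u\n", t.pathmtu);
        }
        return 0;
}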
1452     diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
1453     index 42191ed9902b..7bb8e5603298 100644
1454     --- a/net/sctp/outqueue.c
1455     +++ b/net/sctp/outqueue.c
1456     @@ -212,7 +212,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
1457     INIT_LIST_HEAD(&q->retransmit);
1458     INIT_LIST_HEAD(&q->sacked);
1459     INIT_LIST_HEAD(&q->abandoned);
1460     - sctp_sched_set_sched(asoc, SCTP_SS_FCFS);
1461     + sctp_sched_set_sched(asoc, SCTP_SS_DEFAULT);
1462     }
1463    
1464     /* Free the outqueue structure and any related pending chunks.
1465     diff --git a/net/sctp/socket.c b/net/sctp/socket.c
1466     index c1693e28aed4..876393cf5ed6 100644
1467     --- a/net/sctp/socket.c
1468     +++ b/net/sctp/socket.c
1469     @@ -3958,32 +3958,16 @@ static int sctp_setsockopt_pr_supported(struct sock *sk,
1470     unsigned int optlen)
1471     {
1472     struct sctp_assoc_value params;
1473     - struct sctp_association *asoc;
1474     - int retval = -EINVAL;
1475    
1476     if (optlen != sizeof(params))
1477     - goto out;
1478     -
1479     - if (copy_from_user(&params, optval, optlen)) {
1480     - retval = -EFAULT;
1481     - goto out;
1482     - }
1483     -
1484     - asoc = sctp_id2assoc(sk, params.assoc_id);
1485     - if (asoc) {
1486     - asoc->prsctp_enable = !!params.assoc_value;
1487     - } else if (!params.assoc_id) {
1488     - struct sctp_sock *sp = sctp_sk(sk);
1489     + return -EINVAL;
1490    
1491     - sp->ep->prsctp_enable = !!params.assoc_value;
1492     - } else {
1493     - goto out;
1494     - }
1495     + if (copy_from_user(&params, optval, optlen))
1496     + return -EFAULT;
1497    
1498     - retval = 0;
1499     + sctp_sk(sk)->ep->prsctp_enable = !!params.assoc_value;
1500    
1501     -out:
1502     - return retval;
1503     + return 0;
1504     }
1505    
1506     static int sctp_setsockopt_default_prinfo(struct sock *sk,
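
After the rewrite, sctp_setsockopt_pr_supported() no longer branches on an association at all: validate the length, copy the argument in one shot, flip the endpoint-wide flag. That reduced shape as a runnable sketch, with memcpy() standing in for copy_from_user() and the struct names borrowed from the hunk:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct assoc_value {
        int assoc_id;
        int assoc_value;
};

struct endpoint {
        bool prsctp_enable;
};

static int set_pr_supported(struct endpoint *ep, const void *optval,
                            size_t optlen)
{
        struct assoc_value params;

        if (optlen != sizeof(params))
                return -EINVAL;                 /* size check before any copy */

        memcpy(&params, optval, sizeof(params)); /* copy_from_user() analog */

        ep->prsctp_enable = !!params.assoc_value; /* endpoint-wide only */
        return 0;
}

int main(void)
{
        struct endpoint ep = { false };
        struct assoc_value v = { 0, 1 };

        printf("%d %d\n", set_pr_supported(&ep, &v, sizeof(v)),
               (int)ep.prsctp_enable);          /* 0 1 */
        return 0;
}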
1507     diff --git a/net/sctp/stream.c b/net/sctp/stream.c
1508     index ffb940d3b57c..3892e7630f3a 100644
1509     --- a/net/sctp/stream.c
1510     +++ b/net/sctp/stream.c
1511     @@ -535,7 +535,6 @@ int sctp_send_add_streams(struct sctp_association *asoc,
1512     goto out;
1513     }
1514    
1515     - stream->incnt = incnt;
1516     stream->outcnt = outcnt;
1517    
1518     asoc->strreset_outstanding = !!out + !!in;
1519     diff --git a/net/tipc/discover.c b/net/tipc/discover.c
1520     index 2830709957bd..c138d68e8a69 100644
1521     --- a/net/tipc/discover.c
1522     +++ b/net/tipc/discover.c
1523     @@ -166,7 +166,8 @@ static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d,
1524    
1525     /* Apply trial address if we just left trial period */
1526     if (!trial && !self) {
1527     - tipc_net_finalize(net, tn->trial_addr);
1528     + tipc_sched_net_finalize(net, tn->trial_addr);
1529     + msg_set_prevnode(buf_msg(d->skb), tn->trial_addr);
1530     msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
1531     }
1532    
1533     @@ -300,14 +301,12 @@ static void tipc_disc_timeout(struct timer_list *t)
1534     goto exit;
1535     }
1536    
1537     - /* Trial period over ? */
1538     - if (!time_before(jiffies, tn->addr_trial_end)) {
1539     - /* Did we just leave it ? */
1540     - if (!tipc_own_addr(net))
1541     - tipc_net_finalize(net, tn->trial_addr);
1542     -
1543     - msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
1544     - msg_set_prevnode(buf_msg(d->skb), tipc_own_addr(net));
1545     + /* Did we just leave trial period ? */
1546     + if (!time_before(jiffies, tn->addr_trial_end) && !tipc_own_addr(net)) {
1547     + mod_timer(&d->timer, jiffies + TIPC_DISC_INIT);
1548     + spin_unlock_bh(&d->lock);
1549     + tipc_sched_net_finalize(net, tn->trial_addr);
1550     + return;
1551     }
1552    
1553     /* Adjust timeout interval according to discovery phase */
1554     @@ -319,6 +318,8 @@ static void tipc_disc_timeout(struct timer_list *t)
1555     d->timer_intv = TIPC_DISC_SLOW;
1556     else if (!d->num_nodes && d->timer_intv > TIPC_DISC_FAST)
1557     d->timer_intv = TIPC_DISC_FAST;
1558     + msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
1559     + msg_set_prevnode(buf_msg(d->skb), tn->trial_addr);
1560     }
1561    
1562     mod_timer(&d->timer, jiffies + d->timer_intv);
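
The reworked discovery timeout handles the end of the trial period by re-arming its own timer, dropping d->lock and returning, leaving the actual finalize step to scheduled work instead of running it under the spinlock in timer context. The ordering in outline, with a pthread mutex as a rough stand-in for spin_lock_bh() and invented function names throughout:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t d_lock = PTHREAD_MUTEX_INITIALIZER;

/* schedule_work() analog: the heavy lifting happens elsewhere, later */
static void sched_finalize(unsigned int addr)
{
        printf("deferred finalize for %u\n", addr);
}

static void disc_timeout(unsigned int trial_addr, int trial_over)
{
        pthread_mutex_lock(&d_lock);

        if (trial_over) {
                /* the real code re-arms d->timer here, then ... */
                pthread_mutex_unlock(&d_lock);  /* ... drops the lock ... */
                sched_finalize(trial_addr);     /* ... and defers the rest */
                return;
        }

        /* normal path: adjust the interval, rebuild the request, re-arm */
        pthread_mutex_unlock(&d_lock);
}

int main(void)
{
        disc_timeout(42, 1);
        return 0;
}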
1563     diff --git a/net/tipc/link.c b/net/tipc/link.c
1564     index 201c3b5bc96b..836727e363c4 100644
1565     --- a/net/tipc/link.c
1566     +++ b/net/tipc/link.c
1567     @@ -1594,14 +1594,17 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
1568     if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
1569     l->priority = peers_prio;
1570    
1571     - /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
1572     - if (msg_peer_stopping(hdr))
1573     + /* If peer is going down we want full re-establish cycle */
1574     + if (msg_peer_stopping(hdr)) {
1575     rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1576     - else if ((mtyp == RESET_MSG) || !link_is_up(l))
1577     + break;
1578     + }
1579     + /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
1580     + if (mtyp == RESET_MSG || !link_is_up(l))
1581     rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
1582    
1583     /* ACTIVATE_MSG takes up link if it was already locally reset */
1584     - if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
1585     + if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING)
1586     rc = TIPC_LINK_UP_EVT;
1587    
1588     l->peer_session = msg_session(hdr);
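
In tipc_link_proto_rcv(), a stopping peer previously fell through to the checks below, where an ACTIVATE message on an establishing link could overwrite the failure result with a link-up event; the added break makes the failure terminal so the link goes through a full re-establish cycle. The decision order compressed into a runnable sketch (the flag names are invented, the precedence mirrors the hunk):

#include <stdio.h>

enum link_ev { EV_NONE, EV_PEER_RESET, EV_FAILURE, EV_LINK_UP };

static enum link_ev classify(int peer_stopping, int activate_msg,
                             int reset_msg, int link_up, int establishing)
{
        enum link_ev rc = EV_NONE;

        if (peer_stopping)
                return EV_FAILURE;              /* the added break: terminal */

        if (reset_msg || !link_up)
                rc = EV_PEER_RESET;
        if (activate_msg && establishing)
                rc = EV_LINK_UP;                /* used to clobber FAILURE too */
        return rc;
}

int main(void)
{
        /* a stopping peer must yield a failure, whatever else is set */
        printf("%s\n",
               classify(1, 1, 0, 0, 1) == EV_FAILURE ? "ok" : "broken");
        return 0;
}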
1589     diff --git a/net/tipc/net.c b/net/tipc/net.c
1590     index 62199cf5a56c..f076edb74338 100644
1591     --- a/net/tipc/net.c
1592     +++ b/net/tipc/net.c
1593     @@ -104,6 +104,14 @@
1594     * - A local spin_lock protecting the queue of subscriber events.
1595     */
1596    
1597     +struct tipc_net_work {
1598     + struct work_struct work;
1599     + struct net *net;
1600     + u32 addr;
1601     +};
1602     +
1603     +static void tipc_net_finalize(struct net *net, u32 addr);
1604     +
1605     int tipc_net_init(struct net *net, u8 *node_id, u32 addr)
1606     {
1607     if (tipc_own_id(net)) {
1608     @@ -119,17 +127,38 @@ int tipc_net_init(struct net *net, u8 *node_id, u32 addr)
1609     return 0;
1610     }
1611    
1612     -void tipc_net_finalize(struct net *net, u32 addr)
1613     +static void tipc_net_finalize(struct net *net, u32 addr)
1614     {
1615     struct tipc_net *tn = tipc_net(net);
1616    
1617     - if (!cmpxchg(&tn->node_addr, 0, addr)) {
1618     - tipc_set_node_addr(net, addr);
1619     - tipc_named_reinit(net);
1620     - tipc_sk_reinit(net);
1621     - tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
1622     - TIPC_CLUSTER_SCOPE, 0, addr);
1623     - }
1624     + if (cmpxchg(&tn->node_addr, 0, addr))
1625     + return;
1626     + tipc_set_node_addr(net, addr);
1627     + tipc_named_reinit(net);
1628     + tipc_sk_reinit(net);
1629     + tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
1630     + TIPC_CLUSTER_SCOPE, 0, addr);
1631     +}
1632     +
1633     +static void tipc_net_finalize_work(struct work_struct *work)
1634     +{
1635     + struct tipc_net_work *fwork;
1636     +
1637     + fwork = container_of(work, struct tipc_net_work, work);
1638     + tipc_net_finalize(fwork->net, fwork->addr);
1639     + kfree(fwork);
1640     +}
1641     +
1642     +void tipc_sched_net_finalize(struct net *net, u32 addr)
1643     +{
1644     + struct tipc_net_work *fwork = kzalloc(sizeof(*fwork), GFP_ATOMIC);
1645     +
1646     + if (!fwork)
1647     + return;
1648     + INIT_WORK(&fwork->work, tipc_net_finalize_work);
1649     + fwork->net = net;
1650     + fwork->addr = addr;
1651     + schedule_work(&fwork->work);
1652     }
1653    
1654     void tipc_net_stop(struct net *net)
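
tipc_net_finalize() is now static and reached only through tipc_sched_net_finalize(), which packages the address into a heap-allocated work item so finalization runs later in process context; the cmpxchg on tn->node_addr still guarantees that only the first attempt takes effect. Both ideas in one runnable userspace sketch, with C11 atomics and pthreads as rough stand-ins for cmpxchg() and the system workqueue:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static atomic_uint node_addr;                   /* 0 = not yet finalized */

struct net_work {
        unsigned int addr;
};

/* cmpxchg guard: only the first caller gets to finalize */
static void net_finalize(unsigned int addr)
{
        unsigned int expected = 0;

        if (!atomic_compare_exchange_strong(&node_addr, &expected, addr))
                return;
        printf("finalized with addr %u\n", addr);
}

/* work handler: consume the context, then free it, as the hunk does */
static void *net_finalize_work(void *arg)
{
        struct net_work *w = arg;

        net_finalize(w->addr);
        free(w);
        return NULL;
}

int main(void)
{
        pthread_t t1, t2;
        struct net_work *w1 = malloc(sizeof(*w1));
        struct net_work *w2 = malloc(sizeof(*w2));

        if (!w1 || !w2)
                return 1;
        w1->addr = 42;
        w2->addr = 43;
        pthread_create(&t1, NULL, net_finalize_work, w1);
        pthread_create(&t2, NULL, net_finalize_work, w2);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);                 /* exactly one prints */
        return 0;
}

Note that the kernel side allocates with GFP_ATOMIC and silently gives up on allocation failure; that looks tolerable here only because the discovery timer path re-arms itself before scheduling, so a failed allocation gets retried.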
1655     diff --git a/net/tipc/net.h b/net/tipc/net.h
1656     index 09ad02b50bb1..b7f2e364eb99 100644
1657     --- a/net/tipc/net.h
1658     +++ b/net/tipc/net.h
1659     @@ -42,7 +42,7 @@
1660     extern const struct nla_policy tipc_nl_net_policy[];
1661    
1662     int tipc_net_init(struct net *net, u8 *node_id, u32 addr);
1663     -void tipc_net_finalize(struct net *net, u32 addr);
1664     +void tipc_sched_net_finalize(struct net *net, u32 addr);
1665     void tipc_net_stop(struct net *net);
1666     int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb);
1667     int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info);
1668     diff --git a/net/tipc/socket.c b/net/tipc/socket.c
1669     index 0bf8ad486c5e..366ce0bf2658 100644
1670     --- a/net/tipc/socket.c
1671     +++ b/net/tipc/socket.c
1672     @@ -1548,16 +1548,17 @@ static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
1673     /**
1674     * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
1675     * @m: descriptor for message info
1676     - * @msg: received message header
1677     + * @skb: received message buffer
1678     * @tsk: TIPC port associated with message
1679     *
1680     * Note: Ancillary data is not captured if not requested by receiver.
1681     *
1682     * Returns 0 if successful, otherwise errno
1683     */
1684     -static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
1685     +static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb,
1686     struct tipc_sock *tsk)
1687     {
1688     + struct tipc_msg *msg;
1689     u32 anc_data[3];
1690     u32 err;
1691     u32 dest_type;
1692     @@ -1566,6 +1567,7 @@ static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
1693    
1694     if (likely(m->msg_controllen == 0))
1695     return 0;
1696     + msg = buf_msg(skb);
1697    
1698     /* Optionally capture errored message object(s) */
1699     err = msg ? msg_errcode(msg) : 0;
1700     @@ -1576,6 +1578,9 @@ static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
1701     if (res)
1702     return res;
1703     if (anc_data[1]) {
1704     + if (skb_linearize(skb))
1705     + return -ENOMEM;
1706     + msg = buf_msg(skb);
1707     res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
1708     msg_data(msg));
1709     if (res)
1710     @@ -1737,9 +1742,10 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
1711    
1712     /* Collect msg meta data, including error code and rejected data */
1713     tipc_sk_set_orig_addr(m, skb);
1714     - rc = tipc_sk_anc_data_recv(m, hdr, tsk);
1715     + rc = tipc_sk_anc_data_recv(m, skb, tsk);
1716     if (unlikely(rc))
1717     goto exit;
1718     + hdr = buf_msg(skb);
1719    
1720     /* Capture data if non-error msg, otherwise just set return value */
1721     if (likely(!err)) {
1722     @@ -1849,9 +1855,10 @@ static int tipc_recvstream(struct socket *sock, struct msghdr *m,
1723     /* Collect msg meta data, incl. error code and rejected data */
1724     if (!copied) {
1725     tipc_sk_set_orig_addr(m, skb);
1726     - rc = tipc_sk_anc_data_recv(m, hdr, tsk);
1727     + rc = tipc_sk_anc_data_recv(m, skb, tsk);
1728     if (rc)
1729     break;
1730     + hdr = buf_msg(skb);
1731     }
1732    
1733     /* Copy data if msg ok, otherwise return error/partial data */
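
The tipc/socket.c hunks exist because skb_linearize() may reallocate the buffer's data area: any struct tipc_msg pointer taken beforehand is stale afterwards, which is why tipc_sk_anc_data_recv() now takes the skb itself and both receive paths re-fetch hdr via buf_msg() once it returns. The same pointer-invalidation hazard in userspace terms, with realloc() playing the role of linearization and all names invented:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
        char *data;
        size_t len;
};

/* may move buf->data, just as skb_linearize() may move skb data */
static int buf_linearize(struct buf *b)
{
        char *p = realloc(b->data, b->len * 2);

        if (!p)
                return -1;
        b->data = p;
        return 0;
}

/* buf_msg() analog: derive the header pointer from the buffer */
static char *buf_hdr(const struct buf *b)
{
        return b->data;
}

int main(void)
{
        struct buf b = { .len = 8 };
        char *hdr;

        b.data = calloc(1, b.len);
        if (!b.data)
                return 1;
        strcpy(b.data, "hdr");

        hdr = buf_hdr(&b);                      /* stale after a move ... */
        if (buf_linearize(&b))
                return 1;
        hdr = buf_hdr(&b);                      /* ... so fetch it again */

        printf("%s\n", hdr);
        free(b.data);
        return 0;
}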