Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0101-5.4.2-all-fixes.patch



Revision 3482
Mon May 11 14:35:57 2020 UTC by niro
File size: 61678 bytes
-linux-5.4.2
1 diff --git a/Makefile b/Makefile
2 index 641a62423fd6..e67f2e95b71d 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 5
8 PATCHLEVEL = 4
9 -SUBLEVEL = 1
10 +SUBLEVEL = 2
11 EXTRAVERSION =
12 NAME = Kleptomaniac Octopus
13
14 diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
15 index 4c95c365058a..44c48e34d799 100644
16 --- a/arch/x86/include/asm/fpu/internal.h
17 +++ b/arch/x86/include/asm/fpu/internal.h
18 @@ -509,7 +509,7 @@ static inline void __fpu_invalidate_fpregs_state(struct fpu *fpu)
19
20 static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu)
21 {
22 - return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
23 + return fpu == this_cpu_read(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
24 }
25
26 /*
27 diff --git a/drivers/base/platform.c b/drivers/base/platform.c
28 index b230beb6ccb4..3c0cd20925b7 100644
29 --- a/drivers/base/platform.c
30 +++ b/drivers/base/platform.c
31 @@ -1278,6 +1278,11 @@ struct bus_type platform_bus_type = {
32 };
33 EXPORT_SYMBOL_GPL(platform_bus_type);
34
35 +static inline int __platform_match(struct device *dev, const void *drv)
36 +{
37 + return platform_match(dev, (struct device_driver *)drv);
38 +}
39 +
40 /**
41 * platform_find_device_by_driver - Find a platform device with a given
42 * driver.
43 @@ -1288,7 +1293,7 @@ struct device *platform_find_device_by_driver(struct device *start,
44 const struct device_driver *drv)
45 {
46 return bus_find_device(&platform_bus_type, start, drv,
47 - (void *)platform_match);
48 + __platform_match);
49 }
50 EXPORT_SYMBOL_GPL(platform_find_device_by_driver);
51
52 diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
53 index 1fb622f2a87d..8eabf7b20101 100644
54 --- a/drivers/crypto/Kconfig
55 +++ b/drivers/crypto/Kconfig
56 @@ -287,6 +287,7 @@ config CRYPTO_DEV_TALITOS
57 select CRYPTO_AUTHENC
58 select CRYPTO_BLKCIPHER
59 select CRYPTO_HASH
60 + select CRYPTO_LIB_DES
61 select HW_RANDOM
62 depends on FSL_SOC
63 help
64 diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
65 index 4ab1bde8dd9b..294debd435b6 100644
66 --- a/drivers/crypto/inside-secure/safexcel.c
67 +++ b/drivers/crypto/inside-secure/safexcel.c
68 @@ -221,9 +221,9 @@ static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
69 /* Step #3: Determine log2 of hash table size */
70 cs_ht_sz = __fls(asize - cs_rc_max) - 2;
71 /* Step #4: determine current size of hash table in dwords */
72 - cs_ht_wc = 16<<cs_ht_sz; /* dwords, not admin words */
73 + cs_ht_wc = 16 << cs_ht_sz; /* dwords, not admin words */
74 /* Step #5: add back excess words and see if we can fit more records */
75 - cs_rc_max = min_t(uint, cs_rc_abs_max, asize - (cs_ht_wc >> 4));
76 + cs_rc_max = min_t(uint, cs_rc_abs_max, asize - (cs_ht_wc >> 2));
77
78 /* Clear the cache RAMs */
79 eip197_trc_cache_clear(priv, cs_rc_max, cs_ht_wc);
80 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
81 index 63fdbf09b044..2fa3587d974f 100644
82 --- a/drivers/hid/hid-core.c
83 +++ b/drivers/hid/hid-core.c
84 @@ -211,6 +211,18 @@ static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
85 return 0; /* we know nothing about this usage type */
86 }
87
88 +/*
89 + * Concatenate usage which defines 16 bits or less with the
90 + * currently defined usage page to form a 32 bit usage
91 + */
92 +
93 +static void complete_usage(struct hid_parser *parser, unsigned int index)
94 +{
95 + parser->local.usage[index] &= 0xFFFF;
96 + parser->local.usage[index] |=
97 + (parser->global.usage_page & 0xFFFF) << 16;
98 +}
99 +
100 /*
101 * Add a usage to the temporary parser table.
102 */
103 @@ -222,6 +234,14 @@ static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size)
104 return -1;
105 }
106 parser->local.usage[parser->local.usage_index] = usage;
107 +
108 + /*
109 + * If Usage item only includes usage id, concatenate it with
110 + * currently defined usage page
111 + */
112 + if (size <= 2)
113 + complete_usage(parser, parser->local.usage_index);
114 +
115 parser->local.usage_size[parser->local.usage_index] = size;
116 parser->local.collection_index[parser->local.usage_index] =
117 parser->collection_stack_ptr ?
118 @@ -543,13 +563,32 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
119 * usage value."
120 */
121
122 -static void hid_concatenate_usage_page(struct hid_parser *parser)
123 +static void hid_concatenate_last_usage_page(struct hid_parser *parser)
124 {
125 int i;
126 + unsigned int usage_page;
127 + unsigned int current_page;
128
129 - for (i = 0; i < parser->local.usage_index; i++)
130 - if (parser->local.usage_size[i] <= 2)
131 - parser->local.usage[i] += parser->global.usage_page << 16;
132 + if (!parser->local.usage_index)
133 + return;
134 +
135 + usage_page = parser->global.usage_page;
136 +
137 + /*
138 + * Concatenate usage page again only if last declared Usage Page
139 + * has not been already used in previous usages concatenation
140 + */
141 + for (i = parser->local.usage_index - 1; i >= 0; i--) {
142 + if (parser->local.usage_size[i] > 2)
143 + /* Ignore extended usages */
144 + continue;
145 +
146 + current_page = parser->local.usage[i] >> 16;
147 + if (current_page == usage_page)
148 + break;
149 +
150 + complete_usage(parser, i);
151 + }
152 }
153
154 /*
155 @@ -561,7 +600,7 @@ static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
156 __u32 data;
157 int ret;
158
159 - hid_concatenate_usage_page(parser);
160 + hid_concatenate_last_usage_page(parser);
161
162 data = item_udata(item);
163
164 @@ -772,7 +811,7 @@ static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
165 __u32 data;
166 int i;
167
168 - hid_concatenate_usage_page(parser);
169 + hid_concatenate_last_usage_page(parser);
170
171 data = item_udata(item);
172
173 diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
174 index 985bd4fd3328..53bb394ccba6 100644
175 --- a/drivers/misc/mei/bus.c
176 +++ b/drivers/misc/mei/bus.c
177 @@ -873,15 +873,16 @@ static const struct device_type mei_cl_device_type = {
178
179 /**
180 * mei_cl_bus_set_name - set device name for me client device
181 + * <controller>-<client device>
182 + * Example: 0000:00:16.0-55213584-9a29-4916-badf-0fb7ed682aeb
183 *
184 * @cldev: me client device
185 */
186 static inline void mei_cl_bus_set_name(struct mei_cl_device *cldev)
187 {
188 - dev_set_name(&cldev->dev, "mei:%s:%pUl:%02X",
189 - cldev->name,
190 - mei_me_cl_uuid(cldev->me_cl),
191 - mei_me_cl_ver(cldev->me_cl));
192 + dev_set_name(&cldev->dev, "%s-%pUl",
193 + dev_name(cldev->bus->dev),
194 + mei_me_cl_uuid(cldev->me_cl));
195 }
196
197 /**
198 diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
199 index c09f8bb49495..b359f06f05e7 100644
200 --- a/drivers/misc/mei/hw-me-regs.h
201 +++ b/drivers/misc/mei/hw-me-regs.h
202 @@ -81,6 +81,7 @@
203
204 #define MEI_DEV_ID_CMP_LP 0x02e0 /* Comet Point LP */
205 #define MEI_DEV_ID_CMP_LP_3 0x02e4 /* Comet Point LP 3 (iTouch) */
206 +#define MEI_DEV_ID_CMP_V 0xA3BA /* Comet Point Lake V */
207
208 #define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */
209
210 diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
211 index 3dca63eddaa0..ce43415a536c 100644
212 --- a/drivers/misc/mei/pci-me.c
213 +++ b/drivers/misc/mei/pci-me.c
214 @@ -98,6 +98,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
215
216 {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)},
217 {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_CFG)},
218 + {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_V, MEI_ME_PCH12_CFG)},
219
220 {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
221
222 diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
223 index 7687ddcae159..aa140662c7c2 100644
224 --- a/drivers/net/dsa/sja1105/sja1105_main.c
225 +++ b/drivers/net/dsa/sja1105/sja1105_main.c
226 @@ -594,15 +594,15 @@ static int sja1105_parse_rgmii_delays(struct sja1105_private *priv,
227 int i;
228
229 for (i = 0; i < SJA1105_NUM_PORTS; i++) {
230 - if (ports->role == XMII_MAC)
231 + if (ports[i].role == XMII_MAC)
232 continue;
233
234 - if (ports->phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
235 - ports->phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
236 + if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
237 + ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
238 priv->rgmii_rx_delay[i] = true;
239
240 - if (ports->phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
241 - ports->phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
242 + if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
243 + ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
244 priv->rgmii_tx_delay[i] = true;
245
246 if ((priv->rgmii_rx_delay[i] || priv->rgmii_tx_delay[i]) &&
247 diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
248 index 1e1b774e1953..0f10a272827c 100644
249 --- a/drivers/net/ethernet/cadence/macb_main.c
250 +++ b/drivers/net/ethernet/cadence/macb_main.c
251 @@ -4392,6 +4392,7 @@ static int macb_remove(struct platform_device *pdev)
252 mdiobus_free(bp->mii_bus);
253
254 unregister_netdev(dev);
255 + tasklet_kill(&bp->hresp_err_tasklet);
256 pm_runtime_disable(&pdev->dev);
257 pm_runtime_dont_use_autosuspend(&pdev->dev);
258 if (!pm_runtime_suspended(&pdev->dev)) {
259 diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
260 index aca95f64bde8..9b7a8db9860f 100644
261 --- a/drivers/net/ethernet/google/gve/gve_main.c
262 +++ b/drivers/net/ethernet/google/gve/gve_main.c
263 @@ -544,7 +544,7 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
264 }
265
266 qpl->id = id;
267 - qpl->num_entries = pages;
268 + qpl->num_entries = 0;
269 qpl->pages = kvzalloc(pages * sizeof(*qpl->pages), GFP_KERNEL);
270 /* caller handles clean up */
271 if (!qpl->pages)
272 @@ -562,6 +562,7 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
273 /* caller handles clean up */
274 if (err)
275 return -ENOMEM;
276 + qpl->num_entries++;
277 }
278 priv->num_registered_pages += pages;
279
280 diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
281 index c33c438850cc..1d67eeeab79d 100644
282 --- a/drivers/net/ethernet/realtek/r8169_main.c
283 +++ b/drivers/net/ethernet/realtek/r8169_main.c
284 @@ -1516,6 +1516,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
285 rtl_lock_config_regs(tp);
286
287 device_set_wakeup_enable(tp_to_dev(tp), wolopts);
288 + tp->dev->wol_enabled = wolopts ? 1 : 0;
289 }
290
291 static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
292 @@ -4118,7 +4119,7 @@ static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
293 case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28:
294 r8168dp_hw_jumbo_enable(tp);
295 break;
296 - case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_34:
297 + case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33:
298 r8168e_hw_jumbo_enable(tp);
299 break;
300 default:
301 diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
302 index 34fc59bd1e20..05631d97eeb4 100644
303 --- a/drivers/net/macvlan.c
304 +++ b/drivers/net/macvlan.c
305 @@ -359,10 +359,11 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
306 }
307 spin_unlock(&port->bc_queue.lock);
308
309 + schedule_work(&port->bc_work);
310 +
311 if (err)
312 goto free_nskb;
313
314 - schedule_work(&port->bc_work);
315 return;
316
317 free_nskb:
318 diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
319 index dbacb0031877..229e480179ff 100644
320 --- a/drivers/net/phy/mdio_bus.c
321 +++ b/drivers/net/phy/mdio_bus.c
322 @@ -62,8 +62,8 @@ static int mdiobus_register_reset(struct mdio_device *mdiodev)
323 struct reset_control *reset = NULL;
324
325 if (mdiodev->dev.of_node)
326 - reset = devm_reset_control_get_exclusive(&mdiodev->dev,
327 - "phy");
328 + reset = of_reset_control_get_exclusive(mdiodev->dev.of_node,
329 + "phy");
330 if (IS_ERR(reset)) {
331 if (PTR_ERR(reset) == -ENOENT || PTR_ERR(reset) == -ENOTSUPP)
332 reset = NULL;
333 @@ -107,6 +107,8 @@ int mdiobus_unregister_device(struct mdio_device *mdiodev)
334 if (mdiodev->bus->mdio_map[mdiodev->addr] != mdiodev)
335 return -EINVAL;
336
337 + reset_control_put(mdiodev->reset_ctrl);
338 +
339 mdiodev->bus->mdio_map[mdiodev->addr] = NULL;
340
341 return 0;
342 diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
343 index 4d479e3c817d..2a91c192659f 100644
344 --- a/drivers/net/slip/slip.c
345 +++ b/drivers/net/slip/slip.c
346 @@ -855,6 +855,7 @@ err_free_chan:
347 sl->tty = NULL;
348 tty->disc_data = NULL;
349 clear_bit(SLF_INUSE, &sl->flags);
350 + sl_free_netdev(sl->dev);
351 free_netdev(sl->dev);
352
353 err_exit:
354 diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
355 index 6bcbbb375401..9579a706fc08 100644
356 --- a/drivers/platform/x86/hp-wmi.c
357 +++ b/drivers/platform/x86/hp-wmi.c
358 @@ -65,7 +65,7 @@ struct bios_args {
359 u32 command;
360 u32 commandtype;
361 u32 datasize;
362 - u32 data;
363 + u8 data[128];
364 };
365
366 enum hp_wmi_commandtype {
367 @@ -216,7 +216,7 @@ static int hp_wmi_perform_query(int query, enum hp_wmi_command command,
368 .command = command,
369 .commandtype = query,
370 .datasize = insize,
371 - .data = 0,
372 + .data = { 0 },
373 };
374 struct acpi_buffer input = { sizeof(struct bios_args), &args };
375 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
376 @@ -228,7 +228,7 @@ static int hp_wmi_perform_query(int query, enum hp_wmi_command command,
377
378 if (WARN_ON(insize > sizeof(args.data)))
379 return -EINVAL;
380 - memcpy(&args.data, buffer, insize);
381 + memcpy(&args.data[0], buffer, insize);
382
383 wmi_evaluate_method(HPWMI_BIOS_GUID, 0, mid, &input, &output);
384
385 @@ -380,7 +380,7 @@ static int hp_wmi_rfkill2_refresh(void)
386 int err, i;
387
388 err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state,
389 - 0, sizeof(state));
390 + sizeof(state), sizeof(state));
391 if (err)
392 return err;
393
394 @@ -778,7 +778,7 @@ static int __init hp_wmi_rfkill2_setup(struct platform_device *device)
395 int err, i;
396
397 err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state,
398 - 0, sizeof(state));
399 + sizeof(state), sizeof(state));
400 if (err)
401 return err < 0 ? err : -EINVAL;
402
403 diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
404 index f932cb15e4e5..c702ee9691b1 100644
405 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
406 +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
407 @@ -1616,14 +1616,15 @@ static void _rtl92e_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
408 memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
409 skb_push(skb, priv->rtllib->tx_headroom);
410 ret = _rtl92e_tx(dev, skb);
411 - if (ret != 0)
412 - kfree_skb(skb);
413
414 if (queue_index != MGNT_QUEUE) {
415 priv->rtllib->stats.tx_bytes += (skb->len -
416 priv->rtllib->tx_headroom);
417 priv->rtllib->stats.tx_packets++;
418 }
419 +
420 + if (ret != 0)
421 + kfree_skb(skb);
422 }
423
424 static int _rtl92e_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
425 diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
426 index d3784c44f6d0..3784a27641a6 100644
427 --- a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
428 +++ b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
429 @@ -18,18 +18,13 @@
430 static const struct sdio_device_id sdio_ids[] =
431 {
432 { SDIO_DEVICE(0x024c, 0x0523), },
433 + { SDIO_DEVICE(0x024c, 0x0525), },
434 { SDIO_DEVICE(0x024c, 0x0623), },
435 { SDIO_DEVICE(0x024c, 0x0626), },
436 { SDIO_DEVICE(0x024c, 0xb723), },
437 { /* end: all zeroes */ },
438 };
439 -static const struct acpi_device_id acpi_ids[] = {
440 - {"OBDA8723", 0x0000},
441 - {}
442 -};
443 -
444 MODULE_DEVICE_TABLE(sdio, sdio_ids);
445 -MODULE_DEVICE_TABLE(acpi, acpi_ids);
446
447 static int rtw_drv_init(struct sdio_func *func, const struct sdio_device_id *id);
448 static void rtw_dev_remove(struct sdio_func *func);
449 diff --git a/drivers/staging/wilc1000/wilc_hif.c b/drivers/staging/wilc1000/wilc_hif.c
450 index f2b7d5a1be17..d3d9ea284816 100644
451 --- a/drivers/staging/wilc1000/wilc_hif.c
452 +++ b/drivers/staging/wilc1000/wilc_hif.c
453 @@ -477,16 +477,21 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
454 memcpy(&param->supp_rates[1], rates_ie + 2, rates_len);
455 }
456
457 - supp_rates_ie = cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES, ies->data,
458 - ies->len);
459 - if (supp_rates_ie) {
460 - if (supp_rates_ie[1] > (WILC_MAX_RATES_SUPPORTED - rates_len))
461 - param->supp_rates[0] = WILC_MAX_RATES_SUPPORTED;
462 - else
463 - param->supp_rates[0] += supp_rates_ie[1];
464 -
465 - memcpy(&param->supp_rates[rates_len + 1], supp_rates_ie + 2,
466 - (param->supp_rates[0] - rates_len));
467 + if (rates_len < WILC_MAX_RATES_SUPPORTED) {
468 + supp_rates_ie = cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
469 + ies->data, ies->len);
470 + if (supp_rates_ie) {
471 + u8 ext_rates = supp_rates_ie[1];
472 +
473 + if (ext_rates > (WILC_MAX_RATES_SUPPORTED - rates_len))
474 + param->supp_rates[0] = WILC_MAX_RATES_SUPPORTED;
475 + else
476 + param->supp_rates[0] += ext_rates;
477 +
478 + memcpy(&param->supp_rates[rates_len + 1],
479 + supp_rates_ie + 2,
480 + (param->supp_rates[0] - rates_len));
481 + }
482 }
483
484 ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies->data, ies->len);
485 diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
486 index 5ea8db667e83..c5974c9af841 100644
487 --- a/drivers/thunderbolt/switch.c
488 +++ b/drivers/thunderbolt/switch.c
489 @@ -168,7 +168,7 @@ static int nvm_validate_and_write(struct tb_switch *sw)
490
491 static int nvm_authenticate_host(struct tb_switch *sw)
492 {
493 - int ret;
494 + int ret = 0;
495
496 /*
497 * Root switch NVM upgrade requires that we disconnect the
498 @@ -176,6 +176,8 @@ static int nvm_authenticate_host(struct tb_switch *sw)
499 * already).
500 */
501 if (!sw->safe_mode) {
502 + u32 status;
503 +
504 ret = tb_domain_disconnect_all_paths(sw->tb);
505 if (ret)
506 return ret;
507 @@ -184,7 +186,16 @@ static int nvm_authenticate_host(struct tb_switch *sw)
508 * everything goes well so getting timeout is expected.
509 */
510 ret = dma_port_flash_update_auth(sw->dma_port);
511 - return ret == -ETIMEDOUT ? 0 : ret;
512 + if (!ret || ret == -ETIMEDOUT)
513 + return 0;
514 +
515 + /*
516 + * Any error from update auth operation requires power
517 + * cycling of the host router.
518 + */
519 + tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
520 + if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
521 + nvm_set_auth_status(sw, status);
522 }
523
524 /*
525 @@ -192,7 +203,7 @@ static int nvm_authenticate_host(struct tb_switch *sw)
526 * switch.
527 */
528 dma_port_power_cycle(sw->dma_port);
529 - return 0;
530 + return ret;
531 }
532
533 static int nvm_authenticate_device(struct tb_switch *sw)
534 @@ -200,8 +211,16 @@ static int nvm_authenticate_device(struct tb_switch *sw)
535 int ret, retries = 10;
536
537 ret = dma_port_flash_update_auth(sw->dma_port);
538 - if (ret && ret != -ETIMEDOUT)
539 + switch (ret) {
540 + case 0:
541 + case -ETIMEDOUT:
542 + case -EACCES:
543 + case -EINVAL:
544 + /* Power cycle is required */
545 + break;
546 + default:
547 return ret;
548 + }
549
550 /*
551 * Poll here for the authentication status. It takes some time
552 @@ -1246,8 +1265,6 @@ static ssize_t nvm_authenticate_store(struct device *dev,
553 */
554 nvm_authenticate_start(sw);
555 ret = nvm_authenticate_host(sw);
556 - if (ret)
557 - nvm_authenticate_complete(sw);
558 } else {
559 ret = nvm_authenticate_device(sw);
560 }
561 @@ -1690,13 +1707,16 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
562 int ret;
563
564 switch (sw->generation) {
565 - case 3:
566 - break;
567 -
568 case 2:
569 /* Only root switch can be upgraded */
570 if (tb_route(sw))
571 return 0;
572 +
573 + /* fallthrough */
574 + case 3:
575 + ret = tb_switch_set_uuid(sw);
576 + if (ret)
577 + return ret;
578 break;
579
580 default:
581 @@ -1720,6 +1740,19 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
582 if (sw->no_nvm_upgrade)
583 return 0;
584
585 + /*
586 + * If there is status already set then authentication failed
587 + * when the dma_port_flash_update_auth() returned. Power cycling
588 + * is not needed (it was done already) so only thing we do here
589 + * is to unblock runtime PM of the root port.
590 + */
591 + nvm_get_auth_status(sw, &status);
592 + if (status) {
593 + if (!tb_route(sw))
594 + nvm_authenticate_complete(sw);
595 + return 0;
596 + }
597 +
598 /*
599 * Check status of the previous flash authentication. If there
600 * is one we need to power cycle the switch in any case to make
601 @@ -1735,9 +1768,6 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
602
603 if (status) {
604 tb_sw_info(sw, "switch flash authentication failed\n");
605 - ret = tb_switch_set_uuid(sw);
606 - if (ret)
607 - return ret;
608 nvm_set_auth_status(sw, status);
609 }
610
611 diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
612 index 8e41d70fd298..78a4925aa118 100644
613 --- a/drivers/usb/dwc2/core.c
614 +++ b/drivers/usb/dwc2/core.c
615 @@ -524,7 +524,7 @@ int dwc2_core_reset(struct dwc2_hsotg *hsotg, bool skip_wait)
616 greset |= GRSTCTL_CSFTRST;
617 dwc2_writel(hsotg, greset, GRSTCTL);
618
619 - if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_CSFTRST, 50)) {
620 + if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_CSFTRST, 10000)) {
621 dev_warn(hsotg->dev, "%s: HANG! Soft Reset timeout GRSTCTL GRSTCTL_CSFTRST\n",
622 __func__);
623 return -EBUSY;
624 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
625 index 25e81faf4c24..9ad44a96dfe3 100644
626 --- a/drivers/usb/serial/ftdi_sio.c
627 +++ b/drivers/usb/serial/ftdi_sio.c
628 @@ -1033,6 +1033,9 @@ static const struct usb_device_id id_table_combined[] = {
629 /* Sienna devices */
630 { USB_DEVICE(FTDI_VID, FTDI_SIENNA_PID) },
631 { USB_DEVICE(ECHELON_VID, ECHELON_U20_PID) },
632 + /* U-Blox devices */
633 + { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ZED_PID) },
634 + { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ODIN_PID) },
635 { } /* Terminating entry */
636 };
637
638 diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
639 index 22d66217cb41..e8373528264c 100644
640 --- a/drivers/usb/serial/ftdi_sio_ids.h
641 +++ b/drivers/usb/serial/ftdi_sio_ids.h
642 @@ -1558,3 +1558,10 @@
643 */
644 #define UNJO_VID 0x22B7
645 #define UNJO_ISODEBUG_V1_PID 0x150D
646 +
647 +/*
648 + * U-Blox products (http://www.u-blox.com).
649 + */
650 +#define UBLOX_VID 0x1546
651 +#define UBLOX_C099F9P_ZED_PID 0x0502
652 +#define UBLOX_C099F9P_ODIN_PID 0x0503
653 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
654 index 516faa280ced..d691d1783ed6 100644
655 --- a/fs/ext4/inode.c
656 +++ b/fs/ext4/inode.c
657 @@ -5912,8 +5912,23 @@ static int __ext4_expand_extra_isize(struct inode *inode,
658 {
659 struct ext4_inode *raw_inode;
660 struct ext4_xattr_ibody_header *header;
661 + unsigned int inode_size = EXT4_INODE_SIZE(inode->i_sb);
662 + struct ext4_inode_info *ei = EXT4_I(inode);
663 int error;
664
665 + /* this was checked at iget time, but double check for good measure */
666 + if ((EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > inode_size) ||
667 + (ei->i_extra_isize & 3)) {
668 + EXT4_ERROR_INODE(inode, "bad extra_isize %u (inode size %u)",
669 + ei->i_extra_isize,
670 + EXT4_INODE_SIZE(inode->i_sb));
671 + return -EFSCORRUPTED;
672 + }
673 + if ((new_extra_isize < ei->i_extra_isize) ||
674 + (new_extra_isize < 4) ||
675 + (new_extra_isize > inode_size - EXT4_GOOD_OLD_INODE_SIZE))
676 + return -EINVAL; /* Should never happen */
677 +
678 raw_inode = ext4_raw_inode(iloc);
679
680 header = IHDR(inode, raw_inode);
681 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
682 index dd654e53ba3d..73578359d451 100644
683 --- a/fs/ext4/super.c
684 +++ b/fs/ext4/super.c
685 @@ -3555,12 +3555,15 @@ static void ext4_clamp_want_extra_isize(struct super_block *sb)
686 {
687 struct ext4_sb_info *sbi = EXT4_SB(sb);
688 struct ext4_super_block *es = sbi->s_es;
689 + unsigned def_extra_isize = sizeof(struct ext4_inode) -
690 + EXT4_GOOD_OLD_INODE_SIZE;
691
692 - /* determine the minimum size of new large inodes, if present */
693 - if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE &&
694 - sbi->s_want_extra_isize == 0) {
695 - sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
696 - EXT4_GOOD_OLD_INODE_SIZE;
697 + if (sbi->s_inode_size == EXT4_GOOD_OLD_INODE_SIZE) {
698 + sbi->s_want_extra_isize = 0;
699 + return;
700 + }
701 + if (sbi->s_want_extra_isize < 4) {
702 + sbi->s_want_extra_isize = def_extra_isize;
703 if (ext4_has_feature_extra_isize(sb)) {
704 if (sbi->s_want_extra_isize <
705 le16_to_cpu(es->s_want_extra_isize))
706 @@ -3573,10 +3576,10 @@ static void ext4_clamp_want_extra_isize(struct super_block *sb)
707 }
708 }
709 /* Check if enough inode space is available */
710 - if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
711 - sbi->s_inode_size) {
712 - sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
713 - EXT4_GOOD_OLD_INODE_SIZE;
714 + if ((sbi->s_want_extra_isize > sbi->s_inode_size) ||
715 + (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
716 + sbi->s_inode_size)) {
717 + sbi->s_want_extra_isize = def_extra_isize;
718 ext4_msg(sb, KERN_INFO,
719 "required extra inode space not available");
720 }
721 diff --git a/fs/io_uring.c b/fs/io_uring.c
722 index 2c819c3c855d..cbe8dabb6479 100644
723 --- a/fs/io_uring.c
724 +++ b/fs/io_uring.c
725 @@ -238,6 +238,8 @@ struct io_ring_ctx {
726
727 struct user_struct *user;
728
729 + struct cred *creds;
730 +
731 struct completion ctx_done;
732
733 struct {
734 @@ -1752,8 +1754,11 @@ static void io_poll_complete_work(struct work_struct *work)
735 struct io_poll_iocb *poll = &req->poll;
736 struct poll_table_struct pt = { ._key = poll->events };
737 struct io_ring_ctx *ctx = req->ctx;
738 + const struct cred *old_cred;
739 __poll_t mask = 0;
740
741 + old_cred = override_creds(ctx->creds);
742 +
743 if (!READ_ONCE(poll->canceled))
744 mask = vfs_poll(poll->file, &pt) & poll->events;
745
746 @@ -1768,7 +1773,7 @@ static void io_poll_complete_work(struct work_struct *work)
747 if (!mask && !READ_ONCE(poll->canceled)) {
748 add_wait_queue(poll->head, &poll->wait);
749 spin_unlock_irq(&ctx->completion_lock);
750 - return;
751 + goto out;
752 }
753 list_del_init(&req->list);
754 io_poll_complete(ctx, req, mask);
755 @@ -1776,6 +1781,8 @@ static void io_poll_complete_work(struct work_struct *work)
756
757 io_cqring_ev_posted(ctx);
758 io_put_req(req);
759 +out:
760 + revert_creds(old_cred);
761 }
762
763 static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
764 @@ -2147,10 +2154,12 @@ static void io_sq_wq_submit_work(struct work_struct *work)
765 struct io_ring_ctx *ctx = req->ctx;
766 struct mm_struct *cur_mm = NULL;
767 struct async_list *async_list;
768 + const struct cred *old_cred;
769 LIST_HEAD(req_list);
770 mm_segment_t old_fs;
771 int ret;
772
773 + old_cred = override_creds(ctx->creds);
774 async_list = io_async_list_from_sqe(ctx, req->submit.sqe);
775 restart:
776 do {
777 @@ -2258,6 +2267,7 @@ out:
778 unuse_mm(cur_mm);
779 mmput(cur_mm);
780 }
781 + revert_creds(old_cred);
782 }
783
784 /*
785 @@ -2663,6 +2673,7 @@ static int io_sq_thread(void *data)
786 {
787 struct io_ring_ctx *ctx = data;
788 struct mm_struct *cur_mm = NULL;
789 + const struct cred *old_cred;
790 mm_segment_t old_fs;
791 DEFINE_WAIT(wait);
792 unsigned inflight;
793 @@ -2672,6 +2683,7 @@ static int io_sq_thread(void *data)
794
795 old_fs = get_fs();
796 set_fs(USER_DS);
797 + old_cred = override_creds(ctx->creds);
798
799 timeout = inflight = 0;
800 while (!kthread_should_park()) {
801 @@ -2782,6 +2794,7 @@ static int io_sq_thread(void *data)
802 unuse_mm(cur_mm);
803 mmput(cur_mm);
804 }
805 + revert_creds(old_cred);
806
807 kthread_parkme();
808
809 @@ -3567,6 +3580,8 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
810 io_unaccount_mem(ctx->user,
811 ring_pages(ctx->sq_entries, ctx->cq_entries));
812 free_uid(ctx->user);
813 + if (ctx->creds)
814 + put_cred(ctx->creds);
815 kfree(ctx);
816 }
817
818 @@ -3838,6 +3853,12 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
819 ctx->account_mem = account_mem;
820 ctx->user = user;
821
822 + ctx->creds = prepare_creds();
823 + if (!ctx->creds) {
824 + ret = -ENOMEM;
825 + goto err;
826 + }
827 +
828 ret = io_allocate_scq_urings(ctx, p);
829 if (ret)
830 goto err;
831 diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c
832 index 021a4a2190ee..b86c78d178c6 100644
833 --- a/fs/jffs2/nodelist.c
834 +++ b/fs/jffs2/nodelist.c
835 @@ -226,7 +226,7 @@ static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *r
836 lastend = this->ofs + this->size;
837 } else {
838 dbg_fragtree2("lookup gave no frag\n");
839 - return -EINVAL;
840 + lastend = 0;
841 }
842
843 /* See if we ran off the end of the fragtree */
844 diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
845 index ce7055259877..da4caff7efa4 100644
846 --- a/include/linux/skmsg.h
847 +++ b/include/linux/skmsg.h
848 @@ -14,6 +14,7 @@
849 #include <net/strparser.h>
850
851 #define MAX_MSG_FRAGS MAX_SKB_FRAGS
852 +#define NR_MSG_FRAG_IDS (MAX_MSG_FRAGS + 1)
853
854 enum __sk_action {
855 __SK_DROP = 0,
856 @@ -29,11 +30,13 @@ struct sk_msg_sg {
857 u32 size;
858 u32 copybreak;
859 bool copy[MAX_MSG_FRAGS];
860 - /* The extra element is used for chaining the front and sections when
861 - * the list becomes partitioned (e.g. end < start). The crypto APIs
862 - * require the chaining.
863 + /* The extra two elements:
864 + * 1) used for chaining the front and sections when the list becomes
865 + * partitioned (e.g. end < start). The crypto APIs require the
866 + * chaining;
867 + * 2) to chain tailer SG entries after the message.
868 */
869 - struct scatterlist data[MAX_MSG_FRAGS + 1];
870 + struct scatterlist data[MAX_MSG_FRAGS + 2];
871 };
872
873 /* UAPI in filter.c depends on struct sk_msg_sg being first element. */
874 @@ -141,13 +144,13 @@ static inline void sk_msg_apply_bytes(struct sk_psock *psock, u32 bytes)
875
876 static inline u32 sk_msg_iter_dist(u32 start, u32 end)
877 {
878 - return end >= start ? end - start : end + (MAX_MSG_FRAGS - start);
879 + return end >= start ? end - start : end + (NR_MSG_FRAG_IDS - start);
880 }
881
882 #define sk_msg_iter_var_prev(var) \
883 do { \
884 if (var == 0) \
885 - var = MAX_MSG_FRAGS - 1; \
886 + var = NR_MSG_FRAG_IDS - 1; \
887 else \
888 var--; \
889 } while (0)
890 @@ -155,7 +158,7 @@ static inline u32 sk_msg_iter_dist(u32 start, u32 end)
891 #define sk_msg_iter_var_next(var) \
892 do { \
893 var++; \
894 - if (var == MAX_MSG_FRAGS) \
895 + if (var == NR_MSG_FRAG_IDS) \
896 var = 0; \
897 } while (0)
898
899 @@ -172,9 +175,9 @@ static inline void sk_msg_clear_meta(struct sk_msg *msg)
900
901 static inline void sk_msg_init(struct sk_msg *msg)
902 {
903 - BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != MAX_MSG_FRAGS);
904 + BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != NR_MSG_FRAG_IDS);
905 memset(msg, 0, sizeof(*msg));
906 - sg_init_marker(msg->sg.data, MAX_MSG_FRAGS);
907 + sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
908 }
909
910 static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src,
911 @@ -195,14 +198,11 @@ static inline void sk_msg_xfer_full(struct sk_msg *dst, struct sk_msg *src)
912
913 static inline bool sk_msg_full(const struct sk_msg *msg)
914 {
915 - return (msg->sg.end == msg->sg.start) && msg->sg.size;
916 + return sk_msg_iter_dist(msg->sg.start, msg->sg.end) == MAX_MSG_FRAGS;
917 }
918
919 static inline u32 sk_msg_elem_used(const struct sk_msg *msg)
920 {
921 - if (sk_msg_full(msg))
922 - return MAX_MSG_FRAGS;
923 -
924 return sk_msg_iter_dist(msg->sg.start, msg->sg.end);
925 }
926
927 diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
928 index 503fbc3cd819..2b6f3f13d5bc 100644
929 --- a/include/net/sctp/structs.h
930 +++ b/include/net/sctp/structs.h
931 @@ -1239,6 +1239,9 @@ struct sctp_ep_common {
932 /* What socket does this endpoint belong to? */
933 struct sock *sk;
934
935 + /* Cache netns and it won't change once set */
936 + struct net *net;
937 +
938 /* This is where we receive inbound chunks. */
939 struct sctp_inq inqueue;
940
941 diff --git a/include/net/tls.h b/include/net/tls.h
942 index f4ad831eaa02..093abb5a3dff 100644
943 --- a/include/net/tls.h
944 +++ b/include/net/tls.h
945 @@ -122,7 +122,6 @@ struct tls_rec {
946 struct list_head list;
947 int tx_ready;
948 int tx_flags;
949 - int inplace_crypto;
950
951 struct sk_msg msg_plaintext;
952 struct sk_msg msg_encrypted;
953 @@ -396,7 +395,7 @@ int tls_push_sg(struct sock *sk, struct tls_context *ctx,
954 int flags);
955 int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
956 int flags);
957 -bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx);
958 +void tls_free_partial_record(struct sock *sk, struct tls_context *ctx);
959
960 static inline struct tls_msg *tls_msg(struct sk_buff *skb)
961 {
962 diff --git a/net/core/filter.c b/net/core/filter.c
963 index 3fed5755494b..6d0111bfdb4a 100644
964 --- a/net/core/filter.c
965 +++ b/net/core/filter.c
966 @@ -2299,7 +2299,7 @@ BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start,
967 WARN_ON_ONCE(last_sge == first_sge);
968 shift = last_sge > first_sge ?
969 last_sge - first_sge - 1 :
970 - MAX_SKB_FRAGS - first_sge + last_sge - 1;
971 + NR_MSG_FRAG_IDS - first_sge + last_sge - 1;
972 if (!shift)
973 goto out;
974
975 @@ -2308,8 +2308,8 @@ BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start,
976 do {
977 u32 move_from;
978
979 - if (i + shift >= MAX_MSG_FRAGS)
980 - move_from = i + shift - MAX_MSG_FRAGS;
981 + if (i + shift >= NR_MSG_FRAG_IDS)
982 + move_from = i + shift - NR_MSG_FRAG_IDS;
983 else
984 move_from = i + shift;
985 if (move_from == msg->sg.end)
986 @@ -2323,7 +2323,7 @@ BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start,
987 } while (1);
988
989 msg->sg.end = msg->sg.end - shift > msg->sg.end ?
990 - msg->sg.end - shift + MAX_MSG_FRAGS :
991 + msg->sg.end - shift + NR_MSG_FRAG_IDS :
992 msg->sg.end - shift;
993 out:
994 msg->data = sg_virt(&msg->sg.data[first_sge]) + start - offset;
995 diff --git a/net/core/skmsg.c b/net/core/skmsg.c
996 index ad31e4e53d0a..0675d022584e 100644
997 --- a/net/core/skmsg.c
998 +++ b/net/core/skmsg.c
999 @@ -421,7 +421,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
1000 copied = skb->len;
1001 msg->sg.start = 0;
1002 msg->sg.size = copied;
1003 - msg->sg.end = num_sge == MAX_MSG_FRAGS ? 0 : num_sge;
1004 + msg->sg.end = num_sge;
1005 msg->skb = skb;
1006
1007 sk_psock_queue_msg(psock, msg);
1008 diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
1009 index 8a56e09cfb0e..e38705165ac9 100644
1010 --- a/net/ipv4/tcp_bpf.c
1011 +++ b/net/ipv4/tcp_bpf.c
1012 @@ -301,7 +301,7 @@ EXPORT_SYMBOL_GPL(tcp_bpf_sendmsg_redir);
1013 static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
1014 struct sk_msg *msg, int *copied, int flags)
1015 {
1016 - bool cork = false, enospc = msg->sg.start == msg->sg.end;
1017 + bool cork = false, enospc = sk_msg_full(msg);
1018 struct sock *sk_redir;
1019 u32 tosend, delta = 0;
1020 int ret;
1021 diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
1022 index d8c364d637b1..23f67b8fdeaa 100644
1023 --- a/net/openvswitch/datapath.c
1024 +++ b/net/openvswitch/datapath.c
1025 @@ -704,9 +704,13 @@ static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts,
1026 {
1027 size_t len = NLMSG_ALIGN(sizeof(struct ovs_header));
1028
1029 - /* OVS_FLOW_ATTR_UFID */
1030 + /* OVS_FLOW_ATTR_UFID, or unmasked flow key as fallback
1031 + * see ovs_nla_put_identifier()
1032 + */
1033 if (sfid && ovs_identifier_is_ufid(sfid))
1034 len += nla_total_size(sfid->ufid_len);
1035 + else
1036 + len += nla_total_size(ovs_key_attr_size());
1037
1038 /* OVS_FLOW_ATTR_KEY */
1039 if (!sfid || should_fill_key(sfid, ufid_flags))
1040 @@ -882,7 +886,10 @@ static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
1041 retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
1042 info->snd_portid, info->snd_seq, 0,
1043 cmd, ufid_flags);
1044 - BUG_ON(retval < 0);
1045 + if (WARN_ON_ONCE(retval < 0)) {
1046 + kfree_skb(skb);
1047 + skb = ERR_PTR(retval);
1048 + }
1049 return skb;
1050 }
1051
1052 @@ -1346,7 +1353,10 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
1053 OVS_FLOW_CMD_DEL,
1054 ufid_flags);
1055 rcu_read_unlock();
1056 - BUG_ON(err < 0);
1057 + if (WARN_ON_ONCE(err < 0)) {
1058 + kfree_skb(reply);
1059 + goto out_free;
1060 + }
1061
1062 ovs_notify(&dp_flow_genl_family, reply, info);
1063 } else {
1064 @@ -1354,6 +1364,7 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
1065 }
1066 }
1067
1068 +out_free:
1069 ovs_flow_free(flow, true);
1070 return 0;
1071 unlock:
1072 diff --git a/net/psample/psample.c b/net/psample/psample.c
1073 index a6ceb0533b5b..6f2fbc6b9eb2 100644
1074 --- a/net/psample/psample.c
1075 +++ b/net/psample/psample.c
1076 @@ -229,7 +229,7 @@ void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
1077 data_len = PSAMPLE_MAX_PACKET_SIZE - meta_len - NLA_HDRLEN
1078 - NLA_ALIGNTO;
1079
1080 - nl_skb = genlmsg_new(meta_len + data_len, GFP_ATOMIC);
1081 + nl_skb = genlmsg_new(meta_len + nla_total_size(data_len), GFP_ATOMIC);
1082 if (unlikely(!nl_skb))
1083 return;
1084
1085 diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
1086 index 0d578333e967..278c0b2dc523 100644
1087 --- a/net/sched/sch_mq.c
1088 +++ b/net/sched/sch_mq.c
1089 @@ -245,7 +245,8 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
1090 struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
1091
1092 sch = dev_queue->qdisc_sleeping;
1093 - if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
1094 + if (gnet_stats_copy_basic(&sch->running, d, sch->cpu_bstats,
1095 + &sch->bstats) < 0 ||
1096 qdisc_qstats_copy(d, sch) < 0)
1097 return -1;
1098 return 0;
1099 diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
1100 index 46980b8d66c5..0d0113a24962 100644
1101 --- a/net/sched/sch_mqprio.c
1102 +++ b/net/sched/sch_mqprio.c
1103 @@ -557,8 +557,8 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
1104 struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
1105
1106 sch = dev_queue->qdisc_sleeping;
1107 - if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
1108 - d, NULL, &sch->bstats) < 0 ||
1109 + if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d,
1110 + sch->cpu_bstats, &sch->bstats) < 0 ||
1111 qdisc_qstats_copy(d, sch) < 0)
1112 return -1;
1113 }
1114 diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
1115 index b2b7fdb06fc6..1330ad224931 100644
1116 --- a/net/sched/sch_multiq.c
1117 +++ b/net/sched/sch_multiq.c
1118 @@ -339,7 +339,7 @@ static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
1119
1120 cl_q = q->queues[cl - 1];
1121 if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
1122 - d, NULL, &cl_q->bstats) < 0 ||
1123 + d, cl_q->cpu_bstats, &cl_q->bstats) < 0 ||
1124 qdisc_qstats_copy(d, cl_q) < 0)
1125 return -1;
1126
1127 diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
1128 index 0f8fedb8809a..18b884cfdfe8 100644
1129 --- a/net/sched/sch_prio.c
1130 +++ b/net/sched/sch_prio.c
1131 @@ -356,7 +356,7 @@ static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
1132
1133 cl_q = q->queues[cl - 1];
1134 if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
1135 - d, NULL, &cl_q->bstats) < 0 ||
1136 + d, cl_q->cpu_bstats, &cl_q->bstats) < 0 ||
1137 qdisc_qstats_copy(d, cl_q) < 0)
1138 return -1;
1139
1140 diff --git a/net/sctp/associola.c b/net/sctp/associola.c
1141 index d2ffc9a0ba3a..41839b85c268 100644
1142 --- a/net/sctp/associola.c
1143 +++ b/net/sctp/associola.c
1144 @@ -64,6 +64,7 @@ static struct sctp_association *sctp_association_init(
1145 /* Discarding const is appropriate here. */
1146 asoc->ep = (struct sctp_endpoint *)ep;
1147 asoc->base.sk = (struct sock *)sk;
1148 + asoc->base.net = sock_net(sk);
1149
1150 sctp_endpoint_hold(asoc->ep);
1151 sock_hold(asoc->base.sk);
1152 diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
1153 index ea53049d1db6..3067deb0fbec 100644
1154 --- a/net/sctp/endpointola.c
1155 +++ b/net/sctp/endpointola.c
1156 @@ -110,6 +110,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
1157
1158 /* Remember who we are attached to. */
1159 ep->base.sk = sk;
1160 + ep->base.net = sock_net(sk);
1161 sock_hold(ep->base.sk);
1162
1163 return ep;
1164 diff --git a/net/sctp/input.c b/net/sctp/input.c
1165 index 2277981559d0..4d2bcfc9d7f8 100644
1166 --- a/net/sctp/input.c
1167 +++ b/net/sctp/input.c
1168 @@ -882,7 +882,7 @@ static inline int sctp_hash_cmp(struct rhashtable_compare_arg *arg,
1169 if (!sctp_transport_hold(t))
1170 return err;
1171
1172 - if (!net_eq(sock_net(t->asoc->base.sk), x->net))
1173 + if (!net_eq(t->asoc->base.net, x->net))
1174 goto out;
1175 if (x->lport != htons(t->asoc->base.bind_addr.port))
1176 goto out;
1177 @@ -897,7 +897,7 @@ static inline __u32 sctp_hash_obj(const void *data, u32 len, u32 seed)
1178 {
1179 const struct sctp_transport *t = data;
1180
1181 - return sctp_hashfn(sock_net(t->asoc->base.sk),
1182 + return sctp_hashfn(t->asoc->base.net,
1183 htons(t->asoc->base.bind_addr.port),
1184 &t->ipaddr, seed);
1185 }
1186 diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
1187 index 0c21c52fc408..4ab8208a2dd4 100644
1188 --- a/net/sctp/sm_statefuns.c
1189 +++ b/net/sctp/sm_statefuns.c
1190 @@ -2160,8 +2160,10 @@ enum sctp_disposition sctp_sf_do_5_2_4_dupcook(
1191
1192 /* Update socket peer label if first association. */
1193 if (security_sctp_assoc_request((struct sctp_endpoint *)ep,
1194 - chunk->skb))
1195 + chunk->skb)) {
1196 + sctp_association_free(new_asoc);
1197 return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
1198 + }
1199
1200 /* Set temp so that it won't be added into hashtable */
1201 new_asoc->temp = 1;
1202 diff --git a/net/socket.c b/net/socket.c
1203 index 6a9ab7a8b1d2..d7a106028f0e 100644
1204 --- a/net/socket.c
1205 +++ b/net/socket.c
1206 @@ -2232,15 +2232,10 @@ static int copy_msghdr_from_user(struct msghdr *kmsg,
1207 return err < 0 ? err : 0;
1208 }
1209
1210 -static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg,
1211 - struct msghdr *msg_sys, unsigned int flags,
1212 - struct used_address *used_address,
1213 - unsigned int allowed_msghdr_flags)
1214 +static int ____sys_sendmsg(struct socket *sock, struct msghdr *msg_sys,
1215 + unsigned int flags, struct used_address *used_address,
1216 + unsigned int allowed_msghdr_flags)
1217 {
1218 - struct compat_msghdr __user *msg_compat =
1219 - (struct compat_msghdr __user *)msg;
1220 - struct sockaddr_storage address;
1221 - struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
1222 unsigned char ctl[sizeof(struct cmsghdr) + 20]
1223 __aligned(sizeof(__kernel_size_t));
1224 /* 20 is size of ipv6_pktinfo */
1225 @@ -2248,19 +2243,10 @@ static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg,
1226 int ctl_len;
1227 ssize_t err;
1228
1229 - msg_sys->msg_name = &address;
1230 -
1231 - if (MSG_CMSG_COMPAT & flags)
1232 - err = get_compat_msghdr(msg_sys, msg_compat, NULL, &iov);
1233 - else
1234 - err = copy_msghdr_from_user(msg_sys, msg, NULL, &iov);
1235 - if (err < 0)
1236 - return err;
1237 -
1238 err = -ENOBUFS;
1239
1240 if (msg_sys->msg_controllen > INT_MAX)
1241 - goto out_freeiov;
1242 + goto out;
1243 flags |= (msg_sys->msg_flags & allowed_msghdr_flags);
1244 ctl_len = msg_sys->msg_controllen;
1245 if ((MSG_CMSG_COMPAT & flags) && ctl_len) {
1246 @@ -2268,7 +2254,7 @@ static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg,
1247 cmsghdr_from_user_compat_to_kern(msg_sys, sock->sk, ctl,
1248 sizeof(ctl));
1249 if (err)
1250 - goto out_freeiov;
1251 + goto out;
1252 ctl_buf = msg_sys->msg_control;
1253 ctl_len = msg_sys->msg_controllen;
1254 } else if (ctl_len) {
1255 @@ -2277,7 +2263,7 @@ static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg,
1256 if (ctl_len > sizeof(ctl)) {
1257 ctl_buf = sock_kmalloc(sock->sk, ctl_len, GFP_KERNEL);
1258 if (ctl_buf == NULL)
1259 - goto out_freeiov;
1260 + goto out;
1261 }
1262 err = -EFAULT;
1263 /*
1264 @@ -2323,7 +2309,47 @@ static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg,
1265 out_freectl:
1266 if (ctl_buf != ctl)
1267 sock_kfree_s(sock->sk, ctl_buf, ctl_len);
1268 -out_freeiov:
1269 +out:
1270 + return err;
1271 +}
1272 +
1273 +static int sendmsg_copy_msghdr(struct msghdr *msg,
1274 + struct user_msghdr __user *umsg, unsigned flags,
1275 + struct iovec **iov)
1276 +{
1277 + int err;
1278 +
1279 + if (flags & MSG_CMSG_COMPAT) {
1280 + struct compat_msghdr __user *msg_compat;
1281 +
1282 + msg_compat = (struct compat_msghdr __user *) umsg;
1283 + err = get_compat_msghdr(msg, msg_compat, NULL, iov);
1284 + } else {
1285 + err = copy_msghdr_from_user(msg, umsg, NULL, iov);
1286 + }
1287 + if (err < 0)
1288 + return err;
1289 +
1290 + return 0;
1291 +}
1292 +
1293 +static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg,
1294 + struct msghdr *msg_sys, unsigned int flags,
1295 + struct used_address *used_address,
1296 + unsigned int allowed_msghdr_flags)
1297 +{
1298 + struct sockaddr_storage address;
1299 + struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
1300 + ssize_t err;
1301 +
1302 + msg_sys->msg_name = &address;
1303 +
1304 + err = sendmsg_copy_msghdr(msg_sys, msg, flags, &iov);
1305 + if (err < 0)
1306 + return err;
1307 +
1308 + err = ____sys_sendmsg(sock, msg_sys, flags, used_address,
1309 + allowed_msghdr_flags);
1310 kfree(iov);
1311 return err;
1312 }
1313 @@ -2331,12 +2357,27 @@ out_freeiov:
1314 /*
1315 * BSD sendmsg interface
1316 */
1317 -long __sys_sendmsg_sock(struct socket *sock, struct user_msghdr __user *msg,
1318 +long __sys_sendmsg_sock(struct socket *sock, struct user_msghdr __user *umsg,
1319 unsigned int flags)
1320 {
1321 - struct msghdr msg_sys;
1322 + struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
1323 + struct sockaddr_storage address;
1324 + struct msghdr msg = { .msg_name = &address };
1325 + ssize_t err;
1326 +
1327 + err = sendmsg_copy_msghdr(&msg, umsg, flags, &iov);
1328 + if (err)
1329 + return err;
1330 + /* disallow ancillary data requests from this path */
1331 + if (msg.msg_control || msg.msg_controllen) {
1332 + err = -EINVAL;
1333 + goto out;
1334 + }
1335
1336 - return ___sys_sendmsg(sock, msg, &msg_sys, flags, NULL, 0);
1337 + err = ____sys_sendmsg(sock, &msg, flags, NULL, 0);
1338 +out:
1339 + kfree(iov);
1340 + return err;
1341 }
1342
1343 long __sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned int flags,
1344 @@ -2442,33 +2483,41 @@ SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg,
1345 return __sys_sendmmsg(fd, mmsg, vlen, flags, true);
1346 }
1347
1348 -static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg,
1349 - struct msghdr *msg_sys, unsigned int flags, int nosec)
1350 +static int recvmsg_copy_msghdr(struct msghdr *msg,
1351 + struct user_msghdr __user *umsg, unsigned flags,
1352 + struct sockaddr __user **uaddr,
1353 + struct iovec **iov)
1354 {
1355 - struct compat_msghdr __user *msg_compat =
1356 - (struct compat_msghdr __user *)msg;
1357 - struct iovec iovstack[UIO_FASTIOV];
1358 - struct iovec *iov = iovstack;
1359 - unsigned long cmsg_ptr;
1360 - int len;
1361 ssize_t err;
1362
1363 - /* kernel mode address */
1364 - struct sockaddr_storage addr;
1365 + if (MSG_CMSG_COMPAT & flags) {
1366 + struct compat_msghdr __user *msg_compat;
1367
1368 - /* user mode address pointers */
1369 - struct sockaddr __user *uaddr;
1370 - int __user *uaddr_len = COMPAT_NAMELEN(msg);
1371 -
1372 - msg_sys->msg_name = &addr;
1373 -
1374 - if (MSG_CMSG_COMPAT & flags)
1375 - err = get_compat_msghdr(msg_sys, msg_compat, &uaddr, &iov);
1376 - else
1377 - err = copy_msghdr_from_user(msg_sys, msg, &uaddr, &iov);
1378 + msg_compat = (struct compat_msghdr __user *) umsg;
1379 + err = get_compat_msghdr(msg, msg_compat, uaddr, iov);
1380 + } else {
1381 + err = copy_msghdr_from_user(msg, umsg, uaddr, iov);
1382 + }
1383 if (err < 0)
1384 return err;
1385
1386 + return 0;
1387 +}
1388 +
1389 +static int ____sys_recvmsg(struct socket *sock, struct msghdr *msg_sys,
1390 + struct user_msghdr __user *msg,
1391 + struct sockaddr __user *uaddr,
1392 + unsigned int flags, int nosec)
1393 +{
1394 + struct compat_msghdr __user *msg_compat =
1395 + (struct compat_msghdr __user *) msg;
1396 + int __user *uaddr_len = COMPAT_NAMELEN(msg);
1397 + struct sockaddr_storage addr;
1398 + unsigned long cmsg_ptr;
1399 + int len;
1400 + ssize_t err;
1401 +
1402 + msg_sys->msg_name = &addr;
1403 cmsg_ptr = (unsigned long)msg_sys->msg_control;
1404 msg_sys->msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
1405
1406 @@ -2479,7 +2528,7 @@ static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg,
1407 flags |= MSG_DONTWAIT;
1408 err = (nosec ? sock_recvmsg_nosec : sock_recvmsg)(sock, msg_sys, flags);
1409 if (err < 0)
1410 - goto out_freeiov;
1411 + goto out;
1412 len = err;
1413
1414 if (uaddr != NULL) {
1415 @@ -2487,12 +2536,12 @@ static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg,
1416 msg_sys->msg_namelen, uaddr,
1417 uaddr_len);
1418 if (err < 0)
1419 - goto out_freeiov;
1420 + goto out;
1421 }
1422 err = __put_user((msg_sys->msg_flags & ~MSG_CMSG_COMPAT),
1423 COMPAT_FLAGS(msg));
1424 if (err)
1425 - goto out_freeiov;
1426 + goto out;
1427 if (MSG_CMSG_COMPAT & flags)
1428 err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr,
1429 &msg_compat->msg_controllen);
1430 @@ -2500,10 +2549,25 @@ static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg,
1431 err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr,
1432 &msg->msg_controllen);
1433 if (err)
1434 - goto out_freeiov;
1435 + goto out;
1436 err = len;
1437 +out:
1438 + return err;
1439 +}
1440 +
1441 +static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg,
1442 + struct msghdr *msg_sys, unsigned int flags, int nosec)
1443 +{
1444 + struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
1445 + /* user mode address pointers */
1446 + struct sockaddr __user *uaddr;
1447 + ssize_t err;
1448 +
1449 + err = recvmsg_copy_msghdr(msg_sys, msg, flags, &uaddr, &iov);
1450 + if (err < 0)
1451 + return err;
1452
1453 -out_freeiov:
1454 + err = ____sys_recvmsg(sock, msg_sys, msg, uaddr, flags, nosec);
1455 kfree(iov);
1456 return err;
1457 }
1458 @@ -2512,12 +2576,28 @@ out_freeiov:
1459 * BSD recvmsg interface
1460 */
1461
1462 -long __sys_recvmsg_sock(struct socket *sock, struct user_msghdr __user *msg,
1463 +long __sys_recvmsg_sock(struct socket *sock, struct user_msghdr __user *umsg,
1464 unsigned int flags)
1465 {
1466 - struct msghdr msg_sys;
1467 + struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
1468 + struct sockaddr_storage address;
1469 + struct msghdr msg = { .msg_name = &address };
1470 + struct sockaddr __user *uaddr;
1471 + ssize_t err;
1472 +
1473 + err = recvmsg_copy_msghdr(&msg, umsg, flags, &uaddr, &iov);
1474 + if (err)
1475 + return err;
1476 + /* disallow ancillary data requests from this path */
1477 + if (msg.msg_control || msg.msg_controllen) {
1478 + err = -EINVAL;
1479 + goto out;
1480 + }
1481
1482 - return ___sys_recvmsg(sock, msg, &msg_sys, flags, 0);
1483 + err = ____sys_recvmsg(sock, &msg, umsg, uaddr, flags, 0);
1484 +out:
1485 + kfree(iov);
1486 + return err;
1487 }
1488
1489 long __sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned int flags,
1490 diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
1491 index e135d4e11231..d4d2928424e2 100644
1492 --- a/net/tipc/netlink_compat.c
1493 +++ b/net/tipc/netlink_compat.c
1494 @@ -550,7 +550,7 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
1495 if (len <= 0)
1496 return -EINVAL;
1497
1498 - len = min_t(int, len, TIPC_MAX_BEARER_NAME);
1499 + len = min_t(int, len, TIPC_MAX_LINK_NAME);
1500 if (!string_is_valid(name, len))
1501 return -EINVAL;
1502
1503 @@ -822,7 +822,7 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
1504 if (len <= 0)
1505 return -EINVAL;
1506
1507 - len = min_t(int, len, TIPC_MAX_BEARER_NAME);
1508 + len = min_t(int, len, TIPC_MAX_LINK_NAME);
1509 if (!string_is_valid(name, len))
1510 return -EINVAL;
1511
1512 diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
1513 index f874cc0da45d..eff444293594 100644
1514 --- a/net/tls/tls_main.c
1515 +++ b/net/tls/tls_main.c
1516 @@ -209,24 +209,15 @@ int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
1517 return tls_push_sg(sk, ctx, sg, offset, flags);
1518 }
1519
1520 -bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx)
1521 +void tls_free_partial_record(struct sock *sk, struct tls_context *ctx)
1522 {
1523 struct scatterlist *sg;
1524
1525 - sg = ctx->partially_sent_record;
1526 - if (!sg)
1527 - return false;
1528 -
1529 - while (1) {
1530 + for (sg = ctx->partially_sent_record; sg; sg = sg_next(sg)) {
1531 put_page(sg_page(sg));
1532 sk_mem_uncharge(sk, sg->length);
1533 -
1534 - if (sg_is_last(sg))
1535 - break;
1536 - sg++;
1537 }
1538 ctx->partially_sent_record = NULL;
1539 - return true;
1540 }
1541
1542 static void tls_write_space(struct sock *sk)
1543 diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
1544 index 319735d5c084..5dd0f01913c0 100644
1545 --- a/net/tls/tls_sw.c
1546 +++ b/net/tls/tls_sw.c
1547 @@ -705,8 +705,7 @@ static int tls_push_record(struct sock *sk, int flags,
1548 }
1549
1550 i = msg_pl->sg.start;
1551 - sg_chain(rec->sg_aead_in, 2, rec->inplace_crypto ?
1552 - &msg_en->sg.data[i] : &msg_pl->sg.data[i]);
1553 + sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);
1554
1555 i = msg_en->sg.end;
1556 sk_msg_iter_var_prev(i);
1557 @@ -766,8 +765,14 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
1558
1559 policy = !(flags & MSG_SENDPAGE_NOPOLICY);
1560 psock = sk_psock_get(sk);
1561 - if (!psock || !policy)
1562 - return tls_push_record(sk, flags, record_type);
1563 + if (!psock || !policy) {
1564 + err = tls_push_record(sk, flags, record_type);
1565 + if (err) {
1566 + *copied -= sk_msg_free(sk, msg);
1567 + tls_free_open_rec(sk);
1568 + }
1569 + return err;
1570 + }
1571 more_data:
1572 enospc = sk_msg_full(msg);
1573 if (psock->eval == __SK_NONE) {
1574 @@ -965,8 +970,6 @@ alloc_encrypted:
1575 if (ret)
1576 goto fallback_to_reg_send;
1577
1578 - rec->inplace_crypto = 0;
1579 -
1580 num_zc++;
1581 copied += try_to_copy;
1582
1583 @@ -979,7 +982,7 @@ alloc_encrypted:
1584 num_async++;
1585 else if (ret == -ENOMEM)
1586 goto wait_for_memory;
1587 - else if (ret == -ENOSPC)
1588 + else if (ctx->open_rec && ret == -ENOSPC)
1589 goto rollback_iter;
1590 else if (ret != -EAGAIN)
1591 goto send_end;
1592 @@ -1048,11 +1051,12 @@ wait_for_memory:
1593 ret = sk_stream_wait_memory(sk, &timeo);
1594 if (ret) {
1595 trim_sgl:
1596 - tls_trim_both_msgs(sk, orig_size);
1597 + if (ctx->open_rec)
1598 + tls_trim_both_msgs(sk, orig_size);
1599 goto send_end;
1600 }
1601
1602 - if (msg_en->sg.size < required_size)
1603 + if (ctx->open_rec && msg_en->sg.size < required_size)
1604 goto alloc_encrypted;
1605 }
1606
1607 @@ -1164,7 +1168,6 @@ alloc_payload:
1608
1609 tls_ctx->pending_open_record_frags = true;
1610 if (full_record || eor || sk_msg_full(msg_pl)) {
1611 - rec->inplace_crypto = 0;
1612 ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1613 record_type, &copied, flags);
1614 if (ret) {
1615 @@ -1185,11 +1188,13 @@ wait_for_sndbuf:
1616 wait_for_memory:
1617 ret = sk_stream_wait_memory(sk, &timeo);
1618 if (ret) {
1619 - tls_trim_both_msgs(sk, msg_pl->sg.size);
1620 + if (ctx->open_rec)
1621 + tls_trim_both_msgs(sk, msg_pl->sg.size);
1622 goto sendpage_end;
1623 }
1624
1625 - goto alloc_payload;
1626 + if (ctx->open_rec)
1627 + goto alloc_payload;
1628 }
1629
1630 if (num_async) {
1631 @@ -2079,7 +2084,8 @@ void tls_sw_release_resources_tx(struct sock *sk)
1632 /* Free up un-sent records in tx_list. First, free
1633 * the partially sent record if any at head of tx_list.
1634 */
1635 - if (tls_free_partial_record(sk, tls_ctx)) {
1636 + if (tls_ctx->partially_sent_record) {
1637 + tls_free_partial_record(sk, tls_ctx);
1638 rec = list_first_entry(&ctx->tx_list,
1639 struct tls_rec, list);
1640 list_del(&rec->list);
1641 diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c
1642 index 3845144e2c91..4a851513c842 100644
1643 --- a/tools/testing/selftests/bpf/test_sockmap.c
1644 +++ b/tools/testing/selftests/bpf/test_sockmap.c
1645 @@ -240,14 +240,14 @@ static int sockmap_init_sockets(int verbose)
1646 addr.sin_port = htons(S1_PORT);
1647 err = bind(s1, (struct sockaddr *)&addr, sizeof(addr));
1648 if (err < 0) {
1649 - perror("bind s1 failed()\n");
1650 + perror("bind s1 failed()");
1651 return errno;
1652 }
1653
1654 addr.sin_port = htons(S2_PORT);
1655 err = bind(s2, (struct sockaddr *)&addr, sizeof(addr));
1656 if (err < 0) {
1657 - perror("bind s2 failed()\n");
1658 + perror("bind s2 failed()");
1659 return errno;
1660 }
1661
1662 @@ -255,14 +255,14 @@ static int sockmap_init_sockets(int verbose)
1663 addr.sin_port = htons(S1_PORT);
1664 err = listen(s1, 32);
1665 if (err < 0) {
1666 - perror("listen s1 failed()\n");
1667 + perror("listen s1 failed()");
1668 return errno;
1669 }
1670
1671 addr.sin_port = htons(S2_PORT);
1672 err = listen(s2, 32);
1673 if (err < 0) {
1674 - perror("listen s1 failed()\n");
1675 + perror("listen s1 failed()");
1676 return errno;
1677 }
1678
1679 @@ -270,14 +270,14 @@ static int sockmap_init_sockets(int verbose)
1680 addr.sin_port = htons(S1_PORT);
1681 err = connect(c1, (struct sockaddr *)&addr, sizeof(addr));
1682 if (err < 0 && errno != EINPROGRESS) {
1683 - perror("connect c1 failed()\n");
1684 + perror("connect c1 failed()");
1685 return errno;
1686 }
1687
1688 addr.sin_port = htons(S2_PORT);
1689 err = connect(c2, (struct sockaddr *)&addr, sizeof(addr));
1690 if (err < 0 && errno != EINPROGRESS) {
1691 - perror("connect c2 failed()\n");
1692 + perror("connect c2 failed()");
1693 return errno;
1694 } else if (err < 0) {
1695 err = 0;
1696 @@ -286,13 +286,13 @@ static int sockmap_init_sockets(int verbose)
1697 /* Accept Connecrtions */
1698 p1 = accept(s1, NULL, NULL);
1699 if (p1 < 0) {
1700 - perror("accept s1 failed()\n");
1701 + perror("accept s1 failed()");
1702 return errno;
1703 }
1704
1705 p2 = accept(s2, NULL, NULL);
1706 if (p2 < 0) {
1707 - perror("accept s1 failed()\n");
1708 + perror("accept s1 failed()");
1709 return errno;
1710 }
1711
1712 @@ -332,6 +332,10 @@ static int msg_loop_sendpage(int fd, int iov_length, int cnt,
1713 int i, fp;
1714
1715 file = fopen(".sendpage_tst.tmp", "w+");
1716 + if (!file) {
1717 + perror("create file for sendpage");
1718 + return 1;
1719 + }
1720 for (i = 0; i < iov_length * cnt; i++, k++)
1721 fwrite(&k, sizeof(char), 1, file);
1722 fflush(file);
1723 @@ -339,12 +343,17 @@ static int msg_loop_sendpage(int fd, int iov_length, int cnt,
1724 fclose(file);
1725
1726 fp = open(".sendpage_tst.tmp", O_RDONLY);
1727 + if (fp < 0) {
1728 + perror("reopen file for sendpage");
1729 + return 1;
1730 + }
1731 +
1732 clock_gettime(CLOCK_MONOTONIC, &s->start);
1733 for (i = 0; i < cnt; i++) {
1734 int sent = sendfile(fd, fp, NULL, iov_length);
1735
1736 if (!drop && sent < 0) {
1737 - perror("send loop error:");
1738 + perror("send loop error");
1739 close(fp);
1740 return sent;
1741 } else if (drop && sent >= 0) {
1742 @@ -463,7 +472,7 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
1743 int sent = sendmsg(fd, &msg, flags);
1744
1745 if (!drop && sent < 0) {
1746 - perror("send loop error:");
1747 + perror("send loop error");
1748 goto out_errno;
1749 } else if (drop && sent >= 0) {
1750 printf("send loop error expected: %i\n", sent);
1751 @@ -499,7 +508,7 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
1752 total_bytes -= txmsg_pop_total;
1753 err = clock_gettime(CLOCK_MONOTONIC, &s->start);
1754 if (err < 0)
1755 - perror("recv start time: ");
1756 + perror("recv start time");
1757 while (s->bytes_recvd < total_bytes) {
1758 if (txmsg_cork) {
1759 timeout.tv_sec = 0;
1760 @@ -543,7 +552,7 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
1761 if (recv < 0) {
1762 if (errno != EWOULDBLOCK) {
1763 clock_gettime(CLOCK_MONOTONIC, &s->end);
1764 - perror("recv failed()\n");
1765 + perror("recv failed()");
1766 goto out_errno;
1767 }
1768 }
1769 @@ -557,7 +566,7 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
1770
1771 errno = msg_verify_data(&msg, recv, chunk_sz);
1772 if (errno) {
1773 - perror("data verify msg failed\n");
1774 + perror("data verify msg failed");
1775 goto out_errno;
1776 }
1777 if (recvp) {
1778 @@ -565,7 +574,7 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
1779 recvp,
1780 chunk_sz);
1781 if (errno) {
1782 - perror("data verify msg_peek failed\n");
1783 + perror("data verify msg_peek failed");
1784 goto out_errno;
1785 }
1786 }
1787 @@ -654,7 +663,7 @@ static int sendmsg_test(struct sockmap_options *opt)
1788 err = 0;
1789 exit(err ? 1 : 0);
1790 } else if (rxpid == -1) {
1791 - perror("msg_loop_rx: ");
1792 + perror("msg_loop_rx");
1793 return errno;
1794 }
1795
1796 @@ -681,7 +690,7 @@ static int sendmsg_test(struct sockmap_options *opt)
1797 s.bytes_recvd, recvd_Bps, recvd_Bps/giga);
1798 exit(err ? 1 : 0);
1799 } else if (txpid == -1) {
1800 - perror("msg_loop_tx: ");
1801 + perror("msg_loop_tx");
1802 return errno;
1803 }
1804
1805 @@ -715,7 +724,7 @@ static int forever_ping_pong(int rate, struct sockmap_options *opt)
1806 /* Ping/Pong data from client to server */
1807 sc = send(c1, buf, sizeof(buf), 0);
1808 if (sc < 0) {
1809 - perror("send failed()\n");
1810 + perror("send failed()");
1811 return sc;
1812 }
1813
1814 @@ -748,7 +757,7 @@ static int forever_ping_pong(int rate, struct sockmap_options *opt)
1815 rc = recv(i, buf, sizeof(buf), 0);
1816 if (rc < 0) {
1817 if (errno != EWOULDBLOCK) {
1818 - perror("recv failed()\n");
1819 + perror("recv failed()");
1820 return rc;
1821 }
1822 }
1823 @@ -760,7 +769,7 @@ static int forever_ping_pong(int rate, struct sockmap_options *opt)
1824
1825 sc = send(i, buf, rc, 0);
1826 if (sc < 0) {
1827 - perror("send failed()\n");
1828 + perror("send failed()");
1829 return sc;
1830 }
1831 }
1832 diff --git a/tools/testing/selftests/bpf/xdping.c b/tools/testing/selftests/bpf/xdping.c
1833 index d60a343b1371..842d9155d36c 100644
1834 --- a/tools/testing/selftests/bpf/xdping.c
1835 +++ b/tools/testing/selftests/bpf/xdping.c
1836 @@ -45,7 +45,7 @@ static int get_stats(int fd, __u16 count, __u32 raddr)
1837 printf("\nXDP RTT data:\n");
1838
1839 if (bpf_map_lookup_elem(fd, &raddr, &pinginfo)) {
1840 - perror("bpf_map_lookup elem: ");
1841 + perror("bpf_map_lookup elem");
1842 return 1;
1843 }
1844
1845 diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
1846 index ab367e75f095..d697815d2785 100755
1847 --- a/tools/testing/selftests/net/pmtu.sh
1848 +++ b/tools/testing/selftests/net/pmtu.sh
1849 @@ -1249,8 +1249,7 @@ test_list_flush_ipv4_exception() {
1850 done
1851 run_cmd ${ns_a} ping -q -M want -i 0.1 -c 2 -s 1800 "${dst2}"
1852
1853 - # Each exception is printed as two lines
1854 - if [ "$(${ns_a} ip route list cache | wc -l)" -ne 202 ]; then
1855 + if [ "$(${ns_a} ip -oneline route list cache | wc -l)" -ne 101 ]; then
1856 err " can't list cached exceptions"
1857 fail=1
1858 fi
1859 @@ -1300,7 +1299,7 @@ test_list_flush_ipv6_exception() {
1860 run_cmd ${ns_a} ping -q -M want -i 0.1 -w 1 -s 1800 "${dst_prefix1}${i}"
1861 done
1862 run_cmd ${ns_a} ping -q -M want -i 0.1 -w 1 -s 1800 "${dst2}"
1863 - if [ "$(${ns_a} ip -6 route list cache | wc -l)" -ne 101 ]; then
1864 + if [ "$(${ns_a} ip -oneline -6 route list cache | wc -l)" -ne 101 ]; then
1865 err " can't list cached exceptions"
1866 fail=1
1867 fi
1868 diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
1869 index 1c8f194d6556..46abcae47dee 100644
1870 --- a/tools/testing/selftests/net/tls.c
1871 +++ b/tools/testing/selftests/net/tls.c
1872 @@ -268,6 +268,38 @@ TEST_F(tls, sendmsg_single)
1873 EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
1874 }
1875
1876 +#define MAX_FRAGS 64
1877 +#define SEND_LEN 13
1878 +TEST_F(tls, sendmsg_fragmented)
1879 +{
1880 + char const *test_str = "test_sendmsg";
1881 + char buf[SEND_LEN * MAX_FRAGS];
1882 + struct iovec vec[MAX_FRAGS];
1883 + struct msghdr msg;
1884 + int i, frags;
1885 +
1886 + for (frags = 1; frags <= MAX_FRAGS; frags++) {
1887 + for (i = 0; i < frags; i++) {
1888 + vec[i].iov_base = (char *)test_str;
1889 + vec[i].iov_len = SEND_LEN;
1890 + }
1891 +
1892 + memset(&msg, 0, sizeof(struct msghdr));
1893 + msg.msg_iov = vec;
1894 + msg.msg_iovlen = frags;
1895 +
1896 + EXPECT_EQ(sendmsg(self->fd, &msg, 0), SEND_LEN * frags);
1897 + EXPECT_EQ(recv(self->cfd, buf, SEND_LEN * frags, MSG_WAITALL),
1898 + SEND_LEN * frags);
1899 +
1900 + for (i = 0; i < frags; i++)
1901 + EXPECT_EQ(memcmp(buf + SEND_LEN * i,
1902 + test_str, SEND_LEN), 0);
1903 + }
1904 +}
1905 +#undef MAX_FRAGS
1906 +#undef SEND_LEN
1907 +
1908 TEST_F(tls, sendmsg_large)
1909 {
1910 void *mem = malloc(16384);
1911 @@ -694,6 +726,34 @@ TEST_F(tls, recv_lowat)
1912 EXPECT_EQ(memcmp(send_mem, recv_mem + 10, 5), 0);
1913 }
1914
1915 +TEST_F(tls, recv_rcvbuf)
1916 +{
1917 + char send_mem[4096];
1918 + char recv_mem[4096];
1919 + int rcv_buf = 1024;
1920 +
1921 + memset(send_mem, 0x1c, sizeof(send_mem));
1922 +
1923 + EXPECT_EQ(setsockopt(self->cfd, SOL_SOCKET, SO_RCVBUF,
1924 + &rcv_buf, sizeof(rcv_buf)), 0);
1925 +
1926 + EXPECT_EQ(send(self->fd, send_mem, 512, 0), 512);
1927 + memset(recv_mem, 0, sizeof(recv_mem));
1928 + EXPECT_EQ(recv(self->cfd, recv_mem, sizeof(recv_mem), 0), 512);
1929 + EXPECT_EQ(memcmp(send_mem, recv_mem, 512), 0);
1930 +
1931 + if (self->notls)
1932 + return;
1933 +
1934 + EXPECT_EQ(send(self->fd, send_mem, 4096, 0), 4096);
1935 + memset(recv_mem, 0, sizeof(recv_mem));
1936 + EXPECT_EQ(recv(self->cfd, recv_mem, sizeof(recv_mem), 0), -1);
1937 + EXPECT_EQ(errno, EMSGSIZE);
1938 +
1939 + EXPECT_EQ(recv(self->cfd, recv_mem, sizeof(recv_mem), 0), -1);
1940 + EXPECT_EQ(errno, EMSGSIZE);
1941 +}
1942 +
1943 TEST_F(tls, bidir)
1944 {
1945 char const *test_str = "test_read";