Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0181-5.4.82-all-fixes.patch



Revision 3637
Mon Oct 24 12:40:44 2022 UTC by niro
File size: 41406 byte(s)
-add missing
diff --git a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
index 27e1b4cebfbd4..9cb3560756d00 100644
--- a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
+++ b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
@@ -33,7 +33,7 @@ tcan4x5x: tcan4x5x@0 {
 	spi-max-frequency = <10000000>;
 	bosch,mram-cfg = <0x0 0 0 32 0 0 1 1>;
 	interrupt-parent = <&gpio1>;
-	interrupts = <14 GPIO_ACTIVE_LOW>;
+	interrupts = <14 IRQ_TYPE_LEVEL_LOW>;
 	device-state-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
 	device-wake-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
 	reset-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
diff --git a/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt b/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt
index cfaf889989187..9e4dc510a40aa 100644
--- a/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt
+++ b/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt
@@ -25,7 +25,7 @@ Example (for ARM-based BeagleBone with NPC100 NFC controller on I2C2):
 	clock-frequency = <100000>;
 
 	interrupt-parent = <&gpio1>;
-	interrupts = <29 GPIO_ACTIVE_HIGH>;
+	interrupts = <29 IRQ_TYPE_LEVEL_HIGH>;
 
 	enable-gpios = <&gpio0 30 GPIO_ACTIVE_HIGH>;
 	firmware-gpios = <&gpio0 31 GPIO_ACTIVE_HIGH>;
diff --git a/Documentation/devicetree/bindings/net/nfc/pn544.txt b/Documentation/devicetree/bindings/net/nfc/pn544.txt
index 92f399ec22b87..2bd82562ce8e9 100644
--- a/Documentation/devicetree/bindings/net/nfc/pn544.txt
+++ b/Documentation/devicetree/bindings/net/nfc/pn544.txt
@@ -25,7 +25,7 @@ Example (for ARM-based BeagleBone with PN544 on I2C2):
 	clock-frequency = <400000>;
 
 	interrupt-parent = <&gpio1>;
-	interrupts = <17 GPIO_ACTIVE_HIGH>;
+	interrupts = <17 IRQ_TYPE_LEVEL_HIGH>;
 
 	enable-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
 	firmware-gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>;
diff --git a/Makefile b/Makefile
index 5bbb7607fa55f..e520dee34490a 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 81
+SUBLEVEL = 82
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus
 
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index 385bd4dc66867..f81a5e35d8fd1 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -1077,6 +1077,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
 	sk_setup_caps(newsk, dst);
 	ctx = tls_get_ctx(lsk);
 	newsk->sk_destruct = ctx->sk_destruct;
+	newsk->sk_prot_creator = lsk->sk_prot_creator;
 	csk->sk = newsk;
 	csk->passive_reap_next = oreq;
 	csk->tx_chan = cxgb4_port_chan(ndev);
diff --git a/drivers/crypto/chelsio/chtls/chtls_hw.c b/drivers/crypto/chelsio/chtls/chtls_hw.c
index 3ef723e089537..753f4ba38f83c 100644
--- a/drivers/crypto/chelsio/chtls/chtls_hw.c
+++ b/drivers/crypto/chelsio/chtls/chtls_hw.c
@@ -365,6 +365,7 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname)
 	csk->wr_unacked += DIV_ROUND_UP(len, 16);
 	enqueue_wr(csk, skb);
 	cxgb4_ofld_send(csk->egress_dev, skb);
+	skb = NULL;
 
 	chtls_set_scmd(csk);
 	/* Clear quiesce for Rx key */
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index 238614370927a..f1b0290da92d0 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -54,10 +54,6 @@
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
 	__stringify(DRV_VERSION_MINOR) "." __stringify(DRV_VERSION_BUILD)
 
-static int push_mode;
-module_param(push_mode, int, 0644);
-MODULE_PARM_DESC(push_mode, "Low latency mode: 0=disabled (default), 1=enabled)");
-
 static int debug;
 module_param(debug, int, 0644);
 MODULE_PARM_DESC(debug, "debug flags: 0=disabled (default), 0x7fffffff=all");
@@ -1588,7 +1584,6 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
 	if (status)
 		goto exit;
 	iwdev->obj_next = iwdev->obj_mem;
-	iwdev->push_mode = push_mode;
 
 	init_waitqueue_head(&iwdev->vchnl_waitq);
 	init_waitqueue_head(&dev->vf_reqs);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 22bf4f09c0647..7e9c1a40f0409 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -168,38 +168,16 @@ static void i40iw_dealloc_ucontext(struct ib_ucontext *context)
  */
 static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 {
-	struct i40iw_ucontext *ucontext;
-	u64 db_addr_offset;
-	u64 push_offset;
-
-	ucontext = to_ucontext(context);
-	if (ucontext->iwdev->sc_dev.is_pf) {
-		db_addr_offset = I40IW_DB_ADDR_OFFSET;
-		push_offset = I40IW_PUSH_OFFSET;
-		if (vma->vm_pgoff)
-			vma->vm_pgoff += I40IW_PF_FIRST_PUSH_PAGE_INDEX - 1;
-	} else {
-		db_addr_offset = I40IW_VF_DB_ADDR_OFFSET;
-		push_offset = I40IW_VF_PUSH_OFFSET;
-		if (vma->vm_pgoff)
-			vma->vm_pgoff += I40IW_VF_FIRST_PUSH_PAGE_INDEX - 1;
-	}
+	struct i40iw_ucontext *ucontext = to_ucontext(context);
+	u64 dbaddr;
 
-	vma->vm_pgoff += db_addr_offset >> PAGE_SHIFT;
+	if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE)
+		return -EINVAL;
 
-	if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {
-		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-		vma->vm_private_data = ucontext;
-	} else {
-		if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)
-			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-		else
-			vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-	}
+	dbaddr = I40IW_DB_ADDR_OFFSET + pci_resource_start(ucontext->iwdev->ldev->pcidev, 0);
 
-	if (io_remap_pfn_range(vma, vma->vm_start,
-			       vma->vm_pgoff + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> PAGE_SHIFT),
-			       PAGE_SIZE, vma->vm_page_prot))
+	if (io_remap_pfn_range(vma, vma->vm_start, dbaddr >> PAGE_SHIFT, PAGE_SIZE,
+			       pgprot_noncached(vma->vm_page_prot)))
 		return -EAGAIN;
 
 	return 0;
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index c77cdb3b62b5b..8c73377ac82ca 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -241,6 +241,7 @@ static const struct xpad_device {
 	{ 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
 	{ 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
 	{ 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
+	{ 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 },
 	{ 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
 	{ 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 },
 	{ 0x12ab, 0x0303, "Mortal Kombat Klassic FightStick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
@@ -418,6 +419,7 @@ static const struct usb_device_id xpad_table[] = {
 	XPAD_XBOXONE_VENDOR(0x0f0d),	/* Hori Controllers */
 	XPAD_XBOX360_VENDOR(0x1038),	/* SteelSeries Controllers */
 	XPAD_XBOX360_VENDOR(0x11c9),	/* Nacon GC100XF */
+	XPAD_XBOX360_VENDOR(0x1209),	/* Ardwiino Controllers */
 	XPAD_XBOX360_VENDOR(0x12ab),	/* X-Box 360 dance pads */
 	XPAD_XBOX360_VENDOR(0x1430),	/* RedOctane X-Box 360 controllers */
 	XPAD_XBOX360_VENDOR(0x146b),	/* BigBen Interactive Controllers */
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 98f0c7729b754..837911a15e443 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -219,6 +219,10 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
 			DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
 			DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
 		},
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"),
+		},
 	},
 	{ }
 };
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 6862c2ef24424..2bc4cb9e30954 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1293,7 +1293,39 @@ static void bond_upper_dev_unlink(struct bonding *bond, struct slave *slave)
 	slave->dev->flags &= ~IFF_SLAVE;
 }
 
-static struct slave *bond_alloc_slave(struct bonding *bond)
+static void slave_kobj_release(struct kobject *kobj)
+{
+	struct slave *slave = to_slave(kobj);
+	struct bonding *bond = bond_get_bond_by_slave(slave);
+
+	cancel_delayed_work_sync(&slave->notify_work);
+	if (BOND_MODE(bond) == BOND_MODE_8023AD)
+		kfree(SLAVE_AD_INFO(slave));
+
+	kfree(slave);
+}
+
+static struct kobj_type slave_ktype = {
+	.release = slave_kobj_release,
+#ifdef CONFIG_SYSFS
+	.sysfs_ops = &slave_sysfs_ops,
+#endif
+};
+
+static int bond_kobj_init(struct slave *slave)
+{
+	int err;
+
+	err = kobject_init_and_add(&slave->kobj, &slave_ktype,
+				   &(slave->dev->dev.kobj), "bonding_slave");
+	if (err)
+		kobject_put(&slave->kobj);
+
+	return err;
+}
+
+static struct slave *bond_alloc_slave(struct bonding *bond,
+				      struct net_device *slave_dev)
 {
 	struct slave *slave = NULL;
 
@@ -1301,11 +1333,17 @@ static struct slave *bond_alloc_slave(struct bonding *bond)
 	if (!slave)
 		return NULL;
 
+	slave->bond = bond;
+	slave->dev = slave_dev;
+
+	if (bond_kobj_init(slave))
+		return NULL;
+
 	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
 		SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info),
 					       GFP_KERNEL);
 		if (!SLAVE_AD_INFO(slave)) {
-			kfree(slave);
+			kobject_put(&slave->kobj);
 			return NULL;
 		}
 	}
@@ -1314,17 +1352,6 @@ static struct slave *bond_alloc_slave(struct bonding *bond)
 	return slave;
 }
 
-static void bond_free_slave(struct slave *slave)
-{
-	struct bonding *bond = bond_get_bond_by_slave(slave);
-
-	cancel_delayed_work_sync(&slave->notify_work);
-	if (BOND_MODE(bond) == BOND_MODE_8023AD)
-		kfree(SLAVE_AD_INFO(slave));
-
-	kfree(slave);
-}
-
 static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info)
 {
 	info->bond_mode = BOND_MODE(bond);
@@ -1508,14 +1535,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
 		goto err_undo_flags;
 	}
 
-	new_slave = bond_alloc_slave(bond);
+	new_slave = bond_alloc_slave(bond, slave_dev);
 	if (!new_slave) {
 		res = -ENOMEM;
 		goto err_undo_flags;
 	}
 
-	new_slave->bond = bond;
-	new_slave->dev = slave_dev;
 	/* Set the new_slave's queue_id to be zero. Queue ID mapping
 	 * is set via sysfs or module option if desired.
 	 */
@@ -1837,7 +1862,7 @@ err_restore_mtu:
 	dev_set_mtu(slave_dev, new_slave->original_mtu);
 
 err_free:
-	bond_free_slave(new_slave);
+	kobject_put(&new_slave->kobj);
 
 err_undo_flags:
 	/* Enslave of first slave has failed and we need to fix master's mac */
@@ -2017,7 +2042,7 @@ static int __bond_release_one(struct net_device *bond_dev,
 	if (!netif_is_bond_master(slave_dev))
 		slave_dev->priv_flags &= ~IFF_BONDING;
 
-	bond_free_slave(slave);
+	kobject_put(&slave->kobj);
 
 	return 0;
 }
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 9b8346638f697..fd07561da0348 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -121,7 +121,6 @@ static const struct slave_attribute *slave_attrs[] = {
 };
 
 #define to_slave_attr(_at) container_of(_at, struct slave_attribute, attr)
-#define to_slave(obj) container_of(obj, struct slave, kobj)
 
 static ssize_t slave_show(struct kobject *kobj,
 			  struct attribute *attr, char *buf)
@@ -132,28 +131,15 @@ static ssize_t slave_show(struct kobject *kobj,
 	return slave_attr->show(slave, buf);
 }
 
-static const struct sysfs_ops slave_sysfs_ops = {
+const struct sysfs_ops slave_sysfs_ops = {
 	.show = slave_show,
 };
 
-static struct kobj_type slave_ktype = {
-#ifdef CONFIG_SYSFS
-	.sysfs_ops = &slave_sysfs_ops,
-#endif
-};
-
 int bond_sysfs_slave_add(struct slave *slave)
 {
 	const struct slave_attribute **a;
 	int err;
 
-	err = kobject_init_and_add(&slave->kobj, &slave_ktype,
-				   &(slave->dev->dev.kobj), "bonding_slave");
-	if (err) {
-		kobject_put(&slave->kobj);
-		return err;
-	}
-
 	for (a = slave_attrs; *a; ++a) {
 		err = sysfs_create_file(&slave->kobj, &((*a)->attr));
 		if (err) {
@@ -171,6 +157,4 @@ void bond_sysfs_slave_del(struct slave *slave)
 
 	for (a = slave_attrs; *a; ++a)
 		sysfs_remove_file(&slave->kobj, &((*a)->attr));
-
-	kobject_put(&slave->kobj);
 }
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 6dabbf1502c71..c0e96bf5dd1a0 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -3176,6 +3176,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
 			  GFP_KERNEL | __GFP_COMP);
 	if (!avail) {
 		CH_ALERT(adapter, "free list queue 0 initialization failed\n");
+		ret = -ENOMEM;
 		goto err;
 	}
 	if (avail < q->fl[0].size)
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index e53994ca3142c..7056419461e7b 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2307,6 +2307,12 @@ restart_poll:
 
 		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
 			break;
+		/* The queue entry at the current index is peeked at above
+		 * to determine that there is a valid descriptor awaiting
+		 * processing. We want to be sure that the current slot
+		 * holds a valid descriptor before reading its contents.
+		 */
+		dma_rmb();
 		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
 		rx_buff =
 			(struct ibmvnic_rx_buff *)be64_to_cpu(next->
@@ -2988,13 +2994,18 @@ restart_loop:
 		unsigned int pool = scrq->pool_index;
 		int num_entries = 0;
 
+		/* The queue entry at the current index is peeked at above
+		 * to determine that there is a valid descriptor awaiting
+		 * processing. We want to be sure that the current slot
+		 * holds a valid descriptor before reading its contents.
+		 */
+		dma_rmb();
+
 		next = ibmvnic_next_scrq(adapter, scrq);
 		for (i = 0; i < next->tx_comp.num_comps; i++) {
-			if (next->tx_comp.rcs[i]) {
+			if (next->tx_comp.rcs[i])
 				dev_err(dev, "tx error %x\n",
 					next->tx_comp.rcs[i]);
-				continue;
-			}
 			index = be32_to_cpu(next->tx_comp.correlators[i]);
 			if (index & IBMVNIC_TSO_POOL_MASK) {
 				tx_pool = &adapter->tso_pool[pool];
@@ -3388,6 +3399,11 @@ static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
 	}
 	spin_unlock_irqrestore(&scrq->lock, flags);
 
+	/* Ensure that the entire buffer descriptor has been
+	 * loaded before reading its contents
+	 */
+	dma_rmb();
+
 	return entry;
 }
 
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 0f136f1af5d14..63c0334430134 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -3696,6 +3696,7 @@ static int mvpp2_open(struct net_device *dev)
 	if (!valid) {
 		netdev_err(port->dev,
 			   "invalid configuration: no dt or link IRQ");
+		err = -ENOENT;
 		goto err_free_irq;
 	}
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 91bd258ecf1b5..db76c92b75e29 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -339,6 +339,24 @@ out_free:
 	return err;
 }
 
+static u32 fwp_fill_manage_pages_out(struct fw_page *fwp, u32 *out, u32 index,
+				     u32 npages)
+{
+	u32 pages_set = 0;
+	unsigned int n;
+
+	for_each_clear_bit(n, &fwp->bitmask, MLX5_NUM_4K_IN_PAGE) {
+		MLX5_ARRAY_SET64(manage_pages_out, out, pas, index + pages_set,
+				 fwp->addr + (n * MLX5_ADAPTER_PAGE_SIZE));
+		pages_set++;
+
+		if (!--npages)
+			break;
+	}
+
+	return pages_set;
+}
+
 static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
 			     u32 *in, int in_size, u32 *out, int out_size)
 {
@@ -362,8 +380,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
 		if (fwp->func_id != func_id)
 			continue;
 
-		MLX5_ARRAY_SET64(manage_pages_out, out, pas, i, fwp->addr);
-		i++;
+		i += fwp_fill_manage_pages_out(fwp, out, i, npages - i);
 	}
 
 	MLX5_SET(manage_pages_out, out, output_num_entries, i);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 41662c4e26642..64f6f529f6eb1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -92,6 +92,7 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
 	caps->eswitch_manager = MLX5_CAP_GEN(mdev, eswitch_manager);
 	caps->gvmi = MLX5_CAP_GEN(mdev, vhca_id);
 	caps->flex_protocols = MLX5_CAP_GEN(mdev, flex_parser_protocols);
+	caps->sw_format_ver = MLX5_CAP_GEN(mdev, steering_format_version);
 
 	if (mlx5dr_matcher_supp_flex_parser_icmp_v4(caps)) {
 		caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
index 5b24732b18c0d..56bf900eb753f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
@@ -223,6 +223,11 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
 	if (ret)
 		return ret;
 
+	if (dmn->info.caps.sw_format_ver != MLX5_STEERING_FORMAT_CONNECTX_5) {
+		mlx5dr_err(dmn, "SW steering is not supported on this device\n");
+		return -EOPNOTSUPP;
+	}
+
 	ret = dr_domain_query_fdb_caps(mdev, dmn);
 	if (ret)
 		return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 31737dfca4ea2..c360d08af67da 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -613,6 +613,7 @@ struct mlx5dr_cmd_caps {
 	u8 max_ft_level;
 	u16 roce_min_src_udp;
 	u8 num_esw_ports;
+	u8 sw_format_ver;
 	bool eswitch_manager;
 	bool rx_sw_owner;
 	bool tx_sw_owner;
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index be6660128b556..040a15a828b41 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1078,16 +1078,20 @@ static int pasemi_mac_open(struct net_device *dev)
 
 	mac->tx = pasemi_mac_setup_tx_resources(dev);
 
-	if (!mac->tx)
+	if (!mac->tx) {
+		ret = -ENOMEM;
 		goto out_tx_ring;
+	}
 
 	/* We might already have allocated rings in case mtu was changed
 	 * before interface was brought up.
 	 */
 	if (dev->mtu > 1500 && !mac->num_cs) {
 		pasemi_mac_setup_csrings(mac);
-		if (!mac->num_cs)
+		if (!mac->num_cs) {
+			ret = -ENOMEM;
 			goto out_tx_ring;
+		}
 	}
 
 	/* Zero out rmon counters */
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index c7ec3d24eabc8..496ae07aca5e5 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -254,11 +254,21 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
 	skb_dst_set(skb, &tun_dst->dst);
 
 	/* Ignore packet loops (and multicast echo) */
-	if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) {
-		geneve->dev->stats.rx_errors++;
-		goto drop;
-	}
+	if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr))
+		goto rx_error;
 
+	switch (skb_protocol(skb, true)) {
+	case htons(ETH_P_IP):
+		if (pskb_may_pull(skb, sizeof(struct iphdr)))
+			goto rx_error;
+		break;
+	case htons(ETH_P_IPV6):
+		if (pskb_may_pull(skb, sizeof(struct ipv6hdr)))
+			goto rx_error;
+		break;
+	default:
+		goto rx_error;
+	}
 	oiph = skb_network_header(skb);
 	skb_reset_network_header(skb);
 
@@ -299,6 +309,8 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
 		u64_stats_update_end(&stats->syncp);
 	}
 	return;
+rx_error:
+	geneve->dev->stats.rx_errors++;
 drop:
 	/* Consume bad packet */
 	kfree_skb(skb);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 46bdd0df2eb8b..e72d273999834 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -2028,12 +2028,15 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	struct tun_file *tfile = file->private_data;
 	struct tun_struct *tun = tun_get(tfile);
 	ssize_t result;
+	int noblock = 0;
 
 	if (!tun)
 		return -EBADFD;
 
-	result = tun_get_user(tun, tfile, NULL, from,
-			      file->f_flags & O_NONBLOCK, false);
+	if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
+		noblock = 1;
+
+	result = tun_get_user(tun, tfile, NULL, from, noblock, false);
 
 	tun_put(tun);
 	return result;
@@ -2254,10 +2257,15 @@ static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
 	struct tun_file *tfile = file->private_data;
 	struct tun_struct *tun = tun_get(tfile);
 	ssize_t len = iov_iter_count(to), ret;
+	int noblock = 0;
 
 	if (!tun)
 		return -EBADFD;
-	ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL);
+
+	if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
+		noblock = 1;
+
+	ret = tun_do_read(tun, tfile, to, noblock, NULL);
 	ret = min_t(ssize_t, ret, len);
 	if (ret > 0)
 		iocb->ki_pos = ret;
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 8c01fbf68a895..345576f1a7470 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -59,7 +59,7 @@
 #define IPHETH_USBINTF_SUBCLASS 253
 #define IPHETH_USBINTF_PROTO    1
 
-#define IPHETH_BUF_SIZE         1516
+#define IPHETH_BUF_SIZE         1514
 #define IPHETH_IP_ALIGN         2	/* padding at front of URB */
 #define IPHETH_TX_TIMEOUT       (5 * HZ)
 
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index f9edc76580d91..630ac00a34ede 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -3617,8 +3617,10 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
 
 	if (dst->remote_ifindex) {
 		remote_dev = __dev_get_by_index(net, dst->remote_ifindex);
-		if (!remote_dev)
+		if (!remote_dev) {
+			err = -ENODEV;
 			goto errout;
+		}
 
 		err = netdev_upper_dev_link(remote_dev, dev, extack);
 		if (err)
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 83469061a5426..fe6e1ae73460a 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -352,10 +352,10 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
 	skb_dst_set(skb, NULL);
 	skb_ext_reset(skb);
 	nf_reset_ct(skb);
+	skb_reset_redirect(skb);
 
 #ifdef CONFIG_NET_SCHED
 	skb->tc_index = 0;
-	skb_reset_tc(skb);
 #endif /* CONFIG_NET_SCHED */
 #endif /* REUSE_SKBUFFS_WITHOUT_FREE */
 
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index aba56077cfda2..75e5a7fe341fd 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1139,6 +1139,11 @@ enum mlx5_fc_bulk_alloc_bitmask {
 
 #define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum))
 
+enum {
+	MLX5_STEERING_FORMAT_CONNECTX_5 = 0,
+	MLX5_STEERING_FORMAT_CONNECTX_6DX = 1,
+};
+
 struct mlx5_ifc_cmd_hca_cap_bits {
 	u8 reserved_at_0[0x30];
 	u8 vhca_id[0x10];
@@ -1419,7 +1424,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 
 	u8 general_obj_types[0x40];
 
-	u8 reserved_at_440[0x20];
+	u8 reserved_at_440[0x4];
+	u8 steering_format_version[0x4];
+	u8 create_qp_start_hint[0x18];
 
 	u8 reserved_at_460[0x3];
 	u8 log_max_uctx[0x5];
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 3d56b026bb9e7..1bee8fdff7db0 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -180,6 +180,11 @@ struct slave {
 	struct rtnl_link_stats64 slave_stats;
 };
 
+static inline struct slave *to_slave(struct kobject *kobj)
+{
+	return container_of(kobj, struct slave, kobj);
+}
+
 struct bond_up_slave {
 	unsigned int count;
 	struct rcu_head rcu;
@@ -743,6 +748,9 @@ extern struct bond_parm_tbl ad_select_tbl[];
 /* exported from bond_netlink.c */
 extern struct rtnl_link_ops bond_link_ops;
 
+/* exported from bond_sysfs_slave.c */
+extern const struct sysfs_ops slave_sysfs_ops;
+
 static inline void bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
 {
 	atomic_long_inc(&dev->tx_dropped);
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
index e1eaf17802889..563457fec557e 100644
--- a/include/net/inet_ecn.h
+++ b/include/net/inet_ecn.h
@@ -107,7 +107,7 @@ static inline int IP_ECN_set_ect1(struct iphdr *iph)
 	if ((iph->tos & INET_ECN_MASK) != INET_ECN_ECT_0)
 		return 0;
 
-	check += (__force u16)htons(0x100);
+	check += (__force u16)htons(0x1);
 
 	iph->check = (__force __sum16)(check + (check>=0xFFFF));
 	iph->tos ^= INET_ECN_MASK;
diff --git a/include/net/tls.h b/include/net/tls.h
index 0a065bdffa395..697df45c0bcee 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -221,6 +221,12 @@ enum tls_context_flags {
 	 * to be atomic.
 	 */
 	TLS_TX_SYNC_SCHED = 1,
+	/* tls_dev_del was called for the RX side, device state was released,
+	 * but tls_ctx->netdev might still be kept, because TX-side driver
+	 * resources might not be released yet. Used to prevent the second
+	 * tls_dev_del call in tls_device_down if it happens simultaneously.
+	 */
+	TLS_RX_DEV_CLOSED = 2,
 };
 
 struct cipher_context {
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 200e121101097..3dd7c10d6a582 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4580,7 +4580,6 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
-	int enqueue = 1;
 	long task_delta, idle_task_delta;
 
 	se = cfs_rq->tg->se[cpu_of(rq)];
@@ -4604,21 +4603,41 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	idle_task_delta = cfs_rq->idle_h_nr_running;
 	for_each_sched_entity(se) {
 		if (se->on_rq)
-			enqueue = 0;
+			break;
+		cfs_rq = cfs_rq_of(se);
+		enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
+
+		cfs_rq->h_nr_running += task_delta;
+		cfs_rq->idle_h_nr_running += idle_task_delta;
 
+		/* end evaluation on encountering a throttled cfs_rq */
+		if (cfs_rq_throttled(cfs_rq))
+			goto unthrottle_throttle;
+	}
+
+	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
-		if (enqueue)
-			enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
+
 		cfs_rq->h_nr_running += task_delta;
 		cfs_rq->idle_h_nr_running += idle_task_delta;
 
+
+		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(cfs_rq))
-			break;
+			goto unthrottle_throttle;
+
+		/*
+		 * One parent has been throttled and cfs_rq removed from the
+		 * list. Add it back to not break the leaf list.
+		 */
+		if (throttled_hierarchy(cfs_rq))
+			list_add_leaf_cfs_rq(cfs_rq);
 	}
 
-	if (!se)
-		add_nr_running(rq, task_delta);
+	/* At this point se is NULL and we are at root level*/
+	add_nr_running(rq, task_delta);
 
+unthrottle_throttle:
 	/*
 	 * The cfs_rq_throttled() breaks in the above iteration can result in
 	 * incomplete leaf list maintenance, resulting in triggering the
@@ -4627,7 +4646,8 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
 
-		list_add_leaf_cfs_rq(cfs_rq);
+		if (list_add_leaf_cfs_rq(cfs_rq))
+			break;
 	}
 
 	assert_list_leaf_cfs_rq(rq);
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
index 35512ed26d9ff..164e5c618cce1 100644
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -355,7 +355,7 @@ static int start_kthread(struct trace_array *tr)
 	struct task_struct *kthread;
 	int next_cpu;
 
-	if (WARN_ON(hwlat_kthread))
+	if (hwlat_kthread)
 		return 0;
 
 	/* Just pick the first CPU on first iteration */
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 59980ecfc9623..2371b833b2bcd 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -735,6 +735,11 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff
 	mtu_reserved = nf_bridge_mtu_reduction(skb);
 	mtu = skb->dev->mtu;
 
+	if (nf_bridge->pkt_otherhost) {
+		skb->pkt_type = PACKET_OTHERHOST;
+		nf_bridge->pkt_otherhost = false;
+	}
+
 	if (nf_bridge->frag_max_size && nf_bridge->frag_max_size < mtu)
 		mtu = nf_bridge->frag_max_size;
 
@@ -835,8 +840,6 @@ static unsigned int br_nf_post_routing(void *priv,
 	else
 		return NF_ACCEPT;
 
-	/* We assume any code from br_dev_queue_push_xmit onwards doesn't care
-	 * about the value of skb->pkt_type. */
 	if (skb->pkt_type == PACKET_OTHERHOST) {
 		skb->pkt_type = PACKET_HOST;
 		nf_bridge->pkt_otherhost = true;
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 79f54ae714229..0ac02cab3087b 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -562,6 +562,8 @@ static int devlink_nl_port_fill(struct sk_buff *msg, struct devlink *devlink,
 	if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index))
 		goto nla_put_failure;
 
+	/* Hold rtnl lock while accessing port's netdev attributes. */
+	rtnl_lock();
 	spin_lock_bh(&devlink_port->type_lock);
 	if (nla_put_u16(msg, DEVLINK_ATTR_PORT_TYPE, devlink_port->type))
 		goto nla_put_failure_type_locked;
@@ -588,6 +590,7 @@ static int devlink_nl_port_fill(struct sk_buff *msg, struct devlink *devlink,
 			goto nla_put_failure_type_locked;
 	}
 	spin_unlock_bh(&devlink_port->type_lock);
+	rtnl_unlock();
 	if (devlink_nl_port_attrs_put(msg, devlink_port))
 		goto nla_put_failure;
 
@@ -596,6 +599,7 @@ static int devlink_nl_port_fill(struct sk_buff *msg, struct devlink *devlink,
 
 nla_put_failure_type_locked:
 	spin_unlock_bh(&devlink_port->type_lock);
+	rtnl_unlock();
 nla_put_failure:
 	genlmsg_cancel(msg, hdr);
 	return -EMSGSIZE;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 466d6273da9f2..a0486dcf5425b 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4452,7 +4452,7 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
 	if (skb && (skb_next = skb_peek(q))) {
 		icmp_next = is_icmp_err_skb(skb_next);
 		if (icmp_next)
-			sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_origin;
+			sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_errno;
 	}
 	spin_unlock_irqrestore(&q->lock, flags);
 
@@ -5618,6 +5618,9 @@ int skb_mpls_dec_ttl(struct sk_buff *skb)
 	if (unlikely(!eth_p_mpls(skb->protocol)))
 		return -EINVAL;
 
+	if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
+		return -ENOMEM;
+
 	lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry);
 	ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;
 	if (!--ttl)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index a293d4968d1eb..53c5cf5723aa1 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -3132,7 +3132,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
 
 	fl4.daddr = dst;
 	fl4.saddr = src;
-	fl4.flowi4_tos = rtm->rtm_tos;
+	fl4.flowi4_tos = rtm->rtm_tos & IPTOS_RT_MASK;
 	fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
 	fl4.flowi4_mark = mark;
 	fl4.flowi4_uid = uid;
@@ -3156,8 +3156,9 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
 		fl4.flowi4_iif = iif; /* for rt_fill_info */
 		skb->dev = dev;
 		skb->mark = mark;
-		err = ip_route_input_rcu(skb, dst, src, rtm->rtm_tos,
-					 dev, &res);
+		err = ip_route_input_rcu(skb, dst, src,
+					 rtm->rtm_tos & IPTOS_RT_MASK, dev,
+					 &res);
 
 		rt = skb_rtable(skb);
 		if (err == 0 && rt->dst.error)
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index d7a1f2ef6c52f..62292eef151c4 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -197,6 +197,11 @@ static void tcp_reinit_congestion_control(struct sock *sk,
 	icsk->icsk_ca_setsockopt = 1;
 	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
 
+	if (ca->flags & TCP_CONG_NEEDS_ECN)
+		INET_ECN_xmit(sk);
+	else
+		INET_ECN_dontxmit(sk);
+
 	if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
 		tcp_init_congestion_control(sk);
 }
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index 642fc6ac13d22..8a22486cf2702 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -306,7 +306,9 @@ static int ip6addrlbl_del(struct net *net,
 /* add default label */
 static int __net_init ip6addrlbl_net_init(struct net *net)
 {
-	int err = 0;
+	struct ip6addrlbl_entry *p = NULL;
+	struct hlist_node *n;
+	int err;
 	int i;
 
 	ADDRLABEL(KERN_DEBUG "%s\n", __func__);
@@ -315,14 +317,20 @@ static int __net_init ip6addrlbl_net_init(struct net *net)
 	INIT_HLIST_HEAD(&net->ipv6.ip6addrlbl_table.head);
 
 	for (i = 0; i < ARRAY_SIZE(ip6addrlbl_init_table); i++) {
-		int ret = ip6addrlbl_add(net,
-					 ip6addrlbl_init_table[i].prefix,
-					 ip6addrlbl_init_table[i].prefixlen,
-					 0,
-					 ip6addrlbl_init_table[i].label, 0);
-		/* XXX: should we free all rules when we catch an error? */
-		if (ret && (!err || err != -ENOMEM))
-			err = ret;
+		err = ip6addrlbl_add(net,
+				     ip6addrlbl_init_table[i].prefix,
+				     ip6addrlbl_init_table[i].prefixlen,
+				     0,
+				     ip6addrlbl_init_table[i].label, 0);
+		if (err)
+			goto err_ip6addrlbl_add;
+	}
+	return 0;
+
+err_ip6addrlbl_add:
+	hlist_for_each_entry_safe(p, n, &net->ipv6.ip6addrlbl_table.head, list) {
+		hlist_del_rcu(&p->list);
+		kfree_rcu(p, rcu);
 	}
 	return err;
 }
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 44876509d2155..e4a43a8941c86 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1120,8 +1120,13 @@ static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu,
 		return;
 
 	if (rt->dst.dev) {
-		dev->needed_headroom = rt->dst.dev->hard_header_len +
-				       t_hlen;
+		unsigned short dst_len = rt->dst.dev->hard_header_len +
+					 t_hlen;
+
+		if (t->dev->header_ops)
+			dev->hard_header_len = dst_len;
+		else
+			dev->needed_headroom = dst_len;
 
 		if (set_mtu) {
 			dev->mtu = rt->dst.dev->mtu - t_hlen;
@@ -1146,7 +1151,12 @@ static int ip6gre_calc_hlen(struct ip6_tnl *tunnel)
 	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
 
 	t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
-	tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;
+
+	if (tunnel->dev->header_ops)
+		tunnel->dev->hard_header_len = LL_MAX_HEADER + t_hlen;
+	else
+		tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;
+
 	return t_hlen;
 }
 
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index be8fd79202b87..fdced0a7bd776 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1785,7 +1785,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
 	}
 
 	/* Create the new socket */
-	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
+	nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0);
 	if (!nsk) {
 		err = pr_iucv->path_sever(path, user_data);
 		iucv_path_free(path);
@@ -1991,7 +1991,7 @@ static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
 		goto out;
 	}
 
-	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
+	nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0);
 	bh_lock_sock(sk);
 	if ((sk->sk_state != IUCV_LISTEN) ||
 	    sk_acceptq_is_full(sk) ||
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 3d96dab104490..425e146523cc9 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -196,6 +196,9 @@ static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
 	__be32 lse;
 	int err;
 
+	if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
+		return -ENOMEM;
+
 	stack = mpls_hdr(skb);
 	lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
 	err = skb_mpls_update_lse(skb, lse);
diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c
index 7b094275ea8b4..11c45c8c6c164 100644
--- a/net/rose/rose_loopback.c
+++ b/net/rose/rose_loopback.c
@@ -96,10 +96,19 @@ static void rose_loopback_timer(struct timer_list *unused)
 		}
 
 		if (frametype == ROSE_CALL_REQUEST) {
-			if ((dev = rose_dev_get(dest)) != NULL) {
-				if (rose_rx_call_request(skb, dev, rose_loopback_neigh, lci_o) == 0)
-					kfree_skb(skb);
-			} else {
+			if (!rose_loopback_neigh->dev) {
+				kfree_skb(skb);
+				continue;
+			}
+
+			dev = rose_dev_get(dest);
+			if (!dev) {
+				kfree_skb(skb);
+				continue;
+			}
+
+			if (rose_rx_call_request(skb, dev, rose_loopback_neigh, lci_o) == 0) {
+				dev_put(dev);
 				kfree_skb(skb);
 			}
 		} else {
diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
index 7954021ade33d..0fccae356dc14 100644
--- a/net/sched/act_mpls.c
+++ b/net/sched/act_mpls.c
@@ -88,6 +88,9 @@ static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a,
 			goto drop;
 		break;
 	case TCA_MPLS_ACT_MODIFY:
+		if (!pskb_may_pull(skb,
+				   skb_network_offset(skb) + MPLS_HLEN))
+			goto drop;
 		new_lse = tcf_mpls_get_lse(mpls_hdr(skb), p, false);
 		if (skb_mpls_update_lse(skb, new_lse))
 			goto drop;
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 933a3187d3bf2..0f034c3bc37d7 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -1163,6 +1163,8 @@ void tls_device_offload_cleanup_rx(struct sock *sk)
 	if (tls_ctx->tx_conf != TLS_HW) {
 		dev_put(netdev);
 		tls_ctx->netdev = NULL;
+	} else {
+		set_bit(TLS_RX_DEV_CLOSED, &tls_ctx->flags);
 	}
 out:
 	up_read(&device_offload_lock);
@@ -1192,7 +1194,8 @@ static int tls_device_down(struct net_device *netdev)
 		if (ctx->tx_conf == TLS_HW)
 			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
 							TLS_OFFLOAD_CTX_DIR_TX);
-		if (ctx->rx_conf == TLS_HW)
+		if (ctx->rx_conf == TLS_HW &&
+		    !test_bit(TLS_RX_DEV_CLOSED, &ctx->flags))
 			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
 							TLS_OFFLOAD_CTX_DIR_RX);
 		WRITE_ONCE(ctx->netdev, NULL);
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 57032b7ad0239..0d524ef0d8c80 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -1291,6 +1291,12 @@ static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
 			return NULL;
 		}
 
+		if (!skb_queue_empty(&sk->sk_receive_queue)) {
+			__strp_unpause(&ctx->strp);
+			if (ctx->recv_pkt)
+				return ctx->recv_pkt;
+		}
+
 		if (sk->sk_shutdown & RCV_SHUTDOWN)
 			return NULL;
 
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 54351e5ba0470..cb1f5016c433a 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -675,7 +675,8 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	int len, i, rc = 0;
 
 	if (addr_len != sizeof(struct sockaddr_x25) ||
-	    addr->sx25_family != AF_X25) {
+	    addr->sx25_family != AF_X25 ||
+	    strnlen(addr->sx25_addr.x25_addr, X25_ADDR_LEN) == X25_ADDR_LEN) {
 		rc = -EINVAL;
 		goto out;
 	}
@@ -769,7 +770,8 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
 
 	rc = -EINVAL;
 	if (addr_len != sizeof(struct sockaddr_x25) ||
-	    addr->sx25_family != AF_X25)
+	    addr->sx25_family != AF_X25 ||
+	    strnlen(addr->sx25_addr.x25_addr, X25_ADDR_LEN) == X25_ADDR_LEN)
 		goto out;
 
 	rc = -ENETUNREACH;
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 8173982e00ab5..5fae6cfe8d910 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -30,7 +30,7 @@
 
 enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_BINARY_NO_FIELD_LEN,
 		     IMA_SHOW_BINARY_OLD_STRING_FMT, IMA_SHOW_ASCII };
-enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
+enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8, TPM_PCR10 = 10 };
 
 /* digest size for IMA, fits SHA1 or MD5 */
 #define IMA_DIGEST_SIZE		SHA1_DIGEST_SIZE
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index d86825261b515..b06baf5d3cd32 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -682,7 +682,7 @@ static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id,
 	if (rc != 0)
 		return rc;
 
-	/* cumulative sha1 over tpm registers 0-7 */
+	/* cumulative digest over TPM registers 0-7 */
 	for (i = TPM_PCR0; i < TPM_PCR8; i++) {
 		ima_pcrread(i, &d);
 		/* now accumulate with current aggregate */
@@ -691,6 +691,19 @@ static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id,
 		if (rc != 0)
 			return rc;
 	}
+	/*
+	 * Extend cumulative digest over TPM registers 8-9, which contain
+	 * measurement for the kernel command line (reg. 8) and image (reg. 9)
+	 * in a typical PCR allocation. Registers 8-9 are only included in
+	 * non-SHA1 boot_aggregate digests to avoid ambiguity.
+	 */
+	if (alg_id != TPM_ALG_SHA1) {
+		for (i = TPM_PCR8; i < TPM_PCR10; i++) {
+			ima_pcrread(i, &d);
+			rc = crypto_shash_update(shash, d.digest,
+						 crypto_shash_digestsize(tfm));
+		}
+	}
 	if (!rc)
 		crypto_shash_final(shash, digest);
 	return rc;
diff --git a/sound/usb/mixer_us16x08.c b/sound/usb/mixer_us16x08.c
index f0e8e15394501..c6c834ac83aca 100644
--- a/sound/usb/mixer_us16x08.c
+++ b/sound/usb/mixer_us16x08.c
@@ -607,7 +607,7 @@ static int snd_us16x08_eq_put(struct snd_kcontrol *kcontrol,
 static int snd_us16x08_meter_info(struct snd_kcontrol *kcontrol,
 				  struct snd_ctl_elem_info *uinfo)
 {
-	uinfo->count = 1;
+	uinfo->count = 34;
 	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
 	uinfo->value.integer.max = 0x7FFF;
 	uinfo->value.integer.min = 0;