Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.19/0165-4.19.66-all-fixes.patch

Revision 3444
Thu Aug 15 09:33:27 2019 UTC by niro
File size: 74846 bytes
-linux-4.19.66
1 niro 3444 diff --git a/Makefile b/Makefile
2     index 41a565770431..065e5b34dc02 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,7 +1,7 @@
6     # SPDX-License-Identifier: GPL-2.0
7     VERSION = 4
8     PATCHLEVEL = 19
9     -SUBLEVEL = 65
10     +SUBLEVEL = 66
11     EXTRAVERSION =
12     NAME = "People's Front"
13    
14     diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
15     index 82532c299bb5..008905d4152a 100644
16     --- a/drivers/atm/iphase.c
17     +++ b/drivers/atm/iphase.c
18     @@ -63,6 +63,7 @@
19     #include <asm/byteorder.h>
20     #include <linux/vmalloc.h>
21     #include <linux/jiffies.h>
22     +#include <linux/nospec.h>
23     #include "iphase.h"
24     #include "suni.h"
25     #define swap_byte_order(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8))
26     @@ -2760,8 +2761,11 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
27     }
28     if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT;
29     board = ia_cmds.status;
30     - if ((board < 0) || (board > iadev_count))
31     - board = 0;
32     +
33     + if ((board < 0) || (board > iadev_count))
34     + board = 0;
35     + board = array_index_nospec(board, iadev_count + 1);
36     +
37     iadev = ia_dev[board];
38     switch (ia_cmds.cmd) {
39     case MEMDUMP:
40     diff --git a/drivers/base/base.h b/drivers/base/base.h
41     index 7a419a7a6235..559b047de9f7 100644
42     --- a/drivers/base/base.h
43     +++ b/drivers/base/base.h
44     @@ -66,6 +66,9 @@ struct driver_private {
45     * probed first.
46     * @device - pointer back to the struct device that this structure is
47     * associated with.
48     + * @dead - This device is currently either in the process of or has been
49     + * removed from the system. Any asynchronous events scheduled for this
50     + * device should exit without taking any action.
51     *
52     * Nothing outside of the driver core should ever touch these fields.
53     */
54     @@ -76,6 +79,7 @@ struct device_private {
55     struct klist_node knode_bus;
56     struct list_head deferred_probe;
57     struct device *device;
58     + u8 dead:1;
59     };
60     #define to_device_private_parent(obj) \
61     container_of(obj, struct device_private, knode_parent)
62     diff --git a/drivers/base/core.c b/drivers/base/core.c
63     index 92e2c32c2227..e1a8d5c06f65 100644
64     --- a/drivers/base/core.c
65     +++ b/drivers/base/core.c
66     @@ -2031,6 +2031,24 @@ void put_device(struct device *dev)
67     }
68     EXPORT_SYMBOL_GPL(put_device);
69    
70     +bool kill_device(struct device *dev)
71     +{
72     + /*
73     + * Require the device lock and set the "dead" flag to guarantee that
74     + * the update behavior is consistent with the other bitfields near
75     + * it and that we cannot have an asynchronous probe routine trying
76     + * to run while we are tearing out the bus/class/sysfs from
77     + * underneath the device.
78     + */
79     + lockdep_assert_held(&dev->mutex);
80     +
81     + if (dev->p->dead)
82     + return false;
83     + dev->p->dead = true;
84     + return true;
85     +}
86     +EXPORT_SYMBOL_GPL(kill_device);
87     +
88     /**
89     * device_del - delete device from system.
90     * @dev: device.
91     @@ -2050,6 +2068,10 @@ void device_del(struct device *dev)
92     struct kobject *glue_dir = NULL;
93     struct class_interface *class_intf;
94    
95     + device_lock(dev);
96     + kill_device(dev);
97     + device_unlock(dev);
98     +
99     /* Notify clients of device removal. This call must come
100     * before dpm_sysfs_remove().
101     */
102     diff --git a/drivers/base/dd.c b/drivers/base/dd.c
103     index d48b310c4760..11d24a552ee4 100644
104     --- a/drivers/base/dd.c
105     +++ b/drivers/base/dd.c
106     @@ -725,15 +725,6 @@ static int __device_attach_driver(struct device_driver *drv, void *_data)
107     bool async_allowed;
108     int ret;
109    
110     - /*
111     - * Check if device has already been claimed. This may
112     - * happen with driver loading, device discovery/registration,
113     - * and deferred probe processing happens all at once with
114     - * multiple threads.
115     - */
116     - if (dev->driver)
117     - return -EBUSY;
118     -
119     ret = driver_match_device(drv, dev);
120     if (ret == 0) {
121     /* no match */
122     @@ -768,6 +759,15 @@ static void __device_attach_async_helper(void *_dev, async_cookie_t cookie)
123    
124     device_lock(dev);
125    
126     + /*
127     + * Check if device has already been removed or claimed. This may
128     + * happen with driver loading, device discovery/registration,
129     + * and deferred probe processing happens all at once with
130     + * multiple threads.
131     + */
132     + if (dev->p->dead || dev->driver)
133     + goto out_unlock;
134     +
135     if (dev->parent)
136     pm_runtime_get_sync(dev->parent);
137    
138     @@ -778,7 +778,7 @@ static void __device_attach_async_helper(void *_dev, async_cookie_t cookie)
139    
140     if (dev->parent)
141     pm_runtime_put(dev->parent);
142     -
143     +out_unlock:
144     device_unlock(dev);
145    
146     put_device(dev);
147     @@ -891,7 +891,7 @@ static int __driver_attach(struct device *dev, void *data)
148     if (dev->parent && dev->bus->need_parent_lock)
149     device_lock(dev->parent);
150     device_lock(dev);
151     - if (!dev->driver)
152     + if (!dev->p->dead && !dev->driver)
153     driver_probe_device(drv, dev);
154     device_unlock(dev);
155     if (dev->parent && dev->bus->need_parent_lock)
156     diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
157     index 50b3c0d89c9c..2898bb061945 100644
158     --- a/drivers/hid/hid-ids.h
159     +++ b/drivers/hid/hid-ids.h
160     @@ -559,6 +559,7 @@
161     #define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A 0x0b4a
162     #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a
163     #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A 0x094a
164     +#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641 0x0641
165    
166     #define USB_VENDOR_ID_HUION 0x256c
167     #define USB_DEVICE_ID_HUION_TABLET 0x006e
168     diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
169     index 91e86af44a04..d29c7c9cd185 100644
170     --- a/drivers/hid/hid-quirks.c
171     +++ b/drivers/hid/hid-quirks.c
172     @@ -94,6 +94,7 @@ static const struct hid_device_id hid_quirks[] = {
173     { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A), HID_QUIRK_ALWAYS_POLL },
174     { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
175     { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A), HID_QUIRK_ALWAYS_POLL },
176     + { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641), HID_QUIRK_ALWAYS_POLL },
177     { HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680), HID_QUIRK_MULTI_INPUT },
178     { HID_USB_DEVICE(USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI), HID_QUIRK_MULTI_INPUT },
179     { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X), HID_QUIRK_MULTI_INPUT },
180     diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
181     index 0ae848369474..e56dc97fe4b6 100644
182     --- a/drivers/hid/wacom_wac.c
183     +++ b/drivers/hid/wacom_wac.c
184     @@ -537,14 +537,14 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
185     */
186     buttons = (data[4] << 1) | (data[3] & 0x01);
187     } else if (features->type == CINTIQ_COMPANION_2) {
188     - /* d-pad right -> data[4] & 0x10
189     - * d-pad up -> data[4] & 0x20
190     - * d-pad left -> data[4] & 0x40
191     - * d-pad down -> data[4] & 0x80
192     - * d-pad center -> data[3] & 0x01
193     + /* d-pad right -> data[2] & 0x10
194     + * d-pad up -> data[2] & 0x20
195     + * d-pad left -> data[2] & 0x40
196     + * d-pad down -> data[2] & 0x80
197     + * d-pad center -> data[1] & 0x01
198     */
199     buttons = ((data[2] >> 4) << 7) |
200     - ((data[1] & 0x04) << 6) |
201     + ((data[1] & 0x04) << 4) |
202     ((data[2] & 0x0F) << 2) |
203     (data[1] & 0x03);
204     } else if (features->type >= INTUOS5S && features->type <= INTUOSPL) {
205     diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
206     index 5b0e1d9e5adc..1de10e5c70d7 100644
207     --- a/drivers/i2c/i2c-core-base.c
208     +++ b/drivers/i2c/i2c-core-base.c
209     @@ -185,7 +185,7 @@ static int i2c_generic_bus_free(struct i2c_adapter *adap)
210     int i2c_generic_scl_recovery(struct i2c_adapter *adap)
211     {
212     struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
213     - int i = 0, scl = 1, ret;
214     + int i = 0, scl = 1, ret = 0;
215    
216     if (bri->prepare_recovery)
217     bri->prepare_recovery(adap);
218     diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
219     index 7b794a14d6e8..8be082edf986 100644
220     --- a/drivers/infiniband/core/sa_query.c
221     +++ b/drivers/infiniband/core/sa_query.c
222     @@ -1232,7 +1232,6 @@ static int roce_resolve_route_from_path(struct sa_path_rec *rec,
223     {
224     struct rdma_dev_addr dev_addr = {};
225     union {
226     - struct sockaddr _sockaddr;
227     struct sockaddr_in _sockaddr_in;
228     struct sockaddr_in6 _sockaddr_in6;
229     } sgid_addr, dgid_addr;
230     @@ -1249,12 +1248,12 @@ static int roce_resolve_route_from_path(struct sa_path_rec *rec,
231     */
232     dev_addr.net = &init_net;
233    
234     - rdma_gid2ip(&sgid_addr._sockaddr, &rec->sgid);
235     - rdma_gid2ip(&dgid_addr._sockaddr, &rec->dgid);
236     + rdma_gid2ip((struct sockaddr *)&sgid_addr, &rec->sgid);
237     + rdma_gid2ip((struct sockaddr *)&dgid_addr, &rec->dgid);
238    
239     /* validate the route */
240     - ret = rdma_resolve_ip_route(&sgid_addr._sockaddr,
241     - &dgid_addr._sockaddr, &dev_addr);
242     + ret = rdma_resolve_ip_route((struct sockaddr *)&sgid_addr,
243     + (struct sockaddr *)&dgid_addr, &dev_addr);
244     if (ret)
245     return ret;
246    
247     diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
248     index 3edb81a4f075..33baa17fa9d5 100644
249     --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
250     +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
251     @@ -1936,8 +1936,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
252     }
253    
254     /* select a non-FCoE queue */
255     - return fallback(dev, skb, NULL) %
256     - (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
257     + return fallback(dev, skb, NULL) % (BNX2X_NUM_ETH_QUEUES(bp));
258     }
259    
260     void bnx2x_set_num_queues(struct bnx2x *bp)
261     diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
262     index df5b74f289e1..6455511457ca 100644
263     --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
264     +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
265     @@ -3501,6 +3501,7 @@ static int mvpp2_set_mac_address(struct net_device *dev, void *p)
266     static int mvpp2_change_mtu(struct net_device *dev, int mtu)
267     {
268     struct mvpp2_port *port = netdev_priv(dev);
269     + bool running = netif_running(dev);
270     int err;
271    
272     if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
273     @@ -3509,40 +3510,24 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu)
274     mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
275     }
276    
277     - if (!netif_running(dev)) {
278     - err = mvpp2_bm_update_mtu(dev, mtu);
279     - if (!err) {
280     - port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
281     - return 0;
282     - }
283     -
284     - /* Reconfigure BM to the original MTU */
285     - err = mvpp2_bm_update_mtu(dev, dev->mtu);
286     - if (err)
287     - goto log_error;
288     - }
289     -
290     - mvpp2_stop_dev(port);
291     + if (running)
292     + mvpp2_stop_dev(port);
293    
294     err = mvpp2_bm_update_mtu(dev, mtu);
295     - if (!err) {
296     + if (err) {
297     + netdev_err(dev, "failed to change MTU\n");
298     + /* Reconfigure BM to the original MTU */
299     + mvpp2_bm_update_mtu(dev, dev->mtu);
300     + } else {
301     port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
302     - goto out_start;
303     }
304    
305     - /* Reconfigure BM to the original MTU */
306     - err = mvpp2_bm_update_mtu(dev, dev->mtu);
307     - if (err)
308     - goto log_error;
309     -
310     -out_start:
311     - mvpp2_start_dev(port);
312     - mvpp2_egress_enable(port);
313     - mvpp2_ingress_enable(port);
314     + if (running) {
315     + mvpp2_start_dev(port);
316     + mvpp2_egress_enable(port);
317     + mvpp2_ingress_enable(port);
318     + }
319    
320     - return 0;
321     -log_error:
322     - netdev_err(dev, "failed to change MTU\n");
323     return err;
324     }
325    
326     @@ -5358,9 +5343,6 @@ static int mvpp2_remove(struct platform_device *pdev)
327    
328     mvpp2_dbgfs_cleanup(priv);
329    
330     - flush_workqueue(priv->stats_queue);
331     - destroy_workqueue(priv->stats_queue);
332     -
333     fwnode_for_each_available_child_node(fwnode, port_fwnode) {
334     if (priv->port_list[i]) {
335     mutex_destroy(&priv->port_list[i]->gather_stats_lock);
336     @@ -5369,6 +5351,8 @@ static int mvpp2_remove(struct platform_device *pdev)
337     i++;
338     }
339    
340     + destroy_workqueue(priv->stats_queue);
341     +
342     for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
343     struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
344    
345     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
346     index 1c225be9c7db..3692d6a1cce8 100644
347     --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
348     +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
349     @@ -307,7 +307,7 @@ void mlx5_unregister_device(struct mlx5_core_dev *dev)
350     struct mlx5_interface *intf;
351    
352     mutex_lock(&mlx5_intf_mutex);
353     - list_for_each_entry(intf, &intf_list, list)
354     + list_for_each_entry_reverse(intf, &intf_list, list)
355     mlx5_remove_device(intf, priv);
356     list_del(&priv->dev_list);
357     mutex_unlock(&mlx5_intf_mutex);
358     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
359     index 0f1c296c3ce4..83ab2c0e6b61 100644
360     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
361     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
362     @@ -420,12 +420,11 @@ static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix)
363    
364     static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
365     {
366     - struct mlx5e_wqe_frag_info next_frag, *prev;
367     + struct mlx5e_wqe_frag_info next_frag = {};
368     + struct mlx5e_wqe_frag_info *prev = NULL;
369     int i;
370    
371     next_frag.di = &rq->wqe.di[0];
372     - next_frag.offset = 0;
373     - prev = NULL;
374    
375     for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
376     struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
377     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
378     index 9f7f8425f676..c8928ce69185 100644
379     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
380     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
381     @@ -992,13 +992,13 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
382     void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
383     {
384     struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
385     - u64 bytes, packets, lastuse = 0;
386     struct mlx5e_tc_flow *flow;
387     struct mlx5e_encap_entry *e;
388     struct mlx5_fc *counter;
389     struct neigh_table *tbl;
390     bool neigh_used = false;
391     struct neighbour *n;
392     + u64 lastuse;
393    
394     if (m_neigh->family == AF_INET)
395     tbl = &arp_tbl;
396     @@ -1015,7 +1015,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
397     list_for_each_entry(flow, &e->flows, encap) {
398     if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
399     counter = mlx5_flow_rule_counter(flow->rule[0]);
400     - mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
401     + lastuse = mlx5_fc_query_lastuse(counter);
402     if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
403     neigh_used = true;
404     break;
405     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
406     index 58af6be13dfa..808ddd732e04 100644
407     --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
408     +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
409     @@ -321,6 +321,11 @@ int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
410     }
411     EXPORT_SYMBOL(mlx5_fc_query);
412    
413     +u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter)
414     +{
415     + return counter->cache.lastuse;
416     +}
417     +
418     void mlx5_fc_query_cached(struct mlx5_fc *counter,
419     u64 *bytes, u64 *packets, u64 *lastuse)
420     {
421     diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
422     index 0cab06046e5d..ee126bcf7c35 100644
423     --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
424     +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
425     @@ -5032,7 +5032,7 @@ static int __init mlxsw_sp_module_init(void)
426     return 0;
427    
428     err_sp2_pci_driver_register:
429     - mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
430     + mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
431     err_sp1_pci_driver_register:
432     mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
433     err_sp2_core_driver_register:
434     diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
435     index 10291198decd..732ba21d3369 100644
436     --- a/drivers/net/ethernet/mscc/ocelot.c
437     +++ b/drivers/net/ethernet/mscc/ocelot.c
438     @@ -1767,6 +1767,7 @@ EXPORT_SYMBOL(ocelot_init);
439    
440     void ocelot_deinit(struct ocelot *ocelot)
441     {
442     + cancel_delayed_work(&ocelot->stats_work);
443     destroy_workqueue(ocelot->stats_queue);
444     mutex_destroy(&ocelot->stats_lock);
445     }
446     diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
447     index 884f1f52dcc2..70879a3ab567 100644
448     --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
449     +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
450     @@ -59,7 +59,7 @@ struct rmnet_map_dl_csum_trailer {
451     struct rmnet_map_ul_csum_header {
452     __be16 csum_start_offset;
453     u16 csum_insert_offset:14;
454     - u16 udp_ip4_ind:1;
455     + u16 udp_ind:1;
456     u16 csum_enabled:1;
457     } __aligned(1);
458    
459     diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
460     index 57a9c314a665..b2090cedd2e9 100644
461     --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
462     +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
463     @@ -215,9 +215,9 @@ rmnet_map_ipv4_ul_csum_header(void *iphdr,
464     ul_header->csum_insert_offset = skb->csum_offset;
465     ul_header->csum_enabled = 1;
466     if (ip4h->protocol == IPPROTO_UDP)
467     - ul_header->udp_ip4_ind = 1;
468     + ul_header->udp_ind = 1;
469     else
470     - ul_header->udp_ip4_ind = 0;
471     + ul_header->udp_ind = 0;
472    
473     /* Changing remaining fields to network order */
474     hdr++;
475     @@ -248,6 +248,7 @@ rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
476     struct rmnet_map_ul_csum_header *ul_header,
477     struct sk_buff *skb)
478     {
479     + struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
480     __be16 *hdr = (__be16 *)ul_header, offset;
481    
482     offset = htons((__force u16)(skb_transport_header(skb) -
483     @@ -255,7 +256,11 @@ rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
484     ul_header->csum_start_offset = offset;
485     ul_header->csum_insert_offset = skb->csum_offset;
486     ul_header->csum_enabled = 1;
487     - ul_header->udp_ip4_ind = 0;
488     +
489     + if (ip6h->nexthdr == IPPROTO_UDP)
490     + ul_header->udp_ind = 1;
491     + else
492     + ul_header->udp_ind = 0;
493    
494     /* Changing remaining fields to network order */
495     hdr++;
496     @@ -428,7 +433,7 @@ sw_csum:
497     ul_header->csum_start_offset = 0;
498     ul_header->csum_insert_offset = 0;
499     ul_header->csum_enabled = 0;
500     - ul_header->udp_ip4_ind = 0;
501     + ul_header->udp_ind = 0;
502    
503     priv->stats.csum_sw++;
504     }
505     diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
506     index a6992c4c7313..0c8b7146637e 100644
507     --- a/drivers/net/ethernet/realtek/r8169.c
508     +++ b/drivers/net/ethernet/realtek/r8169.c
509     @@ -7239,13 +7239,18 @@ static int rtl_alloc_irq(struct rtl8169_private *tp)
510     {
511     unsigned int flags;
512    
513     - if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
514     + switch (tp->mac_version) {
515     + case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
516     RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
517     RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
518     RTL_W8(tp, Cfg9346, Cfg9346_Lock);
519     + /* fall through */
520     + case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_24:
521     flags = PCI_IRQ_LEGACY;
522     - } else {
523     + break;
524     + default:
525     flags = PCI_IRQ_ALL_TYPES;
526     + break;
527     }
528    
529     return pci_alloc_irq_vectors(tp->pci_dev, 1, 1, flags);
530     diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
531     index e029c7977a56..2e8056d48f4a 100644
532     --- a/drivers/net/phy/phylink.c
533     +++ b/drivers/net/phy/phylink.c
534     @@ -226,6 +226,8 @@ static int phylink_parse_fixedlink(struct phylink *pl,
535     __ETHTOOL_LINK_MODE_MASK_NBITS, true);
536     linkmode_zero(pl->supported);
537     phylink_set(pl->supported, MII);
538     + phylink_set(pl->supported, Pause);
539     + phylink_set(pl->supported, Asym_Pause);
540     if (s) {
541     __set_bit(s->bit, pl->supported);
542     } else {
543     diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
544     index f22639f0116a..c04f3dc17d76 100644
545     --- a/drivers/net/ppp/pppoe.c
546     +++ b/drivers/net/ppp/pppoe.c
547     @@ -1120,6 +1120,9 @@ static const struct proto_ops pppoe_ops = {
548     .recvmsg = pppoe_recvmsg,
549     .mmap = sock_no_mmap,
550     .ioctl = pppox_ioctl,
551     +#ifdef CONFIG_COMPAT
552     + .compat_ioctl = pppox_compat_ioctl,
553     +#endif
554     };
555    
556     static const struct pppox_proto pppoe_proto = {
557     diff --git a/drivers/net/ppp/pppox.c b/drivers/net/ppp/pppox.c
558     index c0599b3b23c0..9128e42e33e7 100644
559     --- a/drivers/net/ppp/pppox.c
560     +++ b/drivers/net/ppp/pppox.c
561     @@ -22,6 +22,7 @@
562     #include <linux/string.h>
563     #include <linux/module.h>
564     #include <linux/kernel.h>
565     +#include <linux/compat.h>
566     #include <linux/errno.h>
567     #include <linux/netdevice.h>
568     #include <linux/net.h>
569     @@ -103,6 +104,18 @@ int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
570    
571     EXPORT_SYMBOL(pppox_ioctl);
572    
573     +#ifdef CONFIG_COMPAT
574     +int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
575     +{
576     + if (cmd == PPPOEIOCSFWD32)
577     + cmd = PPPOEIOCSFWD;
578     +
579     + return pppox_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
580     +}
581     +
582     +EXPORT_SYMBOL(pppox_compat_ioctl);
583     +#endif
584     +
585     static int pppox_create(struct net *net, struct socket *sock, int protocol,
586     int kern)
587     {
588     diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
589     index 7321a4eca235..9ad3ff40a563 100644
590     --- a/drivers/net/ppp/pptp.c
591     +++ b/drivers/net/ppp/pptp.c
592     @@ -633,6 +633,9 @@ static const struct proto_ops pptp_ops = {
593     .recvmsg = sock_no_recvmsg,
594     .mmap = sock_no_mmap,
595     .ioctl = pppox_ioctl,
596     +#ifdef CONFIG_COMPAT
597     + .compat_ioctl = pppox_compat_ioctl,
598     +#endif
599     };
600    
601     static const struct pppox_proto pppox_pptp_proto = {
602     diff --git a/drivers/net/tun.c b/drivers/net/tun.c
603     index b67fee56ec81..5fa7047ea361 100644
604     --- a/drivers/net/tun.c
605     +++ b/drivers/net/tun.c
606     @@ -1682,6 +1682,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
607    
608     skb_reserve(skb, pad - delta);
609     skb_put(skb, len);
610     + skb_set_owner_w(skb, tfile->socket.sk);
611     get_page(alloc_frag->page);
612     alloc_frag->offset += buflen;
613    
614     diff --git a/drivers/nfc/nfcmrvl/main.c b/drivers/nfc/nfcmrvl/main.c
615     index e65d027b91fa..529be35ac178 100644
616     --- a/drivers/nfc/nfcmrvl/main.c
617     +++ b/drivers/nfc/nfcmrvl/main.c
618     @@ -244,7 +244,7 @@ void nfcmrvl_chip_reset(struct nfcmrvl_private *priv)
619     /* Reset possible fault of previous session */
620     clear_bit(NFCMRVL_PHY_ERROR, &priv->flags);
621    
622     - if (priv->config.reset_n_io) {
623     + if (gpio_is_valid(priv->config.reset_n_io)) {
624     nfc_info(priv->dev, "reset the chip\n");
625     gpio_set_value(priv->config.reset_n_io, 0);
626     usleep_range(5000, 10000);
627     @@ -255,7 +255,7 @@ void nfcmrvl_chip_reset(struct nfcmrvl_private *priv)
628    
629     void nfcmrvl_chip_halt(struct nfcmrvl_private *priv)
630     {
631     - if (priv->config.reset_n_io)
632     + if (gpio_is_valid(priv->config.reset_n_io))
633     gpio_set_value(priv->config.reset_n_io, 0);
634     }
635    
636     diff --git a/drivers/nfc/nfcmrvl/uart.c b/drivers/nfc/nfcmrvl/uart.c
637     index 9a22056e8d9e..e5a622ce4b95 100644
638     --- a/drivers/nfc/nfcmrvl/uart.c
639     +++ b/drivers/nfc/nfcmrvl/uart.c
640     @@ -26,7 +26,7 @@
641     static unsigned int hci_muxed;
642     static unsigned int flow_control;
643     static unsigned int break_control;
644     -static unsigned int reset_n_io;
645     +static int reset_n_io = -EINVAL;
646    
647     /*
648     ** NFCMRVL NCI OPS
649     @@ -231,5 +231,5 @@ MODULE_PARM_DESC(break_control, "Tell if UART driver must drive break signal.");
650     module_param(hci_muxed, uint, 0);
651     MODULE_PARM_DESC(hci_muxed, "Tell if transport is muxed in HCI one.");
652    
653     -module_param(reset_n_io, uint, 0);
654     +module_param(reset_n_io, int, 0);
655     MODULE_PARM_DESC(reset_n_io, "GPIO that is wired to RESET_N signal.");
656     diff --git a/drivers/nfc/nfcmrvl/usb.c b/drivers/nfc/nfcmrvl/usb.c
657     index 945cc903d8f1..888e298f610b 100644
658     --- a/drivers/nfc/nfcmrvl/usb.c
659     +++ b/drivers/nfc/nfcmrvl/usb.c
660     @@ -305,6 +305,7 @@ static int nfcmrvl_probe(struct usb_interface *intf,
661    
662     /* No configuration for USB */
663     memset(&config, 0, sizeof(config));
664     + config.reset_n_io = -EINVAL;
665    
666     nfc_info(&udev->dev, "intf %p id %p\n", intf, id);
667    
668     diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
669     index ee39e2c1644a..2ba22cd1331b 100644
670     --- a/drivers/nvdimm/bus.c
671     +++ b/drivers/nvdimm/bus.c
672     @@ -528,13 +528,38 @@ EXPORT_SYMBOL(nd_device_register);
673    
674     void nd_device_unregister(struct device *dev, enum nd_async_mode mode)
675     {
676     + bool killed;
677     +
678     switch (mode) {
679     case ND_ASYNC:
680     + /*
681     + * In the async case this is being triggered with the
682     + * device lock held and the unregistration work needs to
683     + * be moved out of line iff this is thread has won the
684     + * race to schedule the deletion.
685     + */
686     + if (!kill_device(dev))
687     + return;
688     +
689     get_device(dev);
690     async_schedule_domain(nd_async_device_unregister, dev,
691     &nd_async_domain);
692     break;
693     case ND_SYNC:
694     + /*
695     + * In the sync case the device is being unregistered due
696     + * to a state change of the parent. Claim the kill state
697     + * to synchronize against other unregistration requests,
698     + * or otherwise let the async path handle it if the
699     + * unregistration was already queued.
700     + */
701     + device_lock(dev);
702     + killed = kill_device(dev);
703     + device_unlock(dev);
704     +
705     + if (!killed)
706     + return;
707     +
708     nd_synchronize();
709     device_unregister(dev);
710     break;
711     @@ -840,10 +865,12 @@ void wait_nvdimm_bus_probe_idle(struct device *dev)
712     do {
713     if (nvdimm_bus->probe_active == 0)
714     break;
715     - nvdimm_bus_unlock(&nvdimm_bus->dev);
716     + nvdimm_bus_unlock(dev);
717     + device_unlock(dev);
718     wait_event(nvdimm_bus->wait,
719     nvdimm_bus->probe_active == 0);
720     - nvdimm_bus_lock(&nvdimm_bus->dev);
721     + device_lock(dev);
722     + nvdimm_bus_lock(dev);
723     } while (true);
724     }
725    
726     @@ -926,20 +953,19 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
727     int read_only, unsigned int ioctl_cmd, unsigned long arg)
728     {
729     struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
730     - static char out_env[ND_CMD_MAX_ENVELOPE];
731     - static char in_env[ND_CMD_MAX_ENVELOPE];
732     const struct nd_cmd_desc *desc = NULL;
733     unsigned int cmd = _IOC_NR(ioctl_cmd);
734     struct device *dev = &nvdimm_bus->dev;
735     void __user *p = (void __user *) arg;
736     + char *out_env = NULL, *in_env = NULL;
737     const char *cmd_name, *dimm_name;
738     u32 in_len = 0, out_len = 0;
739     unsigned int func = cmd;
740     unsigned long cmd_mask;
741     struct nd_cmd_pkg pkg;
742     int rc, i, cmd_rc;
743     + void *buf = NULL;
744     u64 buf_len = 0;
745     - void *buf;
746    
747     if (nvdimm) {
748     desc = nd_cmd_dimm_desc(cmd);
749     @@ -970,7 +996,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
750     case ND_CMD_ARS_START:
751     case ND_CMD_CLEAR_ERROR:
752     case ND_CMD_CALL:
753     - dev_dbg(&nvdimm_bus->dev, "'%s' command while read-only.\n",
754     + dev_dbg(dev, "'%s' command while read-only.\n",
755     nvdimm ? nvdimm_cmd_name(cmd)
756     : nvdimm_bus_cmd_name(cmd));
757     return -EPERM;
758     @@ -979,6 +1005,9 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
759     }
760    
761     /* process an input envelope */
762     + in_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL);
763     + if (!in_env)
764     + return -ENOMEM;
765     for (i = 0; i < desc->in_num; i++) {
766     u32 in_size, copy;
767    
768     @@ -986,14 +1015,17 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
769     if (in_size == UINT_MAX) {
770     dev_err(dev, "%s:%s unknown input size cmd: %s field: %d\n",
771     __func__, dimm_name, cmd_name, i);
772     - return -ENXIO;
773     + rc = -ENXIO;
774     + goto out;
775     }
776     - if (in_len < sizeof(in_env))
777     - copy = min_t(u32, sizeof(in_env) - in_len, in_size);
778     + if (in_len < ND_CMD_MAX_ENVELOPE)
779     + copy = min_t(u32, ND_CMD_MAX_ENVELOPE - in_len, in_size);
780     else
781     copy = 0;
782     - if (copy && copy_from_user(&in_env[in_len], p + in_len, copy))
783     - return -EFAULT;
784     + if (copy && copy_from_user(&in_env[in_len], p + in_len, copy)) {
785     + rc = -EFAULT;
786     + goto out;
787     + }
788     in_len += in_size;
789     }
790    
791     @@ -1005,6 +1037,12 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
792     }
793    
794     /* process an output envelope */
795     + out_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL);
796     + if (!out_env) {
797     + rc = -ENOMEM;
798     + goto out;
799     + }
800     +
801     for (i = 0; i < desc->out_num; i++) {
802     u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i,
803     (u32 *) in_env, (u32 *) out_env, 0);
804     @@ -1013,15 +1051,18 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
805     if (out_size == UINT_MAX) {
806     dev_dbg(dev, "%s unknown output size cmd: %s field: %d\n",
807     dimm_name, cmd_name, i);
808     - return -EFAULT;
809     + rc = -EFAULT;
810     + goto out;
811     }
812     - if (out_len < sizeof(out_env))
813     - copy = min_t(u32, sizeof(out_env) - out_len, out_size);
814     + if (out_len < ND_CMD_MAX_ENVELOPE)
815     + copy = min_t(u32, ND_CMD_MAX_ENVELOPE - out_len, out_size);
816     else
817     copy = 0;
818     if (copy && copy_from_user(&out_env[out_len],
819     - p + in_len + out_len, copy))
820     - return -EFAULT;
821     + p + in_len + out_len, copy)) {
822     + rc = -EFAULT;
823     + goto out;
824     + }
825     out_len += out_size;
826     }
827    
828     @@ -1029,19 +1070,23 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
829     if (buf_len > ND_IOCTL_MAX_BUFLEN) {
830     dev_dbg(dev, "%s cmd: %s buf_len: %llu > %d\n", dimm_name,
831     cmd_name, buf_len, ND_IOCTL_MAX_BUFLEN);
832     - return -EINVAL;
833     + rc = -EINVAL;
834     + goto out;
835     }
836    
837     buf = vmalloc(buf_len);
838     - if (!buf)
839     - return -ENOMEM;
840     + if (!buf) {
841     + rc = -ENOMEM;
842     + goto out;
843     + }
844    
845     if (copy_from_user(buf, p, buf_len)) {
846     rc = -EFAULT;
847     goto out;
848     }
849    
850     - nvdimm_bus_lock(&nvdimm_bus->dev);
851     + device_lock(dev);
852     + nvdimm_bus_lock(dev);
853     rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, func, buf);
854     if (rc)
855     goto out_unlock;
856     @@ -1056,17 +1101,16 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
857     nvdimm_account_cleared_poison(nvdimm_bus, clear_err->address,
858     clear_err->cleared);
859     }
860     - nvdimm_bus_unlock(&nvdimm_bus->dev);
861    
862     if (copy_to_user(p, buf, buf_len))
863     rc = -EFAULT;
864    
865     - vfree(buf);
866     - return rc;
867     -
868     - out_unlock:
869     - nvdimm_bus_unlock(&nvdimm_bus->dev);
870     - out:
871     +out_unlock:
872     + nvdimm_bus_unlock(dev);
873     + device_unlock(dev);
874     +out:
875     + kfree(in_env);
876     + kfree(out_env);
877     vfree(buf);
878     return rc;
879     }
880     diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
881     index b9ca0033cc99..f9130cc157e8 100644
882     --- a/drivers/nvdimm/region.c
883     +++ b/drivers/nvdimm/region.c
884     @@ -42,17 +42,6 @@ static int nd_region_probe(struct device *dev)
885     if (rc)
886     return rc;
887    
888     - rc = nd_region_register_namespaces(nd_region, &err);
889     - if (rc < 0)
890     - return rc;
891     -
892     - ndrd = dev_get_drvdata(dev);
893     - ndrd->ns_active = rc;
894     - ndrd->ns_count = rc + err;
895     -
896     - if (rc && err && rc == err)
897     - return -ENODEV;
898     -
899     if (is_nd_pmem(&nd_region->dev)) {
900     struct resource ndr_res;
901    
902     @@ -68,6 +57,17 @@ static int nd_region_probe(struct device *dev)
903     nvdimm_badblocks_populate(nd_region, &nd_region->bb, &ndr_res);
904     }
905    
906     + rc = nd_region_register_namespaces(nd_region, &err);
907     + if (rc < 0)
908     + return rc;
909     +
910     + ndrd = dev_get_drvdata(dev);
911     + ndrd->ns_active = rc;
912     + ndrd->ns_count = rc + err;
913     +
914     + if (rc && err && rc == err)
915     + return -ENODEV;
916     +
917     nd_region->btt_seed = nd_btt_create(nd_region);
918     nd_region->pfn_seed = nd_pfn_create(nd_region);
919     nd_region->dax_seed = nd_dax_create(nd_region);
920     diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
921     index e7377f1028ef..0303296e6d5b 100644
922     --- a/drivers/nvdimm/region_devs.c
923     +++ b/drivers/nvdimm/region_devs.c
924     @@ -425,10 +425,12 @@ static ssize_t available_size_show(struct device *dev,
925     * memory nvdimm_bus_lock() is dropped, but that's userspace's
926     * problem to not race itself.
927     */
928     + device_lock(dev);
929     nvdimm_bus_lock(dev);
930     wait_nvdimm_bus_probe_idle(dev);
931     available = nd_region_available_dpa(nd_region);
932     nvdimm_bus_unlock(dev);
933     + device_unlock(dev);
934    
935     return sprintf(buf, "%llu\n", available);
936     }
937     @@ -440,10 +442,12 @@ static ssize_t max_available_extent_show(struct device *dev,
938     struct nd_region *nd_region = to_nd_region(dev);
939     unsigned long long available = 0;
940    
941     + device_lock(dev);
942     nvdimm_bus_lock(dev);
943     wait_nvdimm_bus_probe_idle(dev);
944     available = nd_region_allocatable_dpa(nd_region);
945     nvdimm_bus_unlock(dev);
946     + device_unlock(dev);
947    
948     return sprintf(buf, "%llu\n", available);
949     }
950     diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
951     index 7dc4ffa24430..24cbd0a2cc69 100644
952     --- a/drivers/scsi/fcoe/fcoe_ctlr.c
953     +++ b/drivers/scsi/fcoe/fcoe_ctlr.c
954     @@ -2017,7 +2017,7 @@ EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac);
955     */
956     static inline struct fcoe_rport *fcoe_ctlr_rport(struct fc_rport_priv *rdata)
957     {
958     - return (struct fcoe_rport *)(rdata + 1);
959     + return container_of(rdata, struct fcoe_rport, rdata);
960     }
961    
962     /**
963     @@ -2281,7 +2281,7 @@ static void fcoe_ctlr_vn_start(struct fcoe_ctlr *fip)
964     */
965     static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
966     struct sk_buff *skb,
967     - struct fc_rport_priv *rdata)
968     + struct fcoe_rport *frport)
969     {
970     struct fip_header *fiph;
971     struct fip_desc *desc = NULL;
972     @@ -2289,16 +2289,12 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
973     struct fip_wwn_desc *wwn = NULL;
974     struct fip_vn_desc *vn = NULL;
975     struct fip_size_desc *size = NULL;
976     - struct fcoe_rport *frport;
977     size_t rlen;
978     size_t dlen;
979     u32 desc_mask = 0;
980     u32 dtype;
981     u8 sub;
982    
983     - memset(rdata, 0, sizeof(*rdata) + sizeof(*frport));
984     - frport = fcoe_ctlr_rport(rdata);
985     -
986     fiph = (struct fip_header *)skb->data;
987     frport->flags = ntohs(fiph->fip_flags);
988    
989     @@ -2361,15 +2357,17 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
990     if (dlen != sizeof(struct fip_wwn_desc))
991     goto len_err;
992     wwn = (struct fip_wwn_desc *)desc;
993     - rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn);
994     + frport->rdata.ids.node_name =
995     + get_unaligned_be64(&wwn->fd_wwn);
996     break;
997     case FIP_DT_VN_ID:
998     if (dlen != sizeof(struct fip_vn_desc))
999     goto len_err;
1000     vn = (struct fip_vn_desc *)desc;
1001     memcpy(frport->vn_mac, vn->fd_mac, ETH_ALEN);
1002     - rdata->ids.port_id = ntoh24(vn->fd_fc_id);
1003     - rdata->ids.port_name = get_unaligned_be64(&vn->fd_wwpn);
1004     + frport->rdata.ids.port_id = ntoh24(vn->fd_fc_id);
1005     + frport->rdata.ids.port_name =
1006     + get_unaligned_be64(&vn->fd_wwpn);
1007     break;
1008     case FIP_DT_FC4F:
1009     if (dlen != sizeof(struct fip_fc4_feat))
1010     @@ -2750,10 +2748,7 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
1011     {
1012     struct fip_header *fiph;
1013     enum fip_vn2vn_subcode sub;
1014     - struct {
1015     - struct fc_rport_priv rdata;
1016     - struct fcoe_rport frport;
1017     - } buf;
1018     + struct fcoe_rport frport = { };
1019     int rc, vlan_id = 0;
1020    
1021     fiph = (struct fip_header *)skb->data;
1022     @@ -2769,7 +2764,7 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
1023     goto drop;
1024     }
1025    
1026     - rc = fcoe_ctlr_vn_parse(fip, skb, &buf.rdata);
1027     + rc = fcoe_ctlr_vn_parse(fip, skb, &frport);
1028     if (rc) {
1029     LIBFCOE_FIP_DBG(fip, "vn_recv vn_parse error %d\n", rc);
1030     goto drop;
1031     @@ -2778,19 +2773,19 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
1032     mutex_lock(&fip->ctlr_mutex);
1033     switch (sub) {
1034     case FIP_SC_VN_PROBE_REQ:
1035     - fcoe_ctlr_vn_probe_req(fip, &buf.rdata);
1036     + fcoe_ctlr_vn_probe_req(fip, &frport.rdata);
1037     break;
1038     case FIP_SC_VN_PROBE_REP:
1039     - fcoe_ctlr_vn_probe_reply(fip, &buf.rdata);
1040     + fcoe_ctlr_vn_probe_reply(fip, &frport.rdata);
1041     break;
1042     case FIP_SC_VN_CLAIM_NOTIFY:
1043     - fcoe_ctlr_vn_claim_notify(fip, &buf.rdata);
1044     + fcoe_ctlr_vn_claim_notify(fip, &frport.rdata);
1045     break;
1046     case FIP_SC_VN_CLAIM_REP:
1047     - fcoe_ctlr_vn_claim_resp(fip, &buf.rdata);
1048     + fcoe_ctlr_vn_claim_resp(fip, &frport.rdata);
1049     break;
1050     case FIP_SC_VN_BEACON:
1051     - fcoe_ctlr_vn_beacon(fip, &buf.rdata);
1052     + fcoe_ctlr_vn_beacon(fip, &frport.rdata);
1053     break;
1054     default:
1055     LIBFCOE_FIP_DBG(fip, "vn_recv unknown subcode %d\n", sub);
1056     @@ -2814,22 +2809,18 @@ drop:
1057     */
1058     static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip,
1059     struct sk_buff *skb,
1060     - struct fc_rport_priv *rdata)
1061     + struct fcoe_rport *frport)
1062     {
1063     struct fip_header *fiph;
1064     struct fip_desc *desc = NULL;
1065     struct fip_mac_desc *macd = NULL;
1066     struct fip_wwn_desc *wwn = NULL;
1067     - struct fcoe_rport *frport;
1068     size_t rlen;
1069     size_t dlen;
1070     u32 desc_mask = 0;
1071     u32 dtype;
1072     u8 sub;
1073    
1074     - memset(rdata, 0, sizeof(*rdata) + sizeof(*frport));
1075     - frport = fcoe_ctlr_rport(rdata);
1076     -
1077     fiph = (struct fip_header *)skb->data;
1078     frport->flags = ntohs(fiph->fip_flags);
1079    
1080     @@ -2883,7 +2874,8 @@ static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip,
1081     if (dlen != sizeof(struct fip_wwn_desc))
1082     goto len_err;
1083     wwn = (struct fip_wwn_desc *)desc;
1084     - rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn);
1085     + frport->rdata.ids.node_name =
1086     + get_unaligned_be64(&wwn->fd_wwn);
1087     break;
1088     default:
1089     LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x "
1090     @@ -2994,22 +2986,19 @@ static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
1091     {
1092     struct fip_header *fiph;
1093     enum fip_vlan_subcode sub;
1094     - struct {
1095     - struct fc_rport_priv rdata;
1096     - struct fcoe_rport frport;
1097     - } buf;
1098     + struct fcoe_rport frport = { };
1099     int rc;
1100    
1101     fiph = (struct fip_header *)skb->data;
1102     sub = fiph->fip_subcode;
1103     - rc = fcoe_ctlr_vlan_parse(fip, skb, &buf.rdata);
1104     + rc = fcoe_ctlr_vlan_parse(fip, skb, &frport);
1105     if (rc) {
1106     LIBFCOE_FIP_DBG(fip, "vlan_recv vlan_parse error %d\n", rc);
1107     goto drop;
1108     }
1109     mutex_lock(&fip->ctlr_mutex);
1110     if (sub == FIP_SC_VL_REQ)
1111     - fcoe_ctlr_vlan_disc_reply(fip, &buf.rdata);
1112     + fcoe_ctlr_vlan_disc_reply(fip, &frport.rdata);
1113     mutex_unlock(&fip->ctlr_mutex);
1114    
1115     drop:
1116     diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
1117     index 3d51a936f6d5..90a748551ede 100644
1118     --- a/drivers/scsi/libfc/fc_rport.c
1119     +++ b/drivers/scsi/libfc/fc_rport.c
1120     @@ -140,6 +140,7 @@ EXPORT_SYMBOL(fc_rport_lookup);
1121     struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
1122     {
1123     struct fc_rport_priv *rdata;
1124     + size_t rport_priv_size = sizeof(*rdata);
1125    
1126     lockdep_assert_held(&lport->disc.disc_mutex);
1127    
1128     @@ -147,7 +148,9 @@ struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
1129     if (rdata)
1130     return rdata;
1131    
1132     - rdata = kzalloc(sizeof(*rdata) + lport->rport_priv_size, GFP_KERNEL);
1133     + if (lport->rport_priv_size > 0)
1134     + rport_priv_size = lport->rport_priv_size;
1135     + rdata = kzalloc(rport_priv_size, GFP_KERNEL);
1136     if (!rdata)
1137     return NULL;
1138    
1139     diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
1140     index 25abf2d1732a..eab27d41ba83 100644
1141     --- a/drivers/spi/spi-bcm2835.c
1142     +++ b/drivers/spi/spi-bcm2835.c
1143     @@ -554,7 +554,8 @@ static int bcm2835_spi_transfer_one(struct spi_master *master,
1144     bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
1145    
1146     /* handle all the 3-wire mode */
1147     - if ((spi->mode & SPI_3WIRE) && (tfr->rx_buf))
1148     + if (spi->mode & SPI_3WIRE && tfr->rx_buf &&
1149     + tfr->rx_buf != master->dummy_rx)
1150     cs |= BCM2835_SPI_CS_REN;
1151     else
1152     cs &= ~BCM2835_SPI_CS_REN;
1153     diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
1154     index a9b00942e87d..8f08095ee54e 100644
1155     --- a/fs/compat_ioctl.c
1156     +++ b/fs/compat_ioctl.c
1157     @@ -894,9 +894,6 @@ COMPATIBLE_IOCTL(PPPIOCDISCONN)
1158     COMPATIBLE_IOCTL(PPPIOCATTCHAN)
1159     COMPATIBLE_IOCTL(PPPIOCGCHAN)
1160     COMPATIBLE_IOCTL(PPPIOCGL2TPSTATS)
1161     -/* PPPOX */
1162     -COMPATIBLE_IOCTL(PPPOEIOCSFWD)
1163     -COMPATIBLE_IOCTL(PPPOEIOCDFWD)
1164     /* Big A */
1165     /* sparc only */
1166     /* Big Q for sound/OSS */
1167     diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
1168     index a6090154b2ab..a01ebb630abc 100644
1169     --- a/include/linux/cgroup-defs.h
1170     +++ b/include/linux/cgroup-defs.h
1171     @@ -207,6 +207,7 @@ struct css_set {
1172     */
1173     struct list_head tasks;
1174     struct list_head mg_tasks;
1175     + struct list_head dying_tasks;
1176    
1177     /* all css_task_iters currently walking this cset */
1178     struct list_head task_iters;
1179     diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
1180     index 8937d48a5389..b4854b48a4f3 100644
1181     --- a/include/linux/cgroup.h
1182     +++ b/include/linux/cgroup.h
1183     @@ -43,6 +43,9 @@
1184     /* walk all threaded css_sets in the domain */
1185     #define CSS_TASK_ITER_THREADED (1U << 1)
1186    
1187     +/* internal flags */
1188     +#define CSS_TASK_ITER_SKIPPED (1U << 16)
1189     +
1190     /* a css_task_iter should be treated as an opaque object */
1191     struct css_task_iter {
1192     struct cgroup_subsys *ss;
1193     @@ -57,6 +60,7 @@ struct css_task_iter {
1194     struct list_head *task_pos;
1195     struct list_head *tasks_head;
1196     struct list_head *mg_tasks_head;
1197     + struct list_head *dying_tasks_head;
1198    
1199     struct css_set *cur_cset;
1200     struct css_set *cur_dcset;
1201     diff --git a/include/linux/device.h b/include/linux/device.h
1202     index 3f1066a9e1c3..19dd8852602c 100644
1203     --- a/include/linux/device.h
1204     +++ b/include/linux/device.h
1205     @@ -1332,6 +1332,7 @@ extern int (*platform_notify_remove)(struct device *dev);
1206     */
1207     extern struct device *get_device(struct device *dev);
1208     extern void put_device(struct device *dev);
1209     +extern bool kill_device(struct device *dev);
1210    
1211     #ifdef CONFIG_DEVTMPFS
1212     extern int devtmpfs_create_node(struct device *dev);
1213     diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
1214     index ba7a9b0c7c57..24e9b360da65 100644
1215     --- a/include/linux/if_pppox.h
1216     +++ b/include/linux/if_pppox.h
1217     @@ -84,6 +84,9 @@ extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
1218     extern void unregister_pppox_proto(int proto_num);
1219     extern void pppox_unbind_sock(struct sock *sk);/* delete ppp-channel binding */
1220     extern int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
1221     +extern int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
1222     +
1223     +#define PPPOEIOCSFWD32 _IOW(0xB1 ,0, compat_size_t)
1224    
1225     /* PPPoX socket states */
1226     enum {
1227     diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
1228     index 804516e4f483..3386399feadc 100644
1229     --- a/include/linux/mlx5/fs.h
1230     +++ b/include/linux/mlx5/fs.h
1231     @@ -188,6 +188,7 @@ int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
1232     struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handler);
1233     struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
1234     void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
1235     +u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter);
1236     void mlx5_fc_query_cached(struct mlx5_fc *counter,
1237     u64 *bytes, u64 *packets, u64 *lastuse);
1238     int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
1239     diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
1240     index f043d65b9bac..177f11c96187 100644
1241     --- a/include/linux/mlx5/mlx5_ifc.h
1242     +++ b/include/linux/mlx5/mlx5_ifc.h
1243     @@ -5623,7 +5623,12 @@ struct mlx5_ifc_modify_cq_in_bits {
1244    
1245     struct mlx5_ifc_cqc_bits cq_context;
1246    
1247     - u8 reserved_at_280[0x600];
1248     + u8 reserved_at_280[0x60];
1249     +
1250     + u8 cq_umem_valid[0x1];
1251     + u8 reserved_at_2e1[0x1f];
1252     +
1253     + u8 reserved_at_300[0x580];
1254    
1255     u8 pas[0][0x40];
1256     };
1257     diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
1258     index bb8092fa1e36..58507c7783cf 100644
1259     --- a/include/scsi/libfcoe.h
1260     +++ b/include/scsi/libfcoe.h
1261     @@ -241,6 +241,7 @@ struct fcoe_fcf {
1262     * @vn_mac: VN_Node assigned MAC address for data
1263     */
1264     struct fcoe_rport {
1265     + struct fc_rport_priv rdata;
1266     unsigned long time;
1267     u16 fcoe_len;
1268     u16 flags;
1269     diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
1270     index 81441117f611..78ef274b036e 100644
1271     --- a/kernel/cgroup/cgroup.c
1272     +++ b/kernel/cgroup/cgroup.c
1273     @@ -212,7 +212,8 @@ static struct cftype cgroup_base_files[];
1274    
1275     static int cgroup_apply_control(struct cgroup *cgrp);
1276     static void cgroup_finalize_control(struct cgroup *cgrp, int ret);
1277     -static void css_task_iter_advance(struct css_task_iter *it);
1278     +static void css_task_iter_skip(struct css_task_iter *it,
1279     + struct task_struct *task);
1280     static int cgroup_destroy_locked(struct cgroup *cgrp);
1281     static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
1282     struct cgroup_subsys *ss);
1283     @@ -672,6 +673,7 @@ struct css_set init_css_set = {
1284     .dom_cset = &init_css_set,
1285     .tasks = LIST_HEAD_INIT(init_css_set.tasks),
1286     .mg_tasks = LIST_HEAD_INIT(init_css_set.mg_tasks),
1287     + .dying_tasks = LIST_HEAD_INIT(init_css_set.dying_tasks),
1288     .task_iters = LIST_HEAD_INIT(init_css_set.task_iters),
1289     .threaded_csets = LIST_HEAD_INIT(init_css_set.threaded_csets),
1290     .cgrp_links = LIST_HEAD_INIT(init_css_set.cgrp_links),
1291     @@ -775,6 +777,21 @@ static void css_set_update_populated(struct css_set *cset, bool populated)
1292     cgroup_update_populated(link->cgrp, populated);
1293     }
1294    
1295     +/*
1296     + * @task is leaving, advance task iterators which are pointing to it so
1297     + * that they can resume at the next position. Advancing an iterator might
1298     + * remove it from the list, use safe walk. See css_task_iter_skip() for
1299     + * details.
1300     + */
1301     +static void css_set_skip_task_iters(struct css_set *cset,
1302     + struct task_struct *task)
1303     +{
1304     + struct css_task_iter *it, *pos;
1305     +
1306     + list_for_each_entry_safe(it, pos, &cset->task_iters, iters_node)
1307     + css_task_iter_skip(it, task);
1308     +}
1309     +
1310     /**
1311     * css_set_move_task - move a task from one css_set to another
1312     * @task: task being moved
1313     @@ -800,22 +817,9 @@ static void css_set_move_task(struct task_struct *task,
1314     css_set_update_populated(to_cset, true);
1315    
1316     if (from_cset) {
1317     - struct css_task_iter *it, *pos;
1318     -
1319     WARN_ON_ONCE(list_empty(&task->cg_list));
1320    
1321     - /*
1322     - * @task is leaving, advance task iterators which are
1323     - * pointing to it so that they can resume at the next
1324     - * position. Advancing an iterator might remove it from
1325     - * the list, use safe walk. See css_task_iter_advance*()
1326     - * for details.
1327     - */
1328     - list_for_each_entry_safe(it, pos, &from_cset->task_iters,
1329     - iters_node)
1330     - if (it->task_pos == &task->cg_list)
1331     - css_task_iter_advance(it);
1332     -
1333     + css_set_skip_task_iters(from_cset, task);
1334     list_del_init(&task->cg_list);
1335     if (!css_set_populated(from_cset))
1336     css_set_update_populated(from_cset, false);
1337     @@ -1142,6 +1146,7 @@ static struct css_set *find_css_set(struct css_set *old_cset,
1338     cset->dom_cset = cset;
1339     INIT_LIST_HEAD(&cset->tasks);
1340     INIT_LIST_HEAD(&cset->mg_tasks);
1341     + INIT_LIST_HEAD(&cset->dying_tasks);
1342     INIT_LIST_HEAD(&cset->task_iters);
1343     INIT_LIST_HEAD(&cset->threaded_csets);
1344     INIT_HLIST_NODE(&cset->hlist);
1345     @@ -4149,15 +4154,18 @@ static void css_task_iter_advance_css_set(struct css_task_iter *it)
1346     it->task_pos = NULL;
1347     return;
1348     }
1349     - } while (!css_set_populated(cset));
1350     + } while (!css_set_populated(cset) && list_empty(&cset->dying_tasks));
1351    
1352     if (!list_empty(&cset->tasks))
1353     it->task_pos = cset->tasks.next;
1354     - else
1355     + else if (!list_empty(&cset->mg_tasks))
1356     it->task_pos = cset->mg_tasks.next;
1357     + else
1358     + it->task_pos = cset->dying_tasks.next;
1359    
1360     it->tasks_head = &cset->tasks;
1361     it->mg_tasks_head = &cset->mg_tasks;
1362     + it->dying_tasks_head = &cset->dying_tasks;
1363    
1364     /*
1365     * We don't keep css_sets locked across iteration steps and thus
1366     @@ -4183,9 +4191,20 @@ static void css_task_iter_advance_css_set(struct css_task_iter *it)
1367     list_add(&it->iters_node, &cset->task_iters);
1368     }
1369    
1370     +static void css_task_iter_skip(struct css_task_iter *it,
1371     + struct task_struct *task)
1372     +{
1373     + lockdep_assert_held(&css_set_lock);
1374     +
1375     + if (it->task_pos == &task->cg_list) {
1376     + it->task_pos = it->task_pos->next;
1377     + it->flags |= CSS_TASK_ITER_SKIPPED;
1378     + }
1379     +}
1380     +
1381     static void css_task_iter_advance(struct css_task_iter *it)
1382     {
1383     - struct list_head *next;
1384     + struct task_struct *task;
1385    
1386     lockdep_assert_held(&css_set_lock);
1387     repeat:
1388     @@ -4195,25 +4214,40 @@ repeat:
1389     * consumed first and then ->mg_tasks. After ->mg_tasks,
1390     * we move onto the next cset.
1391     */
1392     - next = it->task_pos->next;
1393     -
1394     - if (next == it->tasks_head)
1395     - next = it->mg_tasks_head->next;
1396     + if (it->flags & CSS_TASK_ITER_SKIPPED)
1397     + it->flags &= ~CSS_TASK_ITER_SKIPPED;
1398     + else
1399     + it->task_pos = it->task_pos->next;
1400    
1401     - if (next == it->mg_tasks_head)
1402     + if (it->task_pos == it->tasks_head)
1403     + it->task_pos = it->mg_tasks_head->next;
1404     + if (it->task_pos == it->mg_tasks_head)
1405     + it->task_pos = it->dying_tasks_head->next;
1406     + if (it->task_pos == it->dying_tasks_head)
1407     css_task_iter_advance_css_set(it);
1408     - else
1409     - it->task_pos = next;
1410     } else {
1411     /* called from start, proceed to the first cset */
1412     css_task_iter_advance_css_set(it);
1413     }
1414    
1415     - /* if PROCS, skip over tasks which aren't group leaders */
1416     - if ((it->flags & CSS_TASK_ITER_PROCS) && it->task_pos &&
1417     - !thread_group_leader(list_entry(it->task_pos, struct task_struct,
1418     - cg_list)))
1419     - goto repeat;
1420     + if (!it->task_pos)
1421     + return;
1422     +
1423     + task = list_entry(it->task_pos, struct task_struct, cg_list);
1424     +
1425     + if (it->flags & CSS_TASK_ITER_PROCS) {
1426     + /* if PROCS, skip over tasks which aren't group leaders */
1427     + if (!thread_group_leader(task))
1428     + goto repeat;
1429     +
1430     + /* and dying leaders w/o live member threads */
1431     + if (!atomic_read(&task->signal->live))
1432     + goto repeat;
1433     + } else {
1434     + /* skip all dying ones */
1435     + if (task->flags & PF_EXITING)
1436     + goto repeat;
1437     + }
1438     }
1439    
1440     /**
1441     @@ -4269,6 +4303,10 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
1442    
1443     spin_lock_irq(&css_set_lock);
1444    
1445     + /* @it may be half-advanced by skips, finish advancing */
1446     + if (it->flags & CSS_TASK_ITER_SKIPPED)
1447     + css_task_iter_advance(it);
1448     +
1449     if (it->task_pos) {
1450     it->cur_task = list_entry(it->task_pos, struct task_struct,
1451     cg_list);
1452     @@ -5670,6 +5708,7 @@ void cgroup_exit(struct task_struct *tsk)
1453     if (!list_empty(&tsk->cg_list)) {
1454     spin_lock_irq(&css_set_lock);
1455     css_set_move_task(tsk, cset, NULL, false);
1456     + list_add_tail(&tsk->cg_list, &cset->dying_tasks);
1457     cset->nr_tasks--;
1458     spin_unlock_irq(&css_set_lock);
1459     } else {
1460     @@ -5690,6 +5729,13 @@ void cgroup_release(struct task_struct *task)
1461     do_each_subsys_mask(ss, ssid, have_release_callback) {
1462     ss->release(task);
1463     } while_each_subsys_mask();
1464     +
1465     + if (use_task_css_set_links) {
1466     + spin_lock_irq(&css_set_lock);
1467     + css_set_skip_task_iters(task_css_set(task), task);
1468     + list_del_init(&task->cg_list);
1469     + spin_unlock_irq(&css_set_lock);
1470     + }
1471     }
1472    
1473     void cgroup_free(struct task_struct *task)
1474     diff --git a/kernel/exit.c b/kernel/exit.c
1475     index 5c0964dc805a..e10de9836dd7 100644
1476     --- a/kernel/exit.c
1477     +++ b/kernel/exit.c
1478     @@ -194,6 +194,7 @@ repeat:
1479     rcu_read_unlock();
1480    
1481     proc_flush_task(p);
1482     + cgroup_release(p);
1483    
1484     write_lock_irq(&tasklist_lock);
1485     ptrace_release_task(p);
1486     @@ -219,7 +220,6 @@ repeat:
1487     }
1488    
1489     write_unlock_irq(&tasklist_lock);
1490     - cgroup_release(p);
1491     release_thread(p);
1492     call_rcu(&p->rcu, delayed_put_task_struct);
1493    
1494     diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
1495     index fb54d32321ec..6a362da211e1 100644
1496     --- a/net/bridge/br_multicast.c
1497     +++ b/net/bridge/br_multicast.c
1498     @@ -1621,6 +1621,9 @@ br_multicast_leave_group(struct net_bridge *br,
1499     if (!br_port_group_equal(p, port, src))
1500     continue;
1501    
1502     + if (p->flags & MDB_PG_FLAGS_PERMANENT)
1503     + break;
1504     +
1505     rcu_assign_pointer(*pp, p->next);
1506     hlist_del_init(&p->mglist);
1507     del_timer(&p->timer);
1508     diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
1509     index 7df269092103..5f3950f00f73 100644
1510     --- a/net/bridge/br_vlan.c
1511     +++ b/net/bridge/br_vlan.c
1512     @@ -677,6 +677,11 @@ void br_vlan_flush(struct net_bridge *br)
1513    
1514     ASSERT_RTNL();
1515    
1516     + /* delete auto-added default pvid local fdb before flushing vlans
1517     + * otherwise it will be leaked on bridge device init failure
1518     + */
1519     + br_fdb_delete_by_port(br, NULL, 0, 1);
1520     +
1521     vg = br_vlan_group(br);
1522     __vlan_flush(vg);
1523     RCU_INIT_POINTER(br->vlgrp, NULL);
1524     diff --git a/net/core/dev.c b/net/core/dev.c
1525     index 138951d28643..e4b4cb40da00 100644
1526     --- a/net/core/dev.c
1527     +++ b/net/core/dev.c
1528     @@ -9510,6 +9510,8 @@ static void __net_exit default_device_exit(struct net *net)
1529    
1530     /* Push remaining network devices to init_net */
1531     snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
1532     + if (__dev_get_by_name(&init_net, fb_name))
1533     + snprintf(fb_name, IFNAMSIZ, "dev%%d");
1534     err = dev_change_net_namespace(dev, &init_net, fb_name);
1535     if (err) {
1536     pr_emerg("%s: failed to move %s to init_net: %d\n",
1537     diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
1538     index c891235b4966..4368282eb6f8 100644
1539     --- a/net/ipv4/ipip.c
1540     +++ b/net/ipv4/ipip.c
1541     @@ -281,6 +281,9 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb,
1542     const struct iphdr *tiph = &tunnel->parms.iph;
1543     u8 ipproto;
1544    
1545     + if (!pskb_inet_may_pull(skb))
1546     + goto tx_error;
1547     +
1548     switch (skb->protocol) {
1549     case htons(ETH_P_IP):
1550     ipproto = IPPROTO_IPIP;
1551     diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
1552     index 01ecd510014f..a53ef079a539 100644
1553     --- a/net/ipv6/ip6_gre.c
1554     +++ b/net/ipv6/ip6_gre.c
1555     @@ -680,12 +680,13 @@ static int prepare_ip6gre_xmit_ipv6(struct sk_buff *skb,
1556     struct flowi6 *fl6, __u8 *dsfield,
1557     int *encap_limit)
1558     {
1559     - struct ipv6hdr *ipv6h = ipv6_hdr(skb);
1560     + struct ipv6hdr *ipv6h;
1561     struct ip6_tnl *t = netdev_priv(dev);
1562     __u16 offset;
1563    
1564     offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
1565     /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
1566     + ipv6h = ipv6_hdr(skb);
1567    
1568     if (offset > 0) {
1569     struct ipv6_tlv_tnl_enc_lim *tel;
1570     diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
1571     index ade1390c6348..d0ad85b8650d 100644
1572     --- a/net/ipv6/ip6_tunnel.c
1573     +++ b/net/ipv6/ip6_tunnel.c
1574     @@ -1283,12 +1283,11 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1575     }
1576    
1577     fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
1578     + dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
1579    
1580     if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
1581     return -1;
1582    
1583     - dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
1584     -
1585     skb_set_inner_ipproto(skb, IPPROTO_IPIP);
1586    
1587     err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
1588     @@ -1372,12 +1371,11 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1589     }
1590    
1591     fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
1592     + dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
1593    
1594     if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
1595     return -1;
1596    
1597     - dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
1598     -
1599     skb_set_inner_ipproto(skb, IPPROTO_IPV6);
1600    
1601     err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
1602     diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
1603     index 04d9946dcdba..c0956781665e 100644
1604     --- a/net/l2tp/l2tp_ppp.c
1605     +++ b/net/l2tp/l2tp_ppp.c
1606     @@ -1686,6 +1686,9 @@ static const struct proto_ops pppol2tp_ops = {
1607     .recvmsg = pppol2tp_recvmsg,
1608     .mmap = sock_no_mmap,
1609     .ioctl = pppox_ioctl,
1610     +#ifdef CONFIG_COMPAT
1611     + .compat_ioctl = pppox_compat_ioctl,
1612     +#endif
1613     };
1614    
1615     static const struct pppox_proto pppol2tp_proto = {
1616     diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
1617     index 0c68bc9cf0b4..20fae5ca87fa 100644
1618     --- a/net/sched/act_bpf.c
1619     +++ b/net/sched/act_bpf.c
1620     @@ -287,6 +287,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
1621     struct tcf_bpf *prog;
1622     bool is_bpf, is_ebpf;
1623     int ret, res = 0;
1624     + u32 index;
1625    
1626     if (!nla)
1627     return -EINVAL;
1628     @@ -299,13 +300,13 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
1629     return -EINVAL;
1630    
1631     parm = nla_data(tb[TCA_ACT_BPF_PARMS]);
1632     -
1633     - ret = tcf_idr_check_alloc(tn, &parm->index, act, bind);
1634     + index = parm->index;
1635     + ret = tcf_idr_check_alloc(tn, &index, act, bind);
1636     if (!ret) {
1637     - ret = tcf_idr_create(tn, parm->index, est, act,
1638     + ret = tcf_idr_create(tn, index, est, act,
1639     &act_bpf_ops, bind, true);
1640     if (ret < 0) {
1641     - tcf_idr_cleanup(tn, parm->index);
1642     + tcf_idr_cleanup(tn, index);
1643     return ret;
1644     }
1645    
1646     diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
1647     index 6f0f273f1139..605436747978 100644
1648     --- a/net/sched/act_connmark.c
1649     +++ b/net/sched/act_connmark.c
1650     @@ -104,6 +104,7 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
1651     struct tcf_connmark_info *ci;
1652     struct tc_connmark *parm;
1653     int ret = 0;
1654     + u32 index;
1655    
1656     if (!nla)
1657     return -EINVAL;
1658     @@ -117,13 +118,13 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
1659     return -EINVAL;
1660    
1661     parm = nla_data(tb[TCA_CONNMARK_PARMS]);
1662     -
1663     - ret = tcf_idr_check_alloc(tn, &parm->index, a, bind);
1664     + index = parm->index;
1665     + ret = tcf_idr_check_alloc(tn, &index, a, bind);
1666     if (!ret) {
1667     - ret = tcf_idr_create(tn, parm->index, est, a,
1668     + ret = tcf_idr_create(tn, index, est, a,
1669     &act_connmark_ops, bind, false);
1670     if (ret) {
1671     - tcf_idr_cleanup(tn, parm->index);
1672     + tcf_idr_cleanup(tn, index);
1673     return ret;
1674     }
1675    
1676     diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
1677     index b8a67ae3105a..40437197e053 100644
1678     --- a/net/sched/act_csum.c
1679     +++ b/net/sched/act_csum.c
1680     @@ -55,6 +55,7 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
1681     struct tc_csum *parm;
1682     struct tcf_csum *p;
1683     int ret = 0, err;
1684     + u32 index;
1685    
1686     if (nla == NULL)
1687     return -EINVAL;
1688     @@ -66,13 +67,13 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
1689     if (tb[TCA_CSUM_PARMS] == NULL)
1690     return -EINVAL;
1691     parm = nla_data(tb[TCA_CSUM_PARMS]);
1692     -
1693     - err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
1694     + index = parm->index;
1695     + err = tcf_idr_check_alloc(tn, &index, a, bind);
1696     if (!err) {
1697     - ret = tcf_idr_create(tn, parm->index, est, a,
1698     + ret = tcf_idr_create(tn, index, est, a,
1699     &act_csum_ops, bind, true);
1700     if (ret) {
1701     - tcf_idr_cleanup(tn, parm->index);
1702     + tcf_idr_cleanup(tn, index);
1703     return ret;
1704     }
1705     ret = ACT_P_CREATED;
1706     diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
1707     index cd1d9bd32ef9..72d3347bdd41 100644
1708     --- a/net/sched/act_gact.c
1709     +++ b/net/sched/act_gact.c
1710     @@ -64,6 +64,7 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
1711     struct tc_gact *parm;
1712     struct tcf_gact *gact;
1713     int ret = 0;
1714     + u32 index;
1715     int err;
1716     #ifdef CONFIG_GACT_PROB
1717     struct tc_gact_p *p_parm = NULL;
1718     @@ -79,6 +80,7 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
1719     if (tb[TCA_GACT_PARMS] == NULL)
1720     return -EINVAL;
1721     parm = nla_data(tb[TCA_GACT_PARMS]);
1722     + index = parm->index;
1723    
1724     #ifndef CONFIG_GACT_PROB
1725     if (tb[TCA_GACT_PROB] != NULL)
1726     @@ -91,12 +93,12 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
1727     }
1728     #endif
1729    
1730     - err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
1731     + err = tcf_idr_check_alloc(tn, &index, a, bind);
1732     if (!err) {
1733     - ret = tcf_idr_create(tn, parm->index, est, a,
1734     + ret = tcf_idr_create(tn, index, est, a,
1735     &act_gact_ops, bind, true);
1736     if (ret) {
1737     - tcf_idr_cleanup(tn, parm->index);
1738     + tcf_idr_cleanup(tn, index);
1739     return ret;
1740     }
1741     ret = ACT_P_CREATED;
1742     diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
1743     index 06a3d4801878..24047e0e5db0 100644
1744     --- a/net/sched/act_ife.c
1745     +++ b/net/sched/act_ife.c
1746     @@ -482,8 +482,14 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
1747     u8 *saddr = NULL;
1748     bool exists = false;
1749     int ret = 0;
1750     + u32 index;
1751     int err;
1752    
1753     + if (!nla) {
1754     + NL_SET_ERR_MSG_MOD(extack, "IFE requires attributes to be passed");
1755     + return -EINVAL;
1756     + }
1757     +
1758     err = nla_parse_nested(tb, TCA_IFE_MAX, nla, ife_policy, NULL);
1759     if (err < 0)
1760     return err;
1761     @@ -504,7 +510,8 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
1762     if (!p)
1763     return -ENOMEM;
1764    
1765     - err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
1766     + index = parm->index;
1767     + err = tcf_idr_check_alloc(tn, &index, a, bind);
1768     if (err < 0) {
1769     kfree(p);
1770     return err;
1771     @@ -516,10 +523,10 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
1772     }
1773    
1774     if (!exists) {
1775     - ret = tcf_idr_create(tn, parm->index, est, a, &act_ife_ops,
1776     + ret = tcf_idr_create(tn, index, est, a, &act_ife_ops,
1777     bind, true);
1778     if (ret) {
1779     - tcf_idr_cleanup(tn, parm->index);
1780     + tcf_idr_cleanup(tn, index);
1781     kfree(p);
1782     return ret;
1783     }
1784     diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
1785     index f767e78e38c9..548614bd9366 100644
1786     --- a/net/sched/act_mirred.c
1787     +++ b/net/sched/act_mirred.c
1788     @@ -104,6 +104,7 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
1789     struct net_device *dev;
1790     bool exists = false;
1791     int ret, err;
1792     + u32 index;
1793    
1794     if (!nla) {
1795     NL_SET_ERR_MSG_MOD(extack, "Mirred requires attributes to be passed");
1796     @@ -117,8 +118,8 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
1797     return -EINVAL;
1798     }
1799     parm = nla_data(tb[TCA_MIRRED_PARMS]);
1800     -
1801     - err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
1802     + index = parm->index;
1803     + err = tcf_idr_check_alloc(tn, &index, a, bind);
1804     if (err < 0)
1805     return err;
1806     exists = err;
1807     @@ -135,21 +136,21 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
1808     if (exists)
1809     tcf_idr_release(*a, bind);
1810     else
1811     - tcf_idr_cleanup(tn, parm->index);
1812     + tcf_idr_cleanup(tn, index);
1813     NL_SET_ERR_MSG_MOD(extack, "Unknown mirred option");
1814     return -EINVAL;
1815     }
1816    
1817     if (!exists) {
1818     if (!parm->ifindex) {
1819     - tcf_idr_cleanup(tn, parm->index);
1820     + tcf_idr_cleanup(tn, index);
1821     NL_SET_ERR_MSG_MOD(extack, "Specified device does not exist");
1822     return -EINVAL;
1823     }
1824     - ret = tcf_idr_create(tn, parm->index, est, a,
1825     + ret = tcf_idr_create(tn, index, est, a,
1826     &act_mirred_ops, bind, true);
1827     if (ret) {
1828     - tcf_idr_cleanup(tn, parm->index);
1829     + tcf_idr_cleanup(tn, index);
1830     return ret;
1831     }
1832     ret = ACT_P_CREATED;
1833     diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
1834     index 4313aa102440..619828920b97 100644
1835     --- a/net/sched/act_nat.c
1836     +++ b/net/sched/act_nat.c
1837     @@ -45,6 +45,7 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
1838     struct tc_nat *parm;
1839     int ret = 0, err;
1840     struct tcf_nat *p;
1841     + u32 index;
1842    
1843     if (nla == NULL)
1844     return -EINVAL;
1845     @@ -56,13 +57,13 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
1846     if (tb[TCA_NAT_PARMS] == NULL)
1847     return -EINVAL;
1848     parm = nla_data(tb[TCA_NAT_PARMS]);
1849     -
1850     - err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
1851     + index = parm->index;
1852     + err = tcf_idr_check_alloc(tn, &index, a, bind);
1853     if (!err) {
1854     - ret = tcf_idr_create(tn, parm->index, est, a,
1855     + ret = tcf_idr_create(tn, index, est, a,
1856     &act_nat_ops, bind, false);
1857     if (ret) {
1858     - tcf_idr_cleanup(tn, parm->index);
1859     + tcf_idr_cleanup(tn, index);
1860     return ret;
1861     }
1862     ret = ACT_P_CREATED;
1863     diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
1864     index ca535a8585bc..82d258b2a75a 100644
1865     --- a/net/sched/act_pedit.c
1866     +++ b/net/sched/act_pedit.c
1867     @@ -149,6 +149,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
1868     struct tcf_pedit *p;
1869     int ret = 0, err;
1870     int ksize;
1871     + u32 index;
1872    
1873     if (!nla) {
1874     NL_SET_ERR_MSG_MOD(extack, "Pedit requires attributes to be passed");
1875     @@ -178,18 +179,19 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
1876     if (IS_ERR(keys_ex))
1877     return PTR_ERR(keys_ex);
1878    
1879     - err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
1880     + index = parm->index;
1881     + err = tcf_idr_check_alloc(tn, &index, a, bind);
1882     if (!err) {
1883     if (!parm->nkeys) {
1884     - tcf_idr_cleanup(tn, parm->index);
1885     + tcf_idr_cleanup(tn, index);
1886     NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed");
1887     ret = -EINVAL;
1888     goto out_free;
1889     }
1890     - ret = tcf_idr_create(tn, parm->index, est, a,
1891     + ret = tcf_idr_create(tn, index, est, a,
1892     &act_pedit_ops, bind, false);
1893     if (ret) {
1894     - tcf_idr_cleanup(tn, parm->index);
1895     + tcf_idr_cleanup(tn, index);
1896     goto out_free;
1897     }
1898     ret = ACT_P_CREATED;
1899     diff --git a/net/sched/act_police.c b/net/sched/act_police.c
1900     index 5d8bfa878477..997c34db1491 100644
1901     --- a/net/sched/act_police.c
1902     +++ b/net/sched/act_police.c
1903     @@ -85,6 +85,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
1904     struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
1905     struct tc_action_net *tn = net_generic(net, police_net_id);
1906     bool exists = false;
1907     + u32 index;
1908     int size;
1909    
1910     if (nla == NULL)
1911     @@ -101,7 +102,8 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
1912     return -EINVAL;
1913    
1914     parm = nla_data(tb[TCA_POLICE_TBF]);
1915     - err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
1916     + index = parm->index;
1917     + err = tcf_idr_check_alloc(tn, &index, a, bind);
1918     if (err < 0)
1919     return err;
1920     exists = err;
1921     @@ -109,10 +111,10 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
1922     return 0;
1923    
1924     if (!exists) {
1925     - ret = tcf_idr_create(tn, parm->index, NULL, a,
1926     + ret = tcf_idr_create(tn, index, NULL, a,
1927     &act_police_ops, bind, false);
1928     if (ret) {
1929     - tcf_idr_cleanup(tn, parm->index);
1930     + tcf_idr_cleanup(tn, index);
1931     return ret;
1932     }
1933     ret = ACT_P_CREATED;
1934     diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
1935     index c7f5d630d97c..ac37654ca292 100644
1936     --- a/net/sched/act_sample.c
1937     +++ b/net/sched/act_sample.c
1938     @@ -43,7 +43,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
1939     struct tc_action_net *tn = net_generic(net, sample_net_id);
1940     struct nlattr *tb[TCA_SAMPLE_MAX + 1];
1941     struct psample_group *psample_group;
1942     - u32 psample_group_num, rate;
1943     + u32 psample_group_num, rate, index;
1944     struct tc_sample *parm;
1945     struct tcf_sample *s;
1946     bool exists = false;
1947     @@ -59,8 +59,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
1948     return -EINVAL;
1949    
1950     parm = nla_data(tb[TCA_SAMPLE_PARMS]);
1951     -
1952     - err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
1953     + index = parm->index;
1954     + err = tcf_idr_check_alloc(tn, &index, a, bind);
1955     if (err < 0)
1956     return err;
1957     exists = err;
1958     @@ -68,10 +68,10 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
1959     return 0;
1960    
1961     if (!exists) {
1962     - ret = tcf_idr_create(tn, parm->index, est, a,
1963     + ret = tcf_idr_create(tn, index, est, a,
1964     &act_sample_ops, bind, true);
1965     if (ret) {
1966     - tcf_idr_cleanup(tn, parm->index);
1967     + tcf_idr_cleanup(tn, index);
1968     return ret;
1969     }
1970     ret = ACT_P_CREATED;
1971     diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
1972     index 52400d49f81f..658efae71a09 100644
1973     --- a/net/sched/act_simple.c
1974     +++ b/net/sched/act_simple.c
1975     @@ -88,6 +88,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
1976     struct tcf_defact *d;
1977     bool exists = false;
1978     int ret = 0, err;
1979     + u32 index;
1980    
1981     if (nla == NULL)
1982     return -EINVAL;
1983     @@ -100,7 +101,8 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
1984     return -EINVAL;
1985    
1986     parm = nla_data(tb[TCA_DEF_PARMS]);
1987     - err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
1988     + index = parm->index;
1989     + err = tcf_idr_check_alloc(tn, &index, a, bind);
1990     if (err < 0)
1991     return err;
1992     exists = err;
1993     @@ -111,15 +113,15 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
1994     if (exists)
1995     tcf_idr_release(*a, bind);
1996     else
1997     - tcf_idr_cleanup(tn, parm->index);
1998     + tcf_idr_cleanup(tn, index);
1999     return -EINVAL;
2000     }
2001    
2002     if (!exists) {
2003     - ret = tcf_idr_create(tn, parm->index, est, a,
2004     + ret = tcf_idr_create(tn, index, est, a,
2005     &act_simp_ops, bind, false);
2006     if (ret) {
2007     - tcf_idr_cleanup(tn, parm->index);
2008     + tcf_idr_cleanup(tn, index);
2009     return ret;
2010     }
2011    
2012     diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
2013     index 86d90fc5e97e..7709710a41f7 100644
2014     --- a/net/sched/act_skbedit.c
2015     +++ b/net/sched/act_skbedit.c
2016     @@ -107,6 +107,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
2017     u16 *queue_mapping = NULL, *ptype = NULL;
2018     bool exists = false;
2019     int ret = 0, err;
2020     + u32 index;
2021    
2022     if (nla == NULL)
2023     return -EINVAL;
2024     @@ -153,8 +154,8 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
2025     }
2026    
2027     parm = nla_data(tb[TCA_SKBEDIT_PARMS]);
2028     -
2029     - err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
2030     + index = parm->index;
2031     + err = tcf_idr_check_alloc(tn, &index, a, bind);
2032     if (err < 0)
2033     return err;
2034     exists = err;
2035     @@ -165,15 +166,15 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
2036     if (exists)
2037     tcf_idr_release(*a, bind);
2038     else
2039     - tcf_idr_cleanup(tn, parm->index);
2040     + tcf_idr_cleanup(tn, index);
2041     return -EINVAL;
2042     }
2043    
2044     if (!exists) {
2045     - ret = tcf_idr_create(tn, parm->index, est, a,
2046     + ret = tcf_idr_create(tn, index, est, a,
2047     &act_skbedit_ops, bind, true);
2048     if (ret) {
2049     - tcf_idr_cleanup(tn, parm->index);
2050     + tcf_idr_cleanup(tn, index);
2051     return ret;
2052     }
2053    
2054     diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
2055     index 588077fafd6c..3038493d18ca 100644
2056     --- a/net/sched/act_skbmod.c
2057     +++ b/net/sched/act_skbmod.c
2058     @@ -88,12 +88,12 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
2059     struct nlattr *tb[TCA_SKBMOD_MAX + 1];
2060     struct tcf_skbmod_params *p, *p_old;
2061     struct tc_skbmod *parm;
2062     + u32 lflags = 0, index;
2063     struct tcf_skbmod *d;
2064     bool exists = false;
2065     u8 *daddr = NULL;
2066     u8 *saddr = NULL;
2067     u16 eth_type = 0;
2068     - u32 lflags = 0;
2069     int ret = 0, err;
2070    
2071     if (!nla)
2072     @@ -122,10 +122,11 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
2073     }
2074    
2075     parm = nla_data(tb[TCA_SKBMOD_PARMS]);
2076     + index = parm->index;
2077     if (parm->flags & SKBMOD_F_SWAPMAC)
2078     lflags = SKBMOD_F_SWAPMAC;
2079    
2080     - err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
2081     + err = tcf_idr_check_alloc(tn, &index, a, bind);
2082     if (err < 0)
2083     return err;
2084     exists = err;
2085     @@ -136,15 +137,15 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
2086     if (exists)
2087     tcf_idr_release(*a, bind);
2088     else
2089     - tcf_idr_cleanup(tn, parm->index);
2090     + tcf_idr_cleanup(tn, index);
2091     return -EINVAL;
2092     }
2093    
2094     if (!exists) {
2095     - ret = tcf_idr_create(tn, parm->index, est, a,
2096     + ret = tcf_idr_create(tn, index, est, a,
2097     &act_skbmod_ops, bind, true);
2098     if (ret) {
2099     - tcf_idr_cleanup(tn, parm->index);
2100     + tcf_idr_cleanup(tn, index);
2101     return ret;
2102     }
2103    
2104     diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
2105     index 72d9c432e8b4..66bfe57e74ae 100644
2106     --- a/net/sched/act_tunnel_key.c
2107     +++ b/net/sched/act_tunnel_key.c
2108     @@ -224,6 +224,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
2109     __be16 flags;
2110     u8 tos, ttl;
2111     int ret = 0;
2112     + u32 index;
2113     int err;
2114    
2115     if (!nla) {
2116     @@ -244,7 +245,8 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
2117     }
2118    
2119     parm = nla_data(tb[TCA_TUNNEL_KEY_PARMS]);
2120     - err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
2121     + index = parm->index;
2122     + err = tcf_idr_check_alloc(tn, &index, a, bind);
2123     if (err < 0)
2124     return err;
2125     exists = err;
2126     @@ -338,7 +340,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
2127     }
2128    
2129     if (!exists) {
2130     - ret = tcf_idr_create(tn, parm->index, est, a,
2131     + ret = tcf_idr_create(tn, index, est, a,
2132     &act_tunnel_key_ops, bind, true);
2133     if (ret) {
2134     NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
2135     @@ -384,7 +386,7 @@ err_out:
2136     if (exists)
2137     tcf_idr_release(*a, bind);
2138     else
2139     - tcf_idr_cleanup(tn, parm->index);
2140     + tcf_idr_cleanup(tn, index);
2141     return ret;
2142     }
2143    
2144     diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
2145     index 033d273afe50..da993edd2e40 100644
2146     --- a/net/sched/act_vlan.c
2147     +++ b/net/sched/act_vlan.c
2148     @@ -118,6 +118,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
2149     u8 push_prio = 0;
2150     bool exists = false;
2151     int ret = 0, err;
2152     + u32 index;
2153    
2154     if (!nla)
2155     return -EINVAL;
2156     @@ -129,7 +130,8 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
2157     if (!tb[TCA_VLAN_PARMS])
2158     return -EINVAL;
2159     parm = nla_data(tb[TCA_VLAN_PARMS]);
2160     - err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
2161     + index = parm->index;
2162     + err = tcf_idr_check_alloc(tn, &index, a, bind);
2163     if (err < 0)
2164     return err;
2165     exists = err;
2166     @@ -145,7 +147,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
2167     if (exists)
2168     tcf_idr_release(*a, bind);
2169     else
2170     - tcf_idr_cleanup(tn, parm->index);
2171     + tcf_idr_cleanup(tn, index);
2172     return -EINVAL;
2173     }
2174     push_vid = nla_get_u16(tb[TCA_VLAN_PUSH_VLAN_ID]);
2175     @@ -153,7 +155,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
2176     if (exists)
2177     tcf_idr_release(*a, bind);
2178     else
2179     - tcf_idr_cleanup(tn, parm->index);
2180     + tcf_idr_cleanup(tn, index);
2181     return -ERANGE;
2182     }
2183    
2184     @@ -167,7 +169,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
2185     if (exists)
2186     tcf_idr_release(*a, bind);
2187     else
2188     - tcf_idr_cleanup(tn, parm->index);
2189     + tcf_idr_cleanup(tn, index);
2190     return -EPROTONOSUPPORT;
2191     }
2192     } else {
2193     @@ -181,16 +183,16 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
2194     if (exists)
2195     tcf_idr_release(*a, bind);
2196     else
2197     - tcf_idr_cleanup(tn, parm->index);
2198     + tcf_idr_cleanup(tn, index);
2199     return -EINVAL;
2200     }
2201     action = parm->v_action;
2202    
2203     if (!exists) {
2204     - ret = tcf_idr_create(tn, parm->index, est, a,
2205     + ret = tcf_idr_create(tn, index, est, a,
2206     &act_vlan_ops, bind, true);
2207     if (ret) {
2208     - tcf_idr_cleanup(tn, parm->index);
2209     + tcf_idr_cleanup(tn, index);
2210     return ret;
2211     }
2212    
2213     @@ -296,6 +298,14 @@ static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index,
2214     return tcf_idr_search(tn, a, index);
2215     }
2216    
2217     +static size_t tcf_vlan_get_fill_size(const struct tc_action *act)
2218     +{
2219     + return nla_total_size(sizeof(struct tc_vlan))
2220     + + nla_total_size(sizeof(u16)) /* TCA_VLAN_PUSH_VLAN_ID */
2221     + + nla_total_size(sizeof(u16)) /* TCA_VLAN_PUSH_VLAN_PROTOCOL */
2222     + + nla_total_size(sizeof(u8)); /* TCA_VLAN_PUSH_VLAN_PRIORITY */
2223     +}
2224     +
2225     static struct tc_action_ops act_vlan_ops = {
2226     .kind = "vlan",
2227     .type = TCA_ACT_VLAN,
2228     @@ -305,6 +315,7 @@ static struct tc_action_ops act_vlan_ops = {
2229     .init = tcf_vlan_init,
2230     .cleanup = tcf_vlan_cleanup,
2231     .walk = tcf_vlan_walker,
2232     + .get_fill_size = tcf_vlan_get_fill_size,
2233     .lookup = tcf_vlan_search,
2234     .size = sizeof(struct tcf_vlan),
2235     };
2236     diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
2237     index 17cd81f84b5d..77fae0b7c6ee 100644
2238     --- a/net/sched/sch_codel.c
2239     +++ b/net/sched/sch_codel.c
2240     @@ -71,10 +71,10 @@ static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
2241     struct Qdisc *sch = ctx;
2242     struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
2243    
2244     - if (skb)
2245     + if (skb) {
2246     sch->qstats.backlog -= qdisc_pkt_len(skb);
2247     -
2248     - prefetch(&skb->end); /* we'll need skb_shinfo() */
2249     + prefetch(&skb->end); /* we'll need skb_shinfo() */
2250     + }
2251     return skb;
2252     }
2253    
2254     diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
2255     index 9bbab6ba2dab..26dcd02b2d0c 100644
2256     --- a/net/smc/af_smc.c
2257     +++ b/net/smc/af_smc.c
2258     @@ -1680,14 +1680,18 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
2259     }
2260     break;
2261     case TCP_NODELAY:
2262     - if (sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) {
2263     + if (sk->sk_state != SMC_INIT &&
2264     + sk->sk_state != SMC_LISTEN &&
2265     + sk->sk_state != SMC_CLOSED) {
2266     if (val && !smc->use_fallback)
2267     mod_delayed_work(system_wq, &smc->conn.tx_work,
2268     0);
2269     }
2270     break;
2271     case TCP_CORK:
2272     - if (sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) {
2273     + if (sk->sk_state != SMC_INIT &&
2274     + sk->sk_state != SMC_LISTEN &&
2275     + sk->sk_state != SMC_CLOSED) {
2276     if (!val && !smc->use_fallback)
2277     mod_delayed_work(system_wq, &smc->conn.tx_work,
2278     0);
2279     diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
2280     index 85ebb675600c..318c541970ec 100644
2281     --- a/net/tipc/netlink_compat.c
2282     +++ b/net/tipc/netlink_compat.c
2283     @@ -55,6 +55,7 @@ struct tipc_nl_compat_msg {
2284     int rep_type;
2285     int rep_size;
2286     int req_type;
2287     + int req_size;
2288     struct net *net;
2289     struct sk_buff *rep;
2290     struct tlv_desc *req;
2291     @@ -257,7 +258,8 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
2292     int err;
2293     struct sk_buff *arg;
2294    
2295     - if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type))
2296     + if (msg->req_type && (!msg->req_size ||
2297     + !TLV_CHECK_TYPE(msg->req, msg->req_type)))
2298     return -EINVAL;
2299    
2300     msg->rep = tipc_tlv_alloc(msg->rep_size);
2301     @@ -354,7 +356,8 @@ static int tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd,
2302     {
2303     int err;
2304    
2305     - if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type))
2306     + if (msg->req_type && (!msg->req_size ||
2307     + !TLV_CHECK_TYPE(msg->req, msg->req_type)))
2308     return -EINVAL;
2309    
2310     err = __tipc_nl_compat_doit(cmd, msg);
2311     @@ -1276,8 +1279,8 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
2312     goto send;
2313     }
2314    
2315     - len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
2316     - if (!len || !TLV_OK(msg.req, len)) {
2317     + msg.req_size = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
2318     + if (msg.req_size && !TLV_OK(msg.req, msg.req_size)) {
2319     msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
2320     err = -EOPNOTSUPP;
2321     goto send;