Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.9/0324-4.9.225-all-fixes.patch



Revision 3599
Thu Aug 13 10:21:39 2020 UTC by niro
File size: 94351 bytes
linux-225
1 niro 3599 diff --git a/Documentation/networking/l2tp.txt b/Documentation/networking/l2tp.txt
2     index 4650a00ed012..9bc271cdc9a8 100644
3     --- a/Documentation/networking/l2tp.txt
4     +++ b/Documentation/networking/l2tp.txt
5     @@ -177,10 +177,10 @@ setsockopt on the PPPoX socket to set a debug mask.
6    
7     The following debug mask bits are available:
8    
9     -PPPOL2TP_MSG_DEBUG verbose debug (if compiled in)
10     -PPPOL2TP_MSG_CONTROL userspace - kernel interface
11     -PPPOL2TP_MSG_SEQ sequence numbers handling
12     -PPPOL2TP_MSG_DATA data packets
13     +L2TP_MSG_DEBUG verbose debug (if compiled in)
14     +L2TP_MSG_CONTROL userspace - kernel interface
15     +L2TP_MSG_SEQ sequence numbers handling
16     +L2TP_MSG_DATA data packets
17    
18     If enabled, files under a l2tp debugfs directory can be used to dump
19     kernel state about L2TP tunnels and sessions. To access it, the
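
The documentation hunk above describes setting the per-session debug mask with setsockopt() on the PPPoX socket. As a minimal userspace sketch (not part of the patch), the renamed L2TP_MSG_* flags would be used roughly like this; fd is assumed to be an already-connected PPPoL2TP session socket, and the fallback define is only needed if the toolchain headers do not provide SOL_PPPOL2TP:

#include <sys/socket.h>
#include <linux/l2tp.h>          /* L2TP_MSG_* debug flags */
#include <linux/if_pppol2tp.h>   /* PPPOL2TP_SO_DEBUG */

#ifndef SOL_PPPOL2TP
#define SOL_PPPOL2TP 273         /* value from include/linux/socket.h */
#endif

/* Enable control-path and sequence-number debug messages on the session. */
static int l2tp_enable_debug(int fd)
{
	int mask = L2TP_MSG_CONTROL | L2TP_MSG_SEQ;

	return setsockopt(fd, SOL_PPPOL2TP, PPPOL2TP_SO_DEBUG,
			  &mask, sizeof(mask));
}
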
20     diff --git a/Makefile b/Makefile
21     index 3e58c142f92f..d17a2ad3cc4d 100644
22     --- a/Makefile
23     +++ b/Makefile
24     @@ -1,6 +1,6 @@
25     VERSION = 4
26     PATCHLEVEL = 9
27     -SUBLEVEL = 224
28     +SUBLEVEL = 225
29     EXTRAVERSION =
30     NAME = Roaring Lionus
31    
32     diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
33     index cc414382dab4..561b2ba6bc28 100644
34     --- a/arch/arm/include/asm/futex.h
35     +++ b/arch/arm/include/asm/futex.h
36     @@ -162,8 +162,13 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
37     preempt_enable();
38     #endif
39    
40     - if (!ret)
41     - *oval = oldval;
42     + /*
43     + * Store unconditionally. If ret != 0 the extra store is the least
44     + * of the worries but GCC cannot figure out that __futex_atomic_op()
45     + * is either setting ret to -EFAULT or storing the old value in
46     + * oldval which results in a uninitialized warning at the call site.
47     + */
48     + *oval = oldval;
49    
50     return ret;
51     }
52     diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
53     index bc96c8a7fc79..3e4b778f16a5 100644
54     --- a/arch/arm64/kernel/machine_kexec.c
55     +++ b/arch/arm64/kernel/machine_kexec.c
56     @@ -177,7 +177,8 @@ void machine_kexec(struct kimage *kimage)
57     /* Flush the reboot_code_buffer in preparation for its execution. */
58     __flush_dcache_area(reboot_code_buffer, arm64_relocate_new_kernel_size);
59     flush_icache_range((uintptr_t)reboot_code_buffer,
60     - arm64_relocate_new_kernel_size);
61     + (uintptr_t)reboot_code_buffer +
62     + arm64_relocate_new_kernel_size);
63    
64     /* Flush the kimage list and its buffers. */
65     kexec_list_flush(kimage);
66     diff --git a/drivers/base/component.c b/drivers/base/component.c
67     index 08da6160e94d..55f0856bd9b5 100644
68     --- a/drivers/base/component.c
69     +++ b/drivers/base/component.c
70     @@ -162,7 +162,8 @@ static int try_to_bring_up_master(struct master *master,
71     ret = master->ops->bind(master->dev);
72     if (ret < 0) {
73     devres_release_group(master->dev, NULL);
74     - dev_info(master->dev, "master bind failed: %d\n", ret);
75     + if (ret != -EPROBE_DEFER)
76     + dev_info(master->dev, "master bind failed: %d\n", ret);
77     return ret;
78     }
79    
80     @@ -431,8 +432,9 @@ static int component_bind(struct component *component, struct master *master,
81     devres_release_group(component->dev, NULL);
82     devres_release_group(master->dev, NULL);
83    
84     - dev_err(master->dev, "failed to bind %s (ops %ps): %d\n",
85     - dev_name(component->dev), component->ops, ret);
86     + if (ret != -EPROBE_DEFER)
87     + dev_err(master->dev, "failed to bind %s (ops %ps): %d\n",
88     + dev_name(component->dev), component->ops, ret);
89     }
90    
91     return ret;
92     diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
93     index 2d4aeba579f7..c16c06b3dd2f 100644
94     --- a/drivers/dma/tegra210-adma.c
95     +++ b/drivers/dma/tegra210-adma.c
96     @@ -793,7 +793,7 @@ static int tegra_adma_probe(struct platform_device *pdev)
97     ret = dma_async_device_register(&tdma->dma_dev);
98     if (ret < 0) {
99     dev_err(&pdev->dev, "ADMA registration failed: %d\n", ret);
100     - goto irq_dispose;
101     + goto rpm_put;
102     }
103    
104     ret = of_dma_controller_register(pdev->dev.of_node,
105     diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
106     index 25c006338100..4630b58634d8 100644
107     --- a/drivers/hid/hid-ids.h
108     +++ b/drivers/hid/hid-ids.h
109     @@ -353,6 +353,7 @@
110     #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7349 0x7349
111     #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_73F7 0x73f7
112     #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001
113     +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002 0xc002
114    
115     #define USB_VENDOR_ID_ELAN 0x04f3
116    
117     diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
118     index fba655d639af..1207102823de 100644
119     --- a/drivers/hid/hid-multitouch.c
120     +++ b/drivers/hid/hid-multitouch.c
121     @@ -1332,6 +1332,9 @@ static const struct hid_device_id mt_devices[] = {
122     { .driver_data = MT_CLS_EGALAX_SERIAL,
123     MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
124     USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
125     + { .driver_data = MT_CLS_EGALAX,
126     + MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
127     + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002) },
128    
129     /* Elitegroup panel */
130     { .driver_data = MT_CLS_SERIAL,
131     diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
132     index eaa312bc3a3c..c4066276eb7b 100644
133     --- a/drivers/i2c/i2c-dev.c
134     +++ b/drivers/i2c/i2c-dev.c
135     @@ -47,7 +47,7 @@
136     struct i2c_dev {
137     struct list_head list;
138     struct i2c_adapter *adap;
139     - struct device *dev;
140     + struct device dev;
141     struct cdev cdev;
142     };
143    
144     @@ -91,12 +91,14 @@ static struct i2c_dev *get_free_i2c_dev(struct i2c_adapter *adap)
145     return i2c_dev;
146     }
147    
148     -static void put_i2c_dev(struct i2c_dev *i2c_dev)
149     +static void put_i2c_dev(struct i2c_dev *i2c_dev, bool del_cdev)
150     {
151     spin_lock(&i2c_dev_list_lock);
152     list_del(&i2c_dev->list);
153     spin_unlock(&i2c_dev_list_lock);
154     - kfree(i2c_dev);
155     + if (del_cdev)
156     + cdev_device_del(&i2c_dev->cdev, &i2c_dev->dev);
157     + put_device(&i2c_dev->dev);
158     }
159    
160     static ssize_t name_show(struct device *dev,
161     @@ -542,6 +544,14 @@ static const struct file_operations i2cdev_fops = {
162    
163     static struct class *i2c_dev_class;
164    
165     +static void i2cdev_dev_release(struct device *dev)
166     +{
167     + struct i2c_dev *i2c_dev;
168     +
169     + i2c_dev = container_of(dev, struct i2c_dev, dev);
170     + kfree(i2c_dev);
171     +}
172     +
173     static int i2cdev_attach_adapter(struct device *dev, void *dummy)
174     {
175     struct i2c_adapter *adap;
176     @@ -558,27 +568,23 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy)
177    
178     cdev_init(&i2c_dev->cdev, &i2cdev_fops);
179     i2c_dev->cdev.owner = THIS_MODULE;
180     - res = cdev_add(&i2c_dev->cdev, MKDEV(I2C_MAJOR, adap->nr), 1);
181     - if (res)
182     - goto error_cdev;
183     -
184     - /* register this i2c device with the driver core */
185     - i2c_dev->dev = device_create(i2c_dev_class, &adap->dev,
186     - MKDEV(I2C_MAJOR, adap->nr), NULL,
187     - "i2c-%d", adap->nr);
188     - if (IS_ERR(i2c_dev->dev)) {
189     - res = PTR_ERR(i2c_dev->dev);
190     - goto error;
191     +
192     + device_initialize(&i2c_dev->dev);
193     + i2c_dev->dev.devt = MKDEV(I2C_MAJOR, adap->nr);
194     + i2c_dev->dev.class = i2c_dev_class;
195     + i2c_dev->dev.parent = &adap->dev;
196     + i2c_dev->dev.release = i2cdev_dev_release;
197     + dev_set_name(&i2c_dev->dev, "i2c-%d", adap->nr);
198     +
199     + res = cdev_device_add(&i2c_dev->cdev, &i2c_dev->dev);
200     + if (res) {
201     + put_i2c_dev(i2c_dev, false);
202     + return res;
203     }
204    
205     pr_debug("i2c-dev: adapter [%s] registered as minor %d\n",
206     adap->name, adap->nr);
207     return 0;
208     -error:
209     - cdev_del(&i2c_dev->cdev);
210     -error_cdev:
211     - put_i2c_dev(i2c_dev);
212     - return res;
213     }
214    
215     static int i2cdev_detach_adapter(struct device *dev, void *dummy)
216     @@ -594,9 +600,7 @@ static int i2cdev_detach_adapter(struct device *dev, void *dummy)
217     if (!i2c_dev) /* attach_adapter must have failed */
218     return 0;
219    
220     - cdev_del(&i2c_dev->cdev);
221     - put_i2c_dev(i2c_dev);
222     - device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr));
223     + put_i2c_dev(i2c_dev, true);
224    
225     pr_debug("i2c-dev: adapter [%s] unregistered\n", adap->name);
226     return 0;
227     diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
228     index 3e6fe1760d82..a86c511c29e0 100644
229     --- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
230     +++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
231     @@ -270,6 +270,7 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
232     err_rollback_available:
233     device_remove_file(&pdev->dev, &dev_attr_available_masters);
234     err_rollback:
235     + i2c_demux_deactivate_master(priv);
236     for (j = 0; j < i; j++) {
237     of_node_put(priv->chan[j].parent_np);
238     of_changeset_destroy(&priv->chan[j].chgset);
239     diff --git a/drivers/iio/dac/vf610_dac.c b/drivers/iio/dac/vf610_dac.c
240     index c4ec7779b394..190a7c1c5604 100644
241     --- a/drivers/iio/dac/vf610_dac.c
242     +++ b/drivers/iio/dac/vf610_dac.c
243     @@ -235,6 +235,7 @@ static int vf610_dac_probe(struct platform_device *pdev)
244     return 0;
245    
246     error_iio_device_register:
247     + vf610_dac_exit(info);
248     clk_disable_unprepare(info->clk);
249    
250     return ret;
251     diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
252     index e6ae8d123984..a3279f303b49 100644
253     --- a/drivers/iommu/amd_iommu_init.c
254     +++ b/drivers/iommu/amd_iommu_init.c
255     @@ -1171,8 +1171,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
256     }
257     case IVHD_DEV_ACPI_HID: {
258     u16 devid;
259     - u8 hid[ACPIHID_HID_LEN] = {0};
260     - u8 uid[ACPIHID_UID_LEN] = {0};
261     + u8 hid[ACPIHID_HID_LEN];
262     + u8 uid[ACPIHID_UID_LEN];
263     int ret;
264    
265     if (h->type != 0x40) {
266     @@ -1189,6 +1189,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
267     break;
268     }
269    
270     + uid[0] = '\0';
271     switch (e->uidf) {
272     case UID_NOT_PRESENT:
273    
274     @@ -1203,8 +1204,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
275     break;
276     case UID_IS_CHARACTER:
277    
278     - memcpy(uid, (u8 *)(&e->uid), ACPIHID_UID_LEN - 1);
279     - uid[ACPIHID_UID_LEN - 1] = '\0';
280     + memcpy(uid, &e->uid, e->uidl);
281     + uid[e->uidl] = '\0';
282    
283     break;
284     default:
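
The IVRS parsing change above stops copying a fixed ACPIHID_UID_LEN - 1 bytes from the table entry and instead copies only the e->uidl bytes that are actually present before NUL-terminating. A standalone sketch of that bounded-copy idiom (plain C; the structure layout and sizes here are made up for illustration, not the real IVHD format):

#include <stdio.h>
#include <string.h>

#define UID_LEN 256                      /* stand-in for ACPIHID_UID_LEN */

struct ivhd_entry_example {              /* illustrative only */
	unsigned char uidl;              /* number of valid bytes in uid[] */
	unsigned char uid[UID_LEN];      /* raw bytes, not NUL-terminated */
};

/* Copy only the bytes the table says are valid, then terminate. */
static void copy_uid(char *dst, const struct ivhd_entry_example *e)
{
	memcpy(dst, e->uid, e->uidl);
	dst[e->uidl] = '\0';
}

int main(void)
{
	struct ivhd_entry_example e = { .uidl = 4, .uid = "ABCDXXXX" };
	char uid[UID_LEN + 1];

	copy_uid(uid, &e);
	printf("uid = \"%s\"\n", uid);   /* prints uid = "ABCD" */
	return 0;
}
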
285     diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
286     index 451d417eb451..1c8df33404b0 100644
287     --- a/drivers/misc/mei/client.c
288     +++ b/drivers/misc/mei/client.c
289     @@ -276,6 +276,7 @@ void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
290     down_write(&dev->me_clients_rwsem);
291     me_cl = __mei_me_cl_by_uuid(dev, uuid);
292     __mei_me_cl_del(dev, me_cl);
293     + mei_me_cl_put(me_cl);
294     up_write(&dev->me_clients_rwsem);
295     }
296    
297     @@ -297,6 +298,7 @@ void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
298     down_write(&dev->me_clients_rwsem);
299     me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
300     __mei_me_cl_del(dev, me_cl);
301     + mei_me_cl_put(me_cl);
302     up_write(&dev->me_clients_rwsem);
303     }
304    
305     diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
306     index 5478a2ab45c4..54b5f61c8ed9 100644
307     --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
308     +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
309     @@ -2236,8 +2236,6 @@ static int cxgb_up(struct adapter *adap)
310     #if IS_ENABLED(CONFIG_IPV6)
311     update_clip(adap);
312     #endif
313     - /* Initialize hash mac addr list*/
314     - INIT_LIST_HEAD(&adap->mac_hlist);
315     return err;
316    
317     irq_err:
318     @@ -2259,6 +2257,7 @@ static void cxgb_down(struct adapter *adapter)
319    
320     t4_sge_stop(adapter);
321     t4_free_sge_resources(adapter);
322     +
323     adapter->flags &= ~FULL_INIT_DONE;
324     }
325    
326     @@ -4789,6 +4788,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
327     (is_t5(adapter->params.chip) ? STATMODE_V(0) :
328     T6_STATMODE_V(0)));
329    
330     + /* Initialize hash mac addr list */
331     + INIT_LIST_HEAD(&adapter->mac_hlist);
332     +
333     for_each_port(adapter, i) {
334     netdev = alloc_etherdev_mq(sizeof(struct port_info),
335     MAX_ETH_QSETS);
336     @@ -5067,6 +5069,7 @@ sriov:
337     static void remove_one(struct pci_dev *pdev)
338     {
339     struct adapter *adapter = pci_get_drvdata(pdev);
340     + struct hash_mac_addr *entry, *tmp;
341    
342     if (!adapter) {
343     pci_release_regions(pdev);
344     @@ -5105,6 +5108,12 @@ static void remove_one(struct pci_dev *pdev)
345     if (adapter->num_uld || adapter->num_ofld_uld)
346     t4_uld_mem_free(adapter);
347     free_some_resources(adapter);
348     + list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist,
349     + list) {
350     + list_del(&entry->list);
351     + kfree(entry);
352     + }
353     +
354     #if IS_ENABLED(CONFIG_IPV6)
355     t4_cleanup_clip_tbl(adapter);
356     #endif
357     diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
358     index 9eb3071b69a4..17db5be9d2b7 100644
359     --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
360     +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
361     @@ -719,9 +719,6 @@ static int adapter_up(struct adapter *adapter)
362     if (adapter->flags & USING_MSIX)
363     name_msix_vecs(adapter);
364    
365     - /* Initialize hash mac addr list*/
366     - INIT_LIST_HEAD(&adapter->mac_hlist);
367     -
368     adapter->flags |= FULL_INIT_DONE;
369     }
370    
371     @@ -2902,6 +2899,9 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
372     if (err)
373     goto err_unmap_bar;
374    
375     + /* Initialize hash mac addr list */
376     + INIT_LIST_HEAD(&adapter->mac_hlist);
377     +
378     /*
379     * Allocate our "adapter ports" and stitch everything together.
380     */
381     diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
382     index 7e35bd665630..90eab0521be1 100644
383     --- a/drivers/net/ethernet/intel/igb/igb_main.c
384     +++ b/drivers/net/ethernet/intel/igb/igb_main.c
385     @@ -3395,7 +3395,7 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
386     tdba & 0x00000000ffffffffULL);
387     wr32(E1000_TDBAH(reg_idx), tdba >> 32);
388    
389     - ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
390     + ring->tail = adapter->io_addr + E1000_TDT(reg_idx);
391     wr32(E1000_TDH(reg_idx), 0);
392     writel(0, ring->tail);
393    
394     @@ -3734,7 +3734,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
395     ring->count * sizeof(union e1000_adv_rx_desc));
396    
397     /* initialize head and tail */
398     - ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
399     + ring->tail = adapter->io_addr + E1000_RDT(reg_idx);
400     wr32(E1000_RDH(reg_idx), 0);
401     writel(0, ring->tail);
402    
403     diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
404     index a9e8a7356c41..fe844888e0ed 100644
405     --- a/drivers/net/gtp.c
406     +++ b/drivers/net/gtp.c
407     @@ -1108,11 +1108,11 @@ static struct genl_family gtp_genl_family = {
408     };
409    
410     static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
411     - u32 type, struct pdp_ctx *pctx)
412     + int flags, u32 type, struct pdp_ctx *pctx)
413     {
414     void *genlh;
415    
416     - genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, 0,
417     + genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, flags,
418     type);
419     if (genlh == NULL)
420     goto nlmsg_failure;
421     @@ -1208,8 +1208,8 @@ static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info)
422     goto err_unlock;
423     }
424    
425     - err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid,
426     - info->snd_seq, info->nlhdr->nlmsg_type, pctx);
427     + err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid, info->snd_seq,
428     + 0, info->nlhdr->nlmsg_type, pctx);
429     if (err < 0)
430     goto err_unlock_free;
431    
432     @@ -1252,6 +1252,7 @@ static int gtp_genl_dump_pdp(struct sk_buff *skb,
433     gtp_genl_fill_info(skb,
434     NETLINK_CB(cb->skb).portid,
435     cb->nlh->nlmsg_seq,
436     + NLM_F_MULTI,
437     cb->nlh->nlmsg_type, pctx)) {
438     cb->args[0] = i;
439     cb->args[1] = j;
440     diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
441     index 0c46ada027cf..e90ecb179622 100644
442     --- a/drivers/nvdimm/btt.c
443     +++ b/drivers/nvdimm/btt.c
444     @@ -447,9 +447,9 @@ static int btt_log_init(struct arena_info *arena)
445    
446     static int btt_freelist_init(struct arena_info *arena)
447     {
448     - int old, new, ret;
449     + int new, ret;
450     u32 i, map_entry;
451     - struct log_entry log_new, log_old;
452     + struct log_entry log_new;
453    
454     arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
455     GFP_KERNEL);
456     @@ -457,10 +457,6 @@ static int btt_freelist_init(struct arena_info *arena)
457     return -ENOMEM;
458    
459     for (i = 0; i < arena->nfree; i++) {
460     - old = btt_log_read(arena, i, &log_old, LOG_OLD_ENT);
461     - if (old < 0)
462     - return old;
463     -
464     new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
465     if (new < 0)
466     return new;
467     diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
468     index bee2115ecf10..ec7482c7e7eb 100644
469     --- a/drivers/platform/x86/alienware-wmi.c
470     +++ b/drivers/platform/x86/alienware-wmi.c
471     @@ -504,23 +504,22 @@ static acpi_status alienware_wmax_command(struct wmax_basic_args *in_args,
472    
473     input.length = (acpi_size) sizeof(*in_args);
474     input.pointer = in_args;
475     - if (out_data != NULL) {
476     + if (out_data) {
477     output.length = ACPI_ALLOCATE_BUFFER;
478     output.pointer = NULL;
479     status = wmi_evaluate_method(WMAX_CONTROL_GUID, 1,
480     command, &input, &output);
481     - } else
482     + if (ACPI_SUCCESS(status)) {
483     + obj = (union acpi_object *)output.pointer;
484     + if (obj && obj->type == ACPI_TYPE_INTEGER)
485     + *out_data = (u32)obj->integer.value;
486     + }
487     + kfree(output.pointer);
488     + } else {
489     status = wmi_evaluate_method(WMAX_CONTROL_GUID, 1,
490     command, &input, NULL);
491     -
492     - if (ACPI_SUCCESS(status) && out_data != NULL) {
493     - obj = (union acpi_object *)output.pointer;
494     - if (obj && obj->type == ACPI_TYPE_INTEGER)
495     - *out_data = (u32) obj->integer.value;
496     }
497     - kfree(output.pointer);
498     return status;
499     -
500     }
501    
502     /*
503     diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
504     index 0fd7e40b86a0..8137aa343706 100644
505     --- a/drivers/platform/x86/asus-nb-wmi.c
506     +++ b/drivers/platform/x86/asus-nb-wmi.c
507     @@ -561,9 +561,33 @@ static struct asus_wmi_driver asus_nb_wmi_driver = {
508     .detect_quirks = asus_nb_wmi_quirks,
509     };
510    
511     +static const struct dmi_system_id asus_nb_wmi_blacklist[] __initconst = {
512     + {
513     + /*
514     + * asus-nb-wm adds no functionality. The T100TA has a detachable
515     + * USB kbd, so no hotkeys and it has no WMI rfkill; and loading
516     + * asus-nb-wm causes the camera LED to turn and _stay_ on.
517     + */
518     + .matches = {
519     + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
520     + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"),
521     + },
522     + },
523     + {
524     + /* The Asus T200TA has the same issue as the T100TA */
525     + .matches = {
526     + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
527     + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T200TA"),
528     + },
529     + },
530     + {} /* Terminating entry */
531     +};
532    
533     static int __init asus_nb_wmi_init(void)
534     {
535     + if (dmi_check_system(asus_nb_wmi_blacklist))
536     + return -ENODEV;
537     +
538     return asus_wmi_register_driver(&asus_nb_wmi_driver);
539     }
540    
541     diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
542     index 28c45db45aba..ebe8e8dc4677 100644
543     --- a/drivers/rapidio/devices/rio_mport_cdev.c
544     +++ b/drivers/rapidio/devices/rio_mport_cdev.c
545     @@ -905,6 +905,11 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
546     rmcd_error("pinned %ld out of %ld pages",
547     pinned, nr_pages);
548     ret = -EFAULT;
549     + /*
550     + * Set nr_pages up to mean "how many pages to unpin, in
551     + * the error handler:
552     + */
553     + nr_pages = pinned;
554     goto err_pg;
555     }
556    
557     diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
558     index 2633d2bfb1b4..9ef9cbfd8926 100644
559     --- a/drivers/staging/greybus/uart.c
560     +++ b/drivers/staging/greybus/uart.c
561     @@ -539,9 +539,9 @@ static void gb_tty_set_termios(struct tty_struct *tty,
562     }
563    
564     if (C_CRTSCTS(tty) && C_BAUD(tty) != B0)
565     - newline.flow_control |= GB_SERIAL_AUTO_RTSCTS_EN;
566     + newline.flow_control = GB_SERIAL_AUTO_RTSCTS_EN;
567     else
568     - newline.flow_control &= ~GB_SERIAL_AUTO_RTSCTS_EN;
569     + newline.flow_control = 0;
570    
571     if (memcmp(&gb_tty->line_coding, &newline, sizeof(newline))) {
572     memcpy(&gb_tty->line_coding, &newline, sizeof(newline));
573     diff --git a/drivers/staging/iio/accel/sca3000_ring.c b/drivers/staging/iio/accel/sca3000_ring.c
574     index d1cb9b9cf22b..391cbcc4ed77 100644
575     --- a/drivers/staging/iio/accel/sca3000_ring.c
576     +++ b/drivers/staging/iio/accel/sca3000_ring.c
577     @@ -56,7 +56,7 @@ static int sca3000_read_data(struct sca3000_state *st,
578     st->tx[0] = SCA3000_READ_REG(reg_address_high);
579     ret = spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
580     if (ret) {
581     - dev_err(get_device(&st->us->dev), "problem reading register");
582     + dev_err(&st->us->dev, "problem reading register");
583     goto error_free_rx;
584     }
585    
586     diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
587     index 598f0faa48c8..0f5eb2bf5f73 100644
588     --- a/drivers/staging/iio/resolver/ad2s1210.c
589     +++ b/drivers/staging/iio/resolver/ad2s1210.c
590     @@ -126,17 +126,24 @@ static int ad2s1210_config_write(struct ad2s1210_state *st, u8 data)
591     static int ad2s1210_config_read(struct ad2s1210_state *st,
592     unsigned char address)
593     {
594     - struct spi_transfer xfer = {
595     - .len = 2,
596     - .rx_buf = st->rx,
597     - .tx_buf = st->tx,
598     + struct spi_transfer xfers[] = {
599     + {
600     + .len = 1,
601     + .rx_buf = &st->rx[0],
602     + .tx_buf = &st->tx[0],
603     + .cs_change = 1,
604     + }, {
605     + .len = 1,
606     + .rx_buf = &st->rx[1],
607     + .tx_buf = &st->tx[1],
608     + },
609     };
610     int ret = 0;
611    
612     ad2s1210_set_mode(MOD_CONFIG, st);
613     st->tx[0] = address | AD2S1210_MSB_IS_HIGH;
614     st->tx[1] = AD2S1210_REG_FAULT;
615     - ret = spi_sync_transfer(st->sdev, &xfer, 1);
616     + ret = spi_sync_transfer(st->sdev, xfers, 2);
617     if (ret < 0)
618     return ret;
619     st->old_data = true;
620     diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
621     index 2e541a029657..e33d23c2f6ea 100644
622     --- a/drivers/usb/core/message.c
623     +++ b/drivers/usb/core/message.c
624     @@ -1081,11 +1081,11 @@ void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr,
625    
626     if (usb_endpoint_out(epaddr)) {
627     ep = dev->ep_out[epnum];
628     - if (reset_hardware)
629     + if (reset_hardware && epnum != 0)
630     dev->ep_out[epnum] = NULL;
631     } else {
632     ep = dev->ep_in[epnum];
633     - if (reset_hardware)
634     + if (reset_hardware && epnum != 0)
635     dev->ep_in[epnum] = NULL;
636     }
637     if (ep) {
638     diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
639     index 9e17d933ea94..3167f276c4c2 100644
640     --- a/drivers/watchdog/watchdog_dev.c
641     +++ b/drivers/watchdog/watchdog_dev.c
642     @@ -38,7 +38,6 @@
643     #include <linux/init.h> /* For __init/__exit/... */
644     #include <linux/jiffies.h> /* For timeout functions */
645     #include <linux/kernel.h> /* For printk/panic/... */
646     -#include <linux/kref.h> /* For data references */
647     #include <linux/miscdevice.h> /* For handling misc devices */
648     #include <linux/module.h> /* For module stuff/... */
649     #include <linux/mutex.h> /* For mutexes */
650     @@ -53,14 +52,14 @@
651    
652     /*
653     * struct watchdog_core_data - watchdog core internal data
654     - * @kref: Reference count.
655     + * @dev: The watchdog's internal device
656     * @cdev: The watchdog's Character device.
657     * @wdd: Pointer to watchdog device.
658     * @lock: Lock for watchdog core.
659     * @status: Watchdog core internal status bits.
660     */
661     struct watchdog_core_data {
662     - struct kref kref;
663     + struct device dev;
664     struct cdev cdev;
665     struct watchdog_device *wdd;
666     struct mutex lock;
667     @@ -794,7 +793,7 @@ static int watchdog_open(struct inode *inode, struct file *file)
668     file->private_data = wd_data;
669    
670     if (!hw_running)
671     - kref_get(&wd_data->kref);
672     + get_device(&wd_data->dev);
673    
674     /* dev/watchdog is a virtual (and thus non-seekable) filesystem */
675     return nonseekable_open(inode, file);
676     @@ -806,11 +805,11 @@ out_clear:
677     return err;
678     }
679    
680     -static void watchdog_core_data_release(struct kref *kref)
681     +static void watchdog_core_data_release(struct device *dev)
682     {
683     struct watchdog_core_data *wd_data;
684    
685     - wd_data = container_of(kref, struct watchdog_core_data, kref);
686     + wd_data = container_of(dev, struct watchdog_core_data, dev);
687    
688     kfree(wd_data);
689     }
690     @@ -870,7 +869,7 @@ done:
691     */
692     if (!running) {
693     module_put(wd_data->cdev.owner);
694     - kref_put(&wd_data->kref, watchdog_core_data_release);
695     + put_device(&wd_data->dev);
696     }
697     return 0;
698     }
699     @@ -889,17 +888,22 @@ static struct miscdevice watchdog_miscdev = {
700     .fops = &watchdog_fops,
701     };
702    
703     +static struct class watchdog_class = {
704     + .name = "watchdog",
705     + .owner = THIS_MODULE,
706     + .dev_groups = wdt_groups,
707     +};
708     +
709     /*
710     * watchdog_cdev_register: register watchdog character device
711     * @wdd: watchdog device
712     - * @devno: character device number
713     *
714     * Register a watchdog character device including handling the legacy
715     * /dev/watchdog node. /dev/watchdog is actually a miscdevice and
716     * thus we set it up like that.
717     */
718    
719     -static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
720     +static int watchdog_cdev_register(struct watchdog_device *wdd)
721     {
722     struct watchdog_core_data *wd_data;
723     int err;
724     @@ -907,7 +911,6 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
725     wd_data = kzalloc(sizeof(struct watchdog_core_data), GFP_KERNEL);
726     if (!wd_data)
727     return -ENOMEM;
728     - kref_init(&wd_data->kref);
729     mutex_init(&wd_data->lock);
730    
731     wd_data->wdd = wdd;
732     @@ -934,23 +937,33 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
733     }
734     }
735    
736     + device_initialize(&wd_data->dev);
737     + wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id);
738     + wd_data->dev.class = &watchdog_class;
739     + wd_data->dev.parent = wdd->parent;
740     + wd_data->dev.groups = wdd->groups;
741     + wd_data->dev.release = watchdog_core_data_release;
742     + dev_set_drvdata(&wd_data->dev, wdd);
743     + dev_set_name(&wd_data->dev, "watchdog%d", wdd->id);
744     +
745     /* Fill in the data structures */
746     cdev_init(&wd_data->cdev, &watchdog_fops);
747     - wd_data->cdev.owner = wdd->ops->owner;
748    
749     /* Add the device */
750     - err = cdev_add(&wd_data->cdev, devno, 1);
751     + err = cdev_device_add(&wd_data->cdev, &wd_data->dev);
752     if (err) {
753     pr_err("watchdog%d unable to add device %d:%d\n",
754     wdd->id, MAJOR(watchdog_devt), wdd->id);
755     if (wdd->id == 0) {
756     misc_deregister(&watchdog_miscdev);
757     old_wd_data = NULL;
758     - kref_put(&wd_data->kref, watchdog_core_data_release);
759     + put_device(&wd_data->dev);
760     }
761     return err;
762     }
763    
764     + wd_data->cdev.owner = wdd->ops->owner;
765     +
766     /* Record time of most recent heartbeat as 'just before now'. */
767     wd_data->last_hw_keepalive = jiffies - 1;
768    
769     @@ -960,7 +973,7 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
770     */
771     if (watchdog_hw_running(wdd)) {
772     __module_get(wdd->ops->owner);
773     - kref_get(&wd_data->kref);
774     + get_device(&wd_data->dev);
775     queue_delayed_work(watchdog_wq, &wd_data->work, 0);
776     }
777    
778     @@ -979,7 +992,7 @@ static void watchdog_cdev_unregister(struct watchdog_device *wdd)
779     {
780     struct watchdog_core_data *wd_data = wdd->wd_data;
781    
782     - cdev_del(&wd_data->cdev);
783     + cdev_device_del(&wd_data->cdev, &wd_data->dev);
784     if (wdd->id == 0) {
785     misc_deregister(&watchdog_miscdev);
786     old_wd_data = NULL;
787     @@ -992,15 +1005,9 @@ static void watchdog_cdev_unregister(struct watchdog_device *wdd)
788    
789     cancel_delayed_work_sync(&wd_data->work);
790    
791     - kref_put(&wd_data->kref, watchdog_core_data_release);
792     + put_device(&wd_data->dev);
793     }
794    
795     -static struct class watchdog_class = {
796     - .name = "watchdog",
797     - .owner = THIS_MODULE,
798     - .dev_groups = wdt_groups,
799     -};
800     -
801     /*
802     * watchdog_dev_register: register a watchdog device
803     * @wdd: watchdog device
804     @@ -1012,27 +1019,14 @@ static struct class watchdog_class = {
805    
806     int watchdog_dev_register(struct watchdog_device *wdd)
807     {
808     - struct device *dev;
809     - dev_t devno;
810     int ret;
811    
812     - devno = MKDEV(MAJOR(watchdog_devt), wdd->id);
813     -
814     - ret = watchdog_cdev_register(wdd, devno);
815     + ret = watchdog_cdev_register(wdd);
816     if (ret)
817     return ret;
818    
819     - dev = device_create_with_groups(&watchdog_class, wdd->parent,
820     - devno, wdd, wdd->groups,
821     - "watchdog%d", wdd->id);
822     - if (IS_ERR(dev)) {
823     - watchdog_cdev_unregister(wdd);
824     - return PTR_ERR(dev);
825     - }
826     -
827     ret = watchdog_register_pretimeout(wdd);
828     if (ret) {
829     - device_destroy(&watchdog_class, devno);
830     watchdog_cdev_unregister(wdd);
831     }
832    
833     @@ -1050,7 +1044,6 @@ int watchdog_dev_register(struct watchdog_device *wdd)
834     void watchdog_dev_unregister(struct watchdog_device *wdd)
835     {
836     watchdog_unregister_pretimeout(wdd);
837     - device_destroy(&watchdog_class, wdd->wd_data->cdev.dev);
838     watchdog_cdev_unregister(wdd);
839     }
840    
841     diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
842     index 617e9ae67f50..e11aacb35d6b 100644
843     --- a/fs/ceph/caps.c
844     +++ b/fs/ceph/caps.c
845     @@ -3394,6 +3394,7 @@ retry:
846     WARN_ON(1);
847     tsession = NULL;
848     target = -1;
849     + mutex_lock(&session->s_mutex);
850     }
851     goto retry;
852    
853     diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
854     index c2ef617d2f97..c875f246cb0e 100644
855     --- a/fs/configfs/dir.c
856     +++ b/fs/configfs/dir.c
857     @@ -1537,6 +1537,7 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
858     spin_lock(&configfs_dirent_lock);
859     configfs_detach_rollback(dentry);
860     spin_unlock(&configfs_dirent_lock);
861     + config_item_put(parent_item);
862     return -EINTR;
863     }
864     frag->frag_dead = true;
865     diff --git a/fs/file.c b/fs/file.c
866     index 09aac4d4729b..82d3f925bab3 100644
867     --- a/fs/file.c
868     +++ b/fs/file.c
869     @@ -89,7 +89,7 @@ static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
870     */
871     static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
872     {
873     - unsigned int cpy, set;
874     + size_t cpy, set;
875    
876     BUG_ON(nfdt->max_fds < ofdt->max_fds);
877    
878     diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
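
The fs/file.c hunk above widens the local byte counters in copy_fdtable() from unsigned int to size_t; presumably this keeps max_fds * sizeof(struct file *) from wrapping once the descriptor table gets very large. A small standalone illustration of the wrap (plain C, LP64 assumed; the descriptor count is made up but is within what fs.nr_open allows):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	unsigned int max_fds = 600u * 1024 * 1024;   /* ~629 million descriptors */

	unsigned int cpy32 = max_fds * (unsigned int)sizeof(void *); /* wraps past 4 GiB */
	size_t cpy64 = (size_t)max_fds * sizeof(void *);             /* correct byte count */

	printf("unsigned int byte count: %u\n", cpy32);
	printf("size_t byte count:       %zu\n", cpy64);
	return 0;
}
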
879     index adc1a97cfe96..efd44d5645d8 100644
880     --- a/fs/gfs2/glock.c
881     +++ b/fs/gfs2/glock.c
882     @@ -548,9 +548,6 @@ __acquires(&gl->gl_lockref.lock)
883     goto out_unlock;
884     if (nonblock)
885     goto out_sched;
886     - smp_mb();
887     - if (atomic_read(&gl->gl_revokes) != 0)
888     - goto out_sched;
889     set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
890     GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
891     gl->gl_target = gl->gl_demote_state;
892     diff --git a/include/linux/net.h b/include/linux/net.h
893     index cd0c8bd0a1de..54270c4707cf 100644
894     --- a/include/linux/net.h
895     +++ b/include/linux/net.h
896     @@ -298,6 +298,9 @@ int kernel_sendpage(struct socket *sock, struct page *page, int offset,
897     int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg);
898     int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how);
899    
900     +/* Routine returns the IP overhead imposed by a (caller-protected) socket. */
901     +u32 kernel_sock_ip_overhead(struct sock *sk);
902     +
903     #define MODULE_ALIAS_NETPROTO(proto) \
904     MODULE_ALIAS("net-pf-" __stringify(proto))
905    
906     diff --git a/include/linux/padata.h b/include/linux/padata.h
907     index 0f9e567d5e15..3afa17ed59da 100644
908     --- a/include/linux/padata.h
909     +++ b/include/linux/padata.h
910     @@ -24,7 +24,6 @@
911     #include <linux/workqueue.h>
912     #include <linux/spinlock.h>
913     #include <linux/list.h>
914     -#include <linux/timer.h>
915     #include <linux/notifier.h>
916     #include <linux/kobject.h>
917    
918     @@ -37,6 +36,7 @@
919     * @list: List entry, to attach to the padata lists.
920     * @pd: Pointer to the internal control structure.
921     * @cb_cpu: Callback cpu for serializatioon.
922     + * @cpu: Cpu for parallelization.
923     * @seq_nr: Sequence number of the parallelized data object.
924     * @info: Used to pass information from the parallel to the serial function.
925     * @parallel: Parallel execution function.
926     @@ -46,6 +46,7 @@ struct padata_priv {
927     struct list_head list;
928     struct parallel_data *pd;
929     int cb_cpu;
930     + int cpu;
931     int info;
932     void (*parallel)(struct padata_priv *padata);
933     void (*serial)(struct padata_priv *padata);
934     @@ -83,7 +84,6 @@ struct padata_serial_queue {
935     * @serial: List to wait for serialization after reordering.
936     * @pwork: work struct for parallelization.
937     * @swork: work struct for serialization.
938     - * @pd: Backpointer to the internal control structure.
939     * @work: work struct for parallelization.
940     * @num_obj: Number of objects that are processed by this cpu.
941     * @cpu_index: Index of the cpu.
942     @@ -91,7 +91,6 @@ struct padata_serial_queue {
943     struct padata_parallel_queue {
944     struct padata_list parallel;
945     struct padata_list reorder;
946     - struct parallel_data *pd;
947     struct work_struct work;
948     atomic_t num_obj;
949     int cpu_index;
950     @@ -118,10 +117,10 @@ struct padata_cpumask {
951     * @reorder_objects: Number of objects waiting in the reorder queues.
952     * @refcnt: Number of objects holding a reference on this parallel_data.
953     * @max_seq_nr: Maximal used sequence number.
954     + * @cpu: Next CPU to be processed.
955     * @cpumask: The cpumasks in use for parallel and serial workers.
956     + * @reorder_work: work struct for reordering.
957     * @lock: Reorder lock.
958     - * @processed: Number of already processed objects.
959     - * @timer: Reorder timer.
960     */
961     struct parallel_data {
962     struct padata_instance *pinst;
963     @@ -130,10 +129,10 @@ struct parallel_data {
964     atomic_t reorder_objects;
965     atomic_t refcnt;
966     atomic_t seq_nr;
967     + int cpu;
968     struct padata_cpumask cpumask;
969     + struct work_struct reorder_work;
970     spinlock_t lock ____cacheline_aligned;
971     - unsigned int processed;
972     - struct timer_list timer;
973     };
974    
975     /**
976     diff --git a/include/uapi/linux/if_pppol2tp.h b/include/uapi/linux/if_pppol2tp.h
977     index 4bd1f55d6377..6418c4d10241 100644
978     --- a/include/uapi/linux/if_pppol2tp.h
979     +++ b/include/uapi/linux/if_pppol2tp.h
980     @@ -18,6 +18,7 @@
981     #include <linux/types.h>
982     #include <linux/in.h>
983     #include <linux/in6.h>
984     +#include <linux/l2tp.h>
985    
986     /* Structure used to connect() the socket to a particular tunnel UDP
987     * socket over IPv4.
988     @@ -90,14 +91,12 @@ enum {
989     PPPOL2TP_SO_REORDERTO = 5,
990     };
991    
992     -/* Debug message categories for the DEBUG socket option */
993     +/* Debug message categories for the DEBUG socket option (deprecated) */
994     enum {
995     - PPPOL2TP_MSG_DEBUG = (1 << 0), /* verbose debug (if
996     - * compiled in) */
997     - PPPOL2TP_MSG_CONTROL = (1 << 1), /* userspace - kernel
998     - * interface */
999     - PPPOL2TP_MSG_SEQ = (1 << 2), /* sequence numbers */
1000     - PPPOL2TP_MSG_DATA = (1 << 3), /* data packets */
1001     + PPPOL2TP_MSG_DEBUG = L2TP_MSG_DEBUG,
1002     + PPPOL2TP_MSG_CONTROL = L2TP_MSG_CONTROL,
1003     + PPPOL2TP_MSG_SEQ = L2TP_MSG_SEQ,
1004     + PPPOL2TP_MSG_DATA = L2TP_MSG_DATA,
1005     };
1006    
1007    
1008     diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h
1009     index 4bd27d0270a2..bb2d62037037 100644
1010     --- a/include/uapi/linux/l2tp.h
1011     +++ b/include/uapi/linux/l2tp.h
1012     @@ -108,7 +108,7 @@ enum {
1013     L2TP_ATTR_VLAN_ID, /* u16 */
1014     L2TP_ATTR_COOKIE, /* 0, 4 or 8 bytes */
1015     L2TP_ATTR_PEER_COOKIE, /* 0, 4 or 8 bytes */
1016     - L2TP_ATTR_DEBUG, /* u32 */
1017     + L2TP_ATTR_DEBUG, /* u32, enum l2tp_debug_flags */
1018     L2TP_ATTR_RECV_SEQ, /* u8 */
1019     L2TP_ATTR_SEND_SEQ, /* u8 */
1020     L2TP_ATTR_LNS_MODE, /* u8 */
1021     @@ -175,6 +175,21 @@ enum l2tp_seqmode {
1022     L2TP_SEQ_ALL = 2,
1023     };
1024    
1025     +/**
1026     + * enum l2tp_debug_flags - debug message categories for L2TP tunnels/sessions
1027     + *
1028     + * @L2TP_MSG_DEBUG: verbose debug (if compiled in)
1029     + * @L2TP_MSG_CONTROL: userspace - kernel interface
1030     + * @L2TP_MSG_SEQ: sequence numbers
1031     + * @L2TP_MSG_DATA: data packets
1032     + */
1033     +enum l2tp_debug_flags {
1034     + L2TP_MSG_DEBUG = (1 << 0),
1035     + L2TP_MSG_CONTROL = (1 << 1),
1036     + L2TP_MSG_SEQ = (1 << 2),
1037     + L2TP_MSG_DATA = (1 << 3),
1038     +};
1039     +
1040     /*
1041     * NETLINK_GENERIC related info
1042     */
1043     diff --git a/kernel/padata.c b/kernel/padata.c
1044     index 6939111b3cbe..e82f066d63ac 100644
1045     --- a/kernel/padata.c
1046     +++ b/kernel/padata.c
1047     @@ -66,15 +66,11 @@ static int padata_cpu_hash(struct parallel_data *pd)
1048     static void padata_parallel_worker(struct work_struct *parallel_work)
1049     {
1050     struct padata_parallel_queue *pqueue;
1051     - struct parallel_data *pd;
1052     - struct padata_instance *pinst;
1053     LIST_HEAD(local_list);
1054    
1055     local_bh_disable();
1056     pqueue = container_of(parallel_work,
1057     struct padata_parallel_queue, work);
1058     - pd = pqueue->pd;
1059     - pinst = pd->pinst;
1060    
1061     spin_lock(&pqueue->parallel.lock);
1062     list_replace_init(&pqueue->parallel.list, &local_list);
1063     @@ -137,6 +133,7 @@ int padata_do_parallel(struct padata_instance *pinst,
1064     padata->cb_cpu = cb_cpu;
1065    
1066     target_cpu = padata_cpu_hash(pd);
1067     + padata->cpu = target_cpu;
1068     queue = per_cpu_ptr(pd->pqueue, target_cpu);
1069    
1070     spin_lock(&queue->parallel.lock);
1071     @@ -160,8 +157,6 @@ EXPORT_SYMBOL(padata_do_parallel);
1072     * A pointer to the control struct of the next object that needs
1073     * serialization, if present in one of the percpu reorder queues.
1074     *
1075     - * NULL, if all percpu reorder queues are empty.
1076     - *
1077     * -EINPROGRESS, if the next object that needs serialization will
1078     * be parallel processed by another cpu and is not yet present in
1079     * the cpu's reorder queue.
1080     @@ -171,25 +166,12 @@ EXPORT_SYMBOL(padata_do_parallel);
1081     */
1082     static struct padata_priv *padata_get_next(struct parallel_data *pd)
1083     {
1084     - int cpu, num_cpus;
1085     - unsigned int next_nr, next_index;
1086     struct padata_parallel_queue *next_queue;
1087     struct padata_priv *padata;
1088     struct padata_list *reorder;
1089     + int cpu = pd->cpu;
1090    
1091     - num_cpus = cpumask_weight(pd->cpumask.pcpu);
1092     -
1093     - /*
1094     - * Calculate the percpu reorder queue and the sequence
1095     - * number of the next object.
1096     - */
1097     - next_nr = pd->processed;
1098     - next_index = next_nr % num_cpus;
1099     - cpu = padata_index_to_cpu(pd, next_index);
1100     next_queue = per_cpu_ptr(pd->pqueue, cpu);
1101     -
1102     - padata = NULL;
1103     -
1104     reorder = &next_queue->reorder;
1105    
1106     spin_lock(&reorder->lock);
1107     @@ -200,7 +182,8 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd)
1108     list_del_init(&padata->list);
1109     atomic_dec(&pd->reorder_objects);
1110    
1111     - pd->processed++;
1112     + pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1,
1113     + false);
1114    
1115     spin_unlock(&reorder->lock);
1116     goto out;
1117     @@ -223,6 +206,7 @@ static void padata_reorder(struct parallel_data *pd)
1118     struct padata_priv *padata;
1119     struct padata_serial_queue *squeue;
1120     struct padata_instance *pinst = pd->pinst;
1121     + struct padata_parallel_queue *next_queue;
1122    
1123     /*
1124     * We need to ensure that only one cpu can work on dequeueing of
1125     @@ -241,12 +225,11 @@ static void padata_reorder(struct parallel_data *pd)
1126     padata = padata_get_next(pd);
1127    
1128     /*
1129     - * All reorder queues are empty, or the next object that needs
1130     - * serialization is parallel processed by another cpu and is
1131     - * still on it's way to the cpu's reorder queue, nothing to
1132     - * do for now.
1133     + * If the next object that needs serialization is parallel
1134     + * processed by another cpu and is still on it's way to the
1135     + * cpu's reorder queue, nothing to do for now.
1136     */
1137     - if (!padata || PTR_ERR(padata) == -EINPROGRESS)
1138     + if (PTR_ERR(padata) == -EINPROGRESS)
1139     break;
1140    
1141     /*
1142     @@ -255,7 +238,6 @@ static void padata_reorder(struct parallel_data *pd)
1143     * so exit immediately.
1144     */
1145     if (PTR_ERR(padata) == -ENODATA) {
1146     - del_timer(&pd->timer);
1147     spin_unlock_bh(&pd->lock);
1148     return;
1149     }
1150     @@ -274,28 +256,27 @@ static void padata_reorder(struct parallel_data *pd)
1151    
1152     /*
1153     * The next object that needs serialization might have arrived to
1154     - * the reorder queues in the meantime, we will be called again
1155     - * from the timer function if no one else cares for it.
1156     + * the reorder queues in the meantime.
1157     *
1158     - * Ensure reorder_objects is read after pd->lock is dropped so we see
1159     - * an increment from another task in padata_do_serial. Pairs with
1160     + * Ensure reorder queue is read after pd->lock is dropped so we see
1161     + * new objects from another task in padata_do_serial. Pairs with
1162     * smp_mb__after_atomic in padata_do_serial.
1163     */
1164     smp_mb();
1165     - if (atomic_read(&pd->reorder_objects)
1166     - && !(pinst->flags & PADATA_RESET))
1167     - mod_timer(&pd->timer, jiffies + HZ);
1168     - else
1169     - del_timer(&pd->timer);
1170    
1171     - return;
1172     + next_queue = per_cpu_ptr(pd->pqueue, pd->cpu);
1173     + if (!list_empty(&next_queue->reorder.list))
1174     + queue_work(pinst->wq, &pd->reorder_work);
1175     }
1176    
1177     -static void padata_reorder_timer(unsigned long arg)
1178     +static void invoke_padata_reorder(struct work_struct *work)
1179     {
1180     - struct parallel_data *pd = (struct parallel_data *)arg;
1181     + struct parallel_data *pd;
1182    
1183     + local_bh_disable();
1184     + pd = container_of(work, struct parallel_data, reorder_work);
1185     padata_reorder(pd);
1186     + local_bh_enable();
1187     }
1188    
1189     static void padata_serial_worker(struct work_struct *serial_work)
1190     @@ -342,29 +323,22 @@ static void padata_serial_worker(struct work_struct *serial_work)
1191     */
1192     void padata_do_serial(struct padata_priv *padata)
1193     {
1194     - int cpu;
1195     - struct padata_parallel_queue *pqueue;
1196     - struct parallel_data *pd;
1197     -
1198     - pd = padata->pd;
1199     -
1200     - cpu = get_cpu();
1201     - pqueue = per_cpu_ptr(pd->pqueue, cpu);
1202     + struct parallel_data *pd = padata->pd;
1203     + struct padata_parallel_queue *pqueue = per_cpu_ptr(pd->pqueue,
1204     + padata->cpu);
1205    
1206     spin_lock(&pqueue->reorder.lock);
1207     - atomic_inc(&pd->reorder_objects);
1208     list_add_tail(&padata->list, &pqueue->reorder.list);
1209     + atomic_inc(&pd->reorder_objects);
1210     spin_unlock(&pqueue->reorder.lock);
1211    
1212     /*
1213     - * Ensure the atomic_inc of reorder_objects above is ordered correctly
1214     + * Ensure the addition to the reorder list is ordered correctly
1215     * with the trylock of pd->lock in padata_reorder. Pairs with smp_mb
1216     * in padata_reorder.
1217     */
1218     smp_mb__after_atomic();
1219    
1220     - put_cpu();
1221     -
1222     padata_reorder(pd);
1223     }
1224     EXPORT_SYMBOL(padata_do_serial);
1225     @@ -413,9 +387,14 @@ static void padata_init_pqueues(struct parallel_data *pd)
1226     struct padata_parallel_queue *pqueue;
1227    
1228     cpu_index = 0;
1229     - for_each_cpu(cpu, pd->cpumask.pcpu) {
1230     + for_each_possible_cpu(cpu) {
1231     pqueue = per_cpu_ptr(pd->pqueue, cpu);
1232     - pqueue->pd = pd;
1233     +
1234     + if (!cpumask_test_cpu(cpu, pd->cpumask.pcpu)) {
1235     + pqueue->cpu_index = -1;
1236     + continue;
1237     + }
1238     +
1239     pqueue->cpu_index = cpu_index;
1240     cpu_index++;
1241    
1242     @@ -449,12 +428,13 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
1243    
1244     padata_init_pqueues(pd);
1245     padata_init_squeues(pd);
1246     - setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
1247     atomic_set(&pd->seq_nr, -1);
1248     atomic_set(&pd->reorder_objects, 0);
1249     atomic_set(&pd->refcnt, 1);
1250     pd->pinst = pinst;
1251     spin_lock_init(&pd->lock);
1252     + pd->cpu = cpumask_first(pd->cpumask.pcpu);
1253     + INIT_WORK(&pd->reorder_work, invoke_padata_reorder);
1254    
1255     return pd;
1256    
1257     diff --git a/lib/Makefile b/lib/Makefile
1258     index 452d2956a5a2..7a55c5205281 100644
1259     --- a/lib/Makefile
1260     +++ b/lib/Makefile
1261     @@ -230,5 +230,7 @@ obj-$(CONFIG_UCS2_STRING) += ucs2_string.o
1262     obj-$(CONFIG_UBSAN) += ubsan.o
1263    
1264     UBSAN_SANITIZE_ubsan.o := n
1265     +KASAN_SANITIZE_ubsan.o := n
1266     +CFLAGS_ubsan.o := $(call cc-option, -fno-stack-protector) $(DISABLE_STACKLEAK_PLUGIN)
1267    
1268     obj-$(CONFIG_SBITMAP) += sbitmap.o
1269     diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
1270     index 7c3da29fad8e..36c7f616294a 100644
1271     --- a/net/l2tp/l2tp_core.c
1272     +++ b/net/l2tp/l2tp_core.c
1273     @@ -112,53 +112,19 @@ struct l2tp_net {
1274     spinlock_t l2tp_session_hlist_lock;
1275     };
1276    
1277     -static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
1278    
1279     static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
1280     {
1281     return sk->sk_user_data;
1282     }
1283    
1284     -static inline struct l2tp_net *l2tp_pernet(struct net *net)
1285     +static inline struct l2tp_net *l2tp_pernet(const struct net *net)
1286     {
1287     BUG_ON(!net);
1288    
1289     return net_generic(net, l2tp_net_id);
1290     }
1291    
1292     -/* Tunnel reference counts. Incremented per session that is added to
1293     - * the tunnel.
1294     - */
1295     -static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel)
1296     -{
1297     - atomic_inc(&tunnel->ref_count);
1298     -}
1299     -
1300     -static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
1301     -{
1302     - if (atomic_dec_and_test(&tunnel->ref_count))
1303     - l2tp_tunnel_free(tunnel);
1304     -}
1305     -#ifdef L2TP_REFCNT_DEBUG
1306     -#define l2tp_tunnel_inc_refcount(_t) \
1307     -do { \
1308     - pr_debug("l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", \
1309     - __func__, __LINE__, (_t)->name, \
1310     - atomic_read(&_t->ref_count)); \
1311     - l2tp_tunnel_inc_refcount_1(_t); \
1312     -} while (0)
1313     -#define l2tp_tunnel_dec_refcount(_t) \
1314     -do { \
1315     - pr_debug("l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", \
1316     - __func__, __LINE__, (_t)->name, \
1317     - atomic_read(&_t->ref_count)); \
1318     - l2tp_tunnel_dec_refcount_1(_t); \
1319     -} while (0)
1320     -#else
1321     -#define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t)
1322     -#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
1323     -#endif
1324     -
1325     /* Session hash global list for L2TPv3.
1326     * The session_id SHOULD be random according to RFC3931, but several
1327     * L2TP implementations use incrementing session_ids. So we do a real
1328     @@ -216,27 +182,6 @@ static void l2tp_tunnel_sock_put(struct sock *sk)
1329     sock_put(sk);
1330     }
1331    
1332     -/* Lookup a session by id in the global session list
1333     - */
1334     -static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id)
1335     -{
1336     - struct l2tp_net *pn = l2tp_pernet(net);
1337     - struct hlist_head *session_list =
1338     - l2tp_session_id_hash_2(pn, session_id);
1339     - struct l2tp_session *session;
1340     -
1341     - rcu_read_lock_bh();
1342     - hlist_for_each_entry_rcu(session, session_list, global_hlist) {
1343     - if (session->session_id == session_id) {
1344     - rcu_read_unlock_bh();
1345     - return session;
1346     - }
1347     - }
1348     - rcu_read_unlock_bh();
1349     -
1350     - return NULL;
1351     -}
1352     -
1353     /* Session hash list.
1354     * The session_id SHOULD be random according to RFC2661, but several
1355     * L2TP implementations (Cisco and Microsoft) use incrementing
1356     @@ -249,38 +194,31 @@ l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
1357     return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
1358     }
1359    
1360     -/* Lookup a session by id
1361     - */
1362     -struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id)
1363     +/* Lookup a tunnel. A new reference is held on the returned tunnel. */
1364     +struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
1365     {
1366     - struct hlist_head *session_list;
1367     - struct l2tp_session *session;
1368     + const struct l2tp_net *pn = l2tp_pernet(net);
1369     + struct l2tp_tunnel *tunnel;
1370    
1371     - /* In L2TPv3, session_ids are unique over all tunnels and we
1372     - * sometimes need to look them up before we know the
1373     - * tunnel.
1374     - */
1375     - if (tunnel == NULL)
1376     - return l2tp_session_find_2(net, session_id);
1377     + rcu_read_lock_bh();
1378     + list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
1379     + if (tunnel->tunnel_id == tunnel_id) {
1380     + l2tp_tunnel_inc_refcount(tunnel);
1381     + rcu_read_unlock_bh();
1382    
1383     - session_list = l2tp_session_id_hash(tunnel, session_id);
1384     - read_lock_bh(&tunnel->hlist_lock);
1385     - hlist_for_each_entry(session, session_list, hlist) {
1386     - if (session->session_id == session_id) {
1387     - read_unlock_bh(&tunnel->hlist_lock);
1388     - return session;
1389     + return tunnel;
1390     }
1391     }
1392     - read_unlock_bh(&tunnel->hlist_lock);
1393     + rcu_read_unlock_bh();
1394    
1395     return NULL;
1396     }
1397     -EXPORT_SYMBOL_GPL(l2tp_session_find);
1398     +EXPORT_SYMBOL_GPL(l2tp_tunnel_get);
1399    
1400     -/* Like l2tp_session_find() but takes a reference on the returned session.
1401     +/* Lookup a session. A new reference is held on the returned session.
1402     * Optionally calls session->ref() too if do_ref is true.
1403     */
1404     -struct l2tp_session *l2tp_session_get(struct net *net,
1405     +struct l2tp_session *l2tp_session_get(const struct net *net,
1406     struct l2tp_tunnel *tunnel,
1407     u32 session_id, bool do_ref)
1408     {
1409     @@ -355,7 +293,8 @@ EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
1410     /* Lookup a session by interface name.
1411     * This is very inefficient but is only used by management interfaces.
1412     */
1413     -struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname,
1414     +struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
1415     + const char *ifname,
1416     bool do_ref)
1417     {
1418     struct l2tp_net *pn = l2tp_pernet(net);
1419     @@ -382,20 +321,28 @@ struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname,
1420     }
1421     EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname);
1422    
1423     -static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel,
1424     - struct l2tp_session *session)
1425     +int l2tp_session_register(struct l2tp_session *session,
1426     + struct l2tp_tunnel *tunnel)
1427     {
1428     struct l2tp_session *session_walk;
1429     struct hlist_head *g_head;
1430     struct hlist_head *head;
1431     struct l2tp_net *pn;
1432     + int err;
1433    
1434     head = l2tp_session_id_hash(tunnel, session->session_id);
1435    
1436     write_lock_bh(&tunnel->hlist_lock);
1437     + if (!tunnel->acpt_newsess) {
1438     + err = -ENODEV;
1439     + goto err_tlock;
1440     + }
1441     +
1442     hlist_for_each_entry(session_walk, head, hlist)
1443     - if (session_walk->session_id == session->session_id)
1444     - goto exist;
1445     + if (session_walk->session_id == session->session_id) {
1446     + err = -EEXIST;
1447     + goto err_tlock;
1448     + }
1449    
1450     if (tunnel->version == L2TP_HDR_VER_3) {
1451     pn = l2tp_pernet(tunnel->l2tp_net);
1452     @@ -403,30 +350,44 @@ static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel,
1453     session->session_id);
1454    
1455     spin_lock_bh(&pn->l2tp_session_hlist_lock);
1456     +
1457     hlist_for_each_entry(session_walk, g_head, global_hlist)
1458     - if (session_walk->session_id == session->session_id)
1459     - goto exist_glob;
1460     + if (session_walk->session_id == session->session_id) {
1461     + err = -EEXIST;
1462     + goto err_tlock_pnlock;
1463     + }
1464    
1465     + l2tp_tunnel_inc_refcount(tunnel);
1466     + sock_hold(tunnel->sock);
1467     hlist_add_head_rcu(&session->global_hlist, g_head);
1468     +
1469     spin_unlock_bh(&pn->l2tp_session_hlist_lock);
1470     + } else {
1471     + l2tp_tunnel_inc_refcount(tunnel);
1472     + sock_hold(tunnel->sock);
1473     }
1474    
1475     hlist_add_head(&session->hlist, head);
1476     write_unlock_bh(&tunnel->hlist_lock);
1477    
1478     + /* Ignore management session in session count value */
1479     + if (session->session_id != 0)
1480     + atomic_inc(&l2tp_session_count);
1481     +
1482     return 0;
1483    
1484     -exist_glob:
1485     +err_tlock_pnlock:
1486     spin_unlock_bh(&pn->l2tp_session_hlist_lock);
1487     -exist:
1488     +err_tlock:
1489     write_unlock_bh(&tunnel->hlist_lock);
1490    
1491     - return -EEXIST;
1492     + return err;
1493     }
1494     +EXPORT_SYMBOL_GPL(l2tp_session_register);
1495    
1496     /* Lookup a tunnel by id
1497     */
1498     -struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id)
1499     +struct l2tp_tunnel *l2tp_tunnel_find(const struct net *net, u32 tunnel_id)
1500     {
1501     struct l2tp_tunnel *tunnel;
1502     struct l2tp_net *pn = l2tp_pernet(net);
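
l2tp_session_register() above adds a gate that the old l2tp_session_add_to_tunnel() did not have: once l2tp_tunnel_closeall() has cleared tunnel->acpt_newsess under hlist_lock, no new session can slip into the hash lists behind the teardown. Stripped to the bare pattern, with hypothetical parent/child types standing in for tunnel/session:

/* Illustrative sketch, not the patch code: publish a child only while
 * the parent still accepts new ones, all under a single lock.
 */
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/atomic.h>

struct parent {
	rwlock_t lock;
	bool accepting;			/* cf. tunnel->acpt_newsess */
	atomic_t refcnt;
	struct hlist_head children;
};

struct child {
	u32 id;
	struct hlist_node node;
};

static int child_register(struct parent *p, struct child *c)
{
	struct child *walk;
	int err = 0;

	write_lock_bh(&p->lock);
	if (!p->accepting) {		/* teardown already started */
		err = -ENODEV;
		goto out;
	}
	hlist_for_each_entry(walk, &p->children, node) {
		if (walk->id == c->id) {
			err = -EEXIST;	/* duplicate id */
			goto out;
		}
	}
	atomic_inc(&p->refcnt);		/* child keeps the parent alive */
	hlist_add_head(&c->node, &p->children);
out:
	write_unlock_bh(&p->lock);
	return err;
}

Because the flag is flipped under the same lock that protects the lists, the close path can walk and delete sessions knowing none will be added behind its back.
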
1503     @@ -444,7 +405,7 @@ struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id)
1504     }
1505     EXPORT_SYMBOL_GPL(l2tp_tunnel_find);
1506    
1507     -struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth)
1508     +struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth)
1509     {
1510     struct l2tp_net *pn = l2tp_pernet(net);
1511     struct l2tp_tunnel *tunnel;
1512     @@ -1307,7 +1268,6 @@ static void l2tp_tunnel_destruct(struct sock *sk)
1513     /* Remove hooks into tunnel socket */
1514     sk->sk_destruct = tunnel->old_sk_destruct;
1515     sk->sk_user_data = NULL;
1516     - tunnel->sock = NULL;
1517    
1518     /* Remove the tunnel struct from the tunnel list */
1519     pn = l2tp_pernet(tunnel->l2tp_net);
1520     @@ -1317,6 +1277,8 @@ static void l2tp_tunnel_destruct(struct sock *sk)
1521     atomic_dec(&l2tp_tunnel_count);
1522    
1523     l2tp_tunnel_closeall(tunnel);
1524     +
1525     + tunnel->sock = NULL;
1526     l2tp_tunnel_dec_refcount(tunnel);
1527    
1528     /* Call the original destructor */
1529     @@ -1341,6 +1303,7 @@ void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
1530     tunnel->name);
1531    
1532     write_lock_bh(&tunnel->hlist_lock);
1533     + tunnel->acpt_newsess = false;
1534     for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
1535     again:
1536     hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
1537     @@ -1394,17 +1357,6 @@ static void l2tp_udp_encap_destroy(struct sock *sk)
1538     }
1539     }
1540    
1541     -/* Really kill the tunnel.
1542     - * Come here only when all sessions have been cleared from the tunnel.
1543     - */
1544     -static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
1545     -{
1546     - BUG_ON(atomic_read(&tunnel->ref_count) != 0);
1547     - BUG_ON(tunnel->sock != NULL);
1548     - l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: free...\n", tunnel->name);
1549     - kfree_rcu(tunnel, rcu);
1550     -}
1551     -
1552     /* Workqueue tunnel deletion function */
1553     static void l2tp_tunnel_del_work(struct work_struct *work)
1554     {
1555     @@ -1655,6 +1607,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1556     tunnel->magic = L2TP_TUNNEL_MAGIC;
1557     sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
1558     rwlock_init(&tunnel->hlist_lock);
1559     + tunnel->acpt_newsess = true;
1560    
1561     /* The net we belong to */
1562     tunnel->l2tp_net = net;
1563     @@ -1840,7 +1793,6 @@ EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
1564     struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
1565     {
1566     struct l2tp_session *session;
1567     - int err;
1568    
1569     session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL);
1570     if (session != NULL) {
1571     @@ -1895,25 +1847,7 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
1572    
1573     l2tp_session_set_header_len(session, tunnel->version);
1574    
1575     - err = l2tp_session_add_to_tunnel(tunnel, session);
1576     - if (err) {
1577     - kfree(session);
1578     -
1579     - return ERR_PTR(err);
1580     - }
1581     -
1582     - /* Bump the reference count. The session context is deleted
1583     - * only when this drops to zero.
1584     - */
1585     l2tp_session_inc_refcount(session);
1586     - l2tp_tunnel_inc_refcount(tunnel);
1587     -
1588     - /* Ensure tunnel socket isn't deleted */
1589     - sock_hold(tunnel->sock);
1590     -
1591     - /* Ignore management session in session count value */
1592     - if (session->session_id != 0)
1593     - atomic_inc(&l2tp_session_count);
1594    
1595     return session;
1596     }
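
Note the knock-on effect of the last hunk: l2tp_session_create() now only allocates and initialises, and no longer hashes the session or takes tunnel references. Each pseudowire driver is expected to register the session itself once its private data is ready, and to free it if registration fails, which is what the l2tp_eth and l2tp_ppp hunks further down do. A hedged caller-side sketch (my_priv and my_priv_init() are hypothetical driver details):

/* Illustrative caller flow after this patch; error handling trimmed. */
session = l2tp_session_create(sizeof(struct my_priv), tunnel,
			      session_id, peer_session_id, cfg);
if (IS_ERR(session))
	return PTR_ERR(session);

my_priv_init(l2tp_session_priv(session));	/* driver-specific setup */

err = l2tp_session_register(session, tunnel);
if (err < 0) {
	/* Never hashed, so nothing else can hold it yet:
	 * a plain kfree() is enough, as the drivers below do.
	 */
	kfree(session);
	return err;
}
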
1597     diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
1598     index 7c2037184b6c..2b9b6fb67ae9 100644
1599     --- a/net/l2tp/l2tp_core.h
1600     +++ b/net/l2tp/l2tp_core.h
1601     @@ -23,16 +23,6 @@
1602     #define L2TP_HASH_BITS_2 8
1603     #define L2TP_HASH_SIZE_2 (1 << L2TP_HASH_BITS_2)
1604    
1605     -/* Debug message categories for the DEBUG socket option */
1606     -enum {
1607     - L2TP_MSG_DEBUG = (1 << 0), /* verbose debug (if
1608     - * compiled in) */
1609     - L2TP_MSG_CONTROL = (1 << 1), /* userspace - kernel
1610     - * interface */
1611     - L2TP_MSG_SEQ = (1 << 2), /* sequence numbers */
1612     - L2TP_MSG_DATA = (1 << 3), /* data packets */
1613     -};
1614     -
1615     struct sk_buff;
1616    
1617     struct l2tp_stats {
1618     @@ -172,6 +162,10 @@ struct l2tp_tunnel {
1619    
1620     struct rcu_head rcu;
1621     rwlock_t hlist_lock; /* protect session_hlist */
1622     + bool acpt_newsess; /* Indicates whether this
1623     + * tunnel accepts new sessions.
1624     + * Protected by hlist_lock.
1625     + */
1626     struct hlist_head session_hlist[L2TP_HASH_SIZE];
1627     /* hashed list of sessions,
1628     * hashed by id */
1629     @@ -207,7 +201,9 @@ struct l2tp_tunnel {
1630     };
1631    
1632     struct l2tp_nl_cmd_ops {
1633     - int (*session_create)(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg);
1634     + int (*session_create)(struct net *net, struct l2tp_tunnel *tunnel,
1635     + u32 session_id, u32 peer_session_id,
1636     + struct l2tp_session_cfg *cfg);
1637     int (*session_delete)(struct l2tp_session *session);
1638     };
1639    
1640     @@ -241,18 +237,18 @@ out:
1641     return tunnel;
1642     }
1643    
1644     -struct l2tp_session *l2tp_session_get(struct net *net,
1645     +struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id);
1646     +
1647     +struct l2tp_session *l2tp_session_get(const struct net *net,
1648     struct l2tp_tunnel *tunnel,
1649     u32 session_id, bool do_ref);
1650     -struct l2tp_session *l2tp_session_find(struct net *net,
1651     - struct l2tp_tunnel *tunnel,
1652     - u32 session_id);
1653     struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
1654     bool do_ref);
1655     -struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname,
1656     +struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
1657     + const char *ifname,
1658     bool do_ref);
1659     -struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
1660     -struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
1661     +struct l2tp_tunnel *l2tp_tunnel_find(const struct net *net, u32 tunnel_id);
1662     +struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth);
1663    
1664     int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id,
1665     u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg,
1666     @@ -263,6 +259,9 @@ struct l2tp_session *l2tp_session_create(int priv_size,
1667     struct l2tp_tunnel *tunnel,
1668     u32 session_id, u32 peer_session_id,
1669     struct l2tp_session_cfg *cfg);
1670     +int l2tp_session_register(struct l2tp_session *session,
1671     + struct l2tp_tunnel *tunnel);
1672     +
1673     void __l2tp_session_unhash(struct l2tp_session *session);
1674     int l2tp_session_delete(struct l2tp_session *session);
1675     void l2tp_session_free(struct l2tp_session *session);
1676     @@ -281,6 +280,17 @@ int l2tp_nl_register_ops(enum l2tp_pwtype pw_type,
1677     void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
1678     int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg);
1679    
1680     +static inline void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel)
1681     +{
1682     + atomic_inc(&tunnel->ref_count);
1683     +}
1684     +
1685     +static inline void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel)
1686     +{
1687     + if (atomic_dec_and_test(&tunnel->ref_count))
1688     + kfree_rcu(tunnel, rcu);
1689     +}
1690     +
1691     /* Session reference counts. Incremented when code obtains a reference
1692     * to a session.
1693     */
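
The two inline helpers added above are the put side of l2tp_tunnel_get(): the structure is only freed on the last l2tp_tunnel_dec_refcount(), and even then via kfree_rcu(), so RCU readers that found the tunnel just before it was unlinked can still finish safely. The same idiom in a self-contained form, with a hypothetical struct obj:

/* Illustrative hold/put pair mirroring the helpers added above. */
#include <linux/atomic.h>
#include <linux/rcupdate.h>

struct obj {
	atomic_t refcnt;
	struct rcu_head rcu;
};

static inline void obj_hold(struct obj *o)
{
	atomic_inc(&o->refcnt);
}

static inline void obj_put(struct obj *o)
{
	/* kfree_rcu() defers the real kfree() until every RCU reader
	 * that could still see the object has left its critical section.
	 */
	if (atomic_dec_and_test(&o->refcnt))
		kfree_rcu(o, rcu);
}
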
1694     diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
1695     index eecc64e138de..8b8fc2337960 100644
1696     --- a/net/l2tp/l2tp_eth.c
1697     +++ b/net/l2tp/l2tp_eth.c
1698     @@ -30,6 +30,9 @@
1699     #include <net/xfrm.h>
1700     #include <net/net_namespace.h>
1701     #include <net/netns/generic.h>
1702     +#include <linux/ip.h>
1703     +#include <linux/ipv6.h>
1704     +#include <linux/udp.h>
1705    
1706     #include "l2tp_core.h"
1707    
1708     @@ -41,7 +44,6 @@ struct l2tp_eth {
1709     struct net_device *dev;
1710     struct sock *tunnel_sock;
1711     struct l2tp_session *session;
1712     - struct list_head list;
1713     atomic_long_t tx_bytes;
1714     atomic_long_t tx_packets;
1715     atomic_long_t tx_dropped;
1716     @@ -52,20 +54,9 @@ struct l2tp_eth {
1717    
1718     /* via l2tp_session_priv() */
1719     struct l2tp_eth_sess {
1720     - struct net_device *dev;
1721     + struct net_device __rcu *dev;
1722     };
1723    
1724     -/* per-net private data for this module */
1725     -static unsigned int l2tp_eth_net_id;
1726     -struct l2tp_eth_net {
1727     - struct list_head l2tp_eth_dev_list;
1728     - spinlock_t l2tp_eth_lock;
1729     -};
1730     -
1731     -static inline struct l2tp_eth_net *l2tp_eth_pernet(struct net *net)
1732     -{
1733     - return net_generic(net, l2tp_eth_net_id);
1734     -}
1735    
1736     static int l2tp_eth_dev_init(struct net_device *dev)
1737     {
1738     @@ -82,12 +73,13 @@ static int l2tp_eth_dev_init(struct net_device *dev)
1739     static void l2tp_eth_dev_uninit(struct net_device *dev)
1740     {
1741     struct l2tp_eth *priv = netdev_priv(dev);
1742     - struct l2tp_eth_net *pn = l2tp_eth_pernet(dev_net(dev));
1743     + struct l2tp_eth_sess *spriv;
1744    
1745     - spin_lock(&pn->l2tp_eth_lock);
1746     - list_del_init(&priv->list);
1747     - spin_unlock(&pn->l2tp_eth_lock);
1748     - dev_put(dev);
1749     + spriv = l2tp_session_priv(priv->session);
1750     + RCU_INIT_POINTER(spriv->dev, NULL);
1751     + /* No need for synchronize_net() here. We're called by
1752     + * unregister_netdev*(), which does the synchronisation for us.
1753     + */
1754     }
1755    
1756     static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
1757     @@ -141,8 +133,8 @@ static void l2tp_eth_dev_setup(struct net_device *dev)
1758     static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len)
1759     {
1760     struct l2tp_eth_sess *spriv = l2tp_session_priv(session);
1761     - struct net_device *dev = spriv->dev;
1762     - struct l2tp_eth *priv = netdev_priv(dev);
1763     + struct net_device *dev;
1764     + struct l2tp_eth *priv;
1765    
1766     if (session->debug & L2TP_MSG_DATA) {
1767     unsigned int length;
1768     @@ -166,16 +158,25 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
1769     skb_dst_drop(skb);
1770     nf_reset(skb);
1771    
1772     + rcu_read_lock();
1773     + dev = rcu_dereference(spriv->dev);
1774     + if (!dev)
1775     + goto error_rcu;
1776     +
1777     + priv = netdev_priv(dev);
1778     if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
1779     atomic_long_inc(&priv->rx_packets);
1780     atomic_long_add(data_len, &priv->rx_bytes);
1781     } else {
1782     atomic_long_inc(&priv->rx_errors);
1783     }
1784     + rcu_read_unlock();
1785     +
1786     return;
1787    
1788     +error_rcu:
1789     + rcu_read_unlock();
1790     error:
1791     - atomic_long_inc(&priv->rx_errors);
1792     kfree_skb(skb);
1793     }
1794    
1795     @@ -186,11 +187,15 @@ static void l2tp_eth_delete(struct l2tp_session *session)
1796    
1797     if (session) {
1798     spriv = l2tp_session_priv(session);
1799     - dev = spriv->dev;
1800     +
1801     + rtnl_lock();
1802     + dev = rtnl_dereference(spriv->dev);
1803     if (dev) {
1804     - unregister_netdev(dev);
1805     - spriv->dev = NULL;
1806     + unregister_netdevice(dev);
1807     + rtnl_unlock();
1808     module_put(THIS_MODULE);
1809     + } else {
1810     + rtnl_unlock();
1811     }
1812     }
1813     }
1814     @@ -200,35 +205,89 @@ static void l2tp_eth_show(struct seq_file *m, void *arg)
1815     {
1816     struct l2tp_session *session = arg;
1817     struct l2tp_eth_sess *spriv = l2tp_session_priv(session);
1818     - struct net_device *dev = spriv->dev;
1819     + struct net_device *dev;
1820     +
1821     + rcu_read_lock();
1822     + dev = rcu_dereference(spriv->dev);
1823     + if (!dev) {
1824     + rcu_read_unlock();
1825     + return;
1826     + }
1827     + dev_hold(dev);
1828     + rcu_read_unlock();
1829    
1830     seq_printf(m, " interface %s\n", dev->name);
1831     +
1832     + dev_put(dev);
1833     }
1834     #endif
1835    
1836     -static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
1837     +static void l2tp_eth_adjust_mtu(struct l2tp_tunnel *tunnel,
1838     + struct l2tp_session *session,
1839     + struct net_device *dev)
1840     +{
1841     + unsigned int overhead = 0;
1842     + struct dst_entry *dst;
1843     + u32 l3_overhead = 0;
1844     +
1845     + /* if the encap is UDP, account for UDP header size */
1846     + if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
1847     + overhead += sizeof(struct udphdr);
1848     + dev->needed_headroom += sizeof(struct udphdr);
1849     + }
1850     + if (session->mtu != 0) {
1851     + dev->mtu = session->mtu;
1852     + dev->needed_headroom += session->hdr_len;
1853     + return;
1854     + }
1855     + lock_sock(tunnel->sock);
1856     + l3_overhead = kernel_sock_ip_overhead(tunnel->sock);
1857     + release_sock(tunnel->sock);
1858     + if (l3_overhead == 0) {
1859     + /* L3 Overhead couldn't be identified, this could be
1860     + * because tunnel->sock was NULL or the socket's
1861     + * address family was not IPv4 or IPv6,
1862     + * dev mtu stays at 1500.
1863     + */
1864     + return;
1865     + }
1866     + /* Adjust MTU, factor overhead - underlay L3, overlay L2 hdr
1867     + * UDP overhead, if any, was already factored in above.
1868     + */
1869     + overhead += session->hdr_len + ETH_HLEN + l3_overhead;
1870     +
1871     + /* If PMTU discovery was enabled, use discovered MTU on L2TP device */
1872     + dst = sk_dst_get(tunnel->sock);
1873     + if (dst) {
1874     + /* dst_mtu will use PMTU if found, else fallback to intf MTU */
1875     + u32 pmtu = dst_mtu(dst);
1876     +
1877     + if (pmtu != 0)
1878     + dev->mtu = pmtu;
1879     + dst_release(dst);
1880     + }
1881     + session->mtu = dev->mtu - overhead;
1882     + dev->mtu = session->mtu;
1883     + dev->needed_headroom += session->hdr_len;
1884     +}
1885     +
1886     +static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel,
1887     + u32 session_id, u32 peer_session_id,
1888     + struct l2tp_session_cfg *cfg)
1889     {
1890     struct net_device *dev;
1891     char name[IFNAMSIZ];
1892     - struct l2tp_tunnel *tunnel;
1893     struct l2tp_session *session;
1894     struct l2tp_eth *priv;
1895     struct l2tp_eth_sess *spriv;
1896     int rc;
1897     - struct l2tp_eth_net *pn;
1898     -
1899     - tunnel = l2tp_tunnel_find(net, tunnel_id);
1900     - if (!tunnel) {
1901     - rc = -ENODEV;
1902     - goto out;
1903     - }
1904    
1905     if (cfg->ifname) {
1906     dev = dev_get_by_name(net, cfg->ifname);
1907     if (dev) {
1908     dev_put(dev);
1909     rc = -EEXIST;
1910     - goto out;
1911     + goto err;
1912     }
1913     strlcpy(name, cfg->ifname, IFNAMSIZ);
1914     } else
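
l2tp_eth_adjust_mtu(), added in the hunk above, sizes the pseudowire from the underlay instead of assuming 1500 minus the session header. As a hedged worked example: for an L2TPv3 Ethernet pseudowire over UDP/IPv4 with no IP options, a path MTU of 1500 and a session header of, say, 8 bytes, the overhead is 8 (UDP) + 8 (L2TP header) + 14 (ETH_HLEN) + 20 (IPv4, via kernel_sock_ip_overhead()) = 50 bytes, so both session->mtu and dev->mtu end up at 1500 - 50 = 1450. When the session already carries a configured MTU the device simply adopts it, and when the underlay address family cannot be identified the device keeps its 1500-byte default.
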
1915     @@ -238,26 +297,22 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
1916     peer_session_id, cfg);
1917     if (IS_ERR(session)) {
1918     rc = PTR_ERR(session);
1919     - goto out;
1920     + goto err;
1921     }
1922    
1923     dev = alloc_netdev(sizeof(*priv), name, NET_NAME_UNKNOWN,
1924     l2tp_eth_dev_setup);
1925     if (!dev) {
1926     rc = -ENOMEM;
1927     - goto out_del_session;
1928     + goto err_sess;
1929     }
1930    
1931     dev_net_set(dev, net);
1932     - if (session->mtu == 0)
1933     - session->mtu = dev->mtu - session->hdr_len;
1934     - dev->mtu = session->mtu;
1935     - dev->needed_headroom += session->hdr_len;
1936     + l2tp_eth_adjust_mtu(tunnel, session, dev);
1937    
1938     priv = netdev_priv(dev);
1939     priv->dev = dev;
1940     priv->session = session;
1941     - INIT_LIST_HEAD(&priv->list);
1942    
1943     priv->tunnel_sock = tunnel->sock;
1944     session->recv_skb = l2tp_eth_dev_recv;
1945     @@ -267,48 +322,50 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
1946     #endif
1947    
1948     spriv = l2tp_session_priv(session);
1949     - spriv->dev = dev;
1950    
1951     - rc = register_netdev(dev);
1952     - if (rc < 0)
1953     - goto out_del_dev;
1954     + l2tp_session_inc_refcount(session);
1955    
1956     - __module_get(THIS_MODULE);
1957     - /* Must be done after register_netdev() */
1958     - strlcpy(session->ifname, dev->name, IFNAMSIZ);
1959     + rtnl_lock();
1960    
1961     - dev_hold(dev);
1962     - pn = l2tp_eth_pernet(dev_net(dev));
1963     - spin_lock(&pn->l2tp_eth_lock);
1964     - list_add(&priv->list, &pn->l2tp_eth_dev_list);
1965     - spin_unlock(&pn->l2tp_eth_lock);
1966     + /* Register both device and session while holding the rtnl lock. This
1967     + * ensures that l2tp_eth_delete() will see that there's a device to
1968     + * unregister, even if it happened to run before we assign spriv->dev.
1969     + */
1970     + rc = l2tp_session_register(session, tunnel);
1971     + if (rc < 0) {
1972     + rtnl_unlock();
1973     + goto err_sess_dev;
1974     + }
1975    
1976     - return 0;
1977     + rc = register_netdevice(dev);
1978     + if (rc < 0) {
1979     + rtnl_unlock();
1980     + l2tp_session_delete(session);
1981     + l2tp_session_dec_refcount(session);
1982     + free_netdev(dev);
1983    
1984     -out_del_dev:
1985     - free_netdev(dev);
1986     - spriv->dev = NULL;
1987     -out_del_session:
1988     - l2tp_session_delete(session);
1989     -out:
1990     - return rc;
1991     -}
1992     + return rc;
1993     + }
1994    
1995     -static __net_init int l2tp_eth_init_net(struct net *net)
1996     -{
1997     - struct l2tp_eth_net *pn = net_generic(net, l2tp_eth_net_id);
1998     + strlcpy(session->ifname, dev->name, IFNAMSIZ);
1999     + rcu_assign_pointer(spriv->dev, dev);
2000     +
2001     + rtnl_unlock();
2002    
2003     - INIT_LIST_HEAD(&pn->l2tp_eth_dev_list);
2004     - spin_lock_init(&pn->l2tp_eth_lock);
2005     + l2tp_session_dec_refcount(session);
2006     +
2007     + __module_get(THIS_MODULE);
2008    
2009     return 0;
2010     -}
2011    
2012     -static struct pernet_operations l2tp_eth_net_ops = {
2013     - .init = l2tp_eth_init_net,
2014     - .id = &l2tp_eth_net_id,
2015     - .size = sizeof(struct l2tp_eth_net),
2016     -};
2017     +err_sess_dev:
2018     + l2tp_session_dec_refcount(session);
2019     + free_netdev(dev);
2020     +err_sess:
2021     + kfree(session);
2022     +err:
2023     + return rc;
2024     +}
2025    
2026    
2027     static const struct l2tp_nl_cmd_ops l2tp_eth_nl_cmd_ops = {
2028     @@ -323,25 +380,18 @@ static int __init l2tp_eth_init(void)
2029    
2030     err = l2tp_nl_register_ops(L2TP_PWTYPE_ETH, &l2tp_eth_nl_cmd_ops);
2031     if (err)
2032     - goto out;
2033     -
2034     - err = register_pernet_device(&l2tp_eth_net_ops);
2035     - if (err)
2036     - goto out_unreg;
2037     + goto err;
2038    
2039     pr_info("L2TP ethernet pseudowire support (L2TPv3)\n");
2040    
2041     return 0;
2042    
2043     -out_unreg:
2044     - l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH);
2045     -out:
2046     +err:
2047     return err;
2048     }
2049    
2050     static void __exit l2tp_eth_exit(void)
2051     {
2052     - unregister_pernet_device(&l2tp_eth_net_ops);
2053     l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH);
2054     }
2055    
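
Two things changed together in l2tp_eth.c: the per-net device list is gone in favour of an RCU-managed spriv->dev pointer, and both the session and the net_device are registered inside a single rtnl_lock() section so l2tp_eth_delete() can never find a registered session without a device to unregister. A condensed sketch of that ordering, based on the create path above (err_free stands in for the real cleanup labels):

/* Illustrative ordering sketch of l2tp_eth_create() after this patch. */
rtnl_lock();

err = l2tp_session_register(session, tunnel);
if (err < 0) {
	rtnl_unlock();
	goto err_free;			/* session was never visible */
}

err = register_netdevice(dev);		/* rtnl is already held */
if (err < 0) {
	rtnl_unlock();
	l2tp_session_delete(session);	/* undo the registration */
	goto err_free;
}

strlcpy(session->ifname, dev->name, IFNAMSIZ);
rcu_assign_pointer(spriv->dev, dev);	/* publish the device last */

rtnl_unlock();

On teardown the pointer is cleared with RCU_INIT_POINTER() in l2tp_eth_dev_uninit(), and the receive path re-reads it under rcu_read_lock(), so a frame racing with unregistration is simply dropped instead of dereferencing a stale device.
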
2056     diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
2057     index d6fccfdca201..47d7bdff8be8 100644
2058     --- a/net/l2tp/l2tp_netlink.c
2059     +++ b/net/l2tp/l2tp_netlink.c
2060     @@ -72,10 +72,12 @@ static struct l2tp_session *l2tp_nl_session_get(struct genl_info *info,
2061     (info->attrs[L2TP_ATTR_CONN_ID])) {
2062     tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
2063     session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
2064     - tunnel = l2tp_tunnel_find(net, tunnel_id);
2065     - if (tunnel)
2066     + tunnel = l2tp_tunnel_get(net, tunnel_id);
2067     + if (tunnel) {
2068     session = l2tp_session_get(net, tunnel, session_id,
2069     do_ref);
2070     + l2tp_tunnel_dec_refcount(tunnel);
2071     + }
2072     }
2073    
2074     return session;
2075     @@ -278,8 +280,8 @@ static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info
2076     }
2077     tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
2078    
2079     - tunnel = l2tp_tunnel_find(net, tunnel_id);
2080     - if (tunnel == NULL) {
2081     + tunnel = l2tp_tunnel_get(net, tunnel_id);
2082     + if (!tunnel) {
2083     ret = -ENODEV;
2084     goto out;
2085     }
2086     @@ -289,6 +291,8 @@ static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info
2087    
2088     l2tp_tunnel_delete(tunnel);
2089    
2090     + l2tp_tunnel_dec_refcount(tunnel);
2091     +
2092     out:
2093     return ret;
2094     }
2095     @@ -306,8 +310,8 @@ static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info
2096     }
2097     tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
2098    
2099     - tunnel = l2tp_tunnel_find(net, tunnel_id);
2100     - if (tunnel == NULL) {
2101     + tunnel = l2tp_tunnel_get(net, tunnel_id);
2102     + if (!tunnel) {
2103     ret = -ENODEV;
2104     goto out;
2105     }
2106     @@ -318,6 +322,8 @@ static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info
2107     ret = l2tp_tunnel_notify(&l2tp_nl_family, info,
2108     tunnel, L2TP_CMD_TUNNEL_MODIFY);
2109    
2110     + l2tp_tunnel_dec_refcount(tunnel);
2111     +
2112     out:
2113     return ret;
2114     }
2115     @@ -430,34 +436,37 @@ static int l2tp_nl_cmd_tunnel_get(struct sk_buff *skb, struct genl_info *info)
2116    
2117     if (!info->attrs[L2TP_ATTR_CONN_ID]) {
2118     ret = -EINVAL;
2119     - goto out;
2120     + goto err;
2121     }
2122    
2123     tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
2124    
2125     - tunnel = l2tp_tunnel_find(net, tunnel_id);
2126     - if (tunnel == NULL) {
2127     - ret = -ENODEV;
2128     - goto out;
2129     - }
2130     -
2131     msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2132     if (!msg) {
2133     ret = -ENOMEM;
2134     - goto out;
2135     + goto err;
2136     + }
2137     +
2138     + tunnel = l2tp_tunnel_get(net, tunnel_id);
2139     + if (!tunnel) {
2140     + ret = -ENODEV;
2141     + goto err_nlmsg;
2142     }
2143    
2144     ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq,
2145     NLM_F_ACK, tunnel, L2TP_CMD_TUNNEL_GET);
2146     if (ret < 0)
2147     - goto err_out;
2148     + goto err_nlmsg_tunnel;
2149     +
2150     + l2tp_tunnel_dec_refcount(tunnel);
2151    
2152     return genlmsg_unicast(net, msg, info->snd_portid);
2153    
2154     -err_out:
2155     +err_nlmsg_tunnel:
2156     + l2tp_tunnel_dec_refcount(tunnel);
2157     +err_nlmsg:
2158     nlmsg_free(msg);
2159     -
2160     -out:
2161     +err:
2162     return ret;
2163     }
2164    
2165     @@ -501,8 +510,9 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
2166     ret = -EINVAL;
2167     goto out;
2168     }
2169     +
2170     tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
2171     - tunnel = l2tp_tunnel_find(net, tunnel_id);
2172     + tunnel = l2tp_tunnel_get(net, tunnel_id);
2173     if (!tunnel) {
2174     ret = -ENODEV;
2175     goto out;
2176     @@ -510,29 +520,24 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
2177    
2178     if (!info->attrs[L2TP_ATTR_SESSION_ID]) {
2179     ret = -EINVAL;
2180     - goto out;
2181     + goto out_tunnel;
2182     }
2183     session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
2184     - session = l2tp_session_find(net, tunnel, session_id);
2185     - if (session) {
2186     - ret = -EEXIST;
2187     - goto out;
2188     - }
2189    
2190     if (!info->attrs[L2TP_ATTR_PEER_SESSION_ID]) {
2191     ret = -EINVAL;
2192     - goto out;
2193     + goto out_tunnel;
2194     }
2195     peer_session_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_SESSION_ID]);
2196    
2197     if (!info->attrs[L2TP_ATTR_PW_TYPE]) {
2198     ret = -EINVAL;
2199     - goto out;
2200     + goto out_tunnel;
2201     }
2202     cfg.pw_type = nla_get_u16(info->attrs[L2TP_ATTR_PW_TYPE]);
2203     if (cfg.pw_type >= __L2TP_PWTYPE_MAX) {
2204     ret = -EINVAL;
2205     - goto out;
2206     + goto out_tunnel;
2207     }
2208    
2209     if (tunnel->version > 2) {
2210     @@ -551,7 +556,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
2211     u16 len = nla_len(info->attrs[L2TP_ATTR_COOKIE]);
2212     if (len > 8) {
2213     ret = -EINVAL;
2214     - goto out;
2215     + goto out_tunnel;
2216     }
2217     cfg.cookie_len = len;
2218     memcpy(&cfg.cookie[0], nla_data(info->attrs[L2TP_ATTR_COOKIE]), len);
2219     @@ -560,7 +565,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
2220     u16 len = nla_len(info->attrs[L2TP_ATTR_PEER_COOKIE]);
2221     if (len > 8) {
2222     ret = -EINVAL;
2223     - goto out;
2224     + goto out_tunnel;
2225     }
2226     cfg.peer_cookie_len = len;
2227     memcpy(&cfg.peer_cookie[0], nla_data(info->attrs[L2TP_ATTR_PEER_COOKIE]), len);
2228     @@ -603,7 +608,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
2229     if ((l2tp_nl_cmd_ops[cfg.pw_type] == NULL) ||
2230     (l2tp_nl_cmd_ops[cfg.pw_type]->session_create == NULL)) {
2231     ret = -EPROTONOSUPPORT;
2232     - goto out;
2233     + goto out_tunnel;
2234     }
2235    
2236     /* Check that pseudowire-specific params are present */
2237     @@ -613,7 +618,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
2238     case L2TP_PWTYPE_ETH_VLAN:
2239     if (!info->attrs[L2TP_ATTR_VLAN_ID]) {
2240     ret = -EINVAL;
2241     - goto out;
2242     + goto out_tunnel;
2243     }
2244     break;
2245     case L2TP_PWTYPE_ETH:
2246     @@ -627,10 +632,10 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
2247     break;
2248     }
2249    
2250     - ret = -EPROTONOSUPPORT;
2251     - if (l2tp_nl_cmd_ops[cfg.pw_type]->session_create)
2252     - ret = (*l2tp_nl_cmd_ops[cfg.pw_type]->session_create)(net, tunnel_id,
2253     - session_id, peer_session_id, &cfg);
2254     + ret = l2tp_nl_cmd_ops[cfg.pw_type]->session_create(net, tunnel,
2255     + session_id,
2256     + peer_session_id,
2257     + &cfg);
2258    
2259     if (ret >= 0) {
2260     session = l2tp_session_get(net, tunnel, session_id, false);
2261     @@ -641,6 +646,8 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
2262     }
2263     }
2264    
2265     +out_tunnel:
2266     + l2tp_tunnel_dec_refcount(tunnel);
2267     out:
2268     return ret;
2269     }
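
With l2tp_tunnel_find() replaced by l2tp_tunnel_get() throughout the netlink handlers, every handler now owns a tunnel reference and must drop it on each exit path; that is what the extra l2tp_tunnel_dec_refcount() calls and the renamed error labels above are for. The discipline, reduced to its skeleton (do_work() is a hypothetical stand-in for the handler body):

/* Illustrative get/put pairing for a netlink handler. */
tunnel = l2tp_tunnel_get(net, tunnel_id);
if (!tunnel)
	return -ENODEV;

ret = do_work(tunnel);			/* fill/send/modify, as above */

l2tp_tunnel_dec_refcount(tunnel);	/* paired put on every path */
return ret;

l2tp_nl_cmd_tunnel_get() additionally allocates its netlink message before taking the reference, so its unwind labels stay strictly last-in, first-out.
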
2270     diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
2271     index d919b3e6b548..979fa868a4f1 100644
2272     --- a/net/l2tp/l2tp_ppp.c
2273     +++ b/net/l2tp/l2tp_ppp.c
2274     @@ -122,8 +122,11 @@
2275     struct pppol2tp_session {
2276     int owner; /* pid that opened the socket */
2277    
2278     - struct sock *sock; /* Pointer to the session
2279     + struct mutex sk_lock; /* Protects .sk */
2280     + struct sock __rcu *sk; /* Pointer to the session
2281     * PPPoX socket */
2282     + struct sock *__sk; /* Copy of .sk, for cleanup */
2283     + struct rcu_head rcu; /* For asynchronous release */
2284     struct sock *tunnel_sock; /* Pointer to the tunnel UDP
2285     * socket */
2286     int flags; /* accessed by PPPIOCGFLAGS.
2287     @@ -138,6 +141,24 @@ static const struct ppp_channel_ops pppol2tp_chan_ops = {
2288    
2289     static const struct proto_ops pppol2tp_ops;
2290    
2291     +/* Retrieves the pppol2tp socket associated to a session.
2292     + * A reference is held on the returned socket, so this function must be paired
2293     + * with sock_put().
2294     + */
2295     +static struct sock *pppol2tp_session_get_sock(struct l2tp_session *session)
2296     +{
2297     + struct pppol2tp_session *ps = l2tp_session_priv(session);
2298     + struct sock *sk;
2299     +
2300     + rcu_read_lock();
2301     + sk = rcu_dereference(ps->sk);
2302     + if (sk)
2303     + sock_hold(sk);
2304     + rcu_read_unlock();
2305     +
2306     + return sk;
2307     +}
2308     +
2309     /* Helpers to obtain tunnel/session contexts from sockets.
2310     */
2311     static inline struct l2tp_session *pppol2tp_sock_to_session(struct sock *sk)
2312     @@ -224,21 +245,22 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int
2313     /* If the socket is bound, send it in to PPP's input queue. Otherwise
2314     * queue it on the session socket.
2315     */
2316     - sk = ps->sock;
2317     + rcu_read_lock();
2318     + sk = rcu_dereference(ps->sk);
2319     if (sk == NULL)
2320     goto no_sock;
2321    
2322     if (sk->sk_state & PPPOX_BOUND) {
2323     struct pppox_sock *po;
2324    
2325     - l2tp_dbg(session, PPPOL2TP_MSG_DATA,
2326     + l2tp_dbg(session, L2TP_MSG_DATA,
2327     "%s: recv %d byte data frame, passing to ppp\n",
2328     session->name, data_len);
2329    
2330     po = pppox_sk(sk);
2331     ppp_input(&po->chan, skb);
2332     } else {
2333     - l2tp_dbg(session, PPPOL2TP_MSG_DATA,
2334     + l2tp_dbg(session, L2TP_MSG_DATA,
2335     "%s: recv %d byte data frame, passing to L2TP socket\n",
2336     session->name, data_len);
2337    
2338     @@ -247,30 +269,16 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int
2339     kfree_skb(skb);
2340     }
2341     }
2342     + rcu_read_unlock();
2343    
2344     return;
2345    
2346     no_sock:
2347     - l2tp_info(session, PPPOL2TP_MSG_DATA, "%s: no socket\n", session->name);
2348     + rcu_read_unlock();
2349     + l2tp_info(session, L2TP_MSG_DATA, "%s: no socket\n", session->name);
2350     kfree_skb(skb);
2351     }
2352    
2353     -static void pppol2tp_session_sock_hold(struct l2tp_session *session)
2354     -{
2355     - struct pppol2tp_session *ps = l2tp_session_priv(session);
2356     -
2357     - if (ps->sock)
2358     - sock_hold(ps->sock);
2359     -}
2360     -
2361     -static void pppol2tp_session_sock_put(struct l2tp_session *session)
2362     -{
2363     - struct pppol2tp_session *ps = l2tp_session_priv(session);
2364     -
2365     - if (ps->sock)
2366     - sock_put(ps->sock);
2367     -}
2368     -
2369     /************************************************************************
2370     * Transmit handling
2371     ***********************************************************************/
2372     @@ -431,17 +439,16 @@ abort:
2373     */
2374     static void pppol2tp_session_close(struct l2tp_session *session)
2375     {
2376     - struct pppol2tp_session *ps = l2tp_session_priv(session);
2377     - struct sock *sk = ps->sock;
2378     - struct socket *sock = sk->sk_socket;
2379     + struct sock *sk;
2380    
2381     BUG_ON(session->magic != L2TP_SESSION_MAGIC);
2382    
2383     - if (sock)
2384     - inet_shutdown(sock, SEND_SHUTDOWN);
2385     -
2386     - /* Don't let the session go away before our socket does */
2387     - l2tp_session_inc_refcount(session);
2388     + sk = pppol2tp_session_get_sock(session);
2389     + if (sk) {
2390     + if (sk->sk_socket)
2391     + inet_shutdown(sk->sk_socket, SEND_SHUTDOWN);
2392     + sock_put(sk);
2393     + }
2394     }
2395    
2396     /* Really kill the session socket. (Called from sock_put() if
2397     @@ -461,6 +468,14 @@ static void pppol2tp_session_destruct(struct sock *sk)
2398     }
2399     }
2400    
2401     +static void pppol2tp_put_sk(struct rcu_head *head)
2402     +{
2403     + struct pppol2tp_session *ps;
2404     +
2405     + ps = container_of(head, typeof(*ps), rcu);
2406     + sock_put(ps->__sk);
2407     +}
2408     +
2409     /* Called when the PPPoX socket (session) is closed.
2410     */
2411     static int pppol2tp_release(struct socket *sock)
2412     @@ -486,11 +501,23 @@ static int pppol2tp_release(struct socket *sock)
2413    
2414     session = pppol2tp_sock_to_session(sk);
2415    
2416     - /* Purge any queued data */
2417     if (session != NULL) {
2418     - __l2tp_session_unhash(session);
2419     - l2tp_session_queue_purge(session);
2420     - sock_put(sk);
2421     + struct pppol2tp_session *ps;
2422     +
2423     + l2tp_session_delete(session);
2424     +
2425     + ps = l2tp_session_priv(session);
2426     + mutex_lock(&ps->sk_lock);
2427     + ps->__sk = rcu_dereference_protected(ps->sk,
2428     + lockdep_is_held(&ps->sk_lock));
2429     + RCU_INIT_POINTER(ps->sk, NULL);
2430     + mutex_unlock(&ps->sk_lock);
2431     + call_rcu(&ps->rcu, pppol2tp_put_sk);
2432     +
2433     + /* Rely on the sock_put() call at the end of the function for
2434     + * dropping the reference held by pppol2tp_sock_to_session().
2435     + * The last reference will be dropped by pppol2tp_put_sk().
2436     + */
2437     }
2438     release_sock(sk);
2439    
2440     @@ -557,16 +584,47 @@ out:
2441     static void pppol2tp_show(struct seq_file *m, void *arg)
2442     {
2443     struct l2tp_session *session = arg;
2444     - struct pppol2tp_session *ps = l2tp_session_priv(session);
2445     + struct sock *sk;
2446     +
2447     + sk = pppol2tp_session_get_sock(session);
2448     + if (sk) {
2449     + struct pppox_sock *po = pppox_sk(sk);
2450    
2451     - if (ps) {
2452     - struct pppox_sock *po = pppox_sk(ps->sock);
2453     - if (po)
2454     - seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan));
2455     + seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan));
2456     + sock_put(sk);
2457     }
2458     }
2459     #endif
2460    
2461     +static void pppol2tp_session_init(struct l2tp_session *session)
2462     +{
2463     + struct pppol2tp_session *ps;
2464     + struct dst_entry *dst;
2465     +
2466     + session->recv_skb = pppol2tp_recv;
2467     + session->session_close = pppol2tp_session_close;
2468     +#if IS_ENABLED(CONFIG_L2TP_DEBUGFS)
2469     + session->show = pppol2tp_show;
2470     +#endif
2471     +
2472     + ps = l2tp_session_priv(session);
2473     + mutex_init(&ps->sk_lock);
2474     + ps->tunnel_sock = session->tunnel->sock;
2475     + ps->owner = current->pid;
2476     +
2477     + /* If PMTU discovery was enabled, use the MTU that was discovered */
2478     + dst = sk_dst_get(session->tunnel->sock);
2479     + if (dst) {
2480     + u32 pmtu = dst_mtu(dst);
2481     +
2482     + if (pmtu) {
2483     + session->mtu = pmtu - PPPOL2TP_HEADER_OVERHEAD;
2484     + session->mru = pmtu - PPPOL2TP_HEADER_OVERHEAD;
2485     + }
2486     + dst_release(dst);
2487     + }
2488     +}
2489     +
2490     /* connect() handler. Attach a PPPoX socket to a tunnel UDP socket
2491     */
2492     static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
2493     @@ -578,7 +636,6 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
2494     struct l2tp_session *session = NULL;
2495     struct l2tp_tunnel *tunnel;
2496     struct pppol2tp_session *ps;
2497     - struct dst_entry *dst;
2498     struct l2tp_session_cfg cfg = { 0, };
2499     int error = 0;
2500     u32 tunnel_id, peer_tunnel_id;
2501     @@ -700,13 +757,17 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
2502     /* Using a pre-existing session is fine as long as it hasn't
2503     * been connected yet.
2504     */
2505     - if (ps->sock) {
2506     + mutex_lock(&ps->sk_lock);
2507     + if (rcu_dereference_protected(ps->sk,
2508     + lockdep_is_held(&ps->sk_lock))) {
2509     + mutex_unlock(&ps->sk_lock);
2510     error = -EEXIST;
2511     goto end;
2512     }
2513    
2514     /* consistency checks */
2515     if (ps->tunnel_sock != tunnel->sock) {
2516     + mutex_unlock(&ps->sk_lock);
2517     error = -EEXIST;
2518     goto end;
2519     }
2520     @@ -722,35 +783,19 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
2521     error = PTR_ERR(session);
2522     goto end;
2523     }
2524     - }
2525     -
2526     - /* Associate session with its PPPoL2TP socket */
2527     - ps = l2tp_session_priv(session);
2528     - ps->owner = current->pid;
2529     - ps->sock = sk;
2530     - ps->tunnel_sock = tunnel->sock;
2531    
2532     - session->recv_skb = pppol2tp_recv;
2533     - session->session_close = pppol2tp_session_close;
2534     -#if IS_ENABLED(CONFIG_L2TP_DEBUGFS)
2535     - session->show = pppol2tp_show;
2536     -#endif
2537     -
2538     - /* We need to know each time a skb is dropped from the reorder
2539     - * queue.
2540     - */
2541     - session->ref = pppol2tp_session_sock_hold;
2542     - session->deref = pppol2tp_session_sock_put;
2543     -
2544     - /* If PMTU discovery was enabled, use the MTU that was discovered */
2545     - dst = sk_dst_get(tunnel->sock);
2546     - if (dst != NULL) {
2547     - u32 pmtu = dst_mtu(dst);
2548     + pppol2tp_session_init(session);
2549     + ps = l2tp_session_priv(session);
2550     + l2tp_session_inc_refcount(session);
2551    
2552     - if (pmtu != 0)
2553     - session->mtu = session->mru = pmtu -
2554     - PPPOL2TP_HEADER_OVERHEAD;
2555     - dst_release(dst);
2556     + mutex_lock(&ps->sk_lock);
2557     + error = l2tp_session_register(session, tunnel);
2558     + if (error < 0) {
2559     + mutex_unlock(&ps->sk_lock);
2560     + kfree(session);
2561     + goto end;
2562     + }
2563     + drop_refcnt = true;
2564     }
2565    
2566     /* Special case: if source & dest session_id == 0x0000, this
2567     @@ -775,14 +820,25 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
2568     po->chan.mtu = session->mtu;
2569    
2570     error = ppp_register_net_channel(sock_net(sk), &po->chan);
2571     - if (error)
2572     + if (error) {
2573     + mutex_unlock(&ps->sk_lock);
2574     goto end;
2575     + }
2576    
2577     out_no_ppp:
2578     /* This is how we get the session context from the socket. */
2579     sk->sk_user_data = session;
2580     + rcu_assign_pointer(ps->sk, sk);
2581     + mutex_unlock(&ps->sk_lock);
2582     +
2583     + /* Keep the reference we've grabbed on the session: sk doesn't expect
2584     + * the session to disappear. pppol2tp_session_destruct() is responsible
2585     + * for dropping it.
2586     + */
2587     + drop_refcnt = false;
2588     +
2589     sk->sk_state = PPPOX_CONNECTED;
2590     - l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: created\n",
2591     + l2tp_info(session, L2TP_MSG_CONTROL, "%s: created\n",
2592     session->name);
2593    
2594     end:
2595     @@ -795,25 +851,19 @@ end:
2596    
2597     #ifdef CONFIG_L2TP_V3
2598    
2599     -/* Called when creating sessions via the netlink interface.
2600     - */
2601     -static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
2602     +/* Called when creating sessions via the netlink interface. */
2603     +static int pppol2tp_session_create(struct net *net, struct l2tp_tunnel *tunnel,
2604     + u32 session_id, u32 peer_session_id,
2605     + struct l2tp_session_cfg *cfg)
2606     {
2607     int error;
2608     - struct l2tp_tunnel *tunnel;
2609     struct l2tp_session *session;
2610     - struct pppol2tp_session *ps;
2611     -
2612     - tunnel = l2tp_tunnel_find(net, tunnel_id);
2613     -
2614     - /* Error if we can't find the tunnel */
2615     - error = -ENOENT;
2616     - if (tunnel == NULL)
2617     - goto out;
2618    
2619     /* Error if tunnel socket is not prepped */
2620     - if (tunnel->sock == NULL)
2621     - goto out;
2622     + if (!tunnel->sock) {
2623     + error = -ENOENT;
2624     + goto err;
2625     + }
2626    
2627     /* Default MTU values. */
2628     if (cfg->mtu == 0)
2629     @@ -827,18 +877,20 @@ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_i
2630     peer_session_id, cfg);
2631     if (IS_ERR(session)) {
2632     error = PTR_ERR(session);
2633     - goto out;
2634     + goto err;
2635     }
2636    
2637     - ps = l2tp_session_priv(session);
2638     - ps->tunnel_sock = tunnel->sock;
2639     + pppol2tp_session_init(session);
2640    
2641     - l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: created\n",
2642     - session->name);
2643     + error = l2tp_session_register(session, tunnel);
2644     + if (error < 0)
2645     + goto err_sess;
2646    
2647     - error = 0;
2648     + return 0;
2649    
2650     -out:
2651     +err_sess:
2652     + kfree(session);
2653     +err:
2654     return error;
2655     }
2656    
2657     @@ -995,16 +1047,14 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
2658     struct l2tp_tunnel *tunnel = session->tunnel;
2659     struct pppol2tp_ioc_stats stats;
2660    
2661     - l2tp_dbg(session, PPPOL2TP_MSG_CONTROL,
2662     + l2tp_dbg(session, L2TP_MSG_CONTROL,
2663     "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n",
2664     session->name, cmd, arg);
2665    
2666     - sk = ps->sock;
2667     + sk = pppol2tp_session_get_sock(session);
2668     if (!sk)
2669     return -EBADR;
2670    
2671     - sock_hold(sk);
2672     -
2673     switch (cmd) {
2674     case SIOCGIFMTU:
2675     err = -ENXIO;
2676     @@ -1018,7 +1068,7 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
2677     if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq)))
2678     break;
2679    
2680     - l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get mtu=%d\n",
2681     + l2tp_info(session, L2TP_MSG_CONTROL, "%s: get mtu=%d\n",
2682     session->name, session->mtu);
2683     err = 0;
2684     break;
2685     @@ -1034,7 +1084,7 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
2686    
2687     session->mtu = ifr.ifr_mtu;
2688    
2689     - l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set mtu=%d\n",
2690     + l2tp_info(session, L2TP_MSG_CONTROL, "%s: set mtu=%d\n",
2691     session->name, session->mtu);
2692     err = 0;
2693     break;
2694     @@ -1048,7 +1098,7 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
2695     if (put_user(session->mru, (int __user *) arg))
2696     break;
2697    
2698     - l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get mru=%d\n",
2699     + l2tp_info(session, L2TP_MSG_CONTROL, "%s: get mru=%d\n",
2700     session->name, session->mru);
2701     err = 0;
2702     break;
2703     @@ -1063,7 +1113,7 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
2704     break;
2705    
2706     session->mru = val;
2707     - l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set mru=%d\n",
2708     + l2tp_info(session, L2TP_MSG_CONTROL, "%s: set mru=%d\n",
2709     session->name, session->mru);
2710     err = 0;
2711     break;
2712     @@ -1073,7 +1123,7 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
2713     if (put_user(ps->flags, (int __user *) arg))
2714     break;
2715    
2716     - l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get flags=%d\n",
2717     + l2tp_info(session, L2TP_MSG_CONTROL, "%s: get flags=%d\n",
2718     session->name, ps->flags);
2719     err = 0;
2720     break;
2721     @@ -1083,7 +1133,7 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
2722     if (get_user(val, (int __user *) arg))
2723     break;
2724     ps->flags = val;
2725     - l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set flags=%d\n",
2726     + l2tp_info(session, L2TP_MSG_CONTROL, "%s: set flags=%d\n",
2727     session->name, ps->flags);
2728     err = 0;
2729     break;
2730     @@ -1100,7 +1150,7 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
2731     if (copy_to_user((void __user *) arg, &stats,
2732     sizeof(stats)))
2733     break;
2734     - l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get L2TP stats\n",
2735     + l2tp_info(session, L2TP_MSG_CONTROL, "%s: get L2TP stats\n",
2736     session->name);
2737     err = 0;
2738     break;
2739     @@ -1128,7 +1178,7 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel,
2740     struct sock *sk;
2741     struct pppol2tp_ioc_stats stats;
2742    
2743     - l2tp_dbg(tunnel, PPPOL2TP_MSG_CONTROL,
2744     + l2tp_dbg(tunnel, L2TP_MSG_CONTROL,
2745     "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n",
2746     tunnel->name, cmd, arg);
2747    
2748     @@ -1171,7 +1221,7 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel,
2749     err = -EFAULT;
2750     break;
2751     }
2752     - l2tp_info(tunnel, PPPOL2TP_MSG_CONTROL, "%s: get L2TP stats\n",
2753     + l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: get L2TP stats\n",
2754     tunnel->name);
2755     err = 0;
2756     break;
2757     @@ -1261,7 +1311,7 @@ static int pppol2tp_tunnel_setsockopt(struct sock *sk,
2758     switch (optname) {
2759     case PPPOL2TP_SO_DEBUG:
2760     tunnel->debug = val;
2761     - l2tp_info(tunnel, PPPOL2TP_MSG_CONTROL, "%s: set debug=%x\n",
2762     + l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: set debug=%x\n",
2763     tunnel->name, tunnel->debug);
2764     break;
2765    
2766     @@ -1280,7 +1330,6 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
2767     int optname, int val)
2768     {
2769     int err = 0;
2770     - struct pppol2tp_session *ps = l2tp_session_priv(session);
2771    
2772     switch (optname) {
2773     case PPPOL2TP_SO_RECVSEQ:
2774     @@ -1289,7 +1338,7 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
2775     break;
2776     }
2777     session->recv_seq = val ? -1 : 0;
2778     - l2tp_info(session, PPPOL2TP_MSG_CONTROL,
2779     + l2tp_info(session, L2TP_MSG_CONTROL,
2780     "%s: set recv_seq=%d\n",
2781     session->name, session->recv_seq);
2782     break;
2783     @@ -1301,13 +1350,13 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
2784     }
2785     session->send_seq = val ? -1 : 0;
2786     {
2787     - struct sock *ssk = ps->sock;
2788     - struct pppox_sock *po = pppox_sk(ssk);
2789     + struct pppox_sock *po = pppox_sk(sk);
2790     +
2791     po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ :
2792     PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
2793     }
2794     l2tp_session_set_header_len(session, session->tunnel->version);
2795     - l2tp_info(session, PPPOL2TP_MSG_CONTROL,
2796     + l2tp_info(session, L2TP_MSG_CONTROL,
2797     "%s: set send_seq=%d\n",
2798     session->name, session->send_seq);
2799     break;
2800     @@ -1318,20 +1367,20 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
2801     break;
2802     }
2803     session->lns_mode = val ? -1 : 0;
2804     - l2tp_info(session, PPPOL2TP_MSG_CONTROL,
2805     + l2tp_info(session, L2TP_MSG_CONTROL,
2806     "%s: set lns_mode=%d\n",
2807     session->name, session->lns_mode);
2808     break;
2809    
2810     case PPPOL2TP_SO_DEBUG:
2811     session->debug = val;
2812     - l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set debug=%x\n",
2813     + l2tp_info(session, L2TP_MSG_CONTROL, "%s: set debug=%x\n",
2814     session->name, session->debug);
2815     break;
2816    
2817     case PPPOL2TP_SO_REORDERTO:
2818     session->reorder_timeout = msecs_to_jiffies(val);
2819     - l2tp_info(session, PPPOL2TP_MSG_CONTROL,
2820     + l2tp_info(session, L2TP_MSG_CONTROL,
2821     "%s: set reorder_timeout=%d\n",
2822     session->name, session->reorder_timeout);
2823     break;
2824     @@ -1412,7 +1461,7 @@ static int pppol2tp_tunnel_getsockopt(struct sock *sk,
2825     switch (optname) {
2826     case PPPOL2TP_SO_DEBUG:
2827     *val = tunnel->debug;
2828     - l2tp_info(tunnel, PPPOL2TP_MSG_CONTROL, "%s: get debug=%x\n",
2829     + l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: get debug=%x\n",
2830     tunnel->name, tunnel->debug);
2831     break;
2832    
2833     @@ -1435,31 +1484,31 @@ static int pppol2tp_session_getsockopt(struct sock *sk,
2834     switch (optname) {
2835     case PPPOL2TP_SO_RECVSEQ:
2836     *val = session->recv_seq;
2837     - l2tp_info(session, PPPOL2TP_MSG_CONTROL,
2838     + l2tp_info(session, L2TP_MSG_CONTROL,
2839     "%s: get recv_seq=%d\n", session->name, *val);
2840     break;
2841    
2842     case PPPOL2TP_SO_SENDSEQ:
2843     *val = session->send_seq;
2844     - l2tp_info(session, PPPOL2TP_MSG_CONTROL,
2845     + l2tp_info(session, L2TP_MSG_CONTROL,
2846     "%s: get send_seq=%d\n", session->name, *val);
2847     break;
2848    
2849     case PPPOL2TP_SO_LNSMODE:
2850     *val = session->lns_mode;
2851     - l2tp_info(session, PPPOL2TP_MSG_CONTROL,
2852     + l2tp_info(session, L2TP_MSG_CONTROL,
2853     "%s: get lns_mode=%d\n", session->name, *val);
2854     break;
2855    
2856     case PPPOL2TP_SO_DEBUG:
2857     *val = session->debug;
2858     - l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get debug=%d\n",
2859     + l2tp_info(session, L2TP_MSG_CONTROL, "%s: get debug=%d\n",
2860     session->name, *val);
2861     break;
2862    
2863     case PPPOL2TP_SO_REORDERTO:
2864     *val = (int) jiffies_to_msecs(session->reorder_timeout);
2865     - l2tp_info(session, PPPOL2TP_MSG_CONTROL,
2866     + l2tp_info(session, L2TP_MSG_CONTROL,
2867     "%s: get reorder_timeout=%d\n", session->name, *val);
2868     break;
2869    
2870     @@ -1638,8 +1687,9 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
2871     {
2872     struct l2tp_session *session = v;
2873     struct l2tp_tunnel *tunnel = session->tunnel;
2874     - struct pppol2tp_session *ps = l2tp_session_priv(session);
2875     - struct pppox_sock *po = pppox_sk(ps->sock);
2876     + unsigned char state;
2877     + char user_data_ok;
2878     + struct sock *sk;
2879     u32 ip = 0;
2880     u16 port = 0;
2881    
2882     @@ -1649,6 +1699,15 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
2883     port = ntohs(inet->inet_sport);
2884     }
2885    
2886     + sk = pppol2tp_session_get_sock(session);
2887     + if (sk) {
2888     + state = sk->sk_state;
2889     + user_data_ok = (session == sk->sk_user_data) ? 'Y' : 'N';
2890     + } else {
2891     + state = 0;
2892     + user_data_ok = 'N';
2893     + }
2894     +
2895     seq_printf(m, " SESSION '%s' %08X/%d %04X/%04X -> "
2896     "%04X/%04X %d %c\n",
2897     session->name, ip, port,
2898     @@ -1656,9 +1715,7 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
2899     session->session_id,
2900     tunnel->peer_tunnel_id,
2901     session->peer_session_id,
2902     - ps->sock->sk_state,
2903     - (session == ps->sock->sk_user_data) ?
2904     - 'Y' : 'N');
2905     + state, user_data_ok);
2906     seq_printf(m, " %d/%d/%c/%c/%s %08x %u\n",
2907     session->mtu, session->mru,
2908     session->recv_seq ? 'R' : '-',
2909     @@ -1675,8 +1732,12 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
2910     atomic_long_read(&session->stats.rx_bytes),
2911     atomic_long_read(&session->stats.rx_errors));
2912    
2913     - if (po)
2914     + if (sk) {
2915     + struct pppox_sock *po = pppox_sk(sk);
2916     +
2917     seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan));
2918     + sock_put(sk);
2919     + }
2920     }
2921    
2922     static int pppol2tp_seq_show(struct seq_file *m, void *v)
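
The pppol2tp conversion above follows the same shape as l2tp_eth: ps->sock becomes an RCU-managed ps->sk, readers obtain the socket through pppol2tp_session_get_sock() (sock_hold() under rcu_read_lock()), and the release path detaches the pointer under ps->sk_lock before handing the final sock_put() to call_rcu(). The detach-then-put-later core, lifted from the hunks above and simplified:

/* RCU callback: runs after every pre-existing reader has finished. */
static void pppol2tp_put_sk(struct rcu_head *head)
{
	struct pppol2tp_session *ps = container_of(head, typeof(*ps), rcu);

	sock_put(ps->__sk);		/* drop the session's own hold */
}

/* ...in pppol2tp_release(), once the session has been deleted... */
mutex_lock(&ps->sk_lock);
ps->__sk = rcu_dereference_protected(ps->sk,
				     lockdep_is_held(&ps->sk_lock));
RCU_INIT_POINTER(ps->sk, NULL);		/* new readers now see NULL */
mutex_unlock(&ps->sk_lock);
call_rcu(&ps->rcu, pppol2tp_put_sk);	/* old readers drain first */

A receiver such as pppol2tp_recv() therefore sees either the old socket, still pinned for the length of its rcu_read_lock() section, or NULL, never a freed pointer.
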
2923     diff --git a/net/socket.c b/net/socket.c
2924     index 65afc8ec68d4..88abc72df2a6 100644
2925     --- a/net/socket.c
2926     +++ b/net/socket.c
2927     @@ -3321,3 +3321,49 @@ int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how)
2928     return sock->ops->shutdown(sock, how);
2929     }
2930     EXPORT_SYMBOL(kernel_sock_shutdown);
2931     +
2932     +/* This routine returns the IP overhead imposed by a socket i.e.
2933     + * the length of the underlying IP header, depending on whether
2934     + * this is an IPv4 or IPv6 socket and the length from IP options turned
2935     + * on at the socket. Assumes that the caller has a lock on the socket.
2936     + */
2937     +u32 kernel_sock_ip_overhead(struct sock *sk)
2938     +{
2939     + struct inet_sock *inet;
2940     + struct ip_options_rcu *opt;
2941     + u32 overhead = 0;
2942     + bool owned_by_user;
2943     +#if IS_ENABLED(CONFIG_IPV6)
2944     + struct ipv6_pinfo *np;
2945     + struct ipv6_txoptions *optv6 = NULL;
2946     +#endif /* IS_ENABLED(CONFIG_IPV6) */
2947     +
2948     + if (!sk)
2949     + return overhead;
2950     +
2951     + owned_by_user = sock_owned_by_user(sk);
2952     + switch (sk->sk_family) {
2953     + case AF_INET:
2954     + inet = inet_sk(sk);
2955     + overhead += sizeof(struct iphdr);
2956     + opt = rcu_dereference_protected(inet->inet_opt,
2957     + owned_by_user);
2958     + if (opt)
2959     + overhead += opt->opt.optlen;
2960     + return overhead;
2961     +#if IS_ENABLED(CONFIG_IPV6)
2962     + case AF_INET6:
2963     + np = inet6_sk(sk);
2964     + overhead += sizeof(struct ipv6hdr);
2965     + if (np)
2966     + optv6 = rcu_dereference_protected(np->opt,
2967     + owned_by_user);
2968     + if (optv6)
2969     + overhead += (optv6->opt_flen + optv6->opt_nflen);
2970     + return overhead;
2971     +#endif /* IS_ENABLED(CONFIG_IPV6) */
2972     + default: /* Returns 0 overhead if the socket is not ipv4 or ipv6 */
2973     + return overhead;
2974     + }
2975     +}
2976     +EXPORT_SYMBOL(kernel_sock_ip_overhead);
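
kernel_sock_ip_overhead() expects its caller to hold the socket lock, which is why l2tp_eth_adjust_mtu() earlier in this patch brackets the call with lock_sock()/release_sock(). As a rough guide to its return values: 20 for an IPv4 socket with no IP options (sizeof(struct iphdr)), 40 for IPv6 without extension headers (sizeof(struct ipv6hdr)), larger when options are set, and 0 for a NULL socket or any other family. A minimal caller sketch:

/* Illustrative caller; mirrors the l2tp_eth usage earlier in the patch. */
u32 l3_overhead;

lock_sock(sk);				/* the helper assumes this lock */
l3_overhead = kernel_sock_ip_overhead(sk);
release_sock(sk);

if (l3_overhead == 0) {
	/* Unknown family (or NULL sock): caller falls back to defaults. */
}
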
2977     diff --git a/scripts/gcc-plugins/Makefile b/scripts/gcc-plugins/Makefile
2978     index 8b29dc17c73c..2cad963c4fb7 100644
2979     --- a/scripts/gcc-plugins/Makefile
2980     +++ b/scripts/gcc-plugins/Makefile
2981     @@ -9,6 +9,7 @@ else
2982     HOST_EXTRACXXFLAGS += -I$(GCC_PLUGINS_DIR)/include -I$(src) -std=gnu++98 -fno-rtti
2983     HOST_EXTRACXXFLAGS += -fno-exceptions -fasynchronous-unwind-tables -ggdb
2984     HOST_EXTRACXXFLAGS += -Wno-narrowing -Wno-unused-variable
2985     + HOST_EXTRACXXFLAGS += -Wno-format-diag
2986     export HOST_EXTRACXXFLAGS
2987     endif
2988    
2989     diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h
2990     index 08fe09c28bd2..6792915f5174 100644
2991     --- a/scripts/gcc-plugins/gcc-common.h
2992     +++ b/scripts/gcc-plugins/gcc-common.h
2993     @@ -31,7 +31,9 @@
2994     #include "ggc.h"
2995     #include "timevar.h"
2996    
2997     +#if BUILDING_GCC_VERSION < 10000
2998     #include "params.h"
2999     +#endif
3000    
3001     #if BUILDING_GCC_VERSION <= 4009
3002     #include "pointer-set.h"
3003     @@ -796,6 +798,7 @@ static inline gimple gimple_build_assign_with_ops(enum tree_code subcode, tree l
3004     return gimple_build_assign(lhs, subcode, op1, op2 PASS_MEM_STAT);
3005     }
3006    
3007     +#if BUILDING_GCC_VERSION < 10000
3008     template <>
3009     template <>
3010     inline bool is_a_helper<const ggoto *>::test(const_gimple gs)
3011     @@ -809,6 +812,7 @@ inline bool is_a_helper<const greturn *>::test(const_gimple gs)
3012     {
3013     return gs->code == GIMPLE_RETURN;
3014     }
3015     +#endif
3016    
3017     static inline gasm *as_a_gasm(gimple stmt)
3018     {
3019     diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
3020     index c783fefa558a..e034dc21421e 100644
3021     --- a/security/integrity/evm/evm_crypto.c
3022     +++ b/security/integrity/evm/evm_crypto.c
3023     @@ -90,7 +90,7 @@ static struct shash_desc *init_desc(char type)
3024     algo = evm_hash;
3025     }
3026    
3027     - if (*tfm == NULL) {
3028     + if (IS_ERR_OR_NULL(*tfm)) {
3029     mutex_lock(&mutex);
3030     if (*tfm)
3031     goto out;
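
The evm_crypto change is subtle: crypto_alloc_shash() reports failure with an ERR_PTR(), not NULL, so once a failed attempt has been cached in *tfm the old NULL test would keep treating that error pointer as a usable transform. Testing IS_ERR_OR_NULL() lets the lazy initialisation retry. A minimal sketch of the idiom (cached_tfm and get_tfm() are hypothetical, and the real code serialises with a mutex):

/* Illustrative lazy-init retry; locking omitted for brevity. */
#include <linux/err.h>
#include <crypto/hash.h>

static struct crypto_shash *cached_tfm;		/* NULL, ERR_PTR or valid */

static struct crypto_shash *get_tfm(const char *algo)
{
	if (IS_ERR_OR_NULL(cached_tfm))
		cached_tfm = crypto_alloc_shash(algo, 0, 0);

	return cached_tfm;			/* may still be ERR_PTR */
}
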
3032     diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
3033     index 44b44d7e0dbc..853a7d2333b3 100644
3034     --- a/security/integrity/ima/ima_fs.c
3035     +++ b/security/integrity/ima/ima_fs.c
3036     @@ -331,8 +331,7 @@ static ssize_t ima_write_policy(struct file *file, const char __user *buf,
3037     integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL, NULL,
3038     "policy_update", "signed policy required",
3039     1, 0);
3040     - if (ima_appraise & IMA_APPRAISE_ENFORCE)
3041     - result = -EACCES;
3042     + result = -EACCES;
3043     } else {
3044     result = ima_parse_add_rule(data);
3045     }
3046     diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
3047     index f09ae7efc695..f0052c06d065 100644
3048     --- a/sound/core/pcm_lib.c
3049     +++ b/sound/core/pcm_lib.c
3050     @@ -456,6 +456,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
3051    
3052     no_delta_check:
3053     if (runtime->status->hw_ptr == new_hw_ptr) {
3054     + runtime->hw_ptr_jiffies = curr_jiffies;
3055     update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
3056     return 0;
3057     }