Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.9/0254-4.9.155-all-fixes.patch

Revision 3307
Tue Mar 12 10:43:13 2019 UTC by niro
File size: 34902 bytes
-linux-4.9.155
diff --git a/Makefile b/Makefile
index 9964792e200f..1933ac9c3406 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 9
-SUBLEVEL = 154
+SUBLEVEL = 155
EXTRAVERSION =
NAME = Roaring Lionus

diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c
index 318394ed5c7a..5e11ad3164e0 100644
--- a/arch/arm/mach-cns3xxx/pcie.c
+++ b/arch/arm/mach-cns3xxx/pcie.c
@@ -83,7 +83,7 @@ static void __iomem *cns3xxx_pci_map_bus(struct pci_bus *bus,
} else /* remote PCI bus */
base = cnspci->cfg1_regs + ((busno & 0xf) << 20);

- return base + (where & 0xffc) + (devfn << 12);
+ return base + where + (devfn << 12);
}

static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
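
Note on the hunk above: cns3xxx_pci_map_bus() must return the exact config-space
address, since the generic accessors already handle sub-word alignment. The
removed "& 0xffc" rounded every offset down to a 4-byte boundary, so 1- and
2-byte accesses landed on the wrong register. A minimal illustration (values
invented):

    /* reading a 2-byte field at config offset 0x3e for devfn 0 */
    int where = 0x3e;
    base + (where & 0xffc) + (devfn << 12);  /* old: maps to 0x3c - wrong register */
    base + where + (devfn << 12);            /* new: maps to 0x3e - as requested   */
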
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index f6e71c73cceb..76c9b51fa7f1 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -297,8 +297,10 @@ int swsusp_arch_suspend(void)
dcache_clean_range(__idmap_text_start, __idmap_text_end);

/* Clean kvm setup code to PoC? */
- if (el2_reset_needed())
+ if (el2_reset_needed()) {
dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
+ dcache_clean_range(__hyp_text_start, __hyp_text_end);
+ }

/*
* Tell the hibernation core that we've just restored
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index d3b5f75e652e..fcb486d09555 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -28,6 +28,8 @@
#include <asm/virt.h>

.text
+ .pushsection .hyp.text, "ax"
+
.align 11

ENTRY(__hyp_stub_vectors)
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 2a21318fed1d..c9ca903462a6 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -88,6 +88,7 @@ u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
* we end up running with module randomization disabled.
*/
module_alloc_base = (u64)_etext - MODULES_VSIZE;
+ __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));

/*
* Try to map the FDT early. If this fails, we simply bail,
diff --git a/drivers/base/core.c b/drivers/base/core.c
index f43caad30e1e..901aec4bb01d 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -862,6 +862,8 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
return;

mutex_lock(&gdp_mutex);
+ if (!kobject_has_children(glue_dir))
+ kobject_del(glue_dir);
kobject_put(glue_dir);
mutex_unlock(&gdp_mutex);
}
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index 524c8e0b72fd..40bdeca6d692 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -242,7 +242,10 @@ static int sdhci_iproc_probe(struct platform_device *pdev)

iproc_host->data = iproc_data;

- mmc_of_parse(host->mmc);
+ ret = mmc_of_parse(host->mmc);
+ if (ret)
+ goto err;
+
sdhci_get_of_property(pdev);

host->mmc->caps |= iproc_host->data->mmc_caps;
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index ef9bc26ebc1a..714593023bbc 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -1888,6 +1888,8 @@ static void ucc_geth_free_tx(struct ucc_geth_private *ugeth)
u16 i, j;
u8 __iomem *bd;

+ netdev_reset_queue(ugeth->ndev);
+
ug_info = ugeth->ug_info;
uf_info = &ug_info->uf_info;

diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 84bab9f0732e..9af0887c8a29 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -2037,9 +2037,11 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
{
struct mlx4_cmd_mailbox *mailbox;
__be32 *outbox;
+ u64 qword_field;
u32 dword_field;
- int err;
+ u16 word_field;
u8 byte_field;
+ int err;
static const u8 a0_dmfs_query_hw_steering[] = {
[0] = MLX4_STEERING_DMFS_A0_DEFAULT,
[1] = MLX4_STEERING_DMFS_A0_DYNAMIC,
@@ -2067,19 +2069,32 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,

/* QPC/EEC/CQC/EQC/RDMARC attributes */

- MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET);
- MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET);
- MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET);
- MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET);
- MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET);
- MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET);
- MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET);
- MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET);
- MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET);
- MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET);
- MLX4_GET(param->num_sys_eqs, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
- MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
- MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
+ MLX4_GET(qword_field, outbox, INIT_HCA_QPC_BASE_OFFSET);
+ param->qpc_base = qword_field & ~((u64)0x1f);
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_QP_OFFSET);
+ param->log_num_qps = byte_field & 0x1f;
+ MLX4_GET(qword_field, outbox, INIT_HCA_SRQC_BASE_OFFSET);
+ param->srqc_base = qword_field & ~((u64)0x1f);
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_SRQ_OFFSET);
+ param->log_num_srqs = byte_field & 0x1f;
+ MLX4_GET(qword_field, outbox, INIT_HCA_CQC_BASE_OFFSET);
+ param->cqc_base = qword_field & ~((u64)0x1f);
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_CQ_OFFSET);
+ param->log_num_cqs = byte_field & 0x1f;
+ MLX4_GET(qword_field, outbox, INIT_HCA_ALTC_BASE_OFFSET);
+ param->altc_base = qword_field;
+ MLX4_GET(qword_field, outbox, INIT_HCA_AUXC_BASE_OFFSET);
+ param->auxc_base = qword_field;
+ MLX4_GET(qword_field, outbox, INIT_HCA_EQC_BASE_OFFSET);
+ param->eqc_base = qword_field & ~((u64)0x1f);
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_EQ_OFFSET);
+ param->log_num_eqs = byte_field & 0x1f;
+ MLX4_GET(word_field, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
+ param->num_sys_eqs = word_field & 0xfff;
+ MLX4_GET(qword_field, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
+ param->rdmarc_base = qword_field & ~((u64)0x1f);
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_RD_OFFSET);
+ param->log_rd_per_qp = byte_field & 0x7;

MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
@@ -2098,22 +2113,21 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
/* steering attributes */
if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
- MLX4_GET(param->log_mc_entry_sz, outbox,
- INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
- MLX4_GET(param->log_mc_table_sz, outbox,
- INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
- MLX4_GET(byte_field, outbox,
- INIT_HCA_FS_A0_OFFSET);
+ MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
+ param->log_mc_entry_sz = byte_field & 0x1f;
+ MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
+ param->log_mc_table_sz = byte_field & 0x1f;
+ MLX4_GET(byte_field, outbox, INIT_HCA_FS_A0_OFFSET);
param->dmfs_high_steer_mode =
a0_dmfs_query_hw_steering[(byte_field >> 6) & 3];
} else {
MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
- MLX4_GET(param->log_mc_entry_sz, outbox,
- INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
- MLX4_GET(param->log_mc_hash_sz, outbox,
- INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
- MLX4_GET(param->log_mc_table_sz, outbox,
- INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
+ param->log_mc_entry_sz = byte_field & 0x1f;
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+ param->log_mc_hash_sz = byte_field & 0x1f;
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+ param->log_mc_table_sz = byte_field & 0x1f;
}

/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
@@ -2137,15 +2151,18 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
/* TPT attributes */

MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET);
- MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET);
- MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
+ MLX4_GET(byte_field, outbox, INIT_HCA_TPT_MW_OFFSET);
+ param->mw_enabled = byte_field >> 7;
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
+ param->log_mpt_sz = byte_field & 0x3f;
MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET);
MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET);

/* UAR attributes */

MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
- MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
+ param->log_uar_sz = byte_field & 0xf;

/* phv_check enable */
MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET);
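
Note on the hunk above: each QUERY_HCA output byte (or word/qword) packs a
small field next to reserved bits, so copying the raw value straight into the
parameter dragged reserved bits along. The fix reads into a scratch variable
and masks the field to its documented width. A minimal sketch with an invented
firmware value:

    u8 byte_field = 0xe7;                    /* firmware byte, high 3 bits reserved */
    param->log_num_qps = byte_field & 0x1f;  /* keep the 5 valid bits -> 0x07 */
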
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 5d6eab19a9d8..da9246f6c31e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1216,14 +1216,6 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
int err = 0;
u8 *smac_v;

- if (vport->info.spoofchk && !is_valid_ether_addr(vport->info.mac)) {
- mlx5_core_warn(esw->dev,
- "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
- vport->vport);
- return -EPERM;
-
- }
-
esw_vport_cleanup_ingress_rules(esw, vport);

if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
@@ -1709,13 +1701,10 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
mutex_lock(&esw->state_lock);
evport = &esw->vports[vport];

- if (evport->info.spoofchk && !is_valid_ether_addr(mac)) {
+ if (evport->info.spoofchk && !is_valid_ether_addr(mac))
mlx5_core_warn(esw->dev,
- "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n",
+ "Set invalid MAC while spoofchk is on, vport(%d)\n",
vport);
- err = -EPERM;
- goto unlock;
- }

err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
if (err) {
@@ -1859,6 +1848,10 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
evport = &esw->vports[vport];
pschk = evport->info.spoofchk;
evport->info.spoofchk = spoofchk;
+ if (pschk && !is_valid_ether_addr(evport->info.mac))
+ mlx5_core_warn(esw->dev,
+ "Spoofchk in set while MAC is invalid, vport(%d)\n",
+ evport->vport);
if (evport->enabled && esw->mode == SRIOV_LEGACY)
err = esw_vport_ingress_config(esw, evport);
if (err)
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index b299277361b7..4a2609c4dd6e 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -85,12 +85,12 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
err = ipvlan_register_nf_hook();
if (!err) {
mdev->l3mdev_ops = &ipvl_l3mdev_ops;
- mdev->priv_flags |= IFF_L3MDEV_MASTER;
+ mdev->priv_flags |= IFF_L3MDEV_RX_HANDLER;
} else
goto fail;
} else if (port->mode == IPVLAN_MODE_L3S) {
/* Old mode was L3S */
- mdev->priv_flags &= ~IFF_L3MDEV_MASTER;
+ mdev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
ipvlan_unregister_nf_hook();
mdev->l3mdev_ops = NULL;
}
@@ -158,7 +158,7 @@ static void ipvlan_port_destroy(struct net_device *dev)

dev->priv_flags &= ~IFF_IPVLAN_MASTER;
if (port->mode == IPVLAN_MODE_L3S) {
- dev->priv_flags &= ~IFF_L3MDEV_MASTER;
+ dev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
ipvlan_unregister_nf_hook();
dev->l3mdev_ops = NULL;
}
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index c857d2d7bbec..69ffbd7b76f7 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -477,8 +477,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0x30, { KEY_VOLUMEUP } },
{ KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
{ KE_KEY, 0x32, { KEY_MUTE } },
- { KE_KEY, 0x33, { KEY_DISPLAYTOGGLE } }, /* LCD on */
- { KE_KEY, 0x34, { KEY_DISPLAY_OFF } }, /* LCD off */
+ { KE_KEY, 0x35, { KEY_SCREENLOCK } },
{ KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
{ KE_KEY, 0x41, { KEY_NEXTSONG } },
{ KE_KEY, 0x43, { KEY_STOPCD } }, /* Stop/Eject */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 441d434a48c1..33e65b71c49a 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -48,6 +48,7 @@
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
+#include "dns_resolve.h"
#include "ntlmssp.h"
#include "nterr.h"
#include "rfc1002pdu.h"
@@ -306,6 +307,53 @@ static void cifs_prune_tlinks(struct work_struct *work);
static int cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data,
const char *devname);

+/*
+ * Resolve hostname and set ip addr in tcp ses. Useful for hostnames that may
+ * get their ip addresses changed at some point.
+ *
+ * This should be called with server->srv_mutex held.
+ */
+#ifdef CONFIG_CIFS_DFS_UPCALL
+static int reconn_set_ipaddr(struct TCP_Server_Info *server)
+{
+ int rc;
+ int len;
+ char *unc, *ipaddr = NULL;
+
+ if (!server->hostname)
+ return -EINVAL;
+
+ len = strlen(server->hostname) + 3;
+
+ unc = kmalloc(len, GFP_KERNEL);
+ if (!unc) {
+ cifs_dbg(FYI, "%s: failed to create UNC path\n", __func__);
+ return -ENOMEM;
+ }
+ snprintf(unc, len, "\\\\%s", server->hostname);
+
+ rc = dns_resolve_server_name_to_ip(unc, &ipaddr);
+ kfree(unc);
+
+ if (rc < 0) {
+ cifs_dbg(FYI, "%s: failed to resolve server part of %s to IP: %d\n",
+ __func__, server->hostname, rc);
+ return rc;
+ }
+
+ rc = cifs_convert_address((struct sockaddr *)&server->dstaddr, ipaddr,
+ strlen(ipaddr));
+ kfree(ipaddr);
+
+ return !rc ? -1 : 0;
+}
+#else
+static inline int reconn_set_ipaddr(struct TCP_Server_Info *server)
+{
+ return 0;
+}
+#endif
+
/*
* cifs tcp session reconnection
*
@@ -403,6 +451,11 @@ cifs_reconnect(struct TCP_Server_Info *server)
rc = generic_ip_connect(server);
if (rc) {
cifs_dbg(FYI, "reconnect error %d\n", rc);
+ rc = reconn_set_ipaddr(server);
+ if (rc) {
+ cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n",
+ __func__, rc);
+ }
mutex_unlock(&server->srv_mutex);
msleep(3000);
} else {
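
A note on the string arithmetic in reconn_set_ipaddr() above: the C literal
"\\\\" emits just two backslash characters, so len = strlen(hostname) + 3
covers the two-character UNC prefix plus the NUL terminator. Worked through
with a hypothetical host name:

    /* server->hostname = "fileserver" (10 characters) */
    len = 10 + 3;                            /* "\\" (2) + name (10) + NUL (1) */
    snprintf(unc, len, "\\\\%s", server->hostname);
    /* unc now holds: \\fileserver */
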
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 50251a8af0ce..52b6e4a40748 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -2686,8 +2686,8 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
if (rc == -ENODATA && rsp->hdr.Status == STATUS_NO_MORE_FILES) {
srch_inf->endOfSearch = true;
rc = 0;
- }
- cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
+ } else
+ cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
goto qdir_exit;
}

diff --git a/fs/dcache.c b/fs/dcache.c
index f903b86b06e5..29c0286bd638 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1164,15 +1164,11 @@ static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
*/
void shrink_dcache_sb(struct super_block *sb)
{
- long freed;
-
do {
LIST_HEAD(dispose);

- freed = list_lru_walk(&sb->s_dentry_lru,
+ list_lru_walk(&sb->s_dentry_lru,
dentry_lru_isolate_shrink, &dispose, 1024);
-
- this_cpu_sub(nr_dentry_unused, freed);
shrink_dentry_list(&dispose);
cond_resched();
} while (list_lru_count(&sb->s_dentry_lru) > 0);
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 05f1ec728840..073126707270 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1705,9 +1705,9 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
goto next_iter;
}
if (ret == -E2BIG) {
- n += rbm->bii - initial_bii;
rbm->bii = 0;
rbm->offset = 0;
+ n += (rbm->bii - initial_bii);
goto res_covered_end_of_rgrp;
}
return ret;
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index a64adc2fced9..56b4f855fa9b 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -101,9 +101,9 @@ int __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
parent = dget_parent(dentry);
p_inode = parent->d_inode;

- if (unlikely(!fsnotify_inode_watches_children(p_inode)))
+ if (unlikely(!fsnotify_inode_watches_children(p_inode))) {
__fsnotify_update_child_dentry_flags(p_inode);
- else if (p_inode->i_fsnotify_mask & mask) {
+ } else if (p_inode->i_fsnotify_mask & mask & ~FS_EVENT_ON_CHILD) {
struct name_snapshot name;

/* we are notifying a parent so come up with the new mask which
@@ -207,6 +207,10 @@ int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
else
mnt = NULL;

+ /* An event "on child" is not intended for a mount mark */
+ if (mask & FS_EVENT_ON_CHILD)
+ mnt = NULL;
+
/*
* Optimization: srcu_read_lock() has a memory barrier which can
* be expensive. It protects walking the *_fsnotify_marks lists.
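
To see what the extra "& ~FS_EVENT_ON_CHILD" in the hunk above buys, take a
parent that watches only for creates on its children. Flag values as defined
in include/linux/fsnotify_backend.h of this era:

    #define FS_MODIFY          0x00000002
    #define FS_CREATE          0x00000100
    #define FS_EVENT_ON_CHILD  0x08000000

    __u32 parent_mask = FS_CREATE | FS_EVENT_ON_CHILD; /* parent's mark */
    __u32 event_mask  = FS_MODIFY | FS_EVENT_ON_CHILD; /* a child was modified */

    parent_mask & event_mask;                       /* old test: nonzero, spurious wakeup */
    parent_mask & event_mask & ~FS_EVENT_ON_CHILD;  /* new test: 0, parent skipped */
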
diff --git a/fs/read_write.c b/fs/read_write.c
index ba280596ec78..9819f7c6c8c5 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -392,8 +392,10 @@ ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos)
iter->type |= WRITE;
ret = file->f_op->write_iter(&kiocb, iter);
BUG_ON(ret == -EIOCBQUEUED);
- if (ret > 0)
+ if (ret > 0) {
*ppos = kiocb.ki_pos;
+ fsnotify_modify(file);
+ }
return ret;
}
EXPORT_SYMBOL(vfs_iter_write);
diff --git a/fs/super.c b/fs/super.c
index 7e9beab77259..abe2541fb28c 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -119,13 +119,23 @@ static unsigned long super_cache_count(struct shrinker *shrink,
sb = container_of(shrink, struct super_block, s_shrink);

/*
- * Don't call trylock_super as it is a potential
- * scalability bottleneck. The counts could get updated
- * between super_cache_count and super_cache_scan anyway.
- * Call to super_cache_count with shrinker_rwsem held
- * ensures the safety of call to list_lru_shrink_count() and
- * s_op->nr_cached_objects().
+ * We don't call trylock_super() here as it is a scalability bottleneck,
+ * so we're exposed to partial setup state. The shrinker rwsem does not
+ * protect filesystem operations backing list_lru_shrink_count() or
+ * s_op->nr_cached_objects(). Counts can change between
+ * super_cache_count and super_cache_scan, so we really don't need locks
+ * here.
+ *
+ * However, if we are currently mounting the superblock, the underlying
+ * filesystem might be in a state of partial construction and hence it
+ * is dangerous to access it. trylock_super() uses a MS_BORN check to
+ * avoid this situation, so do the same here. The memory barrier is
+ * matched with the one in mount_fs() as we don't hold locks here.
*/
+ if (!(sb->s_flags & MS_BORN))
+ return 0;
+ smp_rmb();
+
if (sb->s_op && sb->s_op->nr_cached_objects)
total_objects = sb->s_op->nr_cached_objects(sb, sc);

@@ -1193,6 +1203,14 @@ mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
sb = root->d_sb;
BUG_ON(!sb);
WARN_ON(!sb->s_bdi);
+
+ /*
+ * Write barrier is for super_cache_count(). We place it before setting
+ * MS_BORN as the data dependency between the two functions is the
+ * superblock structure contents that we just set up, not the MS_BORN
+ * flag.
+ */
+ smp_wmb();
sb->s_flags |= MS_BORN;

error = security_sb_kern_mount(sb, flags, secdata);
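
The barrier pair added above is the classic publish/consume idiom: the writer
makes the object's contents visible before setting the "ready" flag, and the
reader tests the flag before touching the contents. A generic sketch of the
idiom (not the patch code itself):

    /* writer */
    obj->a = 1;            /* construct the object ...          */
    obj->b = 2;
    smp_wmb();             /* order the stores before the flag  */
    obj->ready = 1;

    /* reader */
    if (!obj->ready)
            return 0;      /* not published yet - do not touch  */
    smp_rmb();             /* pairs with the smp_wmb() above    */
    use(obj->a, obj->b);   /* guaranteed to see the stores      */
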
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index e6284591599e..5957c6a3fd7f 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -113,6 +113,23 @@ extern void kobject_put(struct kobject *kobj);
extern const void *kobject_namespace(struct kobject *kobj);
extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);

+/**
+ * kobject_has_children - Returns whether a kobject has children.
+ * @kobj: the object to test
+ *
+ * This will return whether a kobject has other kobjects as children.
+ *
+ * It does NOT account for the presence of attribute files, only sub
+ * directories. It also assumes there is no concurrent addition or
+ * removal of such children, and thus relies on external locking.
+ */
+static inline bool kobject_has_children(struct kobject *kobj)
+{
+ WARN_ON_ONCE(atomic_read(&kobj->kref.refcount) == 0);
+
+ return kobj->sd && kobj->sd->dir.subdirs;
+}
+
struct kobj_type {
void (*release)(struct kobject *kobj);
const struct sysfs_ops *sysfs_ops;
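
This is the helper consumed by the drivers/base/core.c hunk earlier in this
patch; the caller holds gdp_mutex, which satisfies the external-locking rule
spelled out in the kerneldoc. Condensed from that hunk:

    mutex_lock(&gdp_mutex);
    if (!kobject_has_children(glue_dir))
            kobject_del(glue_dir);   /* drop the now-empty glue dir from sysfs */
    kobject_put(glue_dir);
    mutex_unlock(&gdp_mutex);
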
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index f254982e1a8f..2ecf0f32444e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1368,6 +1368,7 @@ struct net_device_ops {
* @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
* entity (i.e. the master device for bridged veth)
* @IFF_MACSEC: device is a MACsec device
+ * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
*/
enum netdev_priv_flags {
IFF_802_1Q_VLAN = 1<<0,
@@ -1398,6 +1399,7 @@ enum netdev_priv_flags {
IFF_RXFH_CONFIGURED = 1<<25,
IFF_PHONY_HEADROOM = 1<<26,
IFF_MACSEC = 1<<27,
+ IFF_L3MDEV_RX_HANDLER = 1<<28,
};

#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
@@ -1427,6 +1429,7 @@ enum netdev_priv_flags {
#define IFF_TEAM IFF_TEAM
#define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED
#define IFF_MACSEC IFF_MACSEC
+#define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER

/**
* struct net_device - The DEVICE structure.
@@ -4244,6 +4247,11 @@ static inline bool netif_supports_nofcs(struct net_device *dev)
return dev->priv_flags & IFF_SUPP_NOFCS;
}

+static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
+}
+
static inline bool netif_is_l3_master(const struct net_device *dev)
{
return dev->priv_flags & IFF_L3MDEV_MASTER;
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
index 3832099289c5..128487658ff7 100644
--- a/include/net/l3mdev.h
+++ b/include/net/l3mdev.h
@@ -142,7 +142,8 @@ struct sk_buff *l3mdev_l3_rcv(struct sk_buff *skb, u16 proto)

if (netif_is_l3_slave(skb->dev))
master = netdev_master_upper_dev_get_rcu(skb->dev);
- else if (netif_is_l3_master(skb->dev))
+ else if (netif_is_l3_master(skb->dev) ||
+ netif_has_l3_rx_handler(skb->dev))
master = skb->dev;

if (master && master->l3mdev_ops->l3mdev_l3_rcv)
diff --git a/kernel/exit.c b/kernel/exit.c
index 6dd7ff4b337a..d9394fcd0e2c 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -525,12 +525,14 @@ static struct task_struct *find_alive_thread(struct task_struct *p)
return NULL;
}

-static struct task_struct *find_child_reaper(struct task_struct *father)
+static struct task_struct *find_child_reaper(struct task_struct *father,
+ struct list_head *dead)
__releases(&tasklist_lock)
__acquires(&tasklist_lock)
{
struct pid_namespace *pid_ns = task_active_pid_ns(father);
struct task_struct *reaper = pid_ns->child_reaper;
+ struct task_struct *p, *n;

if (likely(reaper != father))
return reaper;
@@ -546,6 +548,12 @@ static struct task_struct *find_child_reaper(struct task_struct *father)
panic("Attempted to kill init! exitcode=0x%08x\n",
father->signal->group_exit_code ?: father->exit_code);
}
+
+ list_for_each_entry_safe(p, n, dead, ptrace_entry) {
+ list_del_init(&p->ptrace_entry);
+ release_task(p);
+ }
+
zap_pid_ns_processes(pid_ns);
write_lock_irq(&tasklist_lock);

@@ -632,7 +640,7 @@ static void forget_original_parent(struct task_struct *father,
exit_ptrace(father, dead);

/* Can drop and reacquire tasklist_lock */
- reaper = find_child_reaper(father);
+ reaper = find_child_reaper(father, dead);
if (list_empty(&father->children))
return;

diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 851efb004857..4f1f5fd12042 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -336,7 +336,8 @@ static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
if (fail || tk->addr_valid == 0) {
pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
pfn, tk->tsk->comm, tk->tsk->pid);
- force_sig(SIGKILL, tk->tsk);
+ do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
+ tk->tsk, PIDTYPE_PID);
}

/*
diff --git a/mm/migrate.c b/mm/migrate.c
index 821623fc7091..b08c1a4a1c22 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1044,10 +1044,13 @@ out:
* If migration is successful, decrease refcount of the newpage
* which will not free the page because new page owner increased
* refcounter. As well, if it is LRU page, add the page to LRU
- * list in here.
+ * list in here. Use the old state of the isolated source page to
+ * determine if we migrated a LRU page. newpage was already unlocked
+ * and possibly modified by its owner - don't rely on the page
+ * state.
*/
if (rc == MIGRATEPAGE_SUCCESS) {
- if (unlikely(__PageMovable(newpage)))
+ if (unlikely(!is_lru))
put_page(newpage);
else
putback_lru_page(newpage);
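
For context, is_lru is sampled from the source page near the top of
unmap_and_move(), before migration can hand newpage to its new owner; roughly
(condensed, not part of this hunk):

    bool is_lru = !__PageMovable(page);  /* record the source page's type up front */
    ...
    if (rc == MIGRATEPAGE_SUCCESS) {
            if (unlikely(!is_lru))       /* decided from the saved state,       */
                    put_page(newpage);   /* never from the now-unlocked newpage */
            else
                    putback_lru_page(newpage);
    }
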
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 4a184157cc3d..1de3695cb419 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -861,6 +861,13 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
* still freeing memory.
*/
read_lock(&tasklist_lock);
+
+ /*
+ * The task 'p' might have already exited before reaching here. The
+ * put_task_struct() will free task_struct 'p' while the loop still try
+ * to access the field of 'p', so, get an extra reference.
+ */
+ get_task_struct(p);
for_each_thread(p, t) {
list_for_each_entry(child, &t->children, sibling) {
unsigned int child_points;
@@ -880,6 +887,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
}
}
}
+ put_task_struct(p);
read_unlock(&tasklist_lock);

p = find_lock_task_mm(victim);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 496f8d86b503..c7334d1e392a 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -423,6 +423,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
* fragment.
*/

+ err = -EINVAL;
/* Find out where to put this fragment. */
prev_tail = qp->q.fragments_tail;
if (!prev_tail)
@@ -499,7 +500,6 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)

discard_qp:
inet_frag_kill(&qp->q);
- err = -EINVAL;
__IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
err:
kfree_skb(skb);
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index c81b2c5caf26..8885dbad217b 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -359,6 +359,9 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
err = -EINVAL;
goto out_unlock;
}
+ }
+
+ if (sk->sk_bound_dev_if) {
dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
if (!dev) {
err = -ENODEV;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index b96dbe38ecad..4ae758bcb2cf 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -83,8 +83,7 @@
#define L2TP_SLFLAG_S 0x40000000
#define L2TP_SL_SEQ_MASK 0x00ffffff

-#define L2TP_HDR_SIZE_SEQ 10
-#define L2TP_HDR_SIZE_NOSEQ 6
+#define L2TP_HDR_SIZE_MAX 14

/* Default trace flags */
#define L2TP_DEFAULT_DEBUG_FLAGS 0
@@ -796,11 +795,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
"%s: recv data ns=%u, session nr=%u\n",
session->name, ns, session->nr);
}
+ ptr += 4;
}

- /* Advance past L2-specific header, if present */
- ptr += session->l2specific_len;
-
if (L2TP_SKB_CB(skb)->has_seq) {
/* Received a packet with sequence numbers. If we're the LNS,
* check if we sre sending sequence numbers and if not,
@@ -944,7 +941,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
__skb_pull(skb, sizeof(struct udphdr));

/* Short packet? */
- if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) {
+ if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) {
l2tp_info(tunnel, L2TP_MSG_DATA,
"%s: recv short packet (len=%d)\n",
tunnel->name, skb->len);
@@ -1023,6 +1020,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
goto error;
}

+ if (tunnel->version == L2TP_HDR_VER_3 &&
+ l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+ goto error;
+
l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook);
l2tp_session_dec_refcount(session);

@@ -1122,21 +1123,20 @@ static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
memcpy(bufp, &session->cookie[0], session->cookie_len);
bufp += session->cookie_len;
}
- if (session->l2specific_len) {
- if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
- u32 l2h = 0;
- if (session->send_seq) {
- l2h = 0x40000000 | session->ns;
- session->ns++;
- session->ns &= 0xffffff;
- l2tp_dbg(session, L2TP_MSG_SEQ,
- "%s: updated ns to %u\n",
- session->name, session->ns);
- }
+ if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
+ u32 l2h = 0;

- *((__be32 *) bufp) = htonl(l2h);
+ if (session->send_seq) {
+ l2h = 0x40000000 | session->ns;
+ session->ns++;
+ session->ns &= 0xffffff;
+ l2tp_dbg(session, L2TP_MSG_SEQ,
+ "%s: updated ns to %u\n",
+ session->name, session->ns);
}
- bufp += session->l2specific_len;
+
+ *((__be32 *)bufp) = htonl(l2h);
+ bufp += 4;
}

return bufp - optr;
@@ -1813,7 +1813,7 @@ int l2tp_session_delete(struct l2tp_session *session)
EXPORT_SYMBOL_GPL(l2tp_session_delete);

/* We come here whenever a session's send_seq, cookie_len or
- * l2specific_len parameters are set.
+ * l2specific_type parameters are set.
*/
void l2tp_session_set_header_len(struct l2tp_session *session, int version)
{
@@ -1822,7 +1822,8 @@ void l2tp_session_set_header_len(struct l2tp_session *session, int version)
if (session->send_seq)
session->hdr_len += 4;
} else {
- session->hdr_len = 4 + session->cookie_len + session->l2specific_len;
+ session->hdr_len = 4 + session->cookie_len;
+ session->hdr_len += l2tp_get_l2specific_len(session);
if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
session->hdr_len += 4;
}
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 86356a23a0a7..7cc49715606e 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -314,6 +314,37 @@ do { \
#define l2tp_session_dec_refcount(s) l2tp_session_dec_refcount_1(s)
#endif

+static inline int l2tp_get_l2specific_len(struct l2tp_session *session)
+{
+ switch (session->l2specific_type) {
+ case L2TP_L2SPECTYPE_DEFAULT:
+ return 4;
+ case L2TP_L2SPECTYPE_NONE:
+ default:
+ return 0;
+ }
+}
+
+static inline int l2tp_v3_ensure_opt_in_linear(struct l2tp_session *session, struct sk_buff *skb,
+ unsigned char **ptr, unsigned char **optr)
+{
+ int opt_len = session->peer_cookie_len + l2tp_get_l2specific_len(session);
+
+ if (opt_len > 0) {
+ int off = *ptr - *optr;
+
+ if (!pskb_may_pull(skb, off + opt_len))
+ return -1;
+
+ if (skb->data != *optr) {
+ *optr = skb->data;
+ *ptr = skb->data + off;
+ }
+ }
+
+ return 0;
+}
+
#define l2tp_printk(ptr, type, func, fmt, ...) \
do { \
if (((ptr)->debug) & (type)) \
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 9d77a54e8854..03a696d3bcd9 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -157,6 +157,9 @@ static int l2tp_ip_recv(struct sk_buff *skb)
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
}

+ if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+ goto discard_sess;
+
l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook);
l2tp_session_dec_refcount(session);

diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 247097289fd0..5e6d09863480 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -169,6 +169,9 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
}

+ if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+ goto discard_sess;
+
l2tp_recv_common(session, skb, ptr, optr, 0, skb->len,
tunnel->recv_payload_hook);
l2tp_session_dec_refcount(session);
diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
index 94d05806a9a2..f0ecaec1ff3d 100644
--- a/net/netrom/nr_timer.c
+++ b/net/netrom/nr_timer.c
@@ -53,21 +53,21 @@ void nr_start_t1timer(struct sock *sk)
{
struct nr_sock *nr = nr_sk(sk);

- mod_timer(&nr->t1timer, jiffies + nr->t1);
+ sk_reset_timer(sk, &nr->t1timer, jiffies + nr->t1);
}

void nr_start_t2timer(struct sock *sk)
{
struct nr_sock *nr = nr_sk(sk);

- mod_timer(&nr->t2timer, jiffies + nr->t2);
+ sk_reset_timer(sk, &nr->t2timer, jiffies + nr->t2);
}

void nr_start_t4timer(struct sock *sk)
{
struct nr_sock *nr = nr_sk(sk);

- mod_timer(&nr->t4timer, jiffies + nr->t4);
+ sk_reset_timer(sk, &nr->t4timer, jiffies + nr->t4);
}

void nr_start_idletimer(struct sock *sk)
@@ -75,37 +75,37 @@ void nr_start_idletimer(struct sock *sk)
struct nr_sock *nr = nr_sk(sk);

if (nr->idle > 0)
- mod_timer(&nr->idletimer, jiffies + nr->idle);
+ sk_reset_timer(sk, &nr->idletimer, jiffies + nr->idle);
}

void nr_start_heartbeat(struct sock *sk)
{
- mod_timer(&sk->sk_timer, jiffies + 5 * HZ);
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + 5 * HZ);
}

void nr_stop_t1timer(struct sock *sk)
{
- del_timer(&nr_sk(sk)->t1timer);
+ sk_stop_timer(sk, &nr_sk(sk)->t1timer);
}

void nr_stop_t2timer(struct sock *sk)
{
- del_timer(&nr_sk(sk)->t2timer);
+ sk_stop_timer(sk, &nr_sk(sk)->t2timer);
}

void nr_stop_t4timer(struct sock *sk)
{
- del_timer(&nr_sk(sk)->t4timer);
+ sk_stop_timer(sk, &nr_sk(sk)->t4timer);
}

void nr_stop_idletimer(struct sock *sk)
{
- del_timer(&nr_sk(sk)->idletimer);
+ sk_stop_timer(sk, &nr_sk(sk)->idletimer);
}

void nr_stop_heartbeat(struct sock *sk)
{
- del_timer(&sk->sk_timer);
+ sk_stop_timer(sk, &sk->sk_timer);
}

int nr_t1timer_running(struct sock *sk)
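
The point of switching to sk_reset_timer()/sk_stop_timer() above is that they
tie the timer to the socket's refcount, so a pending timer can never fire
against an already-freed sock. Their core logic is roughly this (paraphrased
from net/core/sock.c of this era):

    void sk_reset_timer(struct sock *sk, struct timer_list *timer,
                        unsigned long expires)
    {
            if (!mod_timer(timer, expires))
                    sock_hold(sk);    /* timer was not pending: take a reference */
    }

    void sk_stop_timer(struct sock *sk, struct timer_list *timer)
    {
            if (timer_pending(timer) && del_timer(timer))
                    __sock_put(sk);   /* cancelled a pending timer: drop its ref */
    }
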
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index 0fc76d845103..9f704a7f2a28 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -848,6 +848,7 @@ void rose_link_device_down(struct net_device *dev)

/*
* Route a frame to an appropriate AX.25 connection.
+ * A NULL ax25_cb indicates an internally generated frame.
*/
int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
{
@@ -865,6 +866,10 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)

if (skb->len < ROSE_MIN_LEN)
return res;
+
+ if (!ax25)
+ return rose_loopback_queue(skb, NULL);
+
frametype = skb->data[2];
lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
if (frametype == ROSE_CALL_REQUEST &&