Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.9/0126-4.9.27-all-fixes.patch

Revision 2956
Mon Jul 24 12:03:46 2017 UTC by niro
File size: 29538 bytes
-added patches-4.9
1 niro 2956 diff --git a/Makefile b/Makefile
2     index c09679c1a70d..35d6b4e76264 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 4
7     PATCHLEVEL = 9
8     -SUBLEVEL = 26
9     +SUBLEVEL = 27
10     EXTRAVERSION =
11     NAME = Roaring Lionus
12    
13     diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
14     index 3a9149cf0110..d0ac2d56520f 100644
15     --- a/drivers/char/tpm/tpm-interface.c
16     +++ b/drivers/char/tpm/tpm-interface.c
17     @@ -489,8 +489,7 @@ static int tpm_startup(struct tpm_chip *chip, __be16 startup_type)
18     int tpm_get_timeouts(struct tpm_chip *chip)
19     {
20     struct tpm_cmd_t tpm_cmd;
21     - unsigned long new_timeout[4];
22     - unsigned long old_timeout[4];
23     + unsigned long timeout_old[4], timeout_chip[4], timeout_eff[4];
24     struct duration_t *duration_cap;
25     ssize_t rc;
26    
27     @@ -542,11 +541,15 @@ int tpm_get_timeouts(struct tpm_chip *chip)
28     != sizeof(tpm_cmd.header.out) + sizeof(u32) + 4 * sizeof(u32))
29     return -EINVAL;
30    
31     - old_timeout[0] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.a);
32     - old_timeout[1] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.b);
33     - old_timeout[2] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.c);
34     - old_timeout[3] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.d);
35     - memcpy(new_timeout, old_timeout, sizeof(new_timeout));
36     + timeout_old[0] = jiffies_to_usecs(chip->timeout_a);
37     + timeout_old[1] = jiffies_to_usecs(chip->timeout_b);
38     + timeout_old[2] = jiffies_to_usecs(chip->timeout_c);
39     + timeout_old[3] = jiffies_to_usecs(chip->timeout_d);
40     + timeout_chip[0] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.a);
41     + timeout_chip[1] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.b);
42     + timeout_chip[2] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.c);
43     + timeout_chip[3] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.d);
44     + memcpy(timeout_eff, timeout_chip, sizeof(timeout_eff));
45    
46     /*
47     * Provide ability for vendor overrides of timeout values in case
48     @@ -554,16 +557,24 @@ int tpm_get_timeouts(struct tpm_chip *chip)
49     */
50     if (chip->ops->update_timeouts != NULL)
51     chip->timeout_adjusted =
52     - chip->ops->update_timeouts(chip, new_timeout);
53     + chip->ops->update_timeouts(chip, timeout_eff);
54    
55     if (!chip->timeout_adjusted) {
56     - /* Don't overwrite default if value is 0 */
57     - if (new_timeout[0] != 0 && new_timeout[0] < 1000) {
58     - int i;
59     + /* Restore default if chip reported 0 */
60     + int i;
61    
62     + for (i = 0; i < ARRAY_SIZE(timeout_eff); i++) {
63     + if (timeout_eff[i])
64     + continue;
65     +
66     + timeout_eff[i] = timeout_old[i];
67     + chip->timeout_adjusted = true;
68     + }
69     +
70     + if (timeout_eff[0] != 0 && timeout_eff[0] < 1000) {
71     /* timeouts in msec rather usec */
72     - for (i = 0; i != ARRAY_SIZE(new_timeout); i++)
73     - new_timeout[i] *= 1000;
74     + for (i = 0; i != ARRAY_SIZE(timeout_eff); i++)
75     + timeout_eff[i] *= 1000;
76     chip->timeout_adjusted = true;
77     }
78     }
79     @@ -572,16 +583,16 @@ int tpm_get_timeouts(struct tpm_chip *chip)
80     if (chip->timeout_adjusted) {
81     dev_info(&chip->dev,
82     HW_ERR "Adjusting reported timeouts: A %lu->%luus B %lu->%luus C %lu->%luus D %lu->%luus\n",
83     - old_timeout[0], new_timeout[0],
84     - old_timeout[1], new_timeout[1],
85     - old_timeout[2], new_timeout[2],
86     - old_timeout[3], new_timeout[3]);
87     + timeout_chip[0], timeout_eff[0],
88     + timeout_chip[1], timeout_eff[1],
89     + timeout_chip[2], timeout_eff[2],
90     + timeout_chip[3], timeout_eff[3]);
91     }
92    
93     - chip->timeout_a = usecs_to_jiffies(new_timeout[0]);
94     - chip->timeout_b = usecs_to_jiffies(new_timeout[1]);
95     - chip->timeout_c = usecs_to_jiffies(new_timeout[2]);
96     - chip->timeout_d = usecs_to_jiffies(new_timeout[3]);
97     + chip->timeout_a = usecs_to_jiffies(timeout_eff[0]);
98     + chip->timeout_b = usecs_to_jiffies(timeout_eff[1]);
99     + chip->timeout_c = usecs_to_jiffies(timeout_eff[2]);
100     + chip->timeout_d = usecs_to_jiffies(timeout_eff[3]);
101    
102     duration:
103     tpm_cmd.header.in = tpm_getcap_header;
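
Note on the tpm-interface.c hunk above: a timeout the chip reports as 0 no longer overwrites the driver's previously effective value, and the existing "looks like milliseconds" heuristic still scales the whole set to microseconds. The following standalone C sketch mirrors that normalization under simplified assumptions; the function and array names are invented for illustration and are not the kernel code:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define N_TIMEOUTS 4

static bool normalize_timeouts(unsigned long eff[N_TIMEOUTS],
                               const unsigned long chip[N_TIMEOUTS],
                               const unsigned long old[N_TIMEOUTS])
{
    bool adjusted = false;
    size_t i;

    for (i = 0; i < N_TIMEOUTS; i++) {
        eff[i] = chip[i];
        if (!eff[i]) {              /* chip reported 0: keep old value */
            eff[i] = old[i];
            adjusted = true;
        }
    }

    if (eff[0] != 0 && eff[0] < 1000) {   /* values look like msec, not usec */
        for (i = 0; i < N_TIMEOUTS; i++)
            eff[i] *= 1000;
        adjusted = true;
    }
    return adjusted;
}

int main(void)
{
    unsigned long old[N_TIMEOUTS]  = { 750000, 2000000, 750000, 750000 };
    unsigned long chip[N_TIMEOUTS] = { 750000, 2000000, 0, 750000 };
    unsigned long eff[N_TIMEOUTS];
    bool adjusted = normalize_timeouts(eff, chip, old);

    printf("adjusted=%d: %lu %lu %lu %lu usec\n", adjusted,
           eff[0], eff[1], eff[2], eff[3]);
    return 0;
}
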
104     diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
105     index 43146162c122..b99c1df48156 100644
106     --- a/drivers/hwmon/it87.c
107     +++ b/drivers/hwmon/it87.c
108     @@ -3115,7 +3115,7 @@ static int __init sm_it87_init(void)
109     {
110     int sioaddr[2] = { REG_2E, REG_4E };
111     struct it87_sio_data sio_data;
112     - unsigned short isa_address;
113     + unsigned short isa_address[2];
114     bool found = false;
115     int i, err;
116    
117     @@ -3125,15 +3125,29 @@ static int __init sm_it87_init(void)
118    
119     for (i = 0; i < ARRAY_SIZE(sioaddr); i++) {
120     memset(&sio_data, 0, sizeof(struct it87_sio_data));
121     - isa_address = 0;
122     - err = it87_find(sioaddr[i], &isa_address, &sio_data);
123     - if (err || isa_address == 0)
124     + isa_address[i] = 0;
125     + err = it87_find(sioaddr[i], &isa_address[i], &sio_data);
126     + if (err || isa_address[i] == 0)
127     continue;
128     + /*
129     + * Don't register second chip if its ISA address matches
130     + * the first chip's ISA address.
131     + */
132     + if (i && isa_address[i] == isa_address[0])
133     + break;
134    
135     - err = it87_device_add(i, isa_address, &sio_data);
136     + err = it87_device_add(i, isa_address[i], &sio_data);
137     if (err)
138     goto exit_dev_unregister;
139     +
140     found = true;
141     +
142     + /*
143     + * IT8705F may respond on both SIO addresses.
144     + * Stop probing after finding one.
145     + */
146     + if (sio_data.type == it87)
147     + break;
148     }
149    
150     if (!found) {
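
Note on the it87.c hunk above: the init loop now remembers the ISA address found at each Super-I/O slot and refuses to register a second chip that decodes to the same address as the first. A toy sketch of that guard follows; probe_slot() and register_chip() are stand-ins, not the driver's functions:

#include <stdbool.h>
#include <stdio.h>

#define NR_SLOTS 2

/* pretend both Super-I/O slots decode to the same ISA address */
static int probe_slot(int slot, unsigned short *isa)
{
    (void)slot;
    *isa = 0x290;
    return 0;
}

static void register_chip(int slot, unsigned short isa)
{
    printf("registering chip %d at 0x%hx\n", slot, isa);
}

int main(void)
{
    unsigned short isa_address[NR_SLOTS];
    bool found = false;
    int i;

    for (i = 0; i < NR_SLOTS; i++) {
        isa_address[i] = 0;
        if (probe_slot(i, &isa_address[i]) || isa_address[i] == 0)
            continue;
        /* second chip at the first chip's ISA address: one device only */
        if (i && isa_address[i] == isa_address[0])
            break;
        register_chip(i, isa_address[i]);
        found = true;
    }

    return found ? 0 : 1;
}
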
151     diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
152     index 966eb4b61aed..a68c650aad11 100644
153     --- a/drivers/md/dm-ioctl.c
154     +++ b/drivers/md/dm-ioctl.c
155     @@ -1847,7 +1847,7 @@ static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
156     if (r)
157     goto out;
158    
159     - param->data_size = sizeof(*param);
160     + param->data_size = offsetof(struct dm_ioctl, data);
161     r = fn(param, input_param_size);
162    
163     if (unlikely(param->flags & DM_BUFFER_FULL_FLAG) &&
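
Note on the dm-ioctl.c hunk above: data_size is now set to offsetof(struct dm_ioctl, data) rather than sizeof(*param), so it covers only the fixed header that precedes the variable payload. The struct below is made up purely to show why the two values can differ (tail padding counts toward sizeof but not toward the offset of the trailing member):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct fake_ioctl_hdr {
    uint64_t dev;
    uint32_t data_size;   /* size of the fixed header portion */
    char data[];          /* variable-length payload follows */
};

int main(void)
{
    /* offsetof(..., data) = 12 here, while sizeof = 16 due to padding */
    printf("sizeof          = %zu\n", sizeof(struct fake_ioctl_hdr));
    printf("offsetof(data)  = %zu\n", offsetof(struct fake_ioctl_hdr, data));
    return 0;
}
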
164     diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
165     index 3f218f5cf29b..c5ab1b0037fc 100644
166     --- a/drivers/scsi/storvsc_drv.c
167     +++ b/drivers/scsi/storvsc_drv.c
168     @@ -400,8 +400,6 @@ MODULE_PARM_DESC(storvsc_vcpus_per_sub_channel, "Ratio of VCPUs to subchannels")
169     */
170     static int storvsc_timeout = 180;
171    
172     -static int msft_blist_flags = BLIST_TRY_VPD_PAGES;
173     -
174     #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
175     static struct scsi_transport_template *fc_transport_template;
176     #endif
177     @@ -1283,6 +1281,22 @@ static int storvsc_do_io(struct hv_device *device,
178     return ret;
179     }
180    
181     +static int storvsc_device_alloc(struct scsi_device *sdevice)
182     +{
183     + /*
184     + * Set blist flag to permit the reading of the VPD pages even when
185     + * the target may claim SPC-2 compliance. MSFT targets currently
186     + * claim SPC-2 compliance while they implement post SPC-2 features.
187     + * With this flag we can correctly handle WRITE_SAME_16 issues.
188     + *
189     + * Hypervisor reports SCSI_UNKNOWN type for DVD ROM device but
190     + * still supports REPORT LUN.
191     + */
192     + sdevice->sdev_bflags = BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES;
193     +
194     + return 0;
195     +}
196     +
197     static int storvsc_device_configure(struct scsi_device *sdevice)
198     {
199    
200     @@ -1298,14 +1312,6 @@ static int storvsc_device_configure(struct scsi_device *sdevice)
201     sdevice->no_write_same = 1;
202    
203     /*
204     - * Add blist flags to permit the reading of the VPD pages even when
205     - * the target may claim SPC-2 compliance. MSFT targets currently
206     - * claim SPC-2 compliance while they implement post SPC-2 features.
207     - * With this patch we can correctly handle WRITE_SAME_16 issues.
208     - */
209     - sdevice->sdev_bflags |= msft_blist_flags;
210     -
211     - /*
212     * If the host is WIN8 or WIN8 R2, claim conformance to SPC-3
213     * if the device is a MSFT virtual device. If the host is
214     * WIN10 or newer, allow write_same.
215     @@ -1569,6 +1575,7 @@ static struct scsi_host_template scsi_driver = {
216     .eh_host_reset_handler = storvsc_host_reset_handler,
217     .proc_name = "storvsc_host",
218     .eh_timed_out = storvsc_eh_timed_out,
219     + .slave_alloc = storvsc_device_alloc,
220     .slave_configure = storvsc_device_configure,
221     .cmd_per_lun = 255,
222     .this_id = -1,
223     diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
224     index 6e3e63675e56..22d32d295c5b 100644
225     --- a/drivers/tty/serial/8250/8250_pci.c
226     +++ b/drivers/tty/serial/8250/8250_pci.c
227     @@ -5621,17 +5621,15 @@ static pci_ers_result_t serial8250_io_slot_reset(struct pci_dev *dev)
228     static void serial8250_io_resume(struct pci_dev *dev)
229     {
230     struct serial_private *priv = pci_get_drvdata(dev);
231     - const struct pciserial_board *board;
232     + struct serial_private *new;
233    
234     if (!priv)
235     return;
236    
237     - board = priv->board;
238     - kfree(priv);
239     - priv = pciserial_init_ports(dev, board);
240     -
241     - if (!IS_ERR(priv)) {
242     - pci_set_drvdata(dev, priv);
243     + new = pciserial_init_ports(dev, priv->board);
244     + if (!IS_ERR(new)) {
245     + pci_set_drvdata(dev, new);
246     + kfree(priv);
247     }
248     }
249    
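
Note on the 8250_pci.c hunk above: serial8250_io_resume() now builds the replacement serial_private first and frees the old one only after success, so a failed re-init never leaves freed memory installed as drvdata. A minimal userspace sketch of that ordering, with hypothetical names and plain malloc instead of the kernel API:

#include <stdio.h>
#include <stdlib.h>

struct priv {
    const char *board;
};

static struct priv *current_priv;          /* stands in for pci drvdata */

static struct priv *reinit_ports(const char *board)
{
    struct priv *p = malloc(sizeof(*p));

    if (!p)
        return NULL;                       /* construction failed */
    p->board = board;
    return p;
}

static void io_resume(void)
{
    struct priv *old = current_priv;
    struct priv *new;

    if (!old)
        return;

    new = reinit_ports(old->board);        /* old state still valid here */
    if (new) {
        current_priv = new;                /* publish the replacement ... */
        free(old);                         /* ... then drop the old one */
    }
    /* on failure, the old (still consistent) private data is kept */
}

int main(void)
{
    current_priv = reinit_ports("initial");
    io_resume();
    printf("board = %s\n", current_priv ? current_priv->board : "none");
    free(current_priv);
    return 0;
}
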
250     diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
251     index 1e643c718917..18dc18f8af2c 100644
252     --- a/fs/ceph/addr.c
253     +++ b/fs/ceph/addr.c
254     @@ -315,7 +315,32 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
255     struct page **pages;
256     pgoff_t next_index;
257     int nr_pages = 0;
258     - int ret;
259     + int got = 0;
260     + int ret = 0;
261     +
262     + if (!current->journal_info) {
263     + /* caller of readpages does not hold buffer and read caps
264     + * (fadvise, madvise and readahead cases) */
265     + int want = CEPH_CAP_FILE_CACHE;
266     + ret = ceph_try_get_caps(ci, CEPH_CAP_FILE_RD, want, &got);
267     + if (ret < 0) {
268     + dout("start_read %p, error getting cap\n", inode);
269     + } else if (!(got & want)) {
270     + dout("start_read %p, no cache cap\n", inode);
271     + ret = 0;
272     + }
273     + if (ret <= 0) {
274     + if (got)
275     + ceph_put_cap_refs(ci, got);
276     + while (!list_empty(page_list)) {
277     + page = list_entry(page_list->prev,
278     + struct page, lru);
279     + list_del(&page->lru);
280     + put_page(page);
281     + }
282     + return ret;
283     + }
284     + }
285    
286     off = (u64) page_offset(page);
287    
288     @@ -338,15 +363,18 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
289     CEPH_OSD_FLAG_READ, NULL,
290     ci->i_truncate_seq, ci->i_truncate_size,
291     false);
292     - if (IS_ERR(req))
293     - return PTR_ERR(req);
294     + if (IS_ERR(req)) {
295     + ret = PTR_ERR(req);
296     + goto out;
297     + }
298    
299     /* build page vector */
300     nr_pages = calc_pages_for(0, len);
301     pages = kmalloc(sizeof(*pages) * nr_pages, GFP_KERNEL);
302     - ret = -ENOMEM;
303     - if (!pages)
304     - goto out;
305     + if (!pages) {
306     + ret = -ENOMEM;
307     + goto out_put;
308     + }
309     for (i = 0; i < nr_pages; ++i) {
310     page = list_entry(page_list->prev, struct page, lru);
311     BUG_ON(PageLocked(page));
312     @@ -379,6 +407,12 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
313     if (ret < 0)
314     goto out_pages;
315     ceph_osdc_put_request(req);
316     +
317     + /* After adding locked pages to page cache, the inode holds cache cap.
318     + * So we can drop our cap refs. */
319     + if (got)
320     + ceph_put_cap_refs(ci, got);
321     +
322     return nr_pages;
323    
324     out_pages:
325     @@ -387,8 +421,11 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
326     unlock_page(pages[i]);
327     }
328     ceph_put_page_vector(pages, nr_pages, false);
329     -out:
330     +out_put:
331     ceph_osdc_put_request(req);
332     +out:
333     + if (got)
334     + ceph_put_cap_refs(ci, got);
335     return ret;
336     }
337    
338     @@ -425,7 +462,6 @@ static int ceph_readpages(struct file *file, struct address_space *mapping,
339     rc = start_read(inode, page_list, max);
340     if (rc < 0)
341     goto out;
342     - BUG_ON(rc == 0);
343     }
344     out:
345     ceph_fscache_readpages_cancel(inode, page_list);
346     @@ -1372,9 +1408,11 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
347     inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got));
348    
349     if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
350     - ci->i_inline_version == CEPH_INLINE_NONE)
351     + ci->i_inline_version == CEPH_INLINE_NONE) {
352     + current->journal_info = vma->vm_file;
353     ret = filemap_fault(vma, vmf);
354     - else
355     + current->journal_info = NULL;
356     + } else
357     ret = -EAGAIN;
358    
359     dout("filemap_fault %p %llu~%zd dropping cap refs on %s ret %d\n",
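
Note on the ceph addr.c hunk above: start_read() now takes a cap reference up front when the caller does not already hold one, and every exit path (including the new out_put/out labels) releases it; on success the reference can be dropped because the cached pages pin the cap. The generic goto-cleanup shape looks like the sketch below, using a plain counter instead of the ceph cap machinery; all names are invented:

#include <stdio.h>
#include <stdlib.h>

static int refs;

static int try_get_ref(void)  { refs++; return 1; }
static void put_ref(void)     { refs--; }

static int start_read(int fail_alloc)
{
    int got = 0;
    int ret = 0;
    void *req = NULL, *pages = NULL;

    if (!try_get_ref())
        return 0;
    got = 1;

    req = malloc(64);
    if (!req) {
        ret = -1;
        goto out;                 /* only the reference to undo */
    }

    pages = fail_alloc ? NULL : malloc(64);
    if (!pages) {
        ret = -1;
        goto out_put;             /* undo req, then fall through */
    }

    free(pages);
    free(req);
    if (got)
        put_ref();                /* success: pinned elsewhere now */
    return 1;

out_put:
    free(req);
out:
    if (got)
        put_ref();
    return ret;
}

int main(void)
{
    start_read(1);
    start_read(0);
    printf("leaked refs: %d\n", refs);    /* expect 0 */
    return 0;
}
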
360     diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
361     index f3f21105b860..03951f90ecf7 100644
362     --- a/fs/ceph/caps.c
363     +++ b/fs/ceph/caps.c
364     @@ -2479,6 +2479,27 @@ static void check_max_size(struct inode *inode, loff_t endoff)
365     ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
366     }
367    
368     +int ceph_try_get_caps(struct ceph_inode_info *ci, int need, int want, int *got)
369     +{
370     + int ret, err = 0;
371     +
372     + BUG_ON(need & ~CEPH_CAP_FILE_RD);
373     + BUG_ON(want & ~(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO));
374     + ret = ceph_pool_perm_check(ci, need);
375     + if (ret < 0)
376     + return ret;
377     +
378     + ret = try_get_cap_refs(ci, need, want, 0, true, got, &err);
379     + if (ret) {
380     + if (err == -EAGAIN) {
381     + ret = 0;
382     + } else if (err < 0) {
383     + ret = err;
384     + }
385     + }
386     + return ret;
387     +}
388     +
389     /*
390     * Wait for caps, and take cap references. If we can't get a WR cap
391     * due to a small max_size, make sure we check_max_size (and possibly
392     diff --git a/fs/ceph/file.c b/fs/ceph/file.c
393     index f995e3528a33..ca3f630db90f 100644
394     --- a/fs/ceph/file.c
395     +++ b/fs/ceph/file.c
396     @@ -1249,8 +1249,9 @@ static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
397     dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
398     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
399     ceph_cap_string(got));
400     -
401     + current->journal_info = filp;
402     ret = generic_file_read_iter(iocb, to);
403     + current->journal_info = NULL;
404     }
405     dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
406     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
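
Note on the ceph file.c hunk above (and the matching filemap_fault change): the read paths tag the task via current->journal_info so that start_read(), called further down the same call chain, can tell the caller already holds the needed caps. A thread-local marker gives the same shape in plain C; the names below are invented for the sketch:

#include <stdio.h>

static _Thread_local const void *caller_marker;

static void readahead_path(void)
{
    if (!caller_marker)
        printf("no caller context: take references here\n");
    else
        printf("caller already holds references\n");
}

static void read_iter(const void *filp)
{
    caller_marker = filp;     /* mark: caps held for this call chain */
    readahead_path();
    caller_marker = NULL;     /* always clear before returning */
}

int main(void)
{
    int dummy_file;

    readahead_path();         /* e.g. fadvise/madvise-driven readahead */
    read_iter(&dummy_file);   /* normal read path */
    return 0;
}
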
407     diff --git a/fs/ceph/super.h b/fs/ceph/super.h
408     index 3e3fa9163059..622d5dd9f616 100644
409     --- a/fs/ceph/super.h
410     +++ b/fs/ceph/super.h
411     @@ -905,6 +905,8 @@ extern int ceph_encode_dentry_release(void **p, struct dentry *dn,
412    
413     extern int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
414     loff_t endoff, int *got, struct page **pinned_page);
415     +extern int ceph_try_get_caps(struct ceph_inode_info *ci,
416     + int need, int want, int *got);
417    
418     /* for counting open files by mode */
419     extern void __ceph_get_fmode(struct ceph_inode_info *ci, int mode);
420     diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
421     index 94661cf77ae8..b3830f7ab260 100644
422     --- a/fs/cifs/cifsglob.h
423     +++ b/fs/cifs/cifsglob.h
424     @@ -241,6 +241,7 @@ struct smb_version_operations {
425     /* verify the message */
426     int (*check_message)(char *, unsigned int, struct TCP_Server_Info *);
427     bool (*is_oplock_break)(char *, struct TCP_Server_Info *);
428     + int (*handle_cancelled_mid)(char *, struct TCP_Server_Info *);
429     void (*downgrade_oplock)(struct TCP_Server_Info *,
430     struct cifsInodeInfo *, bool);
431     /* process transaction2 response */
432     @@ -1314,12 +1315,19 @@ struct mid_q_entry {
433     void *callback_data; /* general purpose pointer for callback */
434     void *resp_buf; /* pointer to received SMB header */
435     int mid_state; /* wish this were enum but can not pass to wait_event */
436     + unsigned int mid_flags;
437     __le16 command; /* smb command code */
438     bool large_buf:1; /* if valid response, is pointer to large buf */
439     bool multiRsp:1; /* multiple trans2 responses for one request */
440     bool multiEnd:1; /* both received */
441     };
442    
443     +struct close_cancelled_open {
444     + struct cifs_fid fid;
445     + struct cifs_tcon *tcon;
446     + struct work_struct work;
447     +};
448     +
449     /* Make code in transport.c a little cleaner by moving
450     update of optional stats into function below */
451     #ifdef CONFIG_CIFS_STATS2
452     @@ -1451,6 +1459,9 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
453     #define MID_RESPONSE_MALFORMED 0x10
454     #define MID_SHUTDOWN 0x20
455    
456     +/* Flags */
457     +#define MID_WAIT_CANCELLED 1 /* Cancelled while waiting for response */
458     +
459     /* Types of response buffer returned from SendReceive2 */
460     #define CIFS_NO_BUFFER 0 /* Response buffer not returned */
461     #define CIFS_SMALL_BUFFER 1
462     diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
463     index e3fed9249a04..586fdac05ec2 100644
464     --- a/fs/cifs/cifssmb.c
465     +++ b/fs/cifs/cifssmb.c
466     @@ -1423,6 +1423,8 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
467    
468     length = discard_remaining_data(server);
469     dequeue_mid(mid, rdata->result);
470     + mid->resp_buf = server->smallbuf;
471     + server->smallbuf = NULL;
472     return length;
473     }
474    
475     @@ -1534,6 +1536,8 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
476     return cifs_readv_discard(server, mid);
477    
478     dequeue_mid(mid, false);
479     + mid->resp_buf = server->smallbuf;
480     + server->smallbuf = NULL;
481     return length;
482     }
483    
484     diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
485     index 893be0722643..b8015de88e8c 100644
486     --- a/fs/cifs/connect.c
487     +++ b/fs/cifs/connect.c
488     @@ -882,10 +882,19 @@ cifs_demultiplex_thread(void *p)
489    
490     server->lstrp = jiffies;
491     if (mid_entry != NULL) {
492     + if ((mid_entry->mid_flags & MID_WAIT_CANCELLED) &&
493     + mid_entry->mid_state == MID_RESPONSE_RECEIVED &&
494     + server->ops->handle_cancelled_mid)
495     + server->ops->handle_cancelled_mid(
496     + mid_entry->resp_buf,
497     + server);
498     +
499     if (!mid_entry->multiRsp || mid_entry->multiEnd)
500     mid_entry->callback(mid_entry);
501     - } else if (!server->ops->is_oplock_break ||
502     - !server->ops->is_oplock_break(buf, server)) {
503     + } else if (server->ops->is_oplock_break &&
504     + server->ops->is_oplock_break(buf, server)) {
505     + cifs_dbg(FYI, "Received oplock break\n");
506     + } else {
507     cifs_dbg(VFS, "No task to wake, unknown frame received! NumMids %d\n",
508     atomic_read(&midCount));
509     cifs_dump_mem("Received Data is: ", buf,
510     diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
511     index 3d383489b9cf..97307808ae42 100644
512     --- a/fs/cifs/smb2misc.c
513     +++ b/fs/cifs/smb2misc.c
514     @@ -654,3 +654,47 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
515     cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
516     return false;
517     }
518     +
519     +void
520     +smb2_cancelled_close_fid(struct work_struct *work)
521     +{
522     + struct close_cancelled_open *cancelled = container_of(work,
523     + struct close_cancelled_open, work);
524     +
525     + cifs_dbg(VFS, "Close unmatched open\n");
526     +
527     + SMB2_close(0, cancelled->tcon, cancelled->fid.persistent_fid,
528     + cancelled->fid.volatile_fid);
529     + cifs_put_tcon(cancelled->tcon);
530     + kfree(cancelled);
531     +}
532     +
533     +int
534     +smb2_handle_cancelled_mid(char *buffer, struct TCP_Server_Info *server)
535     +{
536     + struct smb2_hdr *hdr = (struct smb2_hdr *)buffer;
537     + struct smb2_create_rsp *rsp = (struct smb2_create_rsp *)buffer;
538     + struct cifs_tcon *tcon;
539     + struct close_cancelled_open *cancelled;
540     +
541     + if (hdr->Command != SMB2_CREATE || hdr->Status != STATUS_SUCCESS)
542     + return 0;
543     +
544     + cancelled = kzalloc(sizeof(*cancelled), GFP_KERNEL);
545     + if (!cancelled)
546     + return -ENOMEM;
547     +
548     + tcon = smb2_find_smb_tcon(server, hdr->SessionId, hdr->TreeId);
549     + if (!tcon) {
550     + kfree(cancelled);
551     + return -ENOENT;
552     + }
553     +
554     + cancelled->fid.persistent_fid = rsp->PersistentFileId;
555     + cancelled->fid.volatile_fid = rsp->VolatileFileId;
556     + cancelled->tcon = tcon;
557     + INIT_WORK(&cancelled->work, smb2_cancelled_close_fid);
558     + queue_work(cifsiod_wq, &cancelled->work);
559     +
560     + return 0;
561     +}
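
Note on the smb2misc.c hunk above: smb2_cancelled_close_fid() recovers its context from the embedded work_struct with container_of() and then closes the handle that the cancelled CREATE still produced. The sketch below shows that embedding pattern with a hand-rolled container_of and a direct call in place of a real workqueue; everything here is illustrative, not the CIFS code:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct work_item {
    void (*fn)(struct work_item *w);
};

struct close_cancelled_open {
    unsigned long long persistent_fid;
    unsigned long long volatile_fid;
    struct work_item work;              /* embedded, not a pointer */
};

static void cancelled_close_fn(struct work_item *w)
{
    struct close_cancelled_open *c =
        container_of(w, struct close_cancelled_open, work);

    printf("closing orphaned handle %llu/%llu\n",
           c->persistent_fid, c->volatile_fid);
    free(c);                            /* the work item owns the context */
}

int main(void)
{
    struct close_cancelled_open *c = calloc(1, sizeof(*c));

    if (!c)
        return 1;
    c->persistent_fid = 0x11;
    c->volatile_fid = 0x22;
    c->work.fn = cancelled_close_fn;

    /* a real workqueue would run this later on a worker thread */
    c->work.fn(&c->work);
    return 0;
}
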
562     diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
563     index 5d456ebb3813..007abf7195af 100644
564     --- a/fs/cifs/smb2ops.c
565     +++ b/fs/cifs/smb2ops.c
566     @@ -1565,6 +1565,7 @@ struct smb_version_operations smb20_operations = {
567     .clear_stats = smb2_clear_stats,
568     .print_stats = smb2_print_stats,
569     .is_oplock_break = smb2_is_valid_oplock_break,
570     + .handle_cancelled_mid = smb2_handle_cancelled_mid,
571     .downgrade_oplock = smb2_downgrade_oplock,
572     .need_neg = smb2_need_neg,
573     .negotiate = smb2_negotiate,
574     @@ -1645,6 +1646,7 @@ struct smb_version_operations smb21_operations = {
575     .clear_stats = smb2_clear_stats,
576     .print_stats = smb2_print_stats,
577     .is_oplock_break = smb2_is_valid_oplock_break,
578     + .handle_cancelled_mid = smb2_handle_cancelled_mid,
579     .downgrade_oplock = smb2_downgrade_oplock,
580     .need_neg = smb2_need_neg,
581     .negotiate = smb2_negotiate,
582     @@ -1727,6 +1729,7 @@ struct smb_version_operations smb30_operations = {
583     .print_stats = smb2_print_stats,
584     .dump_share_caps = smb2_dump_share_caps,
585     .is_oplock_break = smb2_is_valid_oplock_break,
586     + .handle_cancelled_mid = smb2_handle_cancelled_mid,
587     .downgrade_oplock = smb2_downgrade_oplock,
588     .need_neg = smb2_need_neg,
589     .negotiate = smb2_negotiate,
590     @@ -1815,6 +1818,7 @@ struct smb_version_operations smb311_operations = {
591     .print_stats = smb2_print_stats,
592     .dump_share_caps = smb2_dump_share_caps,
593     .is_oplock_break = smb2_is_valid_oplock_break,
594     + .handle_cancelled_mid = smb2_handle_cancelled_mid,
595     .downgrade_oplock = smb2_downgrade_oplock,
596     .need_neg = smb2_need_neg,
597     .negotiate = smb2_negotiate,
598     diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
599     index f2d511a6971b..04ef6e914597 100644
600     --- a/fs/cifs/smb2proto.h
601     +++ b/fs/cifs/smb2proto.h
602     @@ -48,6 +48,10 @@ extern struct mid_q_entry *smb2_setup_request(struct cifs_ses *ses,
603     struct smb_rqst *rqst);
604     extern struct mid_q_entry *smb2_setup_async_request(
605     struct TCP_Server_Info *server, struct smb_rqst *rqst);
606     +extern struct cifs_ses *smb2_find_smb_ses(struct TCP_Server_Info *server,
607     + __u64 ses_id);
608     +extern struct cifs_tcon *smb2_find_smb_tcon(struct TCP_Server_Info *server,
609     + __u64 ses_id, __u32 tid);
610     extern int smb2_calc_signature(struct smb_rqst *rqst,
611     struct TCP_Server_Info *server);
612     extern int smb3_calc_signature(struct smb_rqst *rqst,
613     @@ -158,6 +162,9 @@ extern int SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
614     extern int SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
615     const u64 persistent_fid, const u64 volatile_fid,
616     const __u8 oplock_level);
617     +extern int smb2_handle_cancelled_mid(char *buffer,
618     + struct TCP_Server_Info *server);
619     +void smb2_cancelled_close_fid(struct work_struct *work);
620     extern int SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
621     u64 persistent_file_id, u64 volatile_file_id,
622     struct kstatfs *FSData);
623     diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
624     index bc9a7b634643..390b0d0198f8 100644
625     --- a/fs/cifs/smb2transport.c
626     +++ b/fs/cifs/smb2transport.c
627     @@ -115,22 +115,68 @@ smb3_crypto_shash_allocate(struct TCP_Server_Info *server)
628     }
629    
630     static struct cifs_ses *
631     -smb2_find_smb_ses(struct smb2_hdr *smb2hdr, struct TCP_Server_Info *server)
632     +smb2_find_smb_ses_unlocked(struct TCP_Server_Info *server, __u64 ses_id)
633     {
634     struct cifs_ses *ses;
635    
636     - spin_lock(&cifs_tcp_ses_lock);
637     list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
638     - if (ses->Suid != smb2hdr->SessionId)
639     + if (ses->Suid != ses_id)
640     continue;
641     - spin_unlock(&cifs_tcp_ses_lock);
642     return ses;
643     }
644     +
645     + return NULL;
646     +}
647     +
648     +struct cifs_ses *
649     +smb2_find_smb_ses(struct TCP_Server_Info *server, __u64 ses_id)
650     +{
651     + struct cifs_ses *ses;
652     +
653     + spin_lock(&cifs_tcp_ses_lock);
654     + ses = smb2_find_smb_ses_unlocked(server, ses_id);
655     spin_unlock(&cifs_tcp_ses_lock);
656    
657     + return ses;
658     +}
659     +
660     +static struct cifs_tcon *
661     +smb2_find_smb_sess_tcon_unlocked(struct cifs_ses *ses, __u32 tid)
662     +{
663     + struct cifs_tcon *tcon;
664     +
665     + list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
666     + if (tcon->tid != tid)
667     + continue;
668     + ++tcon->tc_count;
669     + return tcon;
670     + }
671     +
672     return NULL;
673     }
674    
675     +/*
676     + * Obtain tcon corresponding to the tid in the given
677     + * cifs_ses
678     + */
679     +
680     +struct cifs_tcon *
681     +smb2_find_smb_tcon(struct TCP_Server_Info *server, __u64 ses_id, __u32 tid)
682     +{
683     + struct cifs_ses *ses;
684     + struct cifs_tcon *tcon;
685     +
686     + spin_lock(&cifs_tcp_ses_lock);
687     + ses = smb2_find_smb_ses_unlocked(server, ses_id);
688     + if (!ses) {
689     + spin_unlock(&cifs_tcp_ses_lock);
690     + return NULL;
691     + }
692     + tcon = smb2_find_smb_sess_tcon_unlocked(ses, tid);
693     + spin_unlock(&cifs_tcp_ses_lock);
694     +
695     + return tcon;
696     +}
697    
698     int
699     smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
700     @@ -142,7 +188,7 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
701     struct smb2_hdr *smb2_pdu = (struct smb2_hdr *)iov[0].iov_base;
702     struct cifs_ses *ses;
703    
704     - ses = smb2_find_smb_ses(smb2_pdu, server);
705     + ses = smb2_find_smb_ses(server, smb2_pdu->SessionId);
706     if (!ses) {
707     cifs_dbg(VFS, "%s: Could not find session\n", __func__);
708     return 0;
709     @@ -359,7 +405,7 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
710     struct smb2_hdr *smb2_pdu = (struct smb2_hdr *)iov[0].iov_base;
711     struct cifs_ses *ses;
712    
713     - ses = smb2_find_smb_ses(smb2_pdu, server);
714     + ses = smb2_find_smb_ses(server, smb2_pdu->SessionId);
715     if (!ses) {
716     cifs_dbg(VFS, "%s: Could not find session\n", __func__);
717     return 0;
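
Note on the smb2transport.c hunk above: the session lookup is split into an *_unlocked helper plus locking wrappers so that smb2_find_smb_tcon() can chain the session and tcon lookups under a single hold of cifs_tcp_ses_lock and bump the tcon refcount before dropping it. A minimal pthread version of that shape, with a toy data model rather than the CIFS lists:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

struct tcon { unsigned int tid; int refcount; };
struct session { unsigned long long id; struct tcon tcon; };

static struct session sessions[2] = {
    { .id = 1, .tcon = { .tid = 10 } },
    { .id = 2, .tcon = { .tid = 20 } },
};

/* caller must hold list_lock */
static struct session *find_session_unlocked(unsigned long long id)
{
    for (unsigned int i = 0; i < 2; i++)
        if (sessions[i].id == id)
            return &sessions[i];
    return NULL;
}

static struct tcon *find_tcon(unsigned long long ses_id, unsigned int tid)
{
    struct session *ses;
    struct tcon *tcon = NULL;

    pthread_mutex_lock(&list_lock);
    ses = find_session_unlocked(ses_id);
    if (ses && ses->tcon.tid == tid) {
        tcon = &ses->tcon;
        tcon->refcount++;          /* pin before the lock is dropped */
    }
    pthread_mutex_unlock(&list_lock);
    return tcon;
}

int main(void)
{
    struct tcon *t = find_tcon(2, 20);

    printf("found tid %u, refcount %d\n",
           t ? t->tid : 0, t ? t->refcount : 0);
    return 0;
}
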
718     diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
719     index 206a597b2293..cc26d4138d70 100644
720     --- a/fs/cifs/transport.c
721     +++ b/fs/cifs/transport.c
722     @@ -727,9 +727,11 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
723    
724     rc = wait_for_response(ses->server, midQ);
725     if (rc != 0) {
726     + cifs_dbg(FYI, "Cancelling wait for mid %llu\n", midQ->mid);
727     send_cancel(ses->server, buf, midQ);
728     spin_lock(&GlobalMid_Lock);
729     if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
730     + midQ->mid_flags |= MID_WAIT_CANCELLED;
731     midQ->callback = DeleteMidQEntry;
732     spin_unlock(&GlobalMid_Lock);
733     cifs_small_buf_release(buf);
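
Note on the transport.c hunk above: a request whose waiter times out is tagged with MID_WAIT_CANCELLED, and the demultiplex thread (connect.c hunk earlier) checks that flag when the late response arrives so it can clean up instead of delivering to a waiter that is gone. The kernel serializes this with GlobalMid_Lock; the sketch below drops the locking and networking and only shows the flag handshake with invented names:

#include <stdbool.h>
#include <stdio.h>

struct mid_entry {
    unsigned long long mid;
    bool wait_cancelled;     /* set by the timed-out sender */
    bool response_received;  /* set by the receive path */
};

static void sender_times_out(struct mid_entry *m)
{
    /* waiter gives up: leave a note for whoever sees the response */
    m->wait_cancelled = true;
}

static void receive_response(struct mid_entry *m)
{
    m->response_received = true;
    if (m->wait_cancelled) {
        /* nobody is waiting: undo server-side state, e.g. close the
         * handle a cancelled CREATE still produced */
        printf("mid %llu: cancelled, cleaning up\n", m->mid);
        return;
    }
    printf("mid %llu: waking waiter\n", m->mid);
}

int main(void)
{
    struct mid_entry m = { .mid = 42 };

    sender_times_out(&m);
    receive_response(&m);
    return 0;
}
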
734     diff --git a/fs/timerfd.c b/fs/timerfd.c
735     index 9ae4abb4110b..ab8dd1538381 100644
736     --- a/fs/timerfd.c
737     +++ b/fs/timerfd.c
738     @@ -40,6 +40,7 @@ struct timerfd_ctx {
739     short unsigned settime_flags; /* to show in fdinfo */
740     struct rcu_head rcu;
741     struct list_head clist;
742     + spinlock_t cancel_lock;
743     bool might_cancel;
744     };
745    
746     @@ -112,7 +113,7 @@ void timerfd_clock_was_set(void)
747     rcu_read_unlock();
748     }
749    
750     -static void timerfd_remove_cancel(struct timerfd_ctx *ctx)
751     +static void __timerfd_remove_cancel(struct timerfd_ctx *ctx)
752     {
753     if (ctx->might_cancel) {
754     ctx->might_cancel = false;
755     @@ -122,6 +123,13 @@ static void timerfd_remove_cancel(struct timerfd_ctx *ctx)
756     }
757     }
758    
759     +static void timerfd_remove_cancel(struct timerfd_ctx *ctx)
760     +{
761     + spin_lock(&ctx->cancel_lock);
762     + __timerfd_remove_cancel(ctx);
763     + spin_unlock(&ctx->cancel_lock);
764     +}
765     +
766     static bool timerfd_canceled(struct timerfd_ctx *ctx)
767     {
768     if (!ctx->might_cancel || ctx->moffs.tv64 != KTIME_MAX)
769     @@ -132,6 +140,7 @@ static bool timerfd_canceled(struct timerfd_ctx *ctx)
770    
771     static void timerfd_setup_cancel(struct timerfd_ctx *ctx, int flags)
772     {
773     + spin_lock(&ctx->cancel_lock);
774     if ((ctx->clockid == CLOCK_REALTIME ||
775     ctx->clockid == CLOCK_REALTIME_ALARM) &&
776     (flags & TFD_TIMER_ABSTIME) && (flags & TFD_TIMER_CANCEL_ON_SET)) {
777     @@ -141,9 +150,10 @@ static void timerfd_setup_cancel(struct timerfd_ctx *ctx, int flags)
778     list_add_rcu(&ctx->clist, &cancel_list);
779     spin_unlock(&cancel_lock);
780     }
781     - } else if (ctx->might_cancel) {
782     - timerfd_remove_cancel(ctx);
783     + } else {
784     + __timerfd_remove_cancel(ctx);
785     }
786     + spin_unlock(&ctx->cancel_lock);
787     }
788    
789     static ktime_t timerfd_get_remaining(struct timerfd_ctx *ctx)
790     @@ -400,6 +410,7 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
791     return -ENOMEM;
792    
793     init_waitqueue_head(&ctx->wqh);
794     + spin_lock_init(&ctx->cancel_lock);
795     ctx->clockid = clockid;
796    
797     if (isalarm(ctx))
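
Note on the timerfd.c hunk above: a per-context cancel_lock now protects might_cancel, and the teardown is split into __timerfd_remove_cancel() (lock already held) plus a locking wrapper, so timerfd_setup_cancel() can flip the state atomically. The same shape with a pthread mutex and toy fields, names chosen for the sketch only:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct timer_ctx {
    pthread_mutex_t cancel_lock;
    bool might_cancel;
};

/* caller holds ctx->cancel_lock */
static void __remove_cancel(struct timer_ctx *ctx)
{
    if (ctx->might_cancel) {
        ctx->might_cancel = false;
        /* the real code also unlinks ctx from the global cancel_list */
    }
}

static void remove_cancel(struct timer_ctx *ctx)
{
    pthread_mutex_lock(&ctx->cancel_lock);
    __remove_cancel(ctx);
    pthread_mutex_unlock(&ctx->cancel_lock);
}

static void setup_cancel(struct timer_ctx *ctx, bool cancel_on_set)
{
    pthread_mutex_lock(&ctx->cancel_lock);
    if (cancel_on_set)
        ctx->might_cancel = true;    /* and add to the cancel list */
    else
        __remove_cancel(ctx);        /* already locked: use __ helper */
    pthread_mutex_unlock(&ctx->cancel_lock);
}

int main(void)
{
    struct timer_ctx ctx = { PTHREAD_MUTEX_INITIALIZER, false };

    setup_cancel(&ctx, true);
    remove_cancel(&ctx);
    printf("might_cancel = %d\n", ctx.might_cancel);
    return 0;
}
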
798     diff --git a/kernel/cpu.c b/kernel/cpu.c
799     index 217fd2e7f435..99c6c568bc55 100644
800     --- a/kernel/cpu.c
801     +++ b/kernel/cpu.c
802     @@ -1441,14 +1441,12 @@ static void cpuhp_store_callbacks(enum cpuhp_state state,
803     /* (Un)Install the callbacks for further cpu hotplug operations */
804     struct cpuhp_step *sp;
805    
806     - mutex_lock(&cpuhp_state_mutex);
807     sp = cpuhp_get_step(state);
808     sp->startup.single = startup;
809     sp->teardown.single = teardown;
810     sp->name = name;
811     sp->multi_instance = multi_instance;
812     INIT_HLIST_HEAD(&sp->list);
813     - mutex_unlock(&cpuhp_state_mutex);
814     }
815    
816     static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
817     @@ -1518,16 +1516,13 @@ static int cpuhp_reserve_state(enum cpuhp_state state)
818     {
819     enum cpuhp_state i;
820    
821     - mutex_lock(&cpuhp_state_mutex);
822     for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) {
823     if (cpuhp_ap_states[i].name)
824     continue;
825    
826     cpuhp_ap_states[i].name = "Reserved";
827     - mutex_unlock(&cpuhp_state_mutex);
828     return i;
829     }
830     - mutex_unlock(&cpuhp_state_mutex);
831     WARN(1, "No more dynamic states available for CPU hotplug\n");
832     return -ENOSPC;
833     }
834     @@ -1544,6 +1539,7 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
835     return -EINVAL;
836    
837     get_online_cpus();
838     + mutex_lock(&cpuhp_state_mutex);
839    
840     if (!invoke || !sp->startup.multi)
841     goto add_node;
842     @@ -1568,11 +1564,10 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
843     }
844     add_node:
845     ret = 0;
846     - mutex_lock(&cpuhp_state_mutex);
847     hlist_add_head(node, &sp->list);
848     - mutex_unlock(&cpuhp_state_mutex);
849    
850     err:
851     + mutex_unlock(&cpuhp_state_mutex);
852     put_online_cpus();
853     return ret;
854     }
855     @@ -1601,6 +1596,7 @@ int __cpuhp_setup_state(enum cpuhp_state state,
856     return -EINVAL;
857    
858     get_online_cpus();
859     + mutex_lock(&cpuhp_state_mutex);
860    
861     /* currently assignments for the ONLINE state are possible */
862     if (state == CPUHP_AP_ONLINE_DYN) {
863     @@ -1636,6 +1632,8 @@ int __cpuhp_setup_state(enum cpuhp_state state,
864     }
865     }
866     out:
867     + mutex_unlock(&cpuhp_state_mutex);
868     +
869     put_online_cpus();
870     if (!ret && dyn_state)
871     return state;
872     @@ -1655,6 +1653,8 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
873     return -EINVAL;
874    
875     get_online_cpus();
876     + mutex_lock(&cpuhp_state_mutex);
877     +
878     if (!invoke || !cpuhp_get_teardown_cb(state))
879     goto remove;
880     /*
881     @@ -1671,7 +1671,6 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
882     }
883    
884     remove:
885     - mutex_lock(&cpuhp_state_mutex);
886     hlist_del(node);
887     mutex_unlock(&cpuhp_state_mutex);
888     put_online_cpus();
889     @@ -1696,6 +1695,7 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
890     BUG_ON(cpuhp_cb_check(state));
891    
892     get_online_cpus();
893     + mutex_lock(&cpuhp_state_mutex);
894    
895     if (sp->multi_instance) {
896     WARN(!hlist_empty(&sp->list),
897     @@ -1721,6 +1721,7 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
898     }
899     remove:
900     cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
901     + mutex_unlock(&cpuhp_state_mutex);
902     put_online_cpus();
903     }
904     EXPORT_SYMBOL(__cpuhp_remove_state);
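
Note on the kernel/cpu.c hunk above: cpuhp_state_mutex is hoisted out of the low-level helpers and taken once in each entry point, so reserving a dynamic state and storing its callbacks happen in one critical section. The sketch below shows that convention with a pthread mutex; helpers simply document that the caller holds the lock, and the names are illustrative only:

#include <pthread.h>
#include <stdio.h>

#define NR_DYN_STATES 4

static pthread_mutex_t state_mutex = PTHREAD_MUTEX_INITIALIZER;
static const char *state_names[NR_DYN_STATES];

/* caller holds state_mutex */
static int reserve_state(void)
{
    for (int i = 0; i < NR_DYN_STATES; i++)
        if (!state_names[i])
            return i;
    return -1;
}

/* caller holds state_mutex */
static void store_callbacks(int state, const char *name)
{
    state_names[state] = name;
}

static int setup_state(const char *name)
{
    int state;

    pthread_mutex_lock(&state_mutex);
    state = reserve_state();                /* reserve ... */
    if (state >= 0)
        store_callbacks(state, name);       /* ... and fill, atomically */
    pthread_mutex_unlock(&state_mutex);
    return state;
}

int main(void)
{
    printf("got state %d\n", setup_state("example/online"));
    return 0;
}
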