Magellan Linux

Annotation of /trunk/kernel-magellan/patches-3.4/0106-3.4.7-all-fixes.patch

Parent Directory | Revision Log


Revision 1872 - (hide annotations) (download)
Mon Aug 13 07:55:36 2012 UTC (11 years, 9 months ago) by niro
File size: 26227 byte(s)
-upstream patches
1 niro 1872 diff --git a/arch/arm/plat-s5p/clock.c b/arch/arm/plat-s5p/clock.c
2     index f68a9bb..b042795 100644
3     --- a/arch/arm/plat-s5p/clock.c
4     +++ b/arch/arm/plat-s5p/clock.c
5     @@ -38,6 +38,7 @@ struct clk clk_ext_xtal_mux = {
6     struct clk clk_xusbxti = {
7     .name = "xusbxti",
8     .id = -1,
9     + .rate = 24000000,
10     };
11    
12     struct clk s5p_clk_27m = {
13     diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
14     index 0d85d8e..abb13e8 100644
15     --- a/arch/mips/include/asm/thread_info.h
16     +++ b/arch/mips/include/asm/thread_info.h
17     @@ -60,6 +60,8 @@ struct thread_info {
18     register struct thread_info *__current_thread_info __asm__("$28");
19     #define current_thread_info() __current_thread_info
20    
21     +#endif /* !__ASSEMBLY__ */
22     +
23     /* thread information allocation */
24     #if defined(CONFIG_PAGE_SIZE_4KB) && defined(CONFIG_32BIT)
25     #define THREAD_SIZE_ORDER (1)
26     @@ -97,8 +99,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
27    
28     #define free_thread_info(info) kfree(info)
29    
30     -#endif /* !__ASSEMBLY__ */
31     -
32     #define PREEMPT_ACTIVE 0x10000000
33    
34     /*
35     diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
36     index 924da5e..df243a6 100644
37     --- a/arch/mips/kernel/vmlinux.lds.S
38     +++ b/arch/mips/kernel/vmlinux.lds.S
39     @@ -1,5 +1,6 @@
40     #include <asm/asm-offsets.h>
41     #include <asm/page.h>
42     +#include <asm/thread_info.h>
43     #include <asm-generic/vmlinux.lds.h>
44    
45     #undef mips
46     @@ -72,7 +73,7 @@ SECTIONS
47     .data : { /* Data */
48     . = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */
49    
50     - INIT_TASK_DATA(PAGE_SIZE)
51     + INIT_TASK_DATA(THREAD_SIZE)
52     NOSAVE_DATA
53     CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
54     READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
55     diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
56     index 054677b..973c238 100644
57     --- a/drivers/hid/hid-core.c
58     +++ b/drivers/hid/hid-core.c
59     @@ -1391,6 +1391,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
60     { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) },
61     { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO) },
62     { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
63     + { HID_USB_DEVICE(USB_VENDOR_ID_BAANTO, USB_DEVICE_ID_BAANTO_MT_190W2), },
64     { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
65     { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
66     { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_PIXCIR_MULTI_TOUCH) },
67     diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
68     index dfd4098..bb1abf8 100644
69     --- a/drivers/hid/hid-ids.h
70     +++ b/drivers/hid/hid-ids.h
71     @@ -160,6 +160,9 @@
72     #define USB_VENDOR_ID_AVERMEDIA 0x07ca
73     #define USB_DEVICE_ID_AVER_FM_MR800 0xb800
74    
75     +#define USB_VENDOR_ID_BAANTO 0x2453
76     +#define USB_DEVICE_ID_BAANTO_MT_190W2 0x0100
77     +
78     #define USB_VENDOR_ID_BELKIN 0x050d
79     #define USB_DEVICE_ID_FLIP_KVM 0x3201
80    
81     @@ -650,6 +653,9 @@
82     #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001
83     #define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE 0x0600
84    
85     +#define USB_VENDOR_ID_SENNHEISER 0x1395
86     +#define USB_DEVICE_ID_SENNHEISER_BTD500USB 0x002c
87     +
88     #define USB_VENDOR_ID_SIGMA_MICRO 0x1c4f
89     #define USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD 0x0002
90    
91     diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
92     index 002781c..21e473e 100644
93     --- a/drivers/hid/hid-input.c
94     +++ b/drivers/hid/hid-input.c
95     @@ -290,6 +290,9 @@ static const struct hid_device_id hid_battery_quirks[] = {
96     { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
97     USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
98     HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
99     + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
100     + USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI),
101     + HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
102     {}
103     };
104    
105     diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
106     index 543896d..a6197f5 100644
107     --- a/drivers/hid/hid-multitouch.c
108     +++ b/drivers/hid/hid-multitouch.c
109     @@ -783,6 +783,10 @@ static const struct hid_device_id mt_devices[] = {
110     HID_USB_DEVICE(USB_VENDOR_ID_ATMEL,
111     USB_DEVICE_ID_ATMEL_MXT_DIGITIZER) },
112    
113     + /* Baanto multitouch devices */
114     + { .driver_data = MT_CLS_DEFAULT,
115     + HID_USB_DEVICE(USB_VENDOR_ID_BAANTO,
116     + USB_DEVICE_ID_BAANTO_MT_190W2) },
117     /* Cando panels */
118     { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTNUMBER,
119     HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
120     diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
121     index 782c639..82f61ee 100644
122     --- a/drivers/hid/usbhid/hid-quirks.c
123     +++ b/drivers/hid/usbhid/hid-quirks.c
124     @@ -76,6 +76,7 @@ static const struct hid_blacklist {
125     { USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET },
126     { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NOGET },
127     { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008, HID_QUIRK_NOGET },
128     + { USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET },
129     { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
130     { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1, HID_QUIRK_NOGET },
131     { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2, HID_QUIRK_NOGET },
132     diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
133     index d039de8..b58b7a3 100644
134     --- a/drivers/md/dm-raid1.c
135     +++ b/drivers/md/dm-raid1.c
136     @@ -1084,6 +1084,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
137     ti->split_io = dm_rh_get_region_size(ms->rh);
138     ti->num_flush_requests = 1;
139     ti->num_discard_requests = 1;
140     + ti->discard_zeroes_data_unsupported = 1;
141    
142     ms->kmirrord_wq = alloc_workqueue("kmirrord",
143     WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
144     @@ -1214,7 +1215,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
145     * We need to dec pending if this was a write.
146     */
147     if (rw == WRITE) {
148     - if (!(bio->bi_rw & REQ_FLUSH))
149     + if (!(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD)))
150     dm_rh_dec(ms->rh, map_context->ll);
151     return error;
152     }
153     diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
154     index 7771ed2..69732e0 100644
155     --- a/drivers/md/dm-region-hash.c
156     +++ b/drivers/md/dm-region-hash.c
157     @@ -404,6 +404,9 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
158     return;
159     }
160    
161     + if (bio->bi_rw & REQ_DISCARD)
162     + return;
163     +
164     /* We must inform the log that the sync count has changed. */
165     log->type->set_region_sync(log, region, 0);
166    
167     @@ -524,7 +527,7 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
168     struct bio *bio;
169    
170     for (bio = bios->head; bio; bio = bio->bi_next) {
171     - if (bio->bi_rw & REQ_FLUSH)
172     + if (bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))
173     continue;
174     rh_inc(rh, dm_rh_bio_to_region(rh, bio));
175     }
176     diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
177     index eb3d138..e0a0ebe 100644
178     --- a/drivers/md/dm-thin.c
179     +++ b/drivers/md/dm-thin.c
180     @@ -1240,7 +1240,10 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
181    
182     cell_release_singleton(cell, bio);
183     cell_release_singleton(cell2, bio);
184     - remap_and_issue(tc, bio, lookup_result.block);
185     + if ((!lookup_result.shared) && pool->pf.discard_passdown)
186     + remap_and_issue(tc, bio, lookup_result.block);
187     + else
188     + bio_endio(bio, 0);
189     }
190     break;
191    
192     @@ -2575,6 +2578,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
193     if (tc->pool->pf.discard_enabled) {
194     ti->discards_supported = 1;
195     ti->num_discard_requests = 1;
196     + ti->discard_zeroes_data_unsupported = 1;
197     }
198    
199     dm_put(pool_md);
200     diff --git a/drivers/md/md.c b/drivers/md/md.c
201     index 2b30ffd..9ee8ce3 100644
202     --- a/drivers/md/md.c
203     +++ b/drivers/md/md.c
204     @@ -3744,8 +3744,8 @@ array_state_show(struct mddev *mddev, char *page)
205     return sprintf(page, "%s\n", array_states[st]);
206     }
207    
208     -static int do_md_stop(struct mddev * mddev, int ro, int is_open);
209     -static int md_set_readonly(struct mddev * mddev, int is_open);
210     +static int do_md_stop(struct mddev * mddev, int ro, struct block_device *bdev);
211     +static int md_set_readonly(struct mddev * mddev, struct block_device *bdev);
212     static int do_md_run(struct mddev * mddev);
213     static int restart_array(struct mddev *mddev);
214    
215     @@ -3761,14 +3761,14 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
216     /* stopping an active array */
217     if (atomic_read(&mddev->openers) > 0)
218     return -EBUSY;
219     - err = do_md_stop(mddev, 0, 0);
220     + err = do_md_stop(mddev, 0, NULL);
221     break;
222     case inactive:
223     /* stopping an active array */
224     if (mddev->pers) {
225     if (atomic_read(&mddev->openers) > 0)
226     return -EBUSY;
227     - err = do_md_stop(mddev, 2, 0);
228     + err = do_md_stop(mddev, 2, NULL);
229     } else
230     err = 0; /* already inactive */
231     break;
232     @@ -3776,7 +3776,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
233     break; /* not supported yet */
234     case readonly:
235     if (mddev->pers)
236     - err = md_set_readonly(mddev, 0);
237     + err = md_set_readonly(mddev, NULL);
238     else {
239     mddev->ro = 1;
240     set_disk_ro(mddev->gendisk, 1);
241     @@ -3786,7 +3786,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
242     case read_auto:
243     if (mddev->pers) {
244     if (mddev->ro == 0)
245     - err = md_set_readonly(mddev, 0);
246     + err = md_set_readonly(mddev, NULL);
247     else if (mddev->ro == 1)
248     err = restart_array(mddev);
249     if (err == 0) {
250     @@ -5124,15 +5124,17 @@ void md_stop(struct mddev *mddev)
251     }
252     EXPORT_SYMBOL_GPL(md_stop);
253    
254     -static int md_set_readonly(struct mddev *mddev, int is_open)
255     +static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
256     {
257     int err = 0;
258     mutex_lock(&mddev->open_mutex);
259     - if (atomic_read(&mddev->openers) > is_open) {
260     + if (atomic_read(&mddev->openers) > !!bdev) {
261     printk("md: %s still in use.\n",mdname(mddev));
262     err = -EBUSY;
263     goto out;
264     }
265     + if (bdev)
266     + sync_blockdev(bdev);
267     if (mddev->pers) {
268     __md_stop_writes(mddev);
269    
270     @@ -5154,18 +5156,26 @@ out:
271     * 0 - completely stop and dis-assemble array
272     * 2 - stop but do not disassemble array
273     */
274     -static int do_md_stop(struct mddev * mddev, int mode, int is_open)
275     +static int do_md_stop(struct mddev * mddev, int mode,
276     + struct block_device *bdev)
277     {
278     struct gendisk *disk = mddev->gendisk;
279     struct md_rdev *rdev;
280    
281     mutex_lock(&mddev->open_mutex);
282     - if (atomic_read(&mddev->openers) > is_open ||
283     + if (atomic_read(&mddev->openers) > !!bdev ||
284     mddev->sysfs_active) {
285     printk("md: %s still in use.\n",mdname(mddev));
286     mutex_unlock(&mddev->open_mutex);
287     return -EBUSY;
288     }
289     + if (bdev)
290     + /* It is possible IO was issued on some other
291     + * open file which was closed before we took ->open_mutex.
292     + * As that was not the last close __blkdev_put will not
293     + * have called sync_blockdev, so we must.
294     + */
295     + sync_blockdev(bdev);
296    
297     if (mddev->pers) {
298     if (mddev->ro)
299     @@ -5239,7 +5249,7 @@ static void autorun_array(struct mddev *mddev)
300     err = do_md_run(mddev);
301     if (err) {
302     printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
303     - do_md_stop(mddev, 0, 0);
304     + do_md_stop(mddev, 0, NULL);
305     }
306     }
307    
308     @@ -6237,11 +6247,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
309     goto done_unlock;
310    
311     case STOP_ARRAY:
312     - err = do_md_stop(mddev, 0, 1);
313     + err = do_md_stop(mddev, 0, bdev);
314     goto done_unlock;
315    
316     case STOP_ARRAY_RO:
317     - err = md_set_readonly(mddev, 1);
318     + err = md_set_readonly(mddev, bdev);
319     goto done_unlock;
320    
321     case BLKROSET:
322     diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
323     index d1f74ab..d7add9d 100644
324     --- a/drivers/md/raid1.c
325     +++ b/drivers/md/raid1.c
326     @@ -1821,8 +1821,14 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
327    
328     if (atomic_dec_and_test(&r1_bio->remaining)) {
329     /* if we're here, all write(s) have completed, so clean up */
330     - md_done_sync(mddev, r1_bio->sectors, 1);
331     - put_buf(r1_bio);
332     + int s = r1_bio->sectors;
333     + if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
334     + test_bit(R1BIO_WriteError, &r1_bio->state))
335     + reschedule_retry(r1_bio);
336     + else {
337     + put_buf(r1_bio);
338     + md_done_sync(mddev, s, 1);
339     + }
340     }
341     }
342    
343     diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
344     index 30a6770..fa323f8 100644
345     --- a/drivers/target/target_core_cdb.c
346     +++ b/drivers/target/target_core_cdb.c
347     @@ -1107,7 +1107,7 @@ int target_emulate_write_same(struct se_task *task)
348     if (num_blocks != 0)
349     range = num_blocks;
350     else
351     - range = (dev->transport->get_blocks(dev) - lba);
352     + range = (dev->transport->get_blocks(dev) - lba) + 1;
353    
354     pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n",
355     (unsigned long long)lba, (unsigned long long)range);
356     diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
357     index c3148b1..89d10e6 100644
358     --- a/drivers/target/target_core_pr.c
359     +++ b/drivers/target/target_core_pr.c
360     @@ -2038,7 +2038,7 @@ static int __core_scsi3_write_aptpl_to_file(
361     if (IS_ERR(file) || !file || !file->f_dentry) {
362     pr_err("filp_open(%s) for APTPL metadata"
363     " failed\n", path);
364     - return (PTR_ERR(file) < 0 ? PTR_ERR(file) : -ENOENT);
365     + return IS_ERR(file) ? PTR_ERR(file) : -ENOENT;
366     }
367    
368     iov[0].iov_base = &buf[0];
369     @@ -3826,7 +3826,7 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
370     " SPC-2 reservation is held, returning"
371     " RESERVATION_CONFLICT\n");
372     cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
373     - ret = EINVAL;
374     + ret = -EINVAL;
375     goto out;
376     }
377    
378     @@ -3836,7 +3836,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
379     */
380     if (!cmd->se_sess) {
381     cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
382     - return -EINVAL;
383     + ret = -EINVAL;
384     + goto out;
385     }
386    
387     if (cmd->data_length < 24) {
388     diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
389     index a375f25..da90f64 100644
390     --- a/drivers/target/tcm_fc/tfc_cmd.c
391     +++ b/drivers/target/tcm_fc/tfc_cmd.c
392     @@ -240,6 +240,8 @@ u32 ft_get_task_tag(struct se_cmd *se_cmd)
393     {
394     struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
395    
396     + if (cmd->aborted)
397     + return ~0;
398     return fc_seq_exch(cmd->seq)->rxid;
399     }
400    
401     diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
402     index 6b79efd..3a75ee5 100644
403     --- a/fs/cifs/cifssmb.c
404     +++ b/fs/cifs/cifssmb.c
405     @@ -89,6 +89,32 @@ static struct {
406     /* Forward declarations */
407     static void cifs_readv_complete(struct work_struct *work);
408    
409     +#ifdef CONFIG_HIGHMEM
410     +/*
411     + * On arches that have high memory, kmap address space is limited. By
412     + * serializing the kmap operations on those arches, we ensure that we don't
413     + * end up with a bunch of threads in writeback with partially mapped page
414     + * arrays, stuck waiting for kmap to come back. That situation prevents
415     + * progress and can deadlock.
416     + */
417     +static DEFINE_MUTEX(cifs_kmap_mutex);
418     +
419     +static inline void
420     +cifs_kmap_lock(void)
421     +{
422     + mutex_lock(&cifs_kmap_mutex);
423     +}
424     +
425     +static inline void
426     +cifs_kmap_unlock(void)
427     +{
428     + mutex_unlock(&cifs_kmap_mutex);
429     +}
430     +#else /* !CONFIG_HIGHMEM */
431     +#define cifs_kmap_lock() do { ; } while(0)
432     +#define cifs_kmap_unlock() do { ; } while(0)
433     +#endif /* CONFIG_HIGHMEM */
434     +
435     /* Mark as invalid, all open files on tree connections since they
436     were closed when session to server was lost */
437     static void mark_open_files_invalid(struct cifs_tcon *pTcon)
438     @@ -1557,6 +1583,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
439     eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
440     cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);
441    
442     + cifs_kmap_lock();
443     list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
444     if (remaining >= PAGE_CACHE_SIZE) {
445     /* enough data to fill the page */
446     @@ -1606,6 +1633,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
447     page_cache_release(page);
448     }
449     }
450     + cifs_kmap_unlock();
451    
452     /* issue the read if we have any iovecs left to fill */
453     if (rdata->nr_iov > 1) {
454     @@ -2194,7 +2222,9 @@ cifs_async_writev(struct cifs_writedata *wdata)
455     * and set the iov_len properly for each one. It may also set
456     * wdata->bytes too.
457     */
458     + cifs_kmap_lock();
459     wdata->marshal_iov(iov, wdata);
460     + cifs_kmap_unlock();
461    
462     cFYI(1, "async write at %llu %u bytes", wdata->offset, wdata->bytes);
463    
464     diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
465     index 402fa0f..87ce8af 100644
466     --- a/fs/cifs/connect.c
467     +++ b/fs/cifs/connect.c
468     @@ -3348,6 +3348,18 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
469     #define CIFS_DEFAULT_NON_POSIX_RSIZE (60 * 1024)
470     #define CIFS_DEFAULT_NON_POSIX_WSIZE (65536)
471    
472     +/*
473     + * On hosts with high memory, we can't currently support wsize/rsize that are
474     + * larger than we can kmap at once. Cap the rsize/wsize at
475     + * LAST_PKMAP * PAGE_SIZE. We'll never be able to fill a read or write request
476     + * larger than that anyway.
477     + */
478     +#ifdef CONFIG_HIGHMEM
479     +#define CIFS_KMAP_SIZE_LIMIT (LAST_PKMAP * PAGE_CACHE_SIZE)
480     +#else /* CONFIG_HIGHMEM */
481     +#define CIFS_KMAP_SIZE_LIMIT (1<<24)
482     +#endif /* CONFIG_HIGHMEM */
483     +
484     static unsigned int
485     cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
486     {
487     @@ -3378,6 +3390,9 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
488     wsize = min_t(unsigned int, wsize,
489     server->maxBuf - sizeof(WRITE_REQ) + 4);
490    
491     + /* limit to the amount that we can kmap at once */
492     + wsize = min_t(unsigned int, wsize, CIFS_KMAP_SIZE_LIMIT);
493     +
494     /* hard limit of CIFS_MAX_WSIZE */
495     wsize = min_t(unsigned int, wsize, CIFS_MAX_WSIZE);
496    
497     @@ -3419,6 +3434,9 @@ cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
498     if (!(server->capabilities & CAP_LARGE_READ_X))
499     rsize = min_t(unsigned int, CIFSMaxBufSize, rsize);
500    
501     + /* limit to the amount that we can kmap at once */
502     + rsize = min_t(unsigned int, rsize, CIFS_KMAP_SIZE_LIMIT);
503     +
504     /* hard limit of CIFS_MAX_RSIZE */
505     rsize = min_t(unsigned int, rsize, CIFS_MAX_RSIZE);
506    
507     diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
508     index 0a8224d..a4217f0 100644
509     --- a/fs/cifs/readdir.c
510     +++ b/fs/cifs/readdir.c
511     @@ -86,9 +86,12 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name,
512    
513     dentry = d_lookup(parent, name);
514     if (dentry) {
515     - /* FIXME: check for inode number changes? */
516     - if (dentry->d_inode != NULL)
517     + inode = dentry->d_inode;
518     + /* update inode in place if i_ino didn't change */
519     + if (inode && CIFS_I(inode)->uniqueid == fattr->cf_uniqueid) {
520     + cifs_fattr_to_inode(inode, fattr);
521     return dentry;
522     + }
523     d_drop(dentry);
524     dput(dentry);
525     }
526     diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c
527     index 49cf230..24a49d4 100644
528     --- a/fs/exofs/ore.c
529     +++ b/fs/exofs/ore.c
530     @@ -735,13 +735,7 @@ static int _prepare_for_striping(struct ore_io_state *ios)
531     out:
532     ios->numdevs = devs_in_group;
533     ios->pages_consumed = cur_pg;
534     - if (unlikely(ret)) {
535     - if (length == ios->length)
536     - return ret;
537     - else
538     - ios->length -= length;
539     - }
540     - return 0;
541     + return ret;
542     }
543    
544     int ore_create(struct ore_io_state *ios)
545     diff --git a/fs/exofs/ore_raid.c b/fs/exofs/ore_raid.c
546     index d222c77..fff2070 100644
547     --- a/fs/exofs/ore_raid.c
548     +++ b/fs/exofs/ore_raid.c
549     @@ -461,16 +461,12 @@ static void _mark_read4write_pages_uptodate(struct ore_io_state *ios, int ret)
550     * ios->sp2d[p][*], xor is calculated the same way. These pages are
551     * allocated/freed and don't go through cache
552     */
553     -static int _read_4_write(struct ore_io_state *ios)
554     +static int _read_4_write_first_stripe(struct ore_io_state *ios)
555     {
556     - struct ore_io_state *ios_read;
557     struct ore_striping_info read_si;
558     struct __stripe_pages_2d *sp2d = ios->sp2d;
559     u64 offset = ios->si.first_stripe_start;
560     - u64 last_stripe_end;
561     - unsigned bytes_in_stripe = ios->si.bytes_in_stripe;
562     - unsigned i, c, p, min_p = sp2d->pages_in_unit, max_p = -1;
563     - int ret;
564     + unsigned c, p, min_p = sp2d->pages_in_unit, max_p = -1;
565    
566     if (offset == ios->offset) /* Go to start collect $200 */
567     goto read_last_stripe;
568     @@ -478,6 +474,9 @@ static int _read_4_write(struct ore_io_state *ios)
569     min_p = _sp2d_min_pg(sp2d);
570     max_p = _sp2d_max_pg(sp2d);
571    
572     + ORE_DBGMSG("stripe_start=0x%llx ios->offset=0x%llx min_p=%d max_p=%d\n",
573     + offset, ios->offset, min_p, max_p);
574     +
575     for (c = 0; ; c++) {
576     ore_calc_stripe_info(ios->layout, offset, 0, &read_si);
577     read_si.obj_offset += min_p * PAGE_SIZE;
578     @@ -512,6 +511,18 @@ static int _read_4_write(struct ore_io_state *ios)
579     }
580    
581     read_last_stripe:
582     + return 0;
583     +}
584     +
585     +static int _read_4_write_last_stripe(struct ore_io_state *ios)
586     +{
587     + struct ore_striping_info read_si;
588     + struct __stripe_pages_2d *sp2d = ios->sp2d;
589     + u64 offset;
590     + u64 last_stripe_end;
591     + unsigned bytes_in_stripe = ios->si.bytes_in_stripe;
592     + unsigned c, p, min_p = sp2d->pages_in_unit, max_p = -1;
593     +
594     offset = ios->offset + ios->length;
595     if (offset % PAGE_SIZE)
596     _add_to_r4w_last_page(ios, &offset);
597     @@ -527,15 +538,15 @@ read_last_stripe:
598     c = _dev_order(ios->layout->group_width * ios->layout->mirrors_p1,
599     ios->layout->mirrors_p1, read_si.par_dev, read_si.dev);
600    
601     - BUG_ON(ios->si.first_stripe_start + bytes_in_stripe != last_stripe_end);
602     - /* unaligned IO must be within a single stripe */
603     -
604     if (min_p == sp2d->pages_in_unit) {
605     /* Didn't do it yet */
606     min_p = _sp2d_min_pg(sp2d);
607     max_p = _sp2d_max_pg(sp2d);
608     }
609    
610     + ORE_DBGMSG("offset=0x%llx stripe_end=0x%llx min_p=%d max_p=%d\n",
611     + offset, last_stripe_end, min_p, max_p);
612     +
613     while (offset < last_stripe_end) {
614     struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
615    
616     @@ -568,6 +579,15 @@ read_last_stripe:
617     }
618    
619     read_it:
620     + return 0;
621     +}
622     +
623     +static int _read_4_write_execute(struct ore_io_state *ios)
624     +{
625     + struct ore_io_state *ios_read;
626     + unsigned i;
627     + int ret;
628     +
629     ios_read = ios->ios_read_4_write;
630     if (!ios_read)
631     return 0;
632     @@ -591,6 +611,8 @@ read_it:
633     }
634    
635     _mark_read4write_pages_uptodate(ios_read, ret);
636     + ore_put_io_state(ios_read);
637     + ios->ios_read_4_write = NULL; /* Might need a reuse at last stripe */
638     return 0;
639     }
640    
641     @@ -626,8 +648,11 @@ int _ore_add_parity_unit(struct ore_io_state *ios,
642     /* If first stripe, Read in all read4write pages
643     * (if needed) before we calculate the first parity.
644     */
645     - _read_4_write(ios);
646     + _read_4_write_first_stripe(ios);
647     }
648     + if (!cur_len) /* If last stripe r4w pages of last stripe */
649     + _read_4_write_last_stripe(ios);
650     + _read_4_write_execute(ios);
651    
652     for (i = 0; i < num_pages; i++) {
653     pages[i] = _raid_page_alloc();
654     @@ -654,34 +679,14 @@ int _ore_add_parity_unit(struct ore_io_state *ios,
655    
656     int _ore_post_alloc_raid_stuff(struct ore_io_state *ios)
657     {
658     - struct ore_layout *layout = ios->layout;
659     -
660     if (ios->parity_pages) {
661     + struct ore_layout *layout = ios->layout;
662     unsigned pages_in_unit = layout->stripe_unit / PAGE_SIZE;
663     - unsigned stripe_size = ios->si.bytes_in_stripe;
664     - u64 last_stripe, first_stripe;
665    
666     if (_sp2d_alloc(pages_in_unit, layout->group_width,
667     layout->parity, &ios->sp2d)) {
668     return -ENOMEM;
669     }
670     -
671     - /* Round io down to last full strip */
672     - first_stripe = div_u64(ios->offset, stripe_size);
673     - last_stripe = div_u64(ios->offset + ios->length, stripe_size);
674     -
675     - /* If an IO spans more then a single stripe it must end at
676     - * a stripe boundary. The reminder at the end is pushed into the
677     - * next IO.
678     - */
679     - if (last_stripe != first_stripe) {
680     - ios->length = last_stripe * stripe_size - ios->offset;
681     -
682     - BUG_ON(!ios->length);
683     - ios->nr_pages = (ios->length + PAGE_SIZE - 1) /
684     - PAGE_SIZE;
685     - ios->si.length = ios->length; /*make it consistent */
686     - }
687     }
688     return 0;
689     }
690     diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
691     index 1365903..9727522 100644
692     --- a/fs/ext4/ioctl.c
693     +++ b/fs/ext4/ioctl.c
694     @@ -261,7 +261,6 @@ group_extend_out:
695     err = ext4_move_extents(filp, donor_filp, me.orig_start,
696     me.donor_start, me.len, &me.moved_len);
697     mnt_drop_write_file(filp);
698     - mnt_drop_write(filp->f_path.mnt);
699    
700     if (copy_to_user((struct move_extent __user *)arg,
701     &me, sizeof(me)))
702     diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
703     index 4bff4a3..42ac1bf 100644
704     --- a/fs/nfs/objlayout/objio_osd.c
705     +++ b/fs/nfs/objlayout/objio_osd.c
706     @@ -453,7 +453,10 @@ int objio_read_pagelist(struct nfs_read_data *rdata)
707     objios->ios->done = _read_done;
708     dprintk("%s: offset=0x%llx length=0x%x\n", __func__,
709     rdata->args.offset, rdata->args.count);
710     - return ore_read(objios->ios);
711     + ret = ore_read(objios->ios);
712     + if (unlikely(ret))
713     + objio_free_result(&objios->oir);
714     + return ret;
715     }
716    
717     /*
718     @@ -537,8 +540,10 @@ int objio_write_pagelist(struct nfs_write_data *wdata, int how)
719     dprintk("%s: offset=0x%llx length=0x%x\n", __func__,
720     wdata->args.offset, wdata->args.count);
721     ret = ore_write(objios->ios);
722     - if (unlikely(ret))
723     + if (unlikely(ret)) {
724     + objio_free_result(&objios->oir);
725     return ret;
726     + }
727    
728     if (objios->sync)
729     _write_done(objios->ios, objios);
730     diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c
731     index 771f7fb..a7be8e2 100644
732     --- a/fs/ubifs/sb.c
733     +++ b/fs/ubifs/sb.c
734     @@ -724,8 +724,12 @@ static int fixup_free_space(struct ubifs_info *c)
735     lnum = ubifs_next_log_lnum(c, lnum);
736     }
737    
738     - /* Fixup the current log head */
739     - err = fixup_leb(c, c->lhead_lnum, c->lhead_offs);
740     + /*
741     + * Fixup the log head which contains the only a CS node at the
742     + * beginning.
743     + */
744     + err = fixup_leb(c, c->lhead_lnum,
745     + ALIGN(UBIFS_CS_NODE_SZ, c->min_io_size));
746     if (err)
747     goto out;
748    
749     diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
750     index e8c8671..8b70710 100644
751     --- a/kernel/time/ntp.c
752     +++ b/kernel/time/ntp.c
753     @@ -409,7 +409,9 @@ int second_overflow(unsigned long secs)
754     time_state = TIME_DEL;
755     break;
756     case TIME_INS:
757     - if (secs % 86400 == 0) {
758     + if (!(time_status & STA_INS))
759     + time_state = TIME_OK;
760     + else if (secs % 86400 == 0) {
761     leap = -1;
762     time_state = TIME_OOP;
763     time_tai++;
764     @@ -418,7 +420,9 @@ int second_overflow(unsigned long secs)
765     }
766     break;
767     case TIME_DEL:
768     - if ((secs + 1) % 86400 == 0) {
769     + if (!(time_status & STA_DEL))
770     + time_state = TIME_OK;
771     + else if ((secs + 1) % 86400 == 0) {
772     leap = 1;
773     time_tai--;
774     time_state = TIME_WAIT;
775     diff --git a/mm/vmscan.c b/mm/vmscan.c
776     index 4607cc6..be5bc0a 100644
777     --- a/mm/vmscan.c
778     +++ b/mm/vmscan.c
779     @@ -3013,7 +3013,10 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
780     * them before going back to sleep.
781     */
782     set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
783     - schedule();
784     +
785     + if (!kthread_should_stop())
786     + schedule();
787     +
788     set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
789     } else {
790     if (remaining)