Magellan Linux

Contents of /trunk/kernel-mcore/patches-3.0-r2/0107-3.0.8-all-fixes.patch

Parent Directory Parent Directory | Revision Log Revision Log


Revision 1560 - (show annotations) (download)
Thu Nov 10 14:21:33 2011 UTC (12 years, 5 months ago) by niro
File size: 44427 byte(s)
3.0-mcore-r2, updated to linux-3.0.8
1 diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
2 index 4960686..4372763 100644
3 --- a/arch/arm/kernel/perf_event_v7.c
4 +++ b/arch/arm/kernel/perf_event_v7.c
5 @@ -264,8 +264,8 @@ static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
6 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
7 [PERF_COUNT_HW_INSTRUCTIONS] =
8 ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
9 - [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_COHERENT_LINE_HIT,
10 - [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_COHERENT_LINE_MISS,
11 + [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_DCACHE_ACCESS,
12 + [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_DCACHE_REFILL,
13 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
14 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
15 [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
16 diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
17 index c19571c..4a4eba5 100644
18 --- a/arch/arm/mm/init.c
19 +++ b/arch/arm/mm/init.c
20 @@ -473,6 +473,13 @@ static void __init free_unused_memmap(struct meminfo *mi)
21 */
22 bank_start = min(bank_start,
23 ALIGN(prev_bank_end, PAGES_PER_SECTION));
24 +#else
25 + /*
26 + * Align down here since the VM subsystem insists that the
27 + * memmap entries are valid from the bank start aligned to
28 + * MAX_ORDER_NR_PAGES.
29 + */
30 + bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES);
31 #endif
32 /*
33 * If we had a previous bank, and there is a space
34 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
35 index 3032644..87488b9 100644
36 --- a/arch/x86/mm/init.c
37 +++ b/arch/x86/mm/init.c
38 @@ -63,9 +63,8 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
39 #ifdef CONFIG_X86_32
40 /* for fixmap */
41 tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
42 -
43 - good_end = max_pfn_mapped << PAGE_SHIFT;
44 #endif
45 + good_end = max_pfn_mapped << PAGE_SHIFT;
46
47 base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE);
48 if (base == MEMBLOCK_ERROR)
49 diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
50 index be44256..7835b8f 100644
51 --- a/crypto/ghash-generic.c
52 +++ b/crypto/ghash-generic.c
53 @@ -67,6 +67,9 @@ static int ghash_update(struct shash_desc *desc,
54 struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
55 u8 *dst = dctx->buffer;
56
57 + if (!ctx->gf128)
58 + return -ENOKEY;
59 +
60 if (dctx->bytes) {
61 int n = min(srclen, dctx->bytes);
62 u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
63 @@ -119,6 +122,9 @@ static int ghash_final(struct shash_desc *desc, u8 *dst)
64 struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
65 u8 *buf = dctx->buffer;
66
67 + if (!ctx->gf128)
68 + return -ENOKEY;
69 +
70 ghash_flush(ctx, dctx);
71 memcpy(dst, buf, GHASH_BLOCK_SIZE);
72
73 diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
74 index 41841a3..17cef86 100644
75 --- a/drivers/firewire/sbp2.c
76 +++ b/drivers/firewire/sbp2.c
77 @@ -1198,6 +1198,10 @@ static int sbp2_remove(struct device *dev)
78 {
79 struct fw_unit *unit = fw_unit(dev);
80 struct sbp2_target *tgt = dev_get_drvdata(&unit->device);
81 + struct sbp2_logical_unit *lu;
82 +
83 + list_for_each_entry(lu, &tgt->lu_list, link)
84 + cancel_delayed_work_sync(&lu->work);
85
86 sbp2_target_put(tgt);
87 return 0;
88 diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
89 index ebdb0fd..9a0aee2 100644
90 --- a/drivers/gpu/drm/radeon/atom.c
91 +++ b/drivers/gpu/drm/radeon/atom.c
92 @@ -277,7 +277,12 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
93 case ATOM_ARG_FB:
94 idx = U8(*ptr);
95 (*ptr)++;
96 - val = gctx->scratch[((gctx->fb_base + idx) / 4)];
97 + if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
98 + DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",
99 + gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
100 + val = 0;
101 + } else
102 + val = gctx->scratch[(gctx->fb_base / 4) + idx];
103 if (print)
104 DEBUG("FB[0x%02X]", idx);
105 break;
106 @@ -531,7 +536,11 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
107 case ATOM_ARG_FB:
108 idx = U8(*ptr);
109 (*ptr)++;
110 - gctx->scratch[((gctx->fb_base + idx) / 4)] = val;
111 + if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
112 + DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",
113 + gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
114 + } else
115 + gctx->scratch[(gctx->fb_base / 4) + idx] = val;
116 DEBUG("FB[0x%02X]", idx);
117 break;
118 case ATOM_ARG_PLL:
119 @@ -1367,11 +1376,13 @@ int atom_allocate_fb_scratch(struct atom_context *ctx)
120
121 usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
122 }
123 + ctx->scratch_size_bytes = 0;
124 if (usage_bytes == 0)
125 usage_bytes = 20 * 1024;
126 /* allocate some scratch memory */
127 ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
128 if (!ctx->scratch)
129 return -ENOMEM;
130 + ctx->scratch_size_bytes = usage_bytes;
131 return 0;
132 }
133 diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
134 index a589a55..93cfe20 100644
135 --- a/drivers/gpu/drm/radeon/atom.h
136 +++ b/drivers/gpu/drm/radeon/atom.h
137 @@ -137,6 +137,7 @@ struct atom_context {
138 int cs_equal, cs_above;
139 int io_mode;
140 uint32_t *scratch;
141 + int scratch_size_bytes;
142 };
143
144 extern int atom_debug;
145 diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
146 index b7f0726..e2b2d78 100644
147 --- a/drivers/gpu/drm/ttm/ttm_bo.c
148 +++ b/drivers/gpu/drm/ttm/ttm_bo.c
149 @@ -392,10 +392,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
150 * Create and bind a ttm if required.
151 */
152
153 - if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
154 - ret = ttm_bo_add_ttm(bo, false);
155 - if (ret)
156 - goto out_err;
157 + if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
158 + if (bo->ttm == NULL) {
159 + ret = ttm_bo_add_ttm(bo, false);
160 + if (ret)
161 + goto out_err;
162 + }
163
164 ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
165 if (ret)
166 diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
167 index 77dbf40..ae3c6f5 100644
168 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c
169 +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
170 @@ -635,13 +635,13 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
171 if (ret)
172 return ret;
173
174 - ttm_bo_free_old_node(bo);
175 if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
176 (bo->ttm != NULL)) {
177 ttm_tt_unbind(bo->ttm);
178 ttm_tt_destroy(bo->ttm);
179 bo->ttm = NULL;
180 }
181 + ttm_bo_free_old_node(bo);
182 } else {
183 /**
184 * This should help pipeline ordinary buffer moves.
185 diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
186 index a756ee6..c946d90 100644
187 --- a/drivers/hid/hid-ids.h
188 +++ b/drivers/hid/hid-ids.h
189 @@ -568,6 +568,9 @@
190 #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001
191 #define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE 0x0600
192
193 +#define USB_VENDOR_ID_SIGMA_MICRO 0x1c4f
194 +#define USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD 0x0002
195 +
196 #define USB_VENDOR_ID_SKYCABLE 0x1223
197 #define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07
198
199 diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
200 index 0ec91c1..56d0539 100644
201 --- a/drivers/hid/hid-magicmouse.c
202 +++ b/drivers/hid/hid-magicmouse.c
203 @@ -501,9 +501,17 @@ static int magicmouse_probe(struct hid_device *hdev,
204 }
205 report->size = 6;
206
207 + /*
208 + * Some devices respond with 'invalid report id' when feature
209 + * report switching it into multitouch mode is sent to it.
210 + *
211 + * This results in -EIO from the _raw low-level transport callback,
212 + * but there seems to be no other way of switching the mode.
213 + * Thus the super-ugly hacky success check below.
214 + */
215 ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature),
216 HID_FEATURE_REPORT);
217 - if (ret != sizeof(feature)) {
218 + if (ret != -EIO && ret != sizeof(feature)) {
219 hid_err(hdev, "unable to request touch data (%d)\n", ret);
220 goto err_stop_hw;
221 }
222 diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
223 index 621959d..4bdb5d4 100644
224 --- a/drivers/hid/usbhid/hid-quirks.c
225 +++ b/drivers/hid/usbhid/hid-quirks.c
226 @@ -89,6 +89,7 @@ static const struct hid_blacklist {
227
228 { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH, HID_QUIRK_MULTI_INPUT },
229 { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT },
230 + { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS },
231 { 0, 0 }
232 };
233
234 diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
235 index f2b377c..36d7f27 100644
236 --- a/drivers/hwmon/w83627ehf.c
237 +++ b/drivers/hwmon/w83627ehf.c
238 @@ -390,7 +390,7 @@ temp_from_reg(u16 reg, s16 regval)
239 {
240 if (is_word_sized(reg))
241 return LM75_TEMP_FROM_REG(regval);
242 - return regval * 1000;
243 + return ((s8)regval) * 1000;
244 }
245
246 static inline u16
247 @@ -398,7 +398,8 @@ temp_to_reg(u16 reg, long temp)
248 {
249 if (is_word_sized(reg))
250 return LM75_TEMP_TO_REG(temp);
251 - return DIV_ROUND_CLOSEST(SENSORS_LIMIT(temp, -127000, 128000), 1000);
252 + return (s8)DIV_ROUND_CLOSEST(SENSORS_LIMIT(temp, -127000, 128000),
253 + 1000);
254 }
255
256 /* Some of analog inputs have internal scaling (2x), 8mV is ADC LSB */
257 @@ -1715,7 +1716,8 @@ static void w83627ehf_device_remove_files(struct device *dev)
258 }
259
260 /* Get the monitoring functions started */
261 -static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data)
262 +static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data,
263 + enum kinds kind)
264 {
265 int i;
266 u8 tmp, diode;
267 @@ -1746,10 +1748,16 @@ static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data)
268 w83627ehf_write_value(data, W83627EHF_REG_VBAT, tmp | 0x01);
269
270 /* Get thermal sensor types */
271 - diode = w83627ehf_read_value(data, W83627EHF_REG_DIODE);
272 + switch (kind) {
273 + case w83627ehf:
274 + diode = w83627ehf_read_value(data, W83627EHF_REG_DIODE);
275 + break;
276 + default:
277 + diode = 0x70;
278 + }
279 for (i = 0; i < 3; i++) {
280 if ((tmp & (0x02 << i)))
281 - data->temp_type[i] = (diode & (0x10 << i)) ? 1 : 2;
282 + data->temp_type[i] = (diode & (0x10 << i)) ? 1 : 3;
283 else
284 data->temp_type[i] = 4; /* thermistor */
285 }
286 @@ -2016,7 +2024,7 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
287 }
288
289 /* Initialize the chip */
290 - w83627ehf_init_device(data);
291 + w83627ehf_init_device(data, sio_data->kind);
292
293 data->vrm = vid_which_vrm();
294 superio_enter(sio_data->sioreg);
295 diff --git a/drivers/media/video/uvc/uvc_entity.c b/drivers/media/video/uvc/uvc_entity.c
296 index 48fea37..29e2399 100644
297 --- a/drivers/media/video/uvc/uvc_entity.c
298 +++ b/drivers/media/video/uvc/uvc_entity.c
299 @@ -49,7 +49,7 @@ static int uvc_mc_register_entity(struct uvc_video_chain *chain,
300 if (remote == NULL)
301 return -EINVAL;
302
303 - source = (UVC_ENTITY_TYPE(remote) != UVC_TT_STREAMING)
304 + source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING)
305 ? (remote->vdev ? &remote->vdev->entity : NULL)
306 : &remote->subdev.entity;
307 if (source == NULL)
308 diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
309 index d347116..1658575 100644
310 --- a/drivers/platform/x86/samsung-laptop.c
311 +++ b/drivers/platform/x86/samsung-laptop.c
312 @@ -601,6 +601,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
313 .callback = dmi_check_cb,
314 },
315 {
316 + .ident = "N150/N210/N220",
317 + .matches = {
318 + DMI_MATCH(DMI_SYS_VENDOR,
319 + "SAMSUNG ELECTRONICS CO., LTD."),
320 + DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220"),
321 + DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220"),
322 + },
323 + .callback = dmi_check_cb,
324 + },
325 + {
326 .ident = "N150/N210/N220/N230",
327 .matches = {
328 DMI_MATCH(DMI_SYS_VENDOR,
329 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
330 index fc7e57b..53e7d72 100644
331 --- a/fs/cifs/cifsfs.c
332 +++ b/fs/cifs/cifsfs.c
333 @@ -566,6 +566,12 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
334 struct inode *dir = dentry->d_inode;
335 struct dentry *child;
336
337 + if (!dir) {
338 + dput(dentry);
339 + dentry = ERR_PTR(-ENOENT);
340 + break;
341 + }
342 +
343 /* skip separators */
344 while (*s == sep)
345 s++;
346 @@ -581,10 +587,6 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
347 mutex_unlock(&dir->i_mutex);
348 dput(dentry);
349 dentry = child;
350 - if (!dentry->d_inode) {
351 - dput(dentry);
352 - dentry = ERR_PTR(-ENOENT);
353 - }
354 } while (!IS_ERR(dentry));
355 _FreeXid(xid);
356 kfree(full_path);
357 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
358 index 168a80f..5cb8614 100644
359 --- a/fs/fuse/dev.c
360 +++ b/fs/fuse/dev.c
361 @@ -258,10 +258,14 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
362 forget->forget_one.nlookup = nlookup;
363
364 spin_lock(&fc->lock);
365 - fc->forget_list_tail->next = forget;
366 - fc->forget_list_tail = forget;
367 - wake_up(&fc->waitq);
368 - kill_fasync(&fc->fasync, SIGIO, POLL_IN);
369 + if (fc->connected) {
370 + fc->forget_list_tail->next = forget;
371 + fc->forget_list_tail = forget;
372 + wake_up(&fc->waitq);
373 + kill_fasync(&fc->fasync, SIGIO, POLL_IN);
374 + } else {
375 + kfree(forget);
376 + }
377 spin_unlock(&fc->lock);
378 }
379
380 diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
381 index d685752..4e7f64b 100644
382 --- a/fs/hfsplus/hfsplus_fs.h
383 +++ b/fs/hfsplus/hfsplus_fs.h
384 @@ -13,6 +13,7 @@
385 #include <linux/fs.h>
386 #include <linux/mutex.h>
387 #include <linux/buffer_head.h>
388 +#include <linux/blkdev.h>
389 #include "hfsplus_raw.h"
390
391 #define DBG_BNODE_REFS 0x00000001
392 @@ -110,7 +111,9 @@ struct hfsplus_vh;
393 struct hfs_btree;
394
395 struct hfsplus_sb_info {
396 + void *s_vhdr_buf;
397 struct hfsplus_vh *s_vhdr;
398 + void *s_backup_vhdr_buf;
399 struct hfsplus_vh *s_backup_vhdr;
400 struct hfs_btree *ext_tree;
401 struct hfs_btree *cat_tree;
402 @@ -258,6 +261,15 @@ struct hfsplus_readdir_data {
403 struct hfsplus_cat_key key;
404 };
405
406 +/*
407 + * Find minimum acceptable I/O size for an hfsplus sb.
408 + */
409 +static inline unsigned short hfsplus_min_io_size(struct super_block *sb)
410 +{
411 + return max_t(unsigned short, bdev_logical_block_size(sb->s_bdev),
412 + HFSPLUS_SECTOR_SIZE);
413 +}
414 +
415 #define hfs_btree_open hfsplus_btree_open
416 #define hfs_btree_close hfsplus_btree_close
417 #define hfs_btree_write hfsplus_btree_write
418 @@ -436,8 +448,8 @@ int hfsplus_compare_dentry(const struct dentry *parent,
419 /* wrapper.c */
420 int hfsplus_read_wrapper(struct super_block *);
421 int hfs_part_find(struct super_block *, sector_t *, sector_t *);
422 -int hfsplus_submit_bio(struct block_device *bdev, sector_t sector,
423 - void *data, int rw);
424 +int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
425 + void *buf, void **data, int rw);
426
427 /* time macros */
428 #define __hfsp_mt2ut(t) (be32_to_cpu(t) - 2082844800U)
429 diff --git a/fs/hfsplus/part_tbl.c b/fs/hfsplus/part_tbl.c
430 index 40ad88c..eb355d8 100644
431 --- a/fs/hfsplus/part_tbl.c
432 +++ b/fs/hfsplus/part_tbl.c
433 @@ -88,11 +88,12 @@ static int hfs_parse_old_pmap(struct super_block *sb, struct old_pmap *pm,
434 return -ENOENT;
435 }
436
437 -static int hfs_parse_new_pmap(struct super_block *sb, struct new_pmap *pm,
438 - sector_t *part_start, sector_t *part_size)
439 +static int hfs_parse_new_pmap(struct super_block *sb, void *buf,
440 + struct new_pmap *pm, sector_t *part_start, sector_t *part_size)
441 {
442 struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
443 int size = be32_to_cpu(pm->pmMapBlkCnt);
444 + int buf_size = hfsplus_min_io_size(sb);
445 int res;
446 int i = 0;
447
448 @@ -107,11 +108,14 @@ static int hfs_parse_new_pmap(struct super_block *sb, struct new_pmap *pm,
449 if (++i >= size)
450 return -ENOENT;
451
452 - res = hfsplus_submit_bio(sb->s_bdev,
453 - *part_start + HFS_PMAP_BLK + i,
454 - pm, READ);
455 - if (res)
456 - return res;
457 + pm = (struct new_pmap *)((u8 *)pm + HFSPLUS_SECTOR_SIZE);
458 + if ((u8 *)pm - (u8 *)buf >= buf_size) {
459 + res = hfsplus_submit_bio(sb,
460 + *part_start + HFS_PMAP_BLK + i,
461 + buf, (void **)&pm, READ);
462 + if (res)
463 + return res;
464 + }
465 } while (pm->pmSig == cpu_to_be16(HFS_NEW_PMAP_MAGIC));
466
467 return -ENOENT;
468 @@ -124,15 +128,15 @@ static int hfs_parse_new_pmap(struct super_block *sb, struct new_pmap *pm,
469 int hfs_part_find(struct super_block *sb,
470 sector_t *part_start, sector_t *part_size)
471 {
472 - void *data;
473 + void *buf, *data;
474 int res;
475
476 - data = kmalloc(HFSPLUS_SECTOR_SIZE, GFP_KERNEL);
477 - if (!data)
478 + buf = kmalloc(hfsplus_min_io_size(sb), GFP_KERNEL);
479 + if (!buf)
480 return -ENOMEM;
481
482 - res = hfsplus_submit_bio(sb->s_bdev, *part_start + HFS_PMAP_BLK,
483 - data, READ);
484 + res = hfsplus_submit_bio(sb, *part_start + HFS_PMAP_BLK,
485 + buf, &data, READ);
486 if (res)
487 goto out;
488
489 @@ -141,13 +145,13 @@ int hfs_part_find(struct super_block *sb,
490 res = hfs_parse_old_pmap(sb, data, part_start, part_size);
491 break;
492 case HFS_NEW_PMAP_MAGIC:
493 - res = hfs_parse_new_pmap(sb, data, part_start, part_size);
494 + res = hfs_parse_new_pmap(sb, buf, data, part_start, part_size);
495 break;
496 default:
497 res = -ENOENT;
498 break;
499 }
500 out:
501 - kfree(data);
502 + kfree(buf);
503 return res;
504 }
505 diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
506 index 84a47b7..c3a76fd 100644
507 --- a/fs/hfsplus/super.c
508 +++ b/fs/hfsplus/super.c
509 @@ -197,17 +197,17 @@ int hfsplus_sync_fs(struct super_block *sb, int wait)
510 write_backup = 1;
511 }
512
513 - error2 = hfsplus_submit_bio(sb->s_bdev,
514 + error2 = hfsplus_submit_bio(sb,
515 sbi->part_start + HFSPLUS_VOLHEAD_SECTOR,
516 - sbi->s_vhdr, WRITE_SYNC);
517 + sbi->s_vhdr_buf, NULL, WRITE_SYNC);
518 if (!error)
519 error = error2;
520 if (!write_backup)
521 goto out;
522
523 - error2 = hfsplus_submit_bio(sb->s_bdev,
524 + error2 = hfsplus_submit_bio(sb,
525 sbi->part_start + sbi->sect_count - 2,
526 - sbi->s_backup_vhdr, WRITE_SYNC);
527 + sbi->s_backup_vhdr_buf, NULL, WRITE_SYNC);
528 if (!error)
529 error2 = error;
530 out:
531 @@ -251,8 +251,8 @@ static void hfsplus_put_super(struct super_block *sb)
532 hfs_btree_close(sbi->ext_tree);
533 iput(sbi->alloc_file);
534 iput(sbi->hidden_dir);
535 - kfree(sbi->s_vhdr);
536 - kfree(sbi->s_backup_vhdr);
537 + kfree(sbi->s_vhdr_buf);
538 + kfree(sbi->s_backup_vhdr_buf);
539 unload_nls(sbi->nls);
540 kfree(sb->s_fs_info);
541 sb->s_fs_info = NULL;
542 @@ -508,8 +508,8 @@ out_close_cat_tree:
543 out_close_ext_tree:
544 hfs_btree_close(sbi->ext_tree);
545 out_free_vhdr:
546 - kfree(sbi->s_vhdr);
547 - kfree(sbi->s_backup_vhdr);
548 + kfree(sbi->s_vhdr_buf);
549 + kfree(sbi->s_backup_vhdr_buf);
550 out_unload_nls:
551 unload_nls(sbi->nls);
552 unload_nls(nls);
553 diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
554 index 4ac88ff..7b8112d 100644
555 --- a/fs/hfsplus/wrapper.c
556 +++ b/fs/hfsplus/wrapper.c
557 @@ -31,25 +31,67 @@ static void hfsplus_end_io_sync(struct bio *bio, int err)
558 complete(bio->bi_private);
559 }
560
561 -int hfsplus_submit_bio(struct block_device *bdev, sector_t sector,
562 - void *data, int rw)
563 +/*
564 + * hfsplus_submit_bio - Perform block I/O
565 + * @sb: super block of volume for I/O
566 + * @sector: block to read or write, for blocks of HFSPLUS_SECTOR_SIZE bytes
567 + * @buf: buffer for I/O
568 + * @data: output pointer for location of requested data
569 + * @rw: direction of I/O
570 + *
571 + * The unit of I/O is hfsplus_min_io_size(sb), which may be bigger than
572 + * HFSPLUS_SECTOR_SIZE, and @buf must be sized accordingly. On reads
573 + * @data will return a pointer to the start of the requested sector,
574 + * which may not be the same location as @buf.
575 + *
576 + * If @sector is not aligned to the bdev logical block size it will
577 + * be rounded down. For writes this means that @buf should contain data
578 + * that starts at the rounded-down address. As long as the data was
579 + * read using hfsplus_submit_bio() and the same buffer is used things
580 + * will work correctly.
581 + */
582 +int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
583 + void *buf, void **data, int rw)
584 {
585 DECLARE_COMPLETION_ONSTACK(wait);
586 struct bio *bio;
587 int ret = 0;
588 + unsigned int io_size;
589 + loff_t start;
590 + int offset;
591 +
592 + /*
593 + * Align sector to hardware sector size and find offset. We
594 + * assume that io_size is a power of two, which _should_
595 + * be true.
596 + */
597 + io_size = hfsplus_min_io_size(sb);
598 + start = (loff_t)sector << HFSPLUS_SECTOR_SHIFT;
599 + offset = start & (io_size - 1);
600 + sector &= ~((io_size >> HFSPLUS_SECTOR_SHIFT) - 1);
601
602 bio = bio_alloc(GFP_NOIO, 1);
603 bio->bi_sector = sector;
604 - bio->bi_bdev = bdev;
605 + bio->bi_bdev = sb->s_bdev;
606 bio->bi_end_io = hfsplus_end_io_sync;
607 bio->bi_private = &wait;
608
609 - /*
610 - * We always submit one sector at a time, so bio_add_page must not fail.
611 - */
612 - if (bio_add_page(bio, virt_to_page(data), HFSPLUS_SECTOR_SIZE,
613 - offset_in_page(data)) != HFSPLUS_SECTOR_SIZE)
614 - BUG();
615 + if (!(rw & WRITE) && data)
616 + *data = (u8 *)buf + offset;
617 +
618 + while (io_size > 0) {
619 + unsigned int page_offset = offset_in_page(buf);
620 + unsigned int len = min_t(unsigned int, PAGE_SIZE - page_offset,
621 + io_size);
622 +
623 + ret = bio_add_page(bio, virt_to_page(buf), len, page_offset);
624 + if (ret != len) {
625 + ret = -EIO;
626 + goto out;
627 + }
628 + io_size -= len;
629 + buf = (u8 *)buf + len;
630 + }
631
632 submit_bio(rw, bio);
633 wait_for_completion(&wait);
634 @@ -57,8 +99,9 @@ int hfsplus_submit_bio(struct block_device *bdev, sector_t sector,
635 if (!bio_flagged(bio, BIO_UPTODATE))
636 ret = -EIO;
637
638 +out:
639 bio_put(bio);
640 - return ret;
641 + return ret < 0 ? ret : 0;
642 }
643
644 static int hfsplus_read_mdb(void *bufptr, struct hfsplus_wd *wd)
645 @@ -147,17 +190,17 @@ int hfsplus_read_wrapper(struct super_block *sb)
646 }
647
648 error = -ENOMEM;
649 - sbi->s_vhdr = kmalloc(HFSPLUS_SECTOR_SIZE, GFP_KERNEL);
650 - if (!sbi->s_vhdr)
651 + sbi->s_vhdr_buf = kmalloc(hfsplus_min_io_size(sb), GFP_KERNEL);
652 + if (!sbi->s_vhdr_buf)
653 goto out;
654 - sbi->s_backup_vhdr = kmalloc(HFSPLUS_SECTOR_SIZE, GFP_KERNEL);
655 - if (!sbi->s_backup_vhdr)
656 + sbi->s_backup_vhdr_buf = kmalloc(hfsplus_min_io_size(sb), GFP_KERNEL);
657 + if (!sbi->s_backup_vhdr_buf)
658 goto out_free_vhdr;
659
660 reread:
661 - error = hfsplus_submit_bio(sb->s_bdev,
662 - part_start + HFSPLUS_VOLHEAD_SECTOR,
663 - sbi->s_vhdr, READ);
664 + error = hfsplus_submit_bio(sb, part_start + HFSPLUS_VOLHEAD_SECTOR,
665 + sbi->s_vhdr_buf, (void **)&sbi->s_vhdr,
666 + READ);
667 if (error)
668 goto out_free_backup_vhdr;
669
670 @@ -186,9 +229,9 @@ reread:
671 goto reread;
672 }
673
674 - error = hfsplus_submit_bio(sb->s_bdev,
675 - part_start + part_size - 2,
676 - sbi->s_backup_vhdr, READ);
677 + error = hfsplus_submit_bio(sb, part_start + part_size - 2,
678 + sbi->s_backup_vhdr_buf,
679 + (void **)&sbi->s_backup_vhdr, READ);
680 if (error)
681 goto out_free_backup_vhdr;
682
683 @@ -232,9 +275,9 @@ reread:
684 return 0;
685
686 out_free_backup_vhdr:
687 - kfree(sbi->s_backup_vhdr);
688 + kfree(sbi->s_backup_vhdr_buf);
689 out_free_vhdr:
690 - kfree(sbi->s_vhdr);
691 + kfree(sbi->s_vhdr_buf);
692 out:
693 return error;
694 }
695 diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
696 index 8633521..8731516 100644
697 --- a/fs/xfs/linux-2.6/xfs_linux.h
698 +++ b/fs/xfs/linux-2.6/xfs_linux.h
699 @@ -70,6 +70,8 @@
700 #include <linux/ctype.h>
701 #include <linux/writeback.h>
702 #include <linux/capability.h>
703 +#include <linux/kthread.h>
704 +#include <linux/freezer.h>
705 #include <linux/list_sort.h>
706
707 #include <asm/page.h>
708 diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
709 index a1a881e..347cae9 100644
710 --- a/fs/xfs/linux-2.6/xfs_super.c
711 +++ b/fs/xfs/linux-2.6/xfs_super.c
712 @@ -1412,37 +1412,35 @@ xfs_fs_fill_super(
713 sb->s_time_gran = 1;
714 set_posix_acl_flag(sb);
715
716 - error = xfs_syncd_init(mp);
717 - if (error)
718 - goto out_filestream_unmount;
719 -
720 xfs_inode_shrinker_register(mp);
721
722 error = xfs_mountfs(mp);
723 if (error)
724 - goto out_syncd_stop;
725 + goto out_filestream_unmount;
726 +
727 + error = xfs_syncd_init(mp);
728 + if (error)
729 + goto out_unmount;
730
731 root = igrab(VFS_I(mp->m_rootip));
732 if (!root) {
733 error = ENOENT;
734 - goto fail_unmount;
735 + goto out_syncd_stop;
736 }
737 if (is_bad_inode(root)) {
738 error = EINVAL;
739 - goto fail_vnrele;
740 + goto out_syncd_stop;
741 }
742 sb->s_root = d_alloc_root(root);
743 if (!sb->s_root) {
744 error = ENOMEM;
745 - goto fail_vnrele;
746 + goto out_iput;
747 }
748
749 return 0;
750
751 - out_syncd_stop:
752 - xfs_inode_shrinker_unregister(mp);
753 - xfs_syncd_stop(mp);
754 out_filestream_unmount:
755 + xfs_inode_shrinker_unregister(mp);
756 xfs_filestream_unmount(mp);
757 out_free_sb:
758 xfs_freesb(mp);
759 @@ -1456,17 +1454,12 @@ xfs_fs_fill_super(
760 out:
761 return -error;
762
763 - fail_vnrele:
764 - if (sb->s_root) {
765 - dput(sb->s_root);
766 - sb->s_root = NULL;
767 - } else {
768 - iput(root);
769 - }
770 -
771 - fail_unmount:
772 - xfs_inode_shrinker_unregister(mp);
773 + out_iput:
774 + iput(root);
775 + out_syncd_stop:
776 xfs_syncd_stop(mp);
777 + out_unmount:
778 + xfs_inode_shrinker_unregister(mp);
779
780 /*
781 * Blow away any referenced inode in the filestreams cache.
782 @@ -1667,24 +1660,13 @@ xfs_init_workqueues(void)
783 */
784 xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8);
785 if (!xfs_syncd_wq)
786 - goto out;
787 -
788 - xfs_ail_wq = alloc_workqueue("xfsail", WQ_CPU_INTENSIVE, 8);
789 - if (!xfs_ail_wq)
790 - goto out_destroy_syncd;
791 -
792 + return -ENOMEM;
793 return 0;
794 -
795 -out_destroy_syncd:
796 - destroy_workqueue(xfs_syncd_wq);
797 -out:
798 - return -ENOMEM;
799 }
800
801 STATIC void
802 xfs_destroy_workqueues(void)
803 {
804 - destroy_workqueue(xfs_ail_wq);
805 destroy_workqueue(xfs_syncd_wq);
806 }
807
808 diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c
809 index 9e0e2fa..8126fc2 100644
810 --- a/fs/xfs/quota/xfs_dquot_item.c
811 +++ b/fs/xfs/quota/xfs_dquot_item.c
812 @@ -183,13 +183,14 @@ xfs_qm_dqunpin_wait(
813 * search the buffer cache can be a time consuming thing, and AIL lock is a
814 * spinlock.
815 */
816 -STATIC void
817 +STATIC bool
818 xfs_qm_dquot_logitem_pushbuf(
819 struct xfs_log_item *lip)
820 {
821 struct xfs_dq_logitem *qlip = DQUOT_ITEM(lip);
822 struct xfs_dquot *dqp = qlip->qli_dquot;
823 struct xfs_buf *bp;
824 + bool ret = true;
825
826 ASSERT(XFS_DQ_IS_LOCKED(dqp));
827
828 @@ -201,17 +202,20 @@ xfs_qm_dquot_logitem_pushbuf(
829 if (completion_done(&dqp->q_flush) ||
830 !(lip->li_flags & XFS_LI_IN_AIL)) {
831 xfs_dqunlock(dqp);
832 - return;
833 + return true;
834 }
835
836 bp = xfs_incore(dqp->q_mount->m_ddev_targp, qlip->qli_format.qlf_blkno,
837 dqp->q_mount->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
838 xfs_dqunlock(dqp);
839 if (!bp)
840 - return;
841 + return true;
842 if (XFS_BUF_ISDELAYWRITE(bp))
843 xfs_buf_delwri_promote(bp);
844 + if (XFS_BUF_ISPINNED(bp))
845 + ret = false;
846 xfs_buf_relse(bp);
847 + return ret;
848 }
849
850 /*
851 diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
852 index 7b7e005..a7342e8 100644
853 --- a/fs/xfs/xfs_buf_item.c
854 +++ b/fs/xfs/xfs_buf_item.c
855 @@ -632,7 +632,7 @@ xfs_buf_item_push(
856 * the xfsbufd to get this buffer written. We have to unlock the buffer
857 * to allow the xfsbufd to write it, too.
858 */
859 -STATIC void
860 +STATIC bool
861 xfs_buf_item_pushbuf(
862 struct xfs_log_item *lip)
863 {
864 @@ -646,6 +646,7 @@ xfs_buf_item_pushbuf(
865
866 xfs_buf_delwri_promote(bp);
867 xfs_buf_relse(bp);
868 + return true;
869 }
870
871 STATIC void
872 diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
873 index b1e88d5..391044c 100644
874 --- a/fs/xfs/xfs_inode_item.c
875 +++ b/fs/xfs/xfs_inode_item.c
876 @@ -713,13 +713,14 @@ xfs_inode_item_committed(
877 * marked delayed write. If that's the case, we'll promote it and that will
878 * allow the caller to write the buffer by triggering the xfsbufd to run.
879 */
880 -STATIC void
881 +STATIC bool
882 xfs_inode_item_pushbuf(
883 struct xfs_log_item *lip)
884 {
885 struct xfs_inode_log_item *iip = INODE_ITEM(lip);
886 struct xfs_inode *ip = iip->ili_inode;
887 struct xfs_buf *bp;
888 + bool ret = true;
889
890 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
891
892 @@ -730,7 +731,7 @@ xfs_inode_item_pushbuf(
893 if (completion_done(&ip->i_flush) ||
894 !(lip->li_flags & XFS_LI_IN_AIL)) {
895 xfs_iunlock(ip, XFS_ILOCK_SHARED);
896 - return;
897 + return true;
898 }
899
900 bp = xfs_incore(ip->i_mount->m_ddev_targp, iip->ili_format.ilf_blkno,
901 @@ -738,10 +739,13 @@ xfs_inode_item_pushbuf(
902
903 xfs_iunlock(ip, XFS_ILOCK_SHARED);
904 if (!bp)
905 - return;
906 + return true;
907 if (XFS_BUF_ISDELAYWRITE(bp))
908 xfs_buf_delwri_promote(bp);
909 + if (XFS_BUF_ISPINNED(bp))
910 + ret = false;
911 xfs_buf_relse(bp);
912 + return ret;
913 }
914
915 /*
916 diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
917 index c83f63b..efc147f 100644
918 --- a/fs/xfs/xfs_trans.c
919 +++ b/fs/xfs/xfs_trans.c
920 @@ -1426,6 +1426,7 @@ xfs_trans_committed(
921 static inline void
922 xfs_log_item_batch_insert(
923 struct xfs_ail *ailp,
924 + struct xfs_ail_cursor *cur,
925 struct xfs_log_item **log_items,
926 int nr_items,
927 xfs_lsn_t commit_lsn)
928 @@ -1434,7 +1435,7 @@ xfs_log_item_batch_insert(
929
930 spin_lock(&ailp->xa_lock);
931 /* xfs_trans_ail_update_bulk drops ailp->xa_lock */
932 - xfs_trans_ail_update_bulk(ailp, log_items, nr_items, commit_lsn);
933 + xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);
934
935 for (i = 0; i < nr_items; i++)
936 IOP_UNPIN(log_items[i], 0);
937 @@ -1452,6 +1453,13 @@ xfs_log_item_batch_insert(
938 * as an iclog write error even though we haven't started any IO yet. Hence in
939 * this case all we need to do is IOP_COMMITTED processing, followed by an
940 * IOP_UNPIN(aborted) call.
941 + *
942 + * The AIL cursor is used to optimise the insert process. If commit_lsn is not
943 + * at the end of the AIL, the insert cursor avoids the need to walk
944 + * the AIL to find the insertion point on every xfs_log_item_batch_insert()
945 + * call. This saves a lot of needless list walking and is a net win, even
946 + * though it slightly increases the amount of AIL lock traffic to set it up
947 + * and tear it down.
948 */
949 void
950 xfs_trans_committed_bulk(
951 @@ -1463,8 +1471,13 @@ xfs_trans_committed_bulk(
952 #define LOG_ITEM_BATCH_SIZE 32
953 struct xfs_log_item *log_items[LOG_ITEM_BATCH_SIZE];
954 struct xfs_log_vec *lv;
955 + struct xfs_ail_cursor cur;
956 int i = 0;
957
958 + spin_lock(&ailp->xa_lock);
959 + xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
960 + spin_unlock(&ailp->xa_lock);
961 +
962 /* unpin all the log items */
963 for (lv = log_vector; lv; lv = lv->lv_next ) {
964 struct xfs_log_item *lip = lv->lv_item;
965 @@ -1493,7 +1506,9 @@ xfs_trans_committed_bulk(
966 /*
967 * Not a bulk update option due to unusual item_lsn.
968 * Push into AIL immediately, rechecking the lsn once
969 - * we have the ail lock. Then unpin the item.
970 + * we have the ail lock. Then unpin the item. This does
971 + * not affect the AIL cursor the bulk insert path is
972 + * using.
973 */
974 spin_lock(&ailp->xa_lock);
975 if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
976 @@ -1507,7 +1522,7 @@ xfs_trans_committed_bulk(
977 /* Item is a candidate for bulk AIL insert. */
978 log_items[i++] = lv->lv_item;
979 if (i >= LOG_ITEM_BATCH_SIZE) {
980 - xfs_log_item_batch_insert(ailp, log_items,
981 + xfs_log_item_batch_insert(ailp, &cur, log_items,
982 LOG_ITEM_BATCH_SIZE, commit_lsn);
983 i = 0;
984 }
985 @@ -1515,7 +1530,11 @@ xfs_trans_committed_bulk(
986
987 /* make sure we insert the remainder! */
988 if (i)
989 - xfs_log_item_batch_insert(ailp, log_items, i, commit_lsn);
990 + xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);
991 +
992 + spin_lock(&ailp->xa_lock);
993 + xfs_trans_ail_cursor_done(ailp, &cur);
994 + spin_unlock(&ailp->xa_lock);
995 }
996
997 /*
998 diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
999 index 06a9759..53597f4 100644
1000 --- a/fs/xfs/xfs_trans.h
1001 +++ b/fs/xfs/xfs_trans.h
1002 @@ -350,7 +350,7 @@ typedef struct xfs_item_ops {
1003 void (*iop_unlock)(xfs_log_item_t *);
1004 xfs_lsn_t (*iop_committed)(xfs_log_item_t *, xfs_lsn_t);
1005 void (*iop_push)(xfs_log_item_t *);
1006 - void (*iop_pushbuf)(xfs_log_item_t *);
1007 + bool (*iop_pushbuf)(xfs_log_item_t *);
1008 void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t);
1009 } xfs_item_ops_t;
1010
1011 diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
1012 index 5fc2380..a4c281b 100644
1013 --- a/fs/xfs/xfs_trans_ail.c
1014 +++ b/fs/xfs/xfs_trans_ail.c
1015 @@ -28,8 +28,6 @@
1016 #include "xfs_trans_priv.h"
1017 #include "xfs_error.h"
1018
1019 -struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */
1020 -
1021 #ifdef DEBUG
1022 /*
1023 * Check that the list is sorted as it should be.
1024 @@ -272,9 +270,9 @@ xfs_trans_ail_cursor_clear(
1025 }
1026
1027 /*
1028 - * Return the item in the AIL with the current lsn.
1029 - * Return the current tree generation number for use
1030 - * in calls to xfs_trans_next_ail().
1031 + * Initialise the cursor to the first item in the AIL with the given @lsn.
1032 + * This searches the list from lowest LSN to highest. Pass a @lsn of zero
1033 + * to initialise the cursor to the first item in the AIL.
1034 */
1035 xfs_log_item_t *
1036 xfs_trans_ail_cursor_first(
1037 @@ -300,31 +298,97 @@ out:
1038 }
1039
1040 /*
1041 - * splice the log item list into the AIL at the given LSN.
1042 + * Initialise the cursor to the last item in the AIL with the given @lsn.
1043 + * This searches the list from highest LSN to lowest. If there is no item with
1044 + * the value of @lsn, then it sets the cursor to the last item with an LSN lower
1045 + * than @lsn.
1046 + */
1047 +static struct xfs_log_item *
1048 +__xfs_trans_ail_cursor_last(
1049 + struct xfs_ail *ailp,
1050 + xfs_lsn_t lsn)
1051 +{
1052 + xfs_log_item_t *lip;
1053 +
1054 + list_for_each_entry_reverse(lip, &ailp->xa_ail, li_ail) {
1055 + if (XFS_LSN_CMP(lip->li_lsn, lsn) <= 0)
1056 + return lip;
1057 + }
1058 + return NULL;
1059 +}
1060 +
1061 +/*
1062 + * Initialise the cursor to the last item in the AIL with the given @lsn.
1063 + * This searches the list from highest LSN to lowest.
1064 + */
1065 +struct xfs_log_item *
1066 +xfs_trans_ail_cursor_last(
1067 + struct xfs_ail *ailp,
1068 + struct xfs_ail_cursor *cur,
1069 + xfs_lsn_t lsn)
1070 +{
1071 + xfs_trans_ail_cursor_init(ailp, cur);
1072 + cur->item = __xfs_trans_ail_cursor_last(ailp, lsn);
1073 + return cur->item;
1074 +}
1075 +
1076 +/*
1077 + * splice the log item list into the AIL at the given LSN. We splice to the
1078 + * tail of the given LSN to maintain insert order for push traversals. The
1079 + * cursor is optional, allowing repeated updates to the same LSN to avoid
1080 + * repeated traversals.
1081 */
1082 static void
1083 xfs_ail_splice(
1084 - struct xfs_ail *ailp,
1085 - struct list_head *list,
1086 - xfs_lsn_t lsn)
1087 + struct xfs_ail *ailp,
1088 + struct xfs_ail_cursor *cur,
1089 + struct list_head *list,
1090 + xfs_lsn_t lsn)
1091 {
1092 - xfs_log_item_t *next_lip;
1093 + struct xfs_log_item *lip = cur ? cur->item : NULL;
1094 + struct xfs_log_item *next_lip;
1095
1096 - /* If the list is empty, just insert the item. */
1097 - if (list_empty(&ailp->xa_ail)) {
1098 - list_splice(list, &ailp->xa_ail);
1099 - return;
1100 - }
1101 + /*
1102 + * Get a new cursor if we don't have a placeholder or the existing one
1103 + * has been invalidated.
1104 + */
1105 + if (!lip || (__psint_t)lip & 1) {
1106 + lip = __xfs_trans_ail_cursor_last(ailp, lsn);
1107
1108 - list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) {
1109 - if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0)
1110 - break;
1111 + if (!lip) {
1112 + /* The list is empty, so just splice and return. */
1113 + if (cur)
1114 + cur->item = NULL;
1115 + list_splice(list, &ailp->xa_ail);
1116 + return;
1117 + }
1118 }
1119
1120 - ASSERT(&next_lip->li_ail == &ailp->xa_ail ||
1121 - XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0);
1122 -
1123 - list_splice_init(list, &next_lip->li_ail);
1124 + /*
1125 + * Our cursor points to the item we want to insert _after_, so we have
1126 + * to update the cursor to point to the end of the list we are splicing
1127 + * in so that it points to the correct location for the next splice.
1128 + * i.e. before the splice
1129 + *
1130 + * lsn -> lsn -> lsn + x -> lsn + x ...
1131 + * ^
1132 + * | cursor points here
1133 + *
1134 + * After the splice we have:
1135 + *
1136 + * lsn -> lsn -> lsn -> lsn -> .... -> lsn -> lsn + x -> lsn + x ...
1137 + * ^ ^
1138 + * | cursor points here | needs to move here
1139 + *
1140 + * So we set the cursor to the last item in the list to be spliced
1141 + * before we execute the splice, resulting in the cursor pointing to
1142 + * the correct item after the splice occurs.
1143 + */
1144 + if (cur) {
1145 + next_lip = list_entry(list->prev, struct xfs_log_item, li_ail);
1146 + cur->item = next_lip;
1147 + }
1148 + list_splice(list, &lip->li_ail);
1149 }
1150
1151 /*
1152 @@ -340,16 +404,10 @@ xfs_ail_delete(
1153 xfs_trans_ail_cursor_clear(ailp, lip);
1154 }
1155
1156 -/*
1157 - * xfs_ail_worker does the work of pushing on the AIL. It will requeue itself
1158 - * to run at a later time if there is more work to do to complete the push.
1159 - */
1160 -STATIC void
1161 -xfs_ail_worker(
1162 - struct work_struct *work)
1163 +static long
1164 +xfsaild_push(
1165 + struct xfs_ail *ailp)
1166 {
1167 - struct xfs_ail *ailp = container_of(to_delayed_work(work),
1168 - struct xfs_ail, xa_work);
1169 xfs_mount_t *mp = ailp->xa_mount;
1170 struct xfs_ail_cursor *cur = &ailp->xa_cursors;
1171 xfs_log_item_t *lip;
1172 @@ -412,8 +470,13 @@ xfs_ail_worker(
1173
1174 case XFS_ITEM_PUSHBUF:
1175 XFS_STATS_INC(xs_push_ail_pushbuf);
1176 - IOP_PUSHBUF(lip);
1177 - ailp->xa_last_pushed_lsn = lsn;
1178 +
1179 + if (!IOP_PUSHBUF(lip)) {
1180 + stuck++;
1181 + flush_log = 1;
1182 + } else {
1183 + ailp->xa_last_pushed_lsn = lsn;
1184 + }
1185 push_xfsbufd = 1;
1186 break;
1187
1188 @@ -425,7 +488,6 @@ xfs_ail_worker(
1189
1190 case XFS_ITEM_LOCKED:
1191 XFS_STATS_INC(xs_push_ail_locked);
1192 - ailp->xa_last_pushed_lsn = lsn;
1193 stuck++;
1194 break;
1195
1196 @@ -486,20 +548,6 @@ out_done:
1197 /* We're past our target or empty, so idle */
1198 ailp->xa_last_pushed_lsn = 0;
1199
1200 - /*
1201 - * We clear the XFS_AIL_PUSHING_BIT first before checking
1202 - * whether the target has changed. If the target has changed,
1203 - * this pushes the requeue race directly onto the result of the
1204 - * atomic test/set bit, so we are guaranteed that either the
1205 - * the pusher that changed the target or ourselves will requeue
1206 - * the work (but not both).
1207 - */
1208 - clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
1209 - smp_rmb();
1210 - if (XFS_LSN_CMP(ailp->xa_target, target) == 0 ||
1211 - test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
1212 - return;
1213 -
1214 tout = 50;
1215 } else if (XFS_LSN_CMP(lsn, target) >= 0) {
1216 /*
1217 @@ -522,9 +570,30 @@ out_done:
1218 tout = 20;
1219 }
1220
1221 - /* There is more to do, requeue us. */
1222 - queue_delayed_work(xfs_syncd_wq, &ailp->xa_work,
1223 - msecs_to_jiffies(tout));
1224 + return tout;
1225 +}
1226 +
1227 +static int
1228 +xfsaild(
1229 + void *data)
1230 +{
1231 + struct xfs_ail *ailp = data;
1232 + long tout = 0; /* milliseconds */
1233 +
1234 + while (!kthread_should_stop()) {
1235 + if (tout && tout <= 20)
1236 + __set_current_state(TASK_KILLABLE);
1237 + else
1238 + __set_current_state(TASK_INTERRUPTIBLE);
1239 + schedule_timeout(tout ?
1240 + msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
1241 +
1242 + try_to_freeze();
1243 +
1244 + tout = xfsaild_push(ailp);
1245 + }
1246 +
1247 + return 0;
1248 }
1249
1250 /*
1251 @@ -559,8 +628,9 @@ xfs_ail_push(
1252 */
1253 smp_wmb();
1254 xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn);
1255 - if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
1256 - queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);
1257 + smp_wmb();
1258 +
1259 + wake_up_process(ailp->xa_task);
1260 }
1261
1262 /*
1263 @@ -645,6 +715,7 @@ xfs_trans_unlocked_item(
1264 void
1265 xfs_trans_ail_update_bulk(
1266 struct xfs_ail *ailp,
1267 + struct xfs_ail_cursor *cur,
1268 struct xfs_log_item **log_items,
1269 int nr_items,
1270 xfs_lsn_t lsn) __releases(ailp->xa_lock)
1271 @@ -674,7 +745,7 @@ xfs_trans_ail_update_bulk(
1272 list_add(&lip->li_ail, &tmp);
1273 }
1274
1275 - xfs_ail_splice(ailp, &tmp, lsn);
1276 + xfs_ail_splice(ailp, cur, &tmp, lsn);
1277
1278 if (!mlip_changed) {
1279 spin_unlock(&ailp->xa_lock);
1280 @@ -794,9 +865,18 @@ xfs_trans_ail_init(
1281 ailp->xa_mount = mp;
1282 INIT_LIST_HEAD(&ailp->xa_ail);
1283 spin_lock_init(&ailp->xa_lock);
1284 - INIT_DELAYED_WORK(&ailp->xa_work, xfs_ail_worker);
1285 +
1286 + ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
1287 + ailp->xa_mount->m_fsname);
1288 + if (IS_ERR(ailp->xa_task))
1289 + goto out_free_ailp;
1290 +
1291 mp->m_ail = ailp;
1292 return 0;
1293 +
1294 +out_free_ailp:
1295 + kmem_free(ailp);
1296 + return ENOMEM;
1297 }
1298
1299 void
1300 @@ -805,6 +885,6 @@ xfs_trans_ail_destroy(
1301 {
1302 struct xfs_ail *ailp = mp->m_ail;
1303
1304 - cancel_delayed_work_sync(&ailp->xa_work);
1305 + kthread_stop(ailp->xa_task);
1306 kmem_free(ailp);
1307 }
1308 diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
1309 index 6b164e9..fe2e3cb 100644
1310 --- a/fs/xfs/xfs_trans_priv.h
1311 +++ b/fs/xfs/xfs_trans_priv.h
1312 @@ -64,24 +64,19 @@ struct xfs_ail_cursor {
1313 */
1314 struct xfs_ail {
1315 struct xfs_mount *xa_mount;
1316 + struct task_struct *xa_task;
1317 struct list_head xa_ail;
1318 xfs_lsn_t xa_target;
1319 struct xfs_ail_cursor xa_cursors;
1320 spinlock_t xa_lock;
1321 - struct delayed_work xa_work;
1322 xfs_lsn_t xa_last_pushed_lsn;
1323 - unsigned long xa_flags;
1324 };
1325
1326 -#define XFS_AIL_PUSHING_BIT 0
1327 -
1328 /*
1329 * From xfs_trans_ail.c
1330 */
1331 -
1332 -extern struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */
1333 -
1334 void xfs_trans_ail_update_bulk(struct xfs_ail *ailp,
1335 + struct xfs_ail_cursor *cur,
1336 struct xfs_log_item **log_items, int nr_items,
1337 xfs_lsn_t lsn) __releases(ailp->xa_lock);
1338 static inline void
1339 @@ -90,7 +85,7 @@ xfs_trans_ail_update(
1340 struct xfs_log_item *lip,
1341 xfs_lsn_t lsn) __releases(ailp->xa_lock)
1342 {
1343 - xfs_trans_ail_update_bulk(ailp, &lip, 1, lsn);
1344 + xfs_trans_ail_update_bulk(ailp, NULL, &lip, 1, lsn);
1345 }
1346
1347 void xfs_trans_ail_delete_bulk(struct xfs_ail *ailp,
1348 @@ -111,10 +106,13 @@ xfs_lsn_t xfs_ail_min_lsn(struct xfs_ail *ailp);
1349 void xfs_trans_unlocked_item(struct xfs_ail *,
1350 xfs_log_item_t *);
1351
1352 -struct xfs_log_item *xfs_trans_ail_cursor_first(struct xfs_ail *ailp,
1353 +struct xfs_log_item * xfs_trans_ail_cursor_first(struct xfs_ail *ailp,
1354 + struct xfs_ail_cursor *cur,
1355 + xfs_lsn_t lsn);
1356 +struct xfs_log_item * xfs_trans_ail_cursor_last(struct xfs_ail *ailp,
1357 struct xfs_ail_cursor *cur,
1358 xfs_lsn_t lsn);
1359 -struct xfs_log_item *xfs_trans_ail_cursor_next(struct xfs_ail *ailp,
1360 +struct xfs_log_item * xfs_trans_ail_cursor_next(struct xfs_ail *ailp,
1361 struct xfs_ail_cursor *cur);
1362 void xfs_trans_ail_cursor_done(struct xfs_ail *ailp,
1363 struct xfs_ail_cursor *cur);
1364 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
1365 index c8008dd..640ded8 100644
1366 --- a/kernel/posix-cpu-timers.c
1367 +++ b/kernel/posix-cpu-timers.c
1368 @@ -274,9 +274,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
1369 struct task_cputime sum;
1370 unsigned long flags;
1371
1372 - spin_lock_irqsave(&cputimer->lock, flags);
1373 if (!cputimer->running) {
1374 - cputimer->running = 1;
1375 /*
1376 * The POSIX timer interface allows for absolute time expiry
1377 * values through the TIMER_ABSTIME flag, therefore we have
1378 @@ -284,8 +282,11 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
1379 * it.
1380 */
1381 thread_group_cputime(tsk, &sum);
1382 + spin_lock_irqsave(&cputimer->lock, flags);
1383 + cputimer->running = 1;
1384 update_gt_cputime(&cputimer->cputime, &sum);
1385 - }
1386 + } else
1387 + spin_lock_irqsave(&cputimer->lock, flags);
1388 *times = cputimer->cputime;
1389 spin_unlock_irqrestore(&cputimer->lock, flags);
1390 }
1391 diff --git a/kernel/sys.c b/kernel/sys.c
1392 index 5c942cf..f88dadc 100644
1393 --- a/kernel/sys.c
1394 +++ b/kernel/sys.c
1395 @@ -1135,7 +1135,7 @@ DECLARE_RWSEM(uts_sem);
1396 static int override_release(char __user *release, int len)
1397 {
1398 int ret = 0;
1399 - char buf[len];
1400 + char buf[65];
1401
1402 if (current->personality & UNAME26) {
1403 char *rest = UTS_RELEASE;
1404 diff --git a/mm/migrate.c b/mm/migrate.c
1405 index 666e4e6..14d0a6a 100644
1406 --- a/mm/migrate.c
1407 +++ b/mm/migrate.c
1408 @@ -120,10 +120,10 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
1409
1410 ptep = pte_offset_map(pmd, addr);
1411
1412 - if (!is_swap_pte(*ptep)) {
1413 - pte_unmap(ptep);
1414 - goto out;
1415 - }
1416 + /*
1417 + * Peek to check is_swap_pte() before taking ptlock? No, we
1418 + * can race mremap's move_ptes(), which skips anon_vma lock.
1419 + */
1420
1421 ptl = pte_lockptr(mm, pmd);
1422 }
1423 diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
1424 index 4680b1e..373e14f 100644
1425 --- a/net/x25/af_x25.c
1426 +++ b/net/x25/af_x25.c
1427 @@ -295,7 +295,8 @@ static struct sock *x25_find_listener(struct x25_address *addr,
1428 * Found a listening socket, now check the incoming
1429 * call user data vs this sockets call user data
1430 */
1431 - if(skb->len > 0 && x25_sk(s)->cudmatchlength > 0) {
1432 + if (x25_sk(s)->cudmatchlength > 0 &&
1433 + skb->len >= x25_sk(s)->cudmatchlength) {
1434 if((memcmp(x25_sk(s)->calluserdata.cuddata,
1435 skb->data,
1436 x25_sk(s)->cudmatchlength)) == 0) {
1437 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
1438 index 486f6de..981b6fd 100644
1439 --- a/sound/pci/hda/hda_intel.c
1440 +++ b/sound/pci/hda/hda_intel.c
1441 @@ -2352,6 +2352,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
1442 SND_PCI_QUIRK(0x1028, 0x01cc, "Dell D820", POS_FIX_LPIB),
1443 SND_PCI_QUIRK(0x1028, 0x01de, "Dell Precision 390", POS_FIX_LPIB),
1444 SND_PCI_QUIRK(0x1028, 0x01f6, "Dell Latitude 131L", POS_FIX_LPIB),
1445 + SND_PCI_QUIRK(0x1028, 0x02c6, "Dell Inspiron 1010", POS_FIX_LPIB),
1446 SND_PCI_QUIRK(0x1028, 0x0470, "Dell Inspiron 1120", POS_FIX_LPIB),
1447 SND_PCI_QUIRK(0x103c, 0x306d, "HP dv3", POS_FIX_LPIB),
1448 SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
1449 diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
1450 index 7bbc5f2..cf1fa36 100644
1451 --- a/sound/pci/hda/patch_conexant.c
1452 +++ b/sound/pci/hda/patch_conexant.c
1453 @@ -3097,6 +3097,7 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
1454 SND_PCI_QUIRK(0x17aa, 0x21c5, "Thinkpad Edge 13", CXT5066_THINKPAD),
1455 SND_PCI_QUIRK(0x17aa, 0x21c6, "Thinkpad Edge 13", CXT5066_ASUS),
1456 SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD),
1457 + SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520 & W520", CXT5066_AUTO),
1458 SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD),
1459 SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT5066_THINKPAD),
1460 SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo U350", CXT5066_ASUS),