Magellan Linux

Contents of /trunk/kernel26-alx/patches-2.6.27-r3/0147-2.6.27.48-all-fixes.patch



Revision 1176
Thu Oct 14 15:11:06 2010 UTC by niro
File size: 32511 byte(s)
-2.6.27-alx-r3: new magellan 0.5.2 kernel
diff --git a/arch/parisc/math-emu/decode_exc.c b/arch/parisc/math-emu/decode_exc.c
index 66c8a9f..62ebcde 100644
--- a/arch/parisc/math-emu/decode_exc.c
+++ b/arch/parisc/math-emu/decode_exc.c
@@ -342,6 +342,7 @@ decode_fpu(unsigned int Fpu_register[], unsigned int trap_counts[])
return SIGNALCODE(SIGFPE, FPE_FLTINV);
case DIVISIONBYZEROEXCEPTION:
update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
+ Clear_excp_register(exception_index);
return SIGNALCODE(SIGFPE, FPE_FLTDIV);
case INEXACTEXCEPTION:
update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
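
The hunk above adds a Clear_excp_register() call so a divide-by-zero exception does not stay latched in the FPU emulator's status register after the SIGFPE is delivered. A minimal userspace sketch of the same idea, using the standard C fenv.h interface rather than the PA-RISC emulator's registers (Clear_excp_register/exception_index above are the emulator's own names; everything below is illustrative):

#include <fenv.h>
#include <stdio.h>

/* may be needed for fetestexcept/feclearexcept on some compilers */
#pragma STDC FENV_ACCESS ON

int main(void)
{
	volatile double zero = 0.0;
	volatile double r = 1.0 / zero;	/* raises FE_DIVBYZERO */

	if (fetestexcept(FE_DIVBYZERO))
		printf("div-by-zero latched (r=%f)\n", r);

	/* the analogue of the fix: clear the sticky flag after handling,
	 * so the next check does not see a stale exception */
	feclearexcept(FE_DIVBYZERO);
	printf("still latched? %d\n", fetestexcept(FE_DIVBYZERO) != 0);
	return 0;
}
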
diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S
index 64e2e49..3ac0cd3 100644
--- a/arch/powerpc/lib/string.S
+++ b/arch/powerpc/lib/string.S
@@ -71,7 +71,7 @@ _GLOBAL(strcmp)

_GLOBAL(strncmp)
PPC_LCMPI r5,0
- beqlr
+ ble- 2f
mtctr r5
addi r5,r3,-1
addi r4,r4,-1
@@ -82,6 +82,8 @@ _GLOBAL(strncmp)
beqlr 1
bdnzt eq,1b
blr
+2: li r3,0
+ blr

_GLOBAL(strlen)
addi r4,r3,-1
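
Before this change the powerpc strncmp returned a stale comparison result when n == 0; per C semantics strncmp(a, b, 0) must return 0, which is what the new "2: li r3,0; blr" path loads. A small host-side check of that contract (illustrative only):

#include <assert.h>
#include <string.h>

int main(void)
{
	/* with a zero length, no characters differ by definition */
	assert(strncmp("abc", "xyz", 0) == 0);
	/* normal cases are unaffected */
	assert(strncmp("abc", "abd", 3) < 0);
	assert(strncmp("abc", "abc", 3) == 0);
	return 0;
}
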
diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c
index 5ff4de3..303721d 100644
--- a/arch/powerpc/oprofile/op_model_cell.c
+++ b/arch/powerpc/oprofile/op_model_cell.c
@@ -821,7 +821,7 @@ static int calculate_lfsr(int n)
index = ENTRIES-1;

/* make sure index is valid */
- if ((index > ENTRIES) || (index < 0))
+ if ((index >= ENTRIES) || (index < 0))
index = ENTRIES-1;

return initial_lfsr[index];
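
The one-character fix above closes an off-by-one: for a table of ENTRIES elements the valid indexes are 0..ENTRIES-1, so index == ENTRIES must also be clamped. A hypothetical clamp helper showing the corrected bound (ENTRIES here is a stand-in for the driver's table size):

#include <assert.h>

#define ENTRIES 256

static int clamp_index(int index)
{
	/* index == ENTRIES is already out of range, hence >= not > */
	if ((index >= ENTRIES) || (index < 0))
		index = ENTRIES - 1;
	return index;
}

int main(void)
{
	assert(clamp_index(ENTRIES) == ENTRIES - 1);	/* caught by >= */
	assert(clamp_index(-1) == ENTRIES - 1);
	assert(clamp_index(5) == 5);
	return 0;
}
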
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index 1f03248..ca5e7b0 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -66,30 +66,6 @@ static void pseries_mach_cpu_die(void)
for(;;);
}

-static int qcss_tok; /* query-cpu-stopped-state token */
-
-/* Get state of physical CPU.
- * Return codes:
- * 0 - The processor is in the RTAS stopped state
- * 1 - stop-self is in progress
- * 2 - The processor is not in the RTAS stopped state
- * -1 - Hardware Error
- * -2 - Hardware Busy, Try again later.
- */
-static int query_cpu_stopped(unsigned int pcpu)
-{
- int cpu_status, status;
-
- status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
- if (status != 0) {
- printk(KERN_ERR
- "RTAS query-cpu-stopped-state failed: %i\n", status);
- return status;
- }
-
- return cpu_status;
-}
-
static int pseries_cpu_disable(void)
{
int cpu = smp_processor_id();
@@ -113,8 +89,9 @@ static void pseries_cpu_die(unsigned int cpu)
unsigned int pcpu = get_hard_smp_processor_id(cpu);

for (tries = 0; tries < 25; tries++) {
- cpu_status = query_cpu_stopped(pcpu);
- if (cpu_status == 0 || cpu_status == -1)
+ cpu_status = smp_query_cpu_stopped(pcpu);
+ if (cpu_status == QCSS_STOPPED ||
+ cpu_status == QCSS_HARDWARE_ERROR)
break;
msleep(200);
}
@@ -256,6 +233,7 @@ static int __init pseries_cpu_hotplug_init(void)
{
struct device_node *np;
const char *typep;
+ int qcss_tok;

for_each_node_by_name(np, "interrupt-controller") {
typep = of_get_property(np, "compatible", NULL);
diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h
index d967c18..1adef11 100644
--- a/arch/powerpc/platforms/pseries/plpar_wrappers.h
+++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h
@@ -4,6 +4,14 @@
#include <asm/hvcall.h>
#include <asm/page.h>

+/* Get state of physical CPU from query_cpu_stopped */
+int smp_query_cpu_stopped(unsigned int pcpu);
+#define QCSS_STOPPED 0
+#define QCSS_STOPPING 1
+#define QCSS_NOT_STOPPED 2
+#define QCSS_HARDWARE_ERROR -1
+#define QCSS_HARDWARE_BUSY -2
+
static inline long poll_pending(void)
{
return plpar_hcall_norets(H_POLL_PENDING);
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index c9337c7..db0cbbb 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -59,6 +59,28 @@ static cpumask_t of_spin_map;

extern void generic_secondary_smp_init(unsigned long);

+/* Query where a cpu is now. Return codes #defined in plpar_wrappers.h */
+int smp_query_cpu_stopped(unsigned int pcpu)
+{
+ int cpu_status, status;
+ int qcss_tok = rtas_token("query-cpu-stopped-state");
+
+ if (qcss_tok == RTAS_UNKNOWN_SERVICE) {
+ printk(KERN_INFO "Firmware doesn't support "
+ "query-cpu-stopped-state\n");
+ return QCSS_HARDWARE_ERROR;
+ }
+
+ status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
+ if (status != 0) {
+ printk(KERN_ERR
+ "RTAS query-cpu-stopped-state failed: %i\n", status);
+ return status;
+ }
+
+ return cpu_status;
+}
+
/**
* smp_startup_cpu() - start the given cpu
*
@@ -84,6 +106,12 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)

pcpu = get_hard_smp_processor_id(lcpu);

+ /* Check to see if the CPU out of FW already for kexec */
+ if (smp_query_cpu_stopped(pcpu) == QCSS_NOT_STOPPED){
+ cpu_set(lcpu, of_spin_map);
+ return 1;
+ }
+
/* Fixup atomic count: it exited inside IRQ handler. */
task_thread_info(paca[lcpu].__current)->preempt_count = 0;

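The three hunks above move query_cpu_stopped() out of hotplug-cpu.c, export it as smp_query_cpu_stopped(), and replace the magic 0/-1 status values with named QCSS_* constants. A stripped-down sketch of the resulting polling pattern, with the RTAS call stubbed out (query_hw_state and its canned return values are invented for illustration):

#include <stdio.h>

#define QCSS_STOPPED		0
#define QCSS_STOPPING		1
#define QCSS_NOT_STOPPED	2
#define QCSS_HARDWARE_ERROR	-1

/* stand-in for the firmware query; the kernel goes via rtas_call() */
static int query_hw_state(unsigned int pcpu, int tick)
{
	return tick < 3 ? QCSS_STOPPING : QCSS_STOPPED;
}

int main(void)
{
	int tries, cpu_status = QCSS_NOT_STOPPED;

	for (tries = 0; tries < 25; tries++) {
		cpu_status = query_hw_state(0, tries);
		/* stop polling on a terminal state, keep waiting otherwise */
		if (cpu_status == QCSS_STOPPED ||
		    cpu_status == QCSS_HARDWARE_ERROR)
			break;
	}
	printf("final status %d after %d polls\n", cpu_status, tries);
	return 0;
}
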
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 8afc274..512c281 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -157,6 +157,10 @@ int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");

+static int atapi_an;
+module_param(atapi_an, int, 0444);
+MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=0ff [default], 1=on)");
+
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
@@ -2341,7 +2345,8 @@ int ata_dev_configure(struct ata_device *dev)
* to enable ATAPI AN to discern between PHY status
* changed notifications and ATAPI ANs.
*/
- if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
+ if (atapi_an &&
+ (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
(!sata_pmp_attached(ap) ||
sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
unsigned int err_mask;
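
The new atapi_an parameter gates the ATAPI AN feature off by default; the read-only 0444 permissions mean it can only be set at load time (libata.atapi_an=1 on the kernel command line, or atapi_an=1 when loading the module). A minimal module skeleton showing the same module_param() pattern (demo_param and the module itself are made up for illustration):

#include <linux/module.h>
#include <linux/moduleparam.h>

static int demo_param;	/* 0 = off, matching the libata default */
module_param(demo_param, int, 0444);	/* visible in sysfs, not writable */
MODULE_PARM_DESC(demo_param, "Enable the optional feature (0=off [default], 1=on)");

static int __init demo_init(void)
{
	pr_info("demo: feature %s\n", demo_param ? "enabled" : "disabled");
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
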
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 7e860da..604c991 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -313,9 +313,14 @@ static void deliver_recv_msg(struct smi_info *smi_info,
{
/* Deliver the message to the upper layer with the lock
released. */
- spin_unlock(&(smi_info->si_lock));
- ipmi_smi_msg_received(smi_info->intf, msg);
- spin_lock(&(smi_info->si_lock));
+
+ if (smi_info->run_to_completion) {
+ ipmi_smi_msg_received(smi_info->intf, msg);
+ } else {
+ spin_unlock(&(smi_info->si_lock));
+ ipmi_smi_msg_received(smi_info->intf, msg);
+ spin_lock(&(smi_info->si_lock));
+ }
}

static void return_hosed_msg(struct smi_info *smi_info, int cCode)
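
The fix above stops deliver_recv_msg() from unlocking and retaking si_lock in run_to_completion mode (e.g. panic-time polling), where the lock was never taken to begin with. The general shape, sketched with a pthread mutex (run_to_completion, deliver and the callback are illustrative stand-ins):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int run_to_completion;	/* set when polling single-threaded */

static void upper_layer_cb(int msg)
{
	printf("delivered %d\n", msg);
}

/* caller holds `lock` unless run_to_completion is set */
static void deliver(int msg)
{
	if (run_to_completion) {
		/* lock not held: unlocking here would be a bug */
		upper_layer_cb(msg);
	} else {
		/* drop the lock across the callback, as before */
		pthread_mutex_unlock(&lock);
		upper_layer_cb(msg);
		pthread_mutex_lock(&lock);
	}
}

int main(void)
{
	pthread_mutex_lock(&lock);
	deliver(1);
	pthread_mutex_unlock(&lock);

	run_to_completion = 1;
	deliver(2);
	return 0;
}
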
diff --git a/drivers/md/md.c b/drivers/md/md.c
index b60d328..4d96950 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -4816,6 +4816,7 @@ static int md_ioctl(struct inode *inode, struct file *file,
int err = 0;
void __user *argp = (void __user *)arg;
mddev_t *mddev = NULL;
+ int ro;

if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -4951,6 +4952,34 @@ static int md_ioctl(struct inode *inode, struct file *file,
err = do_md_stop (mddev, 1, 1);
goto done_unlock;

+ case BLKROSET:
+ if (get_user(ro, (int __user *)(arg))) {
+ err = -EFAULT;
+ goto done_unlock;
+ }
+ err = -EINVAL;
+
+ /* if the bdev is going readonly the value of mddev->ro
+ * does not matter, no writes are coming
+ */
+ if (ro)
+ goto done_unlock;
+
+ /* are we are already prepared for writes? */
+ if (mddev->ro != 1)
+ goto done_unlock;
+
+ /* transitioning to readauto need only happen for
+ * arrays that call md_write_start
+ */
+ if (mddev->pers) {
+ err = restart_array(mddev);
+ if (err == 0) {
+ mddev->ro = 2;
+ set_disk_ro(mddev->gendisk, 0);
+ }
+ }
+ goto done_unlock;
}

/*
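
With this hunk md honors BLKROSET: writing ro=0 to an array that was read-only flips it to read-auto (mddev->ro == 2), so the first write restarts it cleanly. From userspace this is the ioctl that blockdev --setrw issues; a hedged usage sketch (the device path is a placeholder and the call needs root plus a real md device):

#include <fcntl.h>
#include <linux/fs.h>	/* BLKROSET, BLKROGET */
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int ro = 0;	/* 0 = request read-write, nonzero = read-only */
	int fd = open("/dev/md0", O_RDONLY);

	if (fd < 0 || ioctl(fd, BLKROSET, &ro) < 0) {
		perror("BLKROSET");
		return 1;
	}
	if (ioctl(fd, BLKROGET, &ro) == 0)
		printf("read-only flag now: %d\n", ro);
	close(fd);
	return 0;
}
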
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 2b510a3..4b78069 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -413,7 +413,7 @@ static void raid1_end_write_request(struct bio *bio, int error)
*/
static int read_balance(conf_t *conf, r1bio_t *r1_bio)
{
- const unsigned long this_sector = r1_bio->sector;
+ const sector_t this_sector = r1_bio->sector;
int new_disk = conf->last_used, disk = new_disk;
int wonly_disk = -1;
const int sectors = r1_bio->sectors;
@@ -429,7 +429,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
retry:
if (conf->mddev->recovery_cp < MaxSector &&
(this_sector + sectors >= conf->next_resync)) {
- /* Choose the first operation device, for consistancy */
+ /* Choose the first operational device, for consistancy */
new_disk = 0;

for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
@@ -879,9 +879,10 @@ static int make_request(struct request_queue *q, struct bio * bio)
if (test_bit(Faulty, &rdev->flags)) {
rdev_dec_pending(rdev, mddev);
r1_bio->bios[i] = NULL;
- } else
+ } else {
r1_bio->bios[i] = bio;
- targets++;
+ targets++;
+ }
} else
r1_bio->bios[i] = NULL;
}
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index b08dd95..dbf51e9 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -490,7 +490,7 @@ static int raid10_mergeable_bvec(struct request_queue *q,
*/
static int read_balance(conf_t *conf, r10bio_t *r10_bio)
{
- const unsigned long this_sector = r10_bio->sector;
+ const sector_t this_sector = r10_bio->sector;
int disk, slot, nslot;
const int sectors = r10_bio->sectors;
sector_t new_distance, current_distance;
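
Both read_balance() fixes above are the same bug: sector numbers are sector_t (64-bit with CONFIG_LBD), and storing one in an unsigned long truncates it on 32-bit hosts once an array grows past 2^32 sectors. A self-contained demonstration using fixed-width types in place of the kernel typedefs:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t sector = 5368709120ULL;	/* ~2.5TiB in 512-byte sectors */
	uint32_t truncated = (uint32_t)sector;	/* unsigned long on 32-bit */

	printf("sector_t   : %" PRIu64 "\n", sector);
	printf("truncated  : %" PRIu32 "  (wraps past 2^32)\n", truncated);
	return 0;
}
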
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index b8c0fa6..0477bd1 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -162,8 +162,8 @@ static struct pci_device_id com20020pci_id_table[] = {
{ 0x1571, 0xa204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
{ 0x1571, 0xa205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
{ 0x1571, 0xa206, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
- { 0x10B5, 0x9030, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
- { 0x10B5, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
+ { 0x10B5, 0x9030, 0x10B5, 0x2978, 0, 0, ARC_CAN_10MBIT },
+ { 0x10B5, 0x9050, 0x10B5, 0x2273, 0, 0, ARC_CAN_10MBIT },
{ 0x14BA, 0x6000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
{ 0x10B5, 0x2200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
{0,}
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index a67f837..536e392 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1721,7 +1721,11 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd)

pages_skipped = mpd->wbc->pages_skipped;
err = mapping->a_ops->writepage(page, mpd->wbc);
- if (!err)
+ if (!err && (pages_skipped == mpd->wbc->pages_skipped))
+ /*
+ * have successfully written the page
+ * without skipping the same
+ */
mpd->pages_written++;
/*
* In error case, we have to continue because
@@ -2059,17 +2063,6 @@ static int __mpage_da_writepage(struct page *page,
struct buffer_head *bh, *head, fake;
sector_t logical;

- if (mpd->io_done) {
- /*
- * Rest of the page in the page_vec
- * redirty then and skip then. We will
- * try to to write them again after
- * starting a new transaction
- */
- redirty_page_for_writepage(wbc, page);
- unlock_page(page);
- return MPAGE_DA_EXTENT_TAIL;
- }
/*
* Can we merge this page to current extent?
*/
@@ -2160,6 +2153,137 @@ static int __mpage_da_writepage(struct page *page,
}

/*
+ * write_cache_pages_da - walk the list of dirty pages of the given
+ * address space and call the callback function (which usually writes
+ * the pages).
+ *
+ * This is a forked version of write_cache_pages(). Differences:
+ * Range cyclic is ignored.
+ * no_nrwrite_index_update is always presumed true
+ */
+static int write_cache_pages_da(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct mpage_da_data *mpd)
+{
+ struct backing_dev_info *bdi = mapping->backing_dev_info;
+ int ret = 0;
+ int done = 0;
+ struct pagevec pvec;
+ int nr_pages;
+ pgoff_t index;
+ pgoff_t end; /* Inclusive */
+ long nr_to_write = wbc->nr_to_write;
+
+ if (wbc->nonblocking && bdi_write_congested(bdi)) {
+ wbc->encountered_congestion = 1;
+ return 0;
+ }
+
+ pagevec_init(&pvec, 0);
+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
+
+ while (!done && (index <= end)) {
+ int i;
+
+ nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
+ PAGECACHE_TAG_DIRTY,
+ min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
+ if (nr_pages == 0)
+ break;
+
+ for (i = 0; i < nr_pages; i++) {
+ struct page *page = pvec.pages[i];
+
+ /*
+ * At this point, the page may be truncated or
+ * invalidated (changing page->mapping to NULL), or
+ * even swizzled back from swapper_space to tmpfs file
+ * mapping. However, page->index will not change
+ * because we have a reference on the page.
+ */
+ if (page->index > end) {
+ done = 1;
+ break;
+ }
+
+ lock_page(page);
+
+ /*
+ * Page truncated or invalidated. We can freely skip it
+ * then, even for data integrity operations: the page
+ * has disappeared concurrently, so there could be no
+ * real expectation of this data interity operation
+ * even if there is now a new, dirty page at the same
+ * pagecache address.
+ */
+ if (unlikely(page->mapping != mapping)) {
+continue_unlock:
+ unlock_page(page);
+ continue;
+ }
+
+ if (!PageDirty(page)) {
+ /* someone wrote it for us */
+ goto continue_unlock;
+ }
+
+ if (PageWriteback(page)) {
+ if (wbc->sync_mode != WB_SYNC_NONE)
+ wait_on_page_writeback(page);
+ else
+ goto continue_unlock;
+ }
+
+ BUG_ON(PageWriteback(page));
+ if (!clear_page_dirty_for_io(page))
+ goto continue_unlock;
+
+ ret = __mpage_da_writepage(page, wbc, mpd);
+
+ if (unlikely(ret)) {
+ if (ret == AOP_WRITEPAGE_ACTIVATE) {
+ unlock_page(page);
+ ret = 0;
+ } else {
+ done = 1;
+ break;
+ }
+ }
+
+ if (nr_to_write > 0) {
+ nr_to_write--;
+ if (nr_to_write == 0 &&
+ wbc->sync_mode == WB_SYNC_NONE) {
+ /*
+ * We stop writing back only if we are
+ * not doing integrity sync. In case of
+ * integrity sync we have to keep going
+ * because someone may be concurrently
+ * dirtying pages, and we might have
+ * synced a lot of newly appeared dirty
+ * pages, but have not synced all of the
+ * old dirty pages.
+ */
+ done = 1;
+ break;
+ }
+ }
+
+ if (wbc->nonblocking && bdi_write_congested(bdi)) {
+ wbc->encountered_congestion = 1;
+ done = 1;
+ break;
+ }
+ }
+ pagevec_release(&pvec);
+ cond_resched();
+ }
+ return ret;
+}
+
+
+/*
* mpage_da_writepages - walk the list of dirty pages of the given
* address space, allocates non-allocated blocks, maps newly-allocated
* blocks to existing bhs and issue IO them
@@ -2175,7 +2299,6 @@ static int mpage_da_writepages(struct address_space *mapping,
struct writeback_control *wbc,
struct mpage_da_data *mpd)
{
- long to_write;
int ret;

if (!mpd->get_block)
@@ -2190,19 +2313,18 @@ static int mpage_da_writepages(struct address_space *mapping,
mpd->pages_written = 0;
mpd->retval = 0;

- to_write = wbc->nr_to_write;
-
- ret = write_cache_pages(mapping, wbc, __mpage_da_writepage, mpd);
-
+ ret = write_cache_pages_da(mapping, wbc, mpd);
/*
* Handle last extent of pages
*/
if (!mpd->io_done && mpd->next_page != mpd->first_page) {
if (mpage_da_map_blocks(mpd) == 0)
mpage_da_submit_io(mpd);
- }

- wbc->nr_to_write = to_write - mpd->pages_written;
+ mpd->io_done = 1;
+ ret = MPAGE_DA_EXTENT_TAIL;
+ }
+ wbc->nr_to_write -= mpd->pages_written;
return ret;
}

@@ -2447,11 +2569,14 @@ static int ext4_da_writepages_trans_blocks(struct inode *inode)
static int ext4_da_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
+ pgoff_t index;
+ int range_whole = 0;
handle_t *handle = NULL;
struct mpage_da_data mpd;
struct inode *inode = mapping->host;
+ long pages_written = 0, pages_skipped;
+ int range_cyclic, cycled = 1, io_done = 0;
int needed_blocks, ret = 0, nr_to_writebump = 0;
- long to_write, pages_skipped = 0;
struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);

/*
@@ -2485,16 +2610,27 @@ static int ext4_da_writepages(struct address_space *mapping,
nr_to_writebump = sbi->s_mb_stream_request - wbc->nr_to_write;
wbc->nr_to_write = sbi->s_mb_stream_request;
}
-
-
- pages_skipped = wbc->pages_skipped;
+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+ range_whole = 1;
+
+ range_cyclic = wbc->range_cyclic;
+ if (wbc->range_cyclic) {
+ index = mapping->writeback_index;
+ if (index)
+ cycled = 0;
+ wbc->range_start = index << PAGE_CACHE_SHIFT;
+ wbc->range_end = LLONG_MAX;
+ wbc->range_cyclic = 0;
+ } else
+ index = wbc->range_start >> PAGE_CACHE_SHIFT;

mpd.wbc = wbc;
mpd.inode = mapping->host;

-restart_loop:
- to_write = wbc->nr_to_write;
- while (!ret && to_write > 0) {
+ pages_skipped = wbc->pages_skipped;
+
+retry:
+ while (!ret && wbc->nr_to_write > 0) {

/*
* we insert one extent at a time. So we need
@@ -2527,46 +2663,60 @@ restart_loop:
goto out_writepages;
}
}
- to_write -= wbc->nr_to_write;
-
mpd.get_block = ext4_da_get_block_write;
ret = mpage_da_writepages(mapping, wbc, &mpd);

ext4_journal_stop(handle);

- if (mpd.retval == -ENOSPC)
+ if (mpd.retval == -ENOSPC) {
+ /* commit the transaction which would
+ * free blocks released in the transaction
+ * and try again
+ */
jbd2_journal_force_commit_nested(sbi->s_journal);
-
- /* reset the retry count */
- if (ret == MPAGE_DA_EXTENT_TAIL) {
+ wbc->pages_skipped = pages_skipped;
+ ret = 0;
+ } else if (ret == MPAGE_DA_EXTENT_TAIL) {
/*
* got one extent now try with
* rest of the pages
*/
- to_write += wbc->nr_to_write;
+ pages_written += mpd.pages_written;
+ wbc->pages_skipped = pages_skipped;
ret = 0;
- } else if (wbc->nr_to_write) {
+ io_done = 1;
+ } else if (wbc->nr_to_write)
/*
* There is no more writeout needed
* or we requested for a noblocking writeout
* and we found the device congested
*/
- to_write += wbc->nr_to_write;
break;
- }
- wbc->nr_to_write = to_write;
}
-
- if (!wbc->range_cyclic && (pages_skipped != wbc->pages_skipped)) {
- /* We skipped pages in this loop */
- wbc->nr_to_write = to_write +
- wbc->pages_skipped - pages_skipped;
- wbc->pages_skipped = pages_skipped;
- goto restart_loop;
+ if (!io_done && !cycled) {
+ cycled = 1;
+ index = 0;
+ wbc->range_start = index << PAGE_CACHE_SHIFT;
+ wbc->range_end = mapping->writeback_index - 1;
+ goto retry;
}
+ if (pages_skipped != wbc->pages_skipped)
+ printk(KERN_EMERG "This should not happen leaving %s "
+ "with nr_to_write = %ld ret = %d\n",
+ __func__, wbc->nr_to_write, ret);
+
+ /* Update index */
+ index += pages_written;
+ wbc->range_cyclic = range_cyclic;
+ if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
+ /*
+ * set the writeback_index so that range_cyclic
+ * mode will write it back later
+ */
+ mapping->writeback_index = index;

out_writepages:
- wbc->nr_to_write = to_write - nr_to_writebump;
+ wbc->nr_to_write -= nr_to_writebump;
return ret;
}

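The ext4_da_writepages() rework above replaces the restart_loop bookkeeping with the standard two-pass range_cyclic scheme: start at writeback_index, sweep to the end of the range, and if nothing was written on a pass that did not begin at zero, wrap around once to cover the skipped front portion. A toy model of just that control flow (page writing is faked; the names echo the patch but nothing here is ext4 code):

#include <stdio.h>

#define NPAGES 16

static int dirty[NPAGES];

/* pretend to write dirty pages in [start, end]; returns pages written */
static int write_range(int start, int end)
{
	int i, written = 0;

	for (i = start; i <= end && i < NPAGES; i++)
		if (dirty[i]) {
			dirty[i] = 0;
			written++;
		}
	return written;
}

int main(void)
{
	int writeback_index = 10;	/* where the last sweep stopped */
	int index = writeback_index;
	int end = NPAGES - 1;
	int cycled = (index == 0);	/* a nonzero start may need a wrap */
	int pages_written;

	dirty[3] = 1;	/* dirty page behind the starting index */

retry:
	pages_written = write_range(index, end);
	if (!pages_written && !cycled) {
		/* nothing found ahead: wrap to the skipped front portion */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	printf("wrote %d page(s)\n", pages_written);
	return 0;
}
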
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 0070431..f7f5995 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -935,7 +935,8 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
percpu_counter_add(&sbi->s_freeinodes_counter,
EXT4_INODES_PER_GROUP(sb));

- if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
+ if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG) &&
+ sbi->s_log_groups_per_flex) {
ext4_group_t flex_group;
flex_group = ext4_flex_group(sbi, input->group);
sbi->s_flex_groups[flex_group].free_blocks +=
diff --git a/fs/namespace.c b/fs/namespace.c
index efe0251..2083810 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1132,8 +1132,15 @@ SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
{
struct path path;
int retval;
+ int lookup_flags = 0;

- retval = user_path(name, &path);
+ if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
+ return -EINVAL;
+
+ if (!(flags & UMOUNT_NOFOLLOW))
+ lookup_flags |= LOOKUP_FOLLOW;
+
+ retval = user_path_at(AT_FDCWD, name, lookup_flags, &path);
if (retval)
goto out;
retval = -EINVAL;
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 80292ff..e02db8e 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -126,7 +126,7 @@ struct svc_program nfsd_program = {
int nfsd_vers(int vers, enum vers_op change)
{
if (vers < NFSD_MINVERS || vers >= NFSD_NRVERS)
- return -1;
+ return 0;
switch(change) {
case NFSD_SET:
nfsd_versions[vers] = nfsd_version[vers];
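
Returning 0 instead of -1 matters because nfsd_vers() is used as a boolean by its callers; in C a -1 "error" return is truthy, so an out-of-range version used to test as supported. A two-line illustration:

#include <stdio.h>

int main(void)
{
	int bad = -1, good = 0;

	if (bad)
		printf("-1 is truthy: out-of-range version looked enabled\n");
	if (!good)
		printf("0 is falsy: out-of-range version now reads as off\n");
	return 0;
}
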
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 909e13e..248465c 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1061,6 +1061,8 @@ extern int send_sigurg(struct fown_struct *fown);
#define MNT_FORCE 0x00000001 /* Attempt to forcibily umount */
#define MNT_DETACH 0x00000002 /* Just detach from the tree */
#define MNT_EXPIRE 0x00000004 /* Mark for expiry */
+#define UMOUNT_NOFOLLOW 0x00000008 /* Don't follow symlink on umount */
+#define UMOUNT_UNUSED 0x80000000 /* Flag guaranteed to be unused */

extern struct list_head super_blocks;
extern spinlock_t sb_lock;
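
Together the namespace.c and fs.h hunks add UMOUNT_NOFOLLOW (don't dereference a trailing symlink at the mount point) and start rejecting unknown flag bits, which is what lets userspace probe whether the flag is supported. A hedged usage sketch via umount2() (the path is a placeholder and the call needs CAP_SYS_ADMIN; on kernels without this change the flag is unknown and the call fails with EINVAL):

#include <errno.h>
#include <stdio.h>
#include <sys/mount.h>

#ifndef UMOUNT_NOFOLLOW
#define UMOUNT_NOFOLLOW 0x00000008	/* matches the fs.h hunk above */
#endif

int main(void)
{
	/* refuse to follow a symlink planted at the mount point */
	if (umount2("/mnt/untrusted", UMOUNT_NOFOLLOW) < 0) {
		perror("umount2");	/* EINVAL here means no kernel support */
		return 1;
	}
	return 0;
}
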
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index ab1c472..0594330 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -753,6 +753,7 @@ int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len,
struct iovec *data);
void sctp_chunk_free(struct sctp_chunk *);
void *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data);
+void *sctp_addto_chunk_fixed(struct sctp_chunk *, int len, const void *data);
struct sctp_chunk *sctp_chunkify(struct sk_buff *,
const struct sctp_association *,
struct sock *);
diff --git a/mm/filemap.c b/mm/filemap.c
index bff0b00..a4800c5 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1118,6 +1118,12 @@ page_not_up_to_date_locked:
}

readpage:
+ /*
+ * A previous I/O error may have been due to temporary
+ * failures, eg. multipath errors.
+ * PG_error will be set again if readpage fails.
+ */
+ ClearPageError(page);
/* Start the actual read. The read will unlock the page. */
error = mapping->a_ops->readpage(filp, page);

diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index d68869f..baf2fa4 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -107,7 +107,7 @@ static const struct sctp_paramhdr prsctp_param = {
__constant_htons(sizeof(struct sctp_paramhdr)),
};

-/* A helper to initialize to initialize an op error inside a
+/* A helper to initialize an op error inside a
* provided chunk, as most cause codes will be embedded inside an
* abort chunk.
*/
@@ -124,6 +124,29 @@ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err);
}

+/* A helper to initialize an op error inside a
+ * provided chunk, as most cause codes will be embedded inside an
+ * abort chunk. Differs from sctp_init_cause in that it won't oops
+ * if there isn't enough space in the op error chunk
+ */
+int sctp_init_cause_fixed(struct sctp_chunk *chunk, __be16 cause_code,
+ size_t paylen)
+{
+ sctp_errhdr_t err;
+ __u16 len;
+
+ /* Cause code constants are now defined in network order. */
+ err.cause = cause_code;
+ len = sizeof(sctp_errhdr_t) + paylen;
+ err.length = htons(len);
+
+ if (skb_tailroom(chunk->skb) < len)
+ return -ENOSPC;
+ chunk->subh.err_hdr = sctp_addto_chunk_fixed(chunk,
+ sizeof(sctp_errhdr_t),
+ &err);
+ return 0;
+}
/* 3.3.2 Initiation (INIT) (1)
*
* This chunk is used to initiate a SCTP association between two
@@ -1114,6 +1137,24 @@ nodata:
return retval;
}

+/* Create an Operation Error chunk of a fixed size,
+ * specifically, max(asoc->pathmtu, SCTP_DEFAULT_MAXSEGMENT)
+ * This is a helper function to allocate an error chunk for
+ * for those invalid parameter codes in which we may not want
+ * to report all the errors, if the incomming chunk is large
+ */
+static inline struct sctp_chunk *sctp_make_op_error_fixed(
+ const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk)
+{
+ size_t size = asoc ? asoc->pathmtu : 0;
+
+ if (!size)
+ size = SCTP_DEFAULT_MAXSEGMENT;
+
+ return sctp_make_op_error_space(asoc, chunk, size);
+}
+
/* Create an Operation Error chunk. */
struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
const struct sctp_chunk *chunk,
@@ -1354,6 +1395,18 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
return target;
}

+/* Append bytes to the end of a chunk. Returns NULL if there isn't sufficient
+ * space in the chunk
+ */
+void *sctp_addto_chunk_fixed(struct sctp_chunk *chunk,
+ int len, const void *data)
+{
+ if (skb_tailroom(chunk->skb) >= len)
+ return sctp_addto_chunk(chunk, len, data);
+ else
+ return NULL;
+}
+
/* Append bytes from user space to the end of a chunk. Will panic if
* chunk is not big enough.
* Returns a kernel err value.
@@ -1957,13 +2010,12 @@ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc,
* returning multiple unknown parameters.
*/
if (NULL == *errp)
- *errp = sctp_make_op_error_space(asoc, chunk,
- ntohs(chunk->chunk_hdr->length));
+ *errp = sctp_make_op_error_fixed(asoc, chunk);

if (*errp) {
- sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
+ sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM,
WORD_ROUND(ntohs(param.p->length)));
- sctp_addto_chunk(*errp,
+ sctp_addto_chunk_fixed(*errp,
WORD_ROUND(ntohs(param.p->length)),
param.v);
} else {
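
The sctp_addto_chunk_fixed()/sctp_init_cause_fixed() pair exists so the error-reporting path appends into the pre-sized error chunk only while tailroom remains, instead of panicking like the unchecked variants. The underlying pattern is a bounds-checked append; a generic sketch with a plain buffer standing in for the skb (all names below are illustrative):

#include <stdio.h>
#include <string.h>

struct chunk {
	char buf[64];
	size_t used;
};

/* append only if it fits; returns NULL on insufficient space,
 * mirroring sctp_addto_chunk_fixed() */
static void *chunk_add_fixed(struct chunk *c, const void *data, size_t len)
{
	void *target;

	if (sizeof(c->buf) - c->used < len)
		return NULL;
	target = c->buf + c->used;
	memcpy(target, data, len);
	c->used += len;
	return target;
}

int main(void)
{
	struct chunk c = { .used = 0 };
	char payload[48] = { 0 };

	printf("first add:  %s\n", chunk_add_fixed(&c, payload, 48) ? "ok" : "no room");
	printf("second add: %s\n", chunk_add_fixed(&c, payload, 48) ? "ok" : "no room");
	return 0;
}
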
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index a7a3677..a2a53e8 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -45,10 +45,10 @@

#define MAX_ADDR_STR 32

-static struct media *media_list = NULL;
+static struct media media_list[MAX_MEDIA];
static u32 media_count = 0;

-struct bearer *tipc_bearers = NULL;
+struct bearer tipc_bearers[MAX_BEARERS];

/**
* media_name_valid - validate media name
@@ -108,9 +108,11 @@ int tipc_register_media(u32 media_type,
int res = -EINVAL;

write_lock_bh(&tipc_net_lock);
- if (!media_list)
- goto exit;

+ if (tipc_mode != TIPC_NET_MODE) {
+ warn("Media <%s> rejected, not in networked mode yet\n", name);
+ goto exit;
+ }
if (!media_name_valid(name)) {
warn("Media <%s> rejected, illegal name\n", name);
goto exit;
@@ -660,33 +662,10 @@ int tipc_disable_bearer(const char *name)



-int tipc_bearer_init(void)
-{
- int res;
-
- write_lock_bh(&tipc_net_lock);
- tipc_bearers = kcalloc(MAX_BEARERS, sizeof(struct bearer), GFP_ATOMIC);
- media_list = kcalloc(MAX_MEDIA, sizeof(struct media), GFP_ATOMIC);
- if (tipc_bearers && media_list) {
- res = 0;
- } else {
- kfree(tipc_bearers);
- kfree(media_list);
- tipc_bearers = NULL;
- media_list = NULL;
- res = -ENOMEM;
- }
- write_unlock_bh(&tipc_net_lock);
- return res;
-}
-
void tipc_bearer_stop(void)
{
u32 i;

- if (!tipc_bearers)
- return;
-
for (i = 0; i < MAX_BEARERS; i++) {
if (tipc_bearers[i].active)
tipc_bearers[i].publ.blocked = 1;
@@ -695,10 +674,6 @@ void tipc_bearer_stop(void)
if (tipc_bearers[i].active)
bearer_disable(tipc_bearers[i].publ.name);
}
- kfree(tipc_bearers);
- kfree(media_list);
- tipc_bearers = NULL;
- media_list = NULL;
media_count = 0;
}

diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index ca57348..000228e 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -114,7 +114,7 @@ struct bearer_name {

struct link;

-extern struct bearer *tipc_bearers;
+extern struct bearer tipc_bearers[];

void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a);
struct sk_buff *tipc_media_get_names(void);
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 7906608..f25b1cd 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -116,7 +116,8 @@
*/

DEFINE_RWLOCK(tipc_net_lock);
-struct network tipc_net = { NULL };
+struct _zone *tipc_zones[256] = { NULL, };
+struct network tipc_net = { tipc_zones };

struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref)
{
@@ -158,28 +159,12 @@ void tipc_net_send_external_routes(u32 dest)
}
}

-static int net_init(void)
-{
- memset(&tipc_net, 0, sizeof(tipc_net));
- tipc_net.zones = kcalloc(tipc_max_zones + 1, sizeof(struct _zone *), GFP_ATOMIC);
- if (!tipc_net.zones) {
- return -ENOMEM;
- }
- return 0;
-}
-
static void net_stop(void)
{
u32 z_num;

- if (!tipc_net.zones)
- return;
-
- for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
+ for (z_num = 1; z_num <= tipc_max_zones; z_num++)
tipc_zone_delete(tipc_net.zones[z_num]);
- }
- kfree(tipc_net.zones);
- tipc_net.zones = NULL;
}

static void net_route_named_msg(struct sk_buff *buf)
@@ -282,9 +267,7 @@ int tipc_net_start(u32 addr)
tipc_named_reinit();
tipc_port_reinit();

- if ((res = tipc_bearer_init()) ||
- (res = net_init()) ||
- (res = tipc_cltr_init()) ||
+ if ((res = tipc_cltr_init()) ||
(res = tipc_bclink_init())) {
return res;
}
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index fa1a7d5..61acdf1 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -161,7 +161,7 @@ HOSTCFLAGS_zconf.tab.o := -I$(src)
HOSTLOADLIBES_qconf = $(KC_QT_LIBS) -ldl
HOSTCXXFLAGS_qconf.o = $(KC_QT_CFLAGS) -D LKC_DIRECT_LINK

-HOSTLOADLIBES_gconf = `pkg-config --libs gtk+-2.0 gmodule-2.0 libglade-2.0`
+HOSTLOADLIBES_gconf = `pkg-config --libs gtk+-2.0 gmodule-2.0 libglade-2.0` -ldl
HOSTCFLAGS_gconf.o = `pkg-config --cflags gtk+-2.0 gmodule-2.0 libglade-2.0` \
-D LKC_DIRECT_LINK

diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index a9ab8af..594660f 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -523,9 +523,8 @@ struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
struct key *keyring;
int bucket;

- keyring = ERR_PTR(-EINVAL);
if (!name)
- goto error;
+ return ERR_PTR(-EINVAL);

bucket = keyring_hash(name);

@@ -549,17 +548,18 @@ struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
KEY_SEARCH) < 0)
continue;

- /* we've got a match */
- atomic_inc(&keyring->usage);
- read_unlock(&keyring_name_lock);
- goto error;
+ /* we've got a match but we might end up racing with
+ * key_cleanup() if the keyring is currently 'dead'
+ * (ie. it has a zero usage count) */
+ if (!atomic_inc_not_zero(&keyring->usage))
+ continue;
+ goto out;
}
}

- read_unlock(&keyring_name_lock);
keyring = ERR_PTR(-ENOKEY);
-
- error:
+out:
+ read_unlock(&keyring_name_lock);
return keyring;

} /* end find_keyring_by_name() */
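
The keyring fix swaps an unconditional atomic_inc() for atomic_inc_not_zero(): a keyring whose usage count has already hit zero is being torn down by key_cleanup(), and taking a new reference would resurrect a dying object. The same try-get idiom in portable C11 atomics (get_ref and struct obj are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_int usage;
};

/* take a reference only if the object is still live (count > 0) */
static bool get_ref(struct obj *o)
{
	int old = atomic_load(&o->usage);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&o->usage, &old, old + 1))
			return true;	/* reference taken */
		/* old was reloaded by the failed CAS; retry */
	}
	return false;	/* object is dead; caller must skip it */
}

int main(void)
{
	struct obj live = { .usage = 1 }, dead = { .usage = 0 };

	printf("live: %s\n", get_ref(&live) ? "got ref" : "skipped");
	printf("dead: %s\n", get_ref(&dead) ? "got ref" : "skipped");
	return 0;
}
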
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 45b240a..b04a2bd 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -592,7 +592,7 @@ key_ref_t lookup_user_key(struct task_struct *context, key_serial_t id,

ret = install_thread_keyring(context);
if (ret < 0) {
- key = ERR_PTR(ret);
+ key_ref = ERR_PTR(ret);
goto error;
}
}
@@ -609,7 +609,7 @@ key_ref_t lookup_user_key(struct task_struct *context, key_serial_t id,

ret = install_process_keyring(context);
if (ret < 0) {
- key = ERR_PTR(ret);
+ key_ref = ERR_PTR(ret);
goto error;
}
}
@@ -665,7 +665,7 @@ key_ref_t lookup_user_key(struct task_struct *context, key_serial_t id,

case KEY_SPEC_GROUP_KEYRING:
/* group keyrings are not yet supported */
- key = ERR_PTR(-EINVAL);
+ key_ref = ERR_PTR(-EINVAL);
goto error;

case KEY_SPEC_REQKEY_AUTH_KEY: