Annotation of /trunk/kernel26-alx/patches-2.6.21-r13/0106-2.6.21.7-all-fixes.patch
Revision 445 - Tue Jan 15 00:44:37 2008 UTC (16 years, 8 months ago) by niro
File size: 34520 byte(s)
-added patches for 2.6.21-alx-r13
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index 18bddcb..cb1f16c 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -371,10 +371,6 @@ ENTRY(system_call)
 CFI_ADJUST_CFA_OFFSET 4
 SAVE_ALL
 GET_THREAD_INFO(%ebp)
- testl $TF_MASK,PT_EFLAGS(%esp)
- jz no_singlestep
- orl $_TIF_SINGLESTEP,TI_flags(%ebp)
-no_singlestep:
 # system call tracing in operation / emulation
 /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
 testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
@@ -389,6 +385,10 @@ syscall_exit:
 # setting need_resched or sigpending
 # between sampling and the iret
 TRACE_IRQS_OFF
+ testl $TF_MASK,PT_EFLAGS(%esp) # If tracing set singlestep flag on exit
+ jz no_singlestep
+ orl $_TIF_SINGLESTEP,TI_flags(%ebp)
+no_singlestep:
 movl TI_flags(%ebp), %ecx
 testw $_TIF_ALLWORK_MASK, %cx # current->work
 jne syscall_exit_work
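
The hunk above moves the TF_MASK test from syscall entry to syscall exit, so a task single-stepped into a syscall gets _TIF_SINGLESTEP set on the way out rather than on the way in. A minimal userspace C model of the flag logic (TF_MASK is the real x86 EFLAGS trap-flag bit; the TIF bit position is illustrative, not the kernel's):

#include <stdio.h>

#define TF_MASK        0x0100   /* x86 EFLAGS trap flag (bit 8) */
#define TIF_SINGLESTEP (1 << 4) /* illustrative bit position */

/* Model of the moved check: tag the thread for a single-step trap on
 * syscall exit whenever EFLAGS.TF was set at syscall entry. */
static unsigned long syscall_exit_flags(unsigned long eflags, unsigned long ti_flags)
{
    if (eflags & TF_MASK)
        ti_flags |= TIF_SINGLESTEP;
    return ti_flags;
}

int main(void)
{
    printf("%#lx\n", syscall_exit_flags(TF_MASK, 0)); /* 0x10: flag set */
    printf("%#lx\n", syscall_exit_flags(0, 0));       /* 0: untouched */
    return 0;
}
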
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index f72e8e8..a84304e 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -177,6 +177,13 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
 */
 discard_lazy_cpu_state();

+ /*
+ * Force reload of FP/VEC.
+ * This has to be done before copying stuff into current->thread.fpr/vr
+ * for the reasons explained in the previous comment.
+ */
+ regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC);
+
 err |= __copy_from_user(&current->thread.fpr, &sc->fp_regs, FP_REGS_SIZE);

 #ifdef CONFIG_ALTIVEC
@@ -198,9 +205,6 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
 current->thread.vrsave = 0;
 #endif /* CONFIG_ALTIVEC */

- /* Force reload of FP/VEC */
- regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC);
-
 return err;
 }

diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 6fd126a..df35d6d 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -72,6 +72,8 @@ void show_mem(void)

 for_each_online_pgdat(pgdat) {
 for (i = 0; i < pgdat->node_spanned_pages; ++i) {
+ if (!pfn_valid(pgdat->node_start_pfn + i))
+ continue;
 page = pfn_to_page(pgdat->node_start_pfn + i);
 total++;
 if (PageReserved(page))
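
The added pfn_valid() guard matters because a node's spanned PFN range can contain holes with no struct page behind them; calling pfn_to_page() on a hole can fault. A userspace sketch of the pattern, with a fake pfn_valid() standing in for the kernel's:

#include <stdbool.h>
#include <stdio.h>

/* Fake validity predicate: pretend every fourth PFN is a hole. */
static bool pfn_valid(unsigned long pfn)
{
    return pfn % 4 != 3;
}

int main(void)
{
    unsigned long node_start_pfn = 0, node_spanned_pages = 12, total = 0;

    for (unsigned long i = 0; i < node_spanned_pages; ++i) {
        if (!pfn_valid(node_start_pfn + i)) /* the added guard */
            continue;
        total++; /* only now is it safe to touch the page */
    }
    printf("%lu valid pages of %lu spanned\n", total, node_spanned_pages);
    return 0;
}
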
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c
index cf9d344..14de1e8 100644
--- a/drivers/ide/pci/hpt366.c
+++ b/drivers/ide/pci/hpt366.c
@@ -1,5 +1,5 @@
 /*
- * linux/drivers/ide/pci/hpt366.c Version 1.03 May 4, 2007
+ * linux/drivers/ide/pci/hpt366.c Version 1.04 Jun 4, 2007
 *
 * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
 * Portions Copyright (C) 2001 Sun Microsystems, Inc.
@@ -106,7 +106,8 @@
 * switch to calculating PCI clock frequency based on the chip's base DPLL
 * frequency
 * - switch to using the DPLL clock and enable UltraATA/133 mode by default on
- * anything newer than HPT370/A
+ * anything newer than HPT370/A (except HPT374 that is not capable of this
+ * mode according to the manual)
 * - fold PCI clock detection and DPLL setup code into init_chipset_hpt366(),
 * also fixing the interchanged 25/40 MHz PCI clock cases for HPT36x chips;
 * unify HPT36x/37x timing setup code and the speedproc handlers by joining
@@ -365,7 +366,6 @@ static u32 sixty_six_base_hpt37x[] = {
 };

 #define HPT366_DEBUG_DRIVE_INFO 0
-#define HPT374_ALLOW_ATA133_6 1
 #define HPT371_ALLOW_ATA133_6 1
 #define HPT302_ALLOW_ATA133_6 1
 #define HPT372_ALLOW_ATA133_6 1
@@ -450,7 +450,7 @@ static struct hpt_info hpt370a __devinitdata = {

 static struct hpt_info hpt374 __devinitdata = {
 .chip_type = HPT374,
- .max_mode = HPT374_ALLOW_ATA133_6 ? 4 : 3,
+ .max_mode = 3,
 .dpll_clk = 48,
 .settings = hpt37x_settings
 };
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 4c2471e..b9ff4e3 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -33,7 +33,6 @@
 struct crypt_io {
 struct dm_target *target;
 struct bio *base_bio;
- struct bio *first_clone;
 struct work_struct work;
 atomic_t pending;
 int error;
@@ -107,6 +106,8 @@ struct crypt_config {

 static struct kmem_cache *_crypt_io_pool;

+static void clone_init(struct crypt_io *, struct bio *);
+
 /*
 * Different IV generation algorithms:
 *
@@ -378,25 +379,20 @@ static int crypt_convert(struct crypt_config *cc,
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages
 */
-static struct bio *
-crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
- struct bio *base_bio, unsigned int *bio_vec_idx)
+static struct bio *crypt_alloc_buffer(struct crypt_io *io, unsigned int size,
+ unsigned int *bio_vec_idx)
 {
+ struct crypt_config *cc = io->target->private;
 struct bio *clone;
 unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
 unsigned int i;

- if (base_bio) {
- clone = bio_alloc_bioset(GFP_NOIO, base_bio->bi_max_vecs, cc->bs);
- __bio_clone(clone, base_bio);
- } else
- clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
-
+ clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
 if (!clone)
 return NULL;

- clone->bi_destructor = dm_crypt_bio_destructor;
+ clone_init(io, clone);

 /* if the last bio was not complete, continue where that one ended */
 clone->bi_idx = *bio_vec_idx;
@@ -495,9 +491,6 @@ static void dec_pending(struct crypt_io *io, int error)
 if (!atomic_dec_and_test(&io->pending))
 return;

- if (io->first_clone)
- bio_put(io->first_clone);
-
 bio_endio(io->base_bio, io->base_bio->bi_size, io->error);

 mempool_free(io, cc->io_pool);
@@ -562,6 +555,7 @@ static void clone_init(struct crypt_io *io, struct bio *clone)
 clone->bi_end_io = crypt_endio;
 clone->bi_bdev = cc->dev->bdev;
 clone->bi_rw = io->base_bio->bi_rw;
+ clone->bi_destructor = dm_crypt_bio_destructor;
 }

 static void process_read(struct crypt_io *io)
@@ -585,7 +579,6 @@ static void process_read(struct crypt_io *io)
 }

 clone_init(io, clone);
- clone->bi_destructor = dm_crypt_bio_destructor;
 clone->bi_idx = 0;
 clone->bi_vcnt = bio_segments(base_bio);
 clone->bi_size = base_bio->bi_size;
@@ -615,8 +608,7 @@ static void process_write(struct crypt_io *io)
 * so repeat the whole process until all the data can be handled.
 */
 while (remaining) {
- clone = crypt_alloc_buffer(cc, base_bio->bi_size,
- io->first_clone, &bvec_idx);
+ clone = crypt_alloc_buffer(io, base_bio->bi_size, &bvec_idx);
 if (unlikely(!clone)) {
 dec_pending(io, -ENOMEM);
 return;
@@ -631,31 +623,23 @@ static void process_write(struct crypt_io *io)
 return;
 }

- clone_init(io, clone);
 clone->bi_sector = cc->start + sector;
-
- if (!io->first_clone) {
- /*
- * hold a reference to the first clone, because it
- * holds the bio_vec array and that can't be freed
- * before all other clones are released
- */
- bio_get(clone);
- io->first_clone = clone;
- }
-
 remaining -= clone->bi_size;
 sector += bio_sectors(clone);

- /* prevent bio_put of first_clone */
+ /* Grab another reference to the io struct
+ * before we kick off the request */
 if (remaining)
 atomic_inc(&io->pending);

 generic_make_request(clone);

+ /* Do not reference clone after this - it
+ * may be gone already. */
+
 /* out of memory -> run queues */
 if (remaining)
- congestion_wait(bio_data_dir(clone), HZ/100);
+ congestion_wait(WRITE, HZ/100);
 }
 }

@@ -954,10 +938,12 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
 struct crypt_config *cc = ti->private;
 struct crypt_io *io;

+ if (bio_barrier(bio))
+ return -EOPNOTSUPP;
+
 io = mempool_alloc(cc->io_pool, GFP_NOIO);
 io->target = ti;
 io->base_bio = bio;
- io->first_clone = NULL;
 io->error = io->post_process = 0;
 atomic_set(&io->pending, 0);
 kcryptd_queue_io(io);
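
With first_clone gone, the write path relies purely on the io->pending reference count: a reference is taken before each extra clone is submitted, and the base bio completes only when the last reference drops. A userspace model of that accounting (a plain int instead of atomic_t, so not race-safe, just illustrative):

#include <stdio.h>

struct io { int pending; };

static void put_io(struct io *io)
{
    if (--io->pending == 0) /* atomic_dec_and_test() in the kernel */
        puts("last reference dropped: complete the base bio");
}

int main(void)
{
    struct io io = { .pending = 1 }; /* submitter's initial reference */

    for (int i = 0; i < 3; i++)
        io.pending++; /* taken *before* each clone is submitted; the
                         clone may complete (and vanish) immediately */

    for (int i = 0; i < 3; i++)
        put_io(&io);  /* completion callback of each clone */

    put_io(&io);      /* submitter drops its reference last */
    return 0;
}
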
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 3a95cc5..46677d7 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1240,17 +1240,24 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
 }
 r1_bio->read_disk = primary;
 for (i=0; i<mddev->raid_disks; i++)
- if (r1_bio->bios[i]->bi_end_io == end_sync_read &&
- test_bit(BIO_UPTODATE, &r1_bio->bios[i]->bi_flags)) {
+ if (r1_bio->bios[i]->bi_end_io == end_sync_read) {
 int j;
 int vcnt = r1_bio->sectors >> (PAGE_SHIFT- 9);
 struct bio *pbio = r1_bio->bios[primary];
 struct bio *sbio = r1_bio->bios[i];
- for (j = vcnt; j-- ; )
- if (memcmp(page_address(pbio->bi_io_vec[j].bv_page),
- page_address(sbio->bi_io_vec[j].bv_page),
- PAGE_SIZE))
- break;
+
+ if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
+ for (j = vcnt; j-- ; ) {
+ struct page *p, *s;
+ p = pbio->bi_io_vec[j].bv_page;
+ s = sbio->bi_io_vec[j].bv_page;
+ if (memcmp(page_address(p),
+ page_address(s),
+ PAGE_SIZE))
+ break;
+ }
+ } else
+ j = 0;
 if (j >= 0)
 mddev->resync_mismatches += r1_bio->sectors;
 if (j < 0 || test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
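
The restructured loop only memcmp()s pages from reads that actually completed (BIO_UPTODATE); a failed read is forced to look like a mismatch (j = 0) so the block gets rewritten. A userspace sketch of that comparison logic:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Returns nonzero when the secondary must be rewritten: either a page
 * differs from the primary, or the secondary read failed outright. */
static int needs_rewrite(char (*pri)[PAGE_SIZE], char (*sec)[PAGE_SIZE],
                         int vcnt, int uptodate)
{
    int j;

    if (!uptodate)
        return 1; /* the 'j = 0' branch in the patch */
    for (j = vcnt; j--; )
        if (memcmp(pri[j], sec[j], PAGE_SIZE))
            return 1; /* loop left with j >= 0 */
    return 0; /* j ran to -1: every page matched */
}

int main(void)
{
    static char pri[2][PAGE_SIZE], sec[2][PAGE_SIZE];

    sec[1][100] = 1; /* corrupt one byte on the secondary */
    printf("%d\n", needs_rewrite(pri, sec, 2, 1)); /* 1: mismatch */
    sec[1][100] = 0;
    printf("%d\n", needs_rewrite(pri, sec, 2, 1)); /* 0: clean */
    return 0;
}
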
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 82249a6..9eb66c1 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1867,6 +1867,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 int d = r10_bio->devs[i].devnum;
 bio = r10_bio->devs[i].bio;
 bio->bi_end_io = NULL;
+ clear_bit(BIO_UPTODATE, &bio->bi_flags);
 if (conf->mirrors[d].rdev == NULL ||
 test_bit(Faulty, &conf->mirrors[d].rdev->flags))
 continue;
@@ -2037,6 +2038,11 @@ static int run(mddev_t *mddev)
 /* 'size' is now the number of chunks in the array */
 /* calculate "used chunks per device" in 'stride' */
 stride = size * conf->copies;
+
+ /* We need to round up when dividing by raid_disks to
+ * get the stride size.
+ */
+ stride += conf->raid_disks - 1;
 sector_div(stride, conf->raid_disks);
 mddev->size = stride << (conf->chunk_shift-1);

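The stride change is ordinary ceiling division: adding raid_disks - 1 before sector_div() rounds the per-device chunk count up instead of truncating it. The same arithmetic in a standalone form:

#include <stdio.h>

static unsigned long long div_round_up(unsigned long long n, unsigned long long d)
{
    return (n + d - 1) / d; /* what 'stride += raid_disks - 1' achieves */
}

int main(void)
{
    /* e.g. 10 chunks spread across 4 raid disks */
    printf("truncated: %llu\n", 10ULL / 4);            /* 2 */
    printf("rounded up: %llu\n", div_round_up(10, 4)); /* 3 */
    return 0;
}
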
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index 5720b77..d4bef35 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -1313,7 +1313,7 @@ set_tvnorm(struct bttv *btv, unsigned int norm)

 /* Call with btv->lock down. */
 static void
-set_input(struct bttv *btv, unsigned int input)
+set_input(struct bttv *btv, unsigned int input, unsigned int norm)
 {
 unsigned long flags;

@@ -1332,7 +1332,7 @@ set_input(struct bttv *btv, unsigned int input)
 }
 audio_input(btv,(input == bttv_tvcards[btv->c.type].tuner ?
 TVAUDIO_INPUT_TUNER : TVAUDIO_INPUT_EXTERN));
- set_tvnorm(btv,btv->tvnorm);
+ set_tvnorm(btv, norm);
 i2c_vidiocschan(btv);
 }

@@ -1423,7 +1423,7 @@ static void bttv_reinit_bt848(struct bttv *btv)

 init_bt848(btv);
 btv->pll.pll_current = -1;
- set_input(btv,btv->input);
+ set_input(btv, btv->input, btv->tvnorm);
 }

 static int get_control(struct bttv *btv, struct v4l2_control *c)
@@ -1993,8 +1993,7 @@ static int bttv_common_ioctls(struct bttv *btv, unsigned int cmd, void *arg)
 return 0;
 }

- btv->tvnorm = v->norm;
- set_input(btv,v->channel);
+ set_input(btv, v->channel, v->norm);
 mutex_unlock(&btv->lock);
 return 0;
 }
@@ -2130,7 +2129,7 @@ static int bttv_common_ioctls(struct bttv *btv, unsigned int cmd, void *arg)
 if (*i > bttv_tvcards[btv->c.type].video_inputs)
 return -EINVAL;
 mutex_lock(&btv->lock);
- set_input(btv,*i);
+ set_input(btv, *i, btv->tvnorm);
 mutex_unlock(&btv->lock);
 return 0;
 }
@@ -4762,7 +4761,7 @@ static int __devinit bttv_probe(struct pci_dev *dev,
 bt848_hue(btv,32768);
 bt848_sat(btv,32768);
 audio_mute(btv, 1);
- set_input(btv,0);
+ set_input(btv, 0, btv->tvnorm);
 bttv_crop_reset(&btv->crop[0], btv->tvnorm);
 btv->crop[1] = btv->crop[0]; /* current = default */
 disclaim_vbi_lines(btv);
diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c
index b0466b8..a80b1cb 100644
--- a/drivers/media/video/cx88/cx88-blackbird.c
+++ b/drivers/media/video/cx88/cx88-blackbird.c
@@ -1034,6 +1034,8 @@ static int vidioc_g_tuner (struct file *file, void *priv,

 if (unlikely(UNSET == core->tuner_type))
 return -EINVAL;
+ if (0 != t->index)
+ return -EINVAL;

 strcpy(t->name, "Television");
 t->type = V4L2_TUNER_ANALOG_TV;
diff --git a/drivers/media/video/saa7134/saa7134-tvaudio.c b/drivers/media/video/saa7134/saa7134-tvaudio.c
index dd759d6..36b3fa3 100644
--- a/drivers/media/video/saa7134/saa7134-tvaudio.c
+++ b/drivers/media/video/saa7134/saa7134-tvaudio.c
@@ -1006,7 +1006,7 @@ int saa7134_tvaudio_init2(struct saa7134_dev *dev)
 int saa7134_tvaudio_fini(struct saa7134_dev *dev)
 {
 /* shutdown tvaudio thread */
- if (dev->thread.pid >= 0) {
+ if (dev->thread.pid > 0) {
 dev->thread.shutdown = 1;
 wake_up_interruptible(&dev->thread.wq);
 wait_for_completion(&dev->thread.exit);
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 5006c67..1137291 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -54,8 +54,8 @@

 #define DRV_MODULE_NAME "bnx2"
 #define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "1.5.8.1"
-#define DRV_MODULE_RELDATE "May 7, 2007"
+#define DRV_MODULE_VERSION "1.5.8.2"
+#define DRV_MODULE_RELDATE "June 5, 2007"

 #define RUN_AT(x) (jiffies + (x))

@@ -1550,6 +1550,7 @@ bnx2_init_context(struct bnx2 *bp)
 vcid = 96;
 while (vcid) {
 u32 vcid_addr, pcid_addr, offset;
+ int i;

 vcid--;

@@ -1570,16 +1571,20 @@ bnx2_init_context(struct bnx2 *bp)
 pcid_addr = vcid_addr;
 }

- REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
- REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
+ for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
+ vcid_addr += (i << PHY_CTX_SHIFT);
+ pcid_addr += (i << PHY_CTX_SHIFT);

- /* Zero out the context. */
- for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
- CTX_WR(bp, 0x00, offset, 0);
- }
+ REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
+ REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

- REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
- REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
+ /* Zero out the context. */
+ for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
+ CTX_WR(bp, 0x00, offset, 0);
+
+ REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
+ REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
+ }
 }
 }

diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index b6b444b..e525a5b 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -95,7 +95,7 @@ static int disable_msi = 0;
 module_param(disable_msi, int, 0);
 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

-static int idle_timeout = 0;
+static int idle_timeout = 100;
 module_param(idle_timeout, int, 0);
 MODULE_PARM_DESC(idle_timeout, "Watchdog timer for lost interrupts (ms)");

@@ -2433,6 +2433,13 @@ static int sky2_poll(struct net_device *dev0, int *budget)

 work_done = sky2_status_intr(hw, work_limit);
 if (work_done < work_limit) {
+ /* Bug/Errata workaround?
+ * Need to kick the TX irq moderation timer.
+ */
+ if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_START) {
+ sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
+ sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
+ }
 netif_rx_complete(dev0);

 sky2_read32(hw, B0_Y2_SP_LISR);
diff --git a/drivers/serial/mpsc.c b/drivers/serial/mpsc.c
index 3d2fcc5..64ed5ef 100644
--- a/drivers/serial/mpsc.c
+++ b/drivers/serial/mpsc.c
@@ -502,7 +502,8 @@ mpsc_sdma_intr_ack(struct mpsc_port_info *pi)

 if (pi->mirror_regs)
 pi->shared_regs->SDMA_INTR_CAUSE_m = 0;
- writel(0, pi->shared_regs->sdma_intr_base + SDMA_INTR_CAUSE);
+ writeb(0x00, pi->shared_regs->sdma_intr_base + SDMA_INTR_CAUSE +
+ pi->port.line);
 return;
 }

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 49fe299..8cf1d7f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1138,6 +1138,7 @@ static inline void put_task_struct(struct task_struct *t)
 /* Not implemented yet, only for 486*/
 #define PF_STARTING 0x00000002 /* being created */
 #define PF_EXITING 0x00000004 /* getting shut down */
+#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
 #define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
 #define PF_SUPERPRIV 0x00000100 /* used super-user privileges */
 #define PF_DUMPCORE 0x00000200 /* dumped core */
diff --git a/ipc/shm.c b/ipc/shm.c
index 4fefbad..8d2672d 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -254,8 +254,10 @@ struct mempolicy *shm_get_policy(struct vm_area_struct *vma, unsigned long addr)

 if (sfd->vm_ops->get_policy)
 pol = sfd->vm_ops->get_policy(vma, addr);
- else
+ else if (vma->vm_policy)
 pol = vma->vm_policy;
+ else
+ pol = current->mempolicy;
 return pol;
 }
 #endif
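
The new fallback order in shm_get_policy() is: the segment's own get_policy hook, else a policy set on the VMA, else the calling task's policy. A userspace model of the selection:

#include <stdio.h>

struct mempolicy { const char *origin; };

static const struct mempolicy *pick_policy(const struct mempolicy *from_ops,
                                           const struct mempolicy *from_vma,
                                           const struct mempolicy *from_task)
{
    if (from_ops)
        return from_ops;
    if (from_vma)       /* the patch adds this NULL check ... */
        return from_vma;
    return from_task;   /* ... and this final fallback */
}

int main(void)
{
    struct mempolicy task = { "current->mempolicy" };

    printf("%s\n", pick_policy(NULL, NULL, &task)->origin);
    return 0;
}
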
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 3749193..2b8311b 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -905,7 +905,7 @@ static void audit_update_watch(struct audit_parent *parent,

 /* If the update involves invalidating rules, do the inode-based
 * filtering now, so we don't omit records. */
- if (invalidating &&
+ if (invalidating && current->audit_context &&
 audit_filter_inodes(current, current->audit_context) == AUDIT_RECORD_CONTEXT)
 audit_set_auditable(current->audit_context);

diff --git a/kernel/exit.c b/kernel/exit.c
index b55ed4c..7debf34 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -884,13 +884,29 @@ fastcall NORET_TYPE void do_exit(long code)
 if (unlikely(tsk->flags & PF_EXITING)) {
 printk(KERN_ALERT
 "Fixing recursive fault but reboot is needed!\n");
+ /*
+ * We can do this unlocked here. The futex code uses
+ * this flag just to verify whether the pi state
+ * cleanup has been done or not. In the worst case it
+ * loops once more. We pretend that the cleanup was
+ * done as there is no way to return. Either the
+ * OWNER_DIED bit is set by now or we push the blocked
+ * task into the wait for ever nirwana as well.
+ */
+ tsk->flags |= PF_EXITPIDONE;
 if (tsk->io_context)
 exit_io_context();
 set_current_state(TASK_UNINTERRUPTIBLE);
 schedule();
 }

+ /*
+ * tsk->flags are checked in the futex code to protect against
+ * an exiting task cleaning up the robust pi futexes.
+ */
+ spin_lock_irq(&tsk->pi_lock);
 tsk->flags |= PF_EXITING;
+ spin_unlock_irq(&tsk->pi_lock);

 if (unlikely(in_atomic()))
 printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
@@ -957,6 +973,12 @@ fastcall NORET_TYPE void do_exit(long code)
 * Make sure we are holding no locks:
 */
 debug_check_no_locks_held(tsk);
+ /*
+ * We can do this unlocked here. The futex code uses this flag
+ * just to verify whether the pi state cleanup has been done
+ * or not. In the worst case it loops once more.
+ */
+ tsk->flags |= PF_EXITPIDONE;

 if (tsk->io_context)
 exit_io_context();
diff --git a/kernel/futex.c b/kernel/futex.c
index 5a270b5..4809436 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -390,18 +390,12 @@ static struct task_struct * futex_find_get_task(pid_t pid)

 rcu_read_lock();
 p = find_task_by_pid(pid);
- if (!p)
- goto out_unlock;
- if ((current->euid != p->euid) && (current->euid != p->uid)) {
- p = NULL;
- goto out_unlock;
- }
- if (p->exit_state != 0) {
- p = NULL;
- goto out_unlock;
- }
- get_task_struct(p);
-out_unlock:
+
+ if (!p || ((current->euid != p->euid) && (current->euid != p->uid)))
+ p = ERR_PTR(-ESRCH);
+ else
+ get_task_struct(p);
+
 rcu_read_unlock();

 return p;
@@ -467,7 +461,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
 struct futex_q *this, *next;
 struct list_head *head;
 struct task_struct *p;
- pid_t pid;
+ pid_t pid = uval & FUTEX_TID_MASK;

 head = &hb->chain;

@@ -485,6 +479,8 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
 return -EINVAL;

 WARN_ON(!atomic_read(&pi_state->refcount));
+ WARN_ON(pid && pi_state->owner &&
+ pi_state->owner->pid != pid);

 atomic_inc(&pi_state->refcount);
 me->pi_state = pi_state;
@@ -495,15 +491,33 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)

 /*
 * We are the first waiter - try to look up the real owner and attach
- * the new pi_state to it, but bail out when the owner died bit is set
- * and TID = 0:
+ * the new pi_state to it, but bail out when TID = 0
 */
- pid = uval & FUTEX_TID_MASK;
- if (!pid && (uval & FUTEX_OWNER_DIED))
+ if (!pid)
 return -ESRCH;
 p = futex_find_get_task(pid);
- if (!p)
- return -ESRCH;
+ if (IS_ERR(p))
+ return PTR_ERR(p);
+
+ /*
+ * We need to look at the task state flags to figure out,
+ * whether the task is exiting. To protect against the do_exit
+ * change of the task flags, we do this protected by
+ * p->pi_lock:
+ */
+ spin_lock_irq(&p->pi_lock);
+ if (unlikely(p->flags & PF_EXITING)) {
+ /*
+ * The task is on the way out. When PF_EXITPIDONE is
+ * set, we know that the task has finished the
+ * cleanup:
+ */
+ int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
+
+ spin_unlock_irq(&p->pi_lock);
+ put_task_struct(p);
+ return ret;
+ }

 pi_state = alloc_pi_state();

@@ -516,7 +530,6 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
 /* Store the key for possible exit cleanups: */
 pi_state->key = me->key;

- spin_lock_irq(&p->pi_lock);
 WARN_ON(!list_empty(&pi_state->list));
 list_add(&pi_state->list, &p->pi_state_list);
 pi_state->owner = p;
@@ -583,15 +596,22 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
 * preserve the owner died bit.)
 */
 if (!(uval & FUTEX_OWNER_DIED)) {
+ int ret = 0;
+
 newval = FUTEX_WAITERS | new_owner->pid;

 pagefault_disable();
 curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
 pagefault_enable();
+
 if (curval == -EFAULT)
- return -EFAULT;
+ ret = -EFAULT;
 if (curval != uval)
- return -EINVAL;
+ ret = -EINVAL;
+ if (ret) {
+ spin_unlock(&pi_state->pi_mutex.wait_lock);
+ return ret;
+ }
 }

 spin_lock_irq(&pi_state->owner->pi_lock);
@@ -1149,6 +1169,7 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec,
 if (unlikely(ret != 0))
 goto out_release_sem;

+ retry_unlocked:
 hb = queue_lock(&q, -1, NULL);

 retry_locked:
@@ -1200,34 +1221,58 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec,
 ret = lookup_pi_state(uval, hb, &q);

 if (unlikely(ret)) {
- /*
- * There were no waiters and the owner task lookup
- * failed. When the OWNER_DIED bit is set, then we
- * know that this is a robust futex and we actually
- * take the lock. This is safe as we are protected by
- * the hash bucket lock. We also set the waiters bit
- * unconditionally here, to simplify glibc handling of
- * multiple tasks racing to acquire the lock and
- * cleanup the problems which were left by the dead
- * owner.
- */
- if (curval & FUTEX_OWNER_DIED) {
- uval = newval;
- newval = current->pid |
- FUTEX_OWNER_DIED | FUTEX_WAITERS;
+ switch (ret) {

- pagefault_disable();
- curval = futex_atomic_cmpxchg_inatomic(uaddr,
- uval, newval);
- pagefault_enable();
+ case -EAGAIN:
+ /*
+ * Task is exiting and we just wait for the
+ * exit to complete.
+ */
+ queue_unlock(&q, hb);
+ up_read(&curr->mm->mmap_sem);
+ cond_resched();
+ goto retry;

- if (unlikely(curval == -EFAULT))
+ case -ESRCH:
+ /*
+ * No owner found for this futex. Check if the
+ * OWNER_DIED bit is set to figure out whether
+ * this is a robust futex or not.
+ */
+ if (get_futex_value_locked(&curval, uaddr))
 goto uaddr_faulted;
- if (unlikely(curval != uval))
- goto retry_locked;
- ret = 0;
+
+ /*
+ * There were no waiters and the owner task lookup
+ * failed. When the OWNER_DIED bit is set, then we
+ * know that this is a robust futex and we actually
+ * take the lock. This is safe as we are protected by
+ * the hash bucket lock. We also set the waiters bit
+ * unconditionally here, to simplify glibc handling of
+ * multiple tasks racing to acquire the lock and
+ * cleanup the problems which were left by the dead
+ * owner.
+ */
+ if (curval & FUTEX_OWNER_DIED) {
+ uval = newval;
+ newval = current->pid |
+ FUTEX_OWNER_DIED | FUTEX_WAITERS;
+
+ pagefault_disable();
+ curval = futex_atomic_cmpxchg_inatomic(uaddr,
+ uval,
+ newval);
+ pagefault_enable();
+
+ if (unlikely(curval == -EFAULT))
+ goto uaddr_faulted;
+ if (unlikely(curval != uval))
+ goto retry_locked;
+ ret = 0;
+ }
+ default:
+ goto out_unlock_release_sem;
 }
- goto out_unlock_release_sem;
 }

 /*
@@ -1279,39 +1324,52 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec,
 list_add(&q.pi_state->list, &current->pi_state_list);
 spin_unlock_irq(&current->pi_lock);

- /* Unqueue and drop the lock */
- unqueue_me_pi(&q, hb);
- up_read(&curr->mm->mmap_sem);
 /*
 * We own it, so we have to replace the pending owner
- * TID. This must be atomic as we have preserve the
+ * TID. This must be atomic as we have to preserve the
 * owner died bit here.
 */
- ret = get_user(uval, uaddr);
+ ret = get_futex_value_locked(&uval, uaddr);
 while (!ret) {
 newval = (uval & FUTEX_OWNER_DIED) | newtid;
+
+ pagefault_disable();
 curval = futex_atomic_cmpxchg_inatomic(uaddr,
 uval, newval);
+ pagefault_enable();
+
 if (curval == -EFAULT)
 ret = -EFAULT;
 if (curval == uval)
 break;
 uval = curval;
 }
- } else {
+ } else if (ret) {
 /*
 * Catch the rare case, where the lock was released
 * when we were on the way back before we locked
 * the hash bucket.
 */
- if (ret && q.pi_state->owner == curr) {
- if (rt_mutex_trylock(&q.pi_state->pi_mutex))
- ret = 0;
+ if (q.pi_state->owner == curr &&
+ rt_mutex_trylock(&q.pi_state->pi_mutex)) {
+ ret = 0;
+ } else {
+ /*
+ * Paranoia check. If we did not take the lock
+ * in the trylock above, then we should not be
+ * the owner of the rtmutex, neither the real
+ * nor the pending one:
+ */
+ if (rt_mutex_owner(&q.pi_state->pi_mutex) == curr)
+ printk(KERN_ERR "futex_lock_pi: ret = %d "
+ "pi-mutex: %p pi-state %p\n", ret,
+ q.pi_state->pi_mutex.owner,
+ q.pi_state->owner);
 }
- /* Unqueue and drop the lock */
- unqueue_me_pi(&q, hb);
- up_read(&curr->mm->mmap_sem);
 }
+ /* Unqueue and drop the lock */
+ unqueue_me_pi(&q, hb);
+ up_read(&curr->mm->mmap_sem);

 if (!detect && ret == -EDEADLK && 0)
 force_sig(SIGKILL, current);
@@ -1331,16 +1389,18 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec,
 * non-atomically. Therefore, if get_user below is not
 * enough, we need to handle the fault ourselves, while
 * still holding the mmap_sem.
+ *
+ * ... and hb->lock. :-) --ANK
 */
+ queue_unlock(&q, hb);
+
 if (attempt++) {
- if (futex_handle_fault((unsigned long)uaddr, attempt)) {
- ret = -EFAULT;
- goto out_unlock_release_sem;
- }
- goto retry_locked;
+ ret = futex_handle_fault((unsigned long)uaddr, attempt);
+ if (ret)
+ goto out_release_sem;
+ goto retry_unlocked;
 }

- queue_unlock(&q, hb);
 up_read(&curr->mm->mmap_sem);

 ret = get_user(uval, uaddr);
@@ -1382,9 +1442,9 @@ retry:
 goto out;

 hb = hash_futex(&key);
+retry_unlocked:
 spin_lock(&hb->lock);

-retry_locked:
 /*
 * To avoid races, try to do the TID -> 0 atomic transition
 * again. If it succeeds then we can return without waking
@@ -1446,16 +1506,17 @@ pi_faulted:
 * non-atomically. Therefore, if get_user below is not
 * enough, we need to handle the fault ourselves, while
 * still holding the mmap_sem.
+ *
+ * ... and hb->lock. :-) --ANK
 */
+ spin_unlock(&hb->lock);
+
 if (attempt++) {
- if (futex_handle_fault((unsigned long)uaddr, attempt)) {
- ret = -EFAULT;
- goto out_unlock;
- }
- goto retry_locked;
+ ret = futex_handle_fault((unsigned long)uaddr, attempt);
+ if (ret)
+ goto out;
+ goto retry_unlocked;
 }
-
- spin_unlock(&hb->lock);
 up_read(&current->mm->mmap_sem);

 ret = get_user(uval, uaddr);
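
The PF_EXITING/PF_EXITPIDONE pair gives lookup_pi_state() a three-way answer: attach normally, retry with -EAGAIN while do_exit() is still mid-flight, or give up with -ESRCH once the pi-state cleanup has finished. A userspace model of that decision (flag values taken from the sched.h hunk above; the pi_lock that orders this against do_exit() is elided):

#include <errno.h>
#include <stdio.h>

#define PF_EXITING    0x00000004 /* getting shut down */
#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */

/* In the kernel this check runs under p->pi_lock. */
static int attach_to_owner(unsigned int flags)
{
    if (flags & PF_EXITING)
        return (flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
    return 0; /* safe to attach the pi_state to this owner */
}

int main(void)
{
    printf("%d\n", attach_to_owner(0));                          /* 0 */
    printf("%d\n", attach_to_owner(PF_EXITING));                 /* -EAGAIN: caller retries */
    printf("%d\n", attach_to_owner(PF_EXITING | PF_EXITPIDONE)); /* -ESRCH */
    return 0;
}
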
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 44318ca..9577ac8 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -354,9 +354,40 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
 * it should be restarted.
 */
 if (timr->it.real.interval.tv64 != 0) {
+ ktime_t now = hrtimer_cb_get_time(timer);
+
+ /*
+ * FIXME: What we really want, is to stop this
+ * timer completely and restart it in case the
+ * SIG_IGN is removed. This is a non trivial
+ * change which involves sighand locking
+ * (sigh !), which we don't want to do late in
+ * the release cycle.
+ *
+ * For now we just let timers with an interval
+ * less than a jiffie expire every jiffie to
+ * avoid softirq starvation in case of SIG_IGN
+ * and a very small interval, which would put
+ * the timer right back on the softirq pending
+ * list. By moving now ahead of time we trick
+ * hrtimer_forward() to expire the timer
+ * later, while we still maintain the overrun
+ * accuracy, but have some inconsistency in
+ * the timer_gettime() case. This is at least
+ * better than a starved softirq. A more
+ * complex fix which solves also another related
+ * inconsistency is already in the pipeline.
+ */
+#ifdef CONFIG_HIGH_RES_TIMERS
+ {
+ ktime_t kj = ktime_set(0, NSEC_PER_SEC / HZ);
+
+ if (timr->it.real.interval.tv64 < kj.tv64)
+ now = ktime_add(now, kj);
+ }
+#endif
 timr->it_overrun +=
- hrtimer_forward(timer,
- hrtimer_cb_get_time(timer),
+ hrtimer_forward(timer, now,
 timr->it.real.interval);
 ret = HRTIMER_RESTART;
 ++timr->it_requeue_pending;
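
The workaround reduces to one line of time arithmetic: if the period is shorter than a jiffie, pretend "now" is one jiffie later before calling hrtimer_forward(), so an ignored timer re-expires at most once per jiffie. A standalone model in plain nanoseconds (the HZ value is illustrative):

#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL
#define HZ 250 /* illustrative */

int main(void)
{
    long long kj = NSEC_PER_SEC / HZ; /* one jiffie: 4 ms at HZ=250 */
    long long interval = 100000;      /* a 100 us periodic timer */
    long long now = 0;

    if (interval < kj)
        now += kj; /* trick the forward step into expiring later */

    printf("forward the timer from t=%lld ns instead of t=0\n", now);
    return 0;
}
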
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 180978c..17d28ce 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -212,6 +212,19 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 if (!waiter || !waiter->task)
 goto out_unlock_pi;

+ /*
+ * Check the orig_waiter state. After we dropped the locks,
+ * the previous owner of the lock might have released the lock
+ * and made us the pending owner:
+ */
+ if (orig_waiter && !orig_waiter->task)
+ goto out_unlock_pi;
+
+ /*
+ * Drop out, when the task has no waiters. Note,
+ * top_waiter can be NULL, when we are in the deboosting
+ * mode!
+ */
 if (top_waiter && (!task_has_pi_waiters(task) ||
 top_waiter != task_top_pi_waiter(task)))
 goto out_unlock_pi;
@@ -659,9 +672,16 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 * all over without going into schedule to try
 * to get the lock now:
 */
- if (unlikely(!waiter.task))
+ if (unlikely(!waiter.task)) {
+ /*
+ * Reset the return value. We might
+ * have returned with -EDEADLK and the
+ * owner released the lock while we
+ * were walking the pi chain.
+ */
+ ret = 0;
 continue;
-
+ }
 if (unlikely(ret))
 break;
 }
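
The slowlock hunk is about not leaking a stale error: when waiter.task has become NULL, the owner handed us the lock while we walked the pi chain, so an earlier -EDEADLK must be cleared before retrying. A minimal sketch of that reset:

#include <errno.h>
#include <stdio.h>

int main(void)
{
    int ret = -EDEADLK;     /* left over from the deadlock-detection pass */
    int waiter_pending = 0; /* waiter.task == NULL: lock was granted to us */

    if (!waiter_pending)
        ret = 0; /* the added reset before retrying the acquisition */

    printf("ret = %d\n", ret); /* 0, not -EDEADLK */
    return 0;
}
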
diff --git a/kernel/sched.c b/kernel/sched.c
index a3993b9..f745a44 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2831,17 +2831,21 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
 unsigned long next_balance = jiffies + 60 * HZ;

 for_each_domain(this_cpu, sd) {
- if (sd->flags & SD_BALANCE_NEWIDLE) {
+ unsigned long interval;
+
+ if (!(sd->flags & SD_LOAD_BALANCE))
+ continue;
+
+ if (sd->flags & SD_BALANCE_NEWIDLE)
 /* If we've pulled tasks over stop searching: */
 pulled_task = load_balance_newidle(this_cpu,
- this_rq, sd);
- if (time_after(next_balance,
- sd->last_balance + sd->balance_interval))
- next_balance = sd->last_balance
- + sd->balance_interval;
- if (pulled_task)
- break;
- }
+ this_rq, sd);
+
+ interval = msecs_to_jiffies(sd->balance_interval);
+ if (time_after(next_balance, sd->last_balance + interval))
+ next_balance = sd->last_balance + interval;
+ if (pulled_task)
+ break;
 }
 if (!pulled_task)
 /*
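
Two things change in idle_balance(): domains without SD_LOAD_BALANCE are skipped, and balance_interval is converted with msecs_to_jiffies() before being compared against jiffies-based timestamps, fixing a unit mismatch. A sketch of the corrected next_balance computation (HZ and the sample numbers are illustrative):

#include <stdio.h>

#define HZ 250 /* illustrative */

static unsigned long msecs_to_jiffies(unsigned long ms)
{
    return ms * HZ / 1000;
}

int main(void)
{
    unsigned long jiffies = 10000;                 /* sample clock */
    unsigned long next_balance = jiffies + 60 * HZ;
    unsigned long last_balance = 9990;             /* per-domain timestamp */
    unsigned long balance_interval_ms = 64;

    unsigned long interval = msecs_to_jiffies(balance_interval_ms);
    if (next_balance > last_balance + interval)    /* time_after() */
        next_balance = last_balance + interval;

    printf("next balance at jiffy %lu\n", next_balance);
    return 0;
}
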
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index cb25649..c6b6f35 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -120,7 +120,6 @@ void second_overflow(void)
 */
 time_interpolator_update(-NSEC_PER_SEC);
 time_state = TIME_OOP;
- clock_was_set();
 printk(KERN_NOTICE "Clock: inserting leap second "
 "23:59:60 UTC\n");
 }
@@ -135,7 +134,6 @@ void second_overflow(void)
 */
 time_interpolator_update(NSEC_PER_SEC);
 time_state = TIME_WAIT;
- clock_was_set();
 printk(KERN_NOTICE "Clock: deleting leap second "
 "23:59:59 UTC\n");
 }
diff --git a/mm/rmap.c b/mm/rmap.c
index b82146e..6e35d11 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -53,24 +53,6 @@

 struct kmem_cache *anon_vma_cachep;

-static inline void validate_anon_vma(struct vm_area_struct *find_vma)
-{
-#ifdef CONFIG_DEBUG_VM
- struct anon_vma *anon_vma = find_vma->anon_vma;
- struct vm_area_struct *vma;
- unsigned int mapcount = 0;
- int found = 0;
-
- list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
- mapcount++;
- BUG_ON(mapcount > 100000);
- if (vma == find_vma)
- found = 1;
- }
- BUG_ON(!found);
-#endif
-}
-
 /* This must be called under the mmap_sem. */
 int anon_vma_prepare(struct vm_area_struct *vma)
 {
@@ -121,10 +103,8 @@ void __anon_vma_link(struct vm_area_struct *vma)
 {
 struct anon_vma *anon_vma = vma->anon_vma;

- if (anon_vma) {
+ if (anon_vma)
 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
- validate_anon_vma(vma);
- }
 }

 void anon_vma_link(struct vm_area_struct *vma)
@@ -134,7 +114,6 @@ void anon_vma_link(struct vm_area_struct *vma)
 if (anon_vma) {
 spin_lock(&anon_vma->lock);
 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
- validate_anon_vma(vma);
 spin_unlock(&anon_vma->lock);
 }
 }
@@ -148,7 +127,6 @@ void anon_vma_unlink(struct vm_area_struct *vma)
 return;

 spin_lock(&anon_vma->lock);
- validate_anon_vma(vma);
 list_del(&vma->anon_vma_node);

 /* We must garbage collect the anon_vma if it's empty */