Magellan Linux

Annotation of /trunk/kernel26-alx/patches-2.6.23-r1/0116-2.6.23.17-all-fixes.patch

Parent Directory | Revision Log


Revision 658 - (hide annotations) (download)
Mon Jun 23 21:39:39 2008 UTC (15 years, 11 months ago) by niro
File size: 16283 byte(s)
2.6.23-alx-r1: new default as we fix the via epia clocksource=tsc quirks
-linux-2.6.23.17
-fbcondecor-0.9.4
-squashfs-3.3
-unionfs-2.3.3
-ipw3945-1.2.2
-mptbase-vmware fix

1 niro 658 diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c
2     index ba931be..5169ecc 100644
3     --- a/arch/powerpc/platforms/powermac/feature.c
4     +++ b/arch/powerpc/platforms/powermac/feature.c
5     @@ -2565,6 +2565,8 @@ static void __init probe_uninorth(void)
6    
7     /* Locate core99 Uni-N */
8     uninorth_node = of_find_node_by_name(NULL, "uni-n");
9     + uninorth_maj = 1;
10     +
11     /* Locate G5 u3 */
12     if (uninorth_node == NULL) {
13     uninorth_node = of_find_node_by_name(NULL, "u3");
14     @@ -2575,8 +2577,10 @@ static void __init probe_uninorth(void)
15     uninorth_node = of_find_node_by_name(NULL, "u4");
16     uninorth_maj = 4;
17     }
18     - if (uninorth_node == NULL)
19     + if (uninorth_node == NULL) {
20     + uninorth_maj = 0;
21     return;
22     + }
23    
24     addrp = of_get_property(uninorth_node, "reg", NULL);
25     if (addrp == NULL)
26     @@ -3029,3 +3033,8 @@ void pmac_resume_agp_for_card(struct pci_dev *dev)
27     pmac_agp_resume(pmac_agp_bridge);
28     }
29     EXPORT_SYMBOL(pmac_resume_agp_for_card);
30     +
31     +int pmac_get_uninorth_variant(void)
32     +{
33     + return uninorth_maj;
34     +}
35     diff --git a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c
36     index eff3b22..7770e10 100644
37     --- a/arch/x86_64/mm/pageattr.c
38     +++ b/arch/x86_64/mm/pageattr.c
39     @@ -207,7 +207,7 @@ int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
40     if (__pa(address) < KERNEL_TEXT_SIZE) {
41     unsigned long addr2;
42     pgprot_t prot2;
43     - addr2 = __START_KERNEL_map + __pa(address);
44     + addr2 = __START_KERNEL_map + __pa(address) - phys_base;
45     /* Make sure the kernel mappings stay executable */
46     prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
47     err = __change_page_attr(addr2, pfn, prot2,
48     diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
49     index d409f67..1ebe7a3 100644
50     --- a/drivers/macintosh/smu.c
51     +++ b/drivers/macintosh/smu.c
52     @@ -85,6 +85,7 @@ struct smu_device {
53     u32 cmd_buf_abs; /* command buffer absolute */
54     struct list_head cmd_list;
55     struct smu_cmd *cmd_cur; /* pending command */
56     + int broken_nap;
57     struct list_head cmd_i2c_list;
58     struct smu_i2c_cmd *cmd_i2c_cur; /* pending i2c command */
59     struct timer_list i2c_timer;
60     @@ -135,6 +136,19 @@ static void smu_start_cmd(void)
61     fend = faddr + smu->cmd_buf->length + 2;
62     flush_inval_dcache_range(faddr, fend);
63    
64     +
65     + /* We also disable NAP mode for the duration of the command
66     + * on U3 based machines.
67     + * This is slightly racy as it can be written back to 1 by a sysctl
68     + * but that never happens in practice. There seem to be an issue with
69     + * U3 based machines such as the iMac G5 where napping for the
70     + * whole duration of the command prevents the SMU from fetching it
71     + * from memory. This might be related to the strange i2c based
72     + * mechanism the SMU uses to access memory.
73     + */
74     + if (smu->broken_nap)
75     + powersave_nap = 0;
76     +
77     /* This isn't exactly a DMA mapping here, I suspect
78     * the SMU is actually communicating with us via i2c to the
79     * northbridge or the CPU to access RAM.
80     @@ -211,6 +225,10 @@ static irqreturn_t smu_db_intr(int irq, void *arg)
81     misc = cmd->misc;
82     mb();
83     cmd->status = rc;
84     +
85     + /* Re-enable NAP mode */
86     + if (smu->broken_nap)
87     + powersave_nap = 1;
88     bail:
89     /* Start next command if any */
90     smu_start_cmd();
91     @@ -461,7 +479,7 @@ int __init smu_init (void)
92     if (np == NULL)
93     return -ENODEV;
94    
95     - printk(KERN_INFO "SMU driver %s %s\n", VERSION, AUTHOR);
96     + printk(KERN_INFO "SMU: Driver %s %s\n", VERSION, AUTHOR);
97    
98     if (smu_cmdbuf_abs == 0) {
99     printk(KERN_ERR "SMU: Command buffer not allocated !\n");
100     @@ -533,6 +551,11 @@ int __init smu_init (void)
101     goto fail;
102     }
103    
104     + /* U3 has an issue with NAP mode when issuing SMU commands */
105     + smu->broken_nap = pmac_get_uninorth_variant() < 4;
106     + if (smu->broken_nap)
107     + printk(KERN_INFO "SMU: using NAP mode workaround\n");
108     +
109     sys_ctrler = SYS_CTRLER_SMU;
110     return 0;
111    
112     diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
113     index 2c6116f..2b28a24 100644
114     --- a/drivers/scsi/sd.c
115     +++ b/drivers/scsi/sd.c
116     @@ -901,6 +901,7 @@ static void sd_rw_intr(struct scsi_cmnd * SCpnt)
117     unsigned int xfer_size = SCpnt->request_bufflen;
118     unsigned int good_bytes = result ? 0 : xfer_size;
119     u64 start_lba = SCpnt->request->sector;
120     + u64 end_lba = SCpnt->request->sector + (xfer_size / 512);
121     u64 bad_lba;
122     struct scsi_sense_hdr sshdr;
123     int sense_valid = 0;
124     @@ -939,26 +940,23 @@ static void sd_rw_intr(struct scsi_cmnd * SCpnt)
125     goto out;
126     if (xfer_size <= SCpnt->device->sector_size)
127     goto out;
128     - switch (SCpnt->device->sector_size) {
129     - case 256:
130     + if (SCpnt->device->sector_size < 512) {
131     + /* only legitimate sector_size here is 256 */
132     start_lba <<= 1;
133     - break;
134     - case 512:
135     - break;
136     - case 1024:
137     - start_lba >>= 1;
138     - break;
139     - case 2048:
140     - start_lba >>= 2;
141     - break;
142     - case 4096:
143     - start_lba >>= 3;
144     - break;
145     - default:
146     - /* Print something here with limiting frequency. */
147     - goto out;
148     - break;
149     + end_lba <<= 1;
150     + } else {
151     + /* be careful ... don't want any overflows */
152     + u64 factor = SCpnt->device->sector_size / 512;
153     + do_div(start_lba, factor);
154     + do_div(end_lba, factor);
155     }
156     +
157     + if (bad_lba < start_lba || bad_lba >= end_lba)
158     + /* the bad lba was reported incorrectly, we have
159     + * no idea where the error is
160     + */
161     + goto out;
162     +
163     /* This computation should always be done in terms of
164     * the resolution of the device's medium.
165     */
166     diff --git a/fs/nfs/write.c b/fs/nfs/write.c
167     index a2a4865..331a5bb 100644
168     --- a/fs/nfs/write.c
169     +++ b/fs/nfs/write.c
170     @@ -717,6 +717,17 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
171     }
172    
173     /*
174     + * If the page cache is marked as unsafe or invalid, then we can't rely on
175     + * the PageUptodate() flag. In this case, we will need to turn off
176     + * write optimisations that depend on the page contents being correct.
177     + */
178     +static int nfs_write_pageuptodate(struct page *page, struct inode *inode)
179     +{
180     + return PageUptodate(page) &&
181     + !(NFS_I(inode)->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA));
182     +}
183     +
184     +/*
185     * Update and possibly write a cached page of an NFS file.
186     *
187     * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
188     @@ -737,10 +748,13 @@ int nfs_updatepage(struct file *file, struct page *page,
189     (long long)(page_offset(page) +offset));
190    
191     /* If we're not using byte range locks, and we know the page
192     - * is entirely in cache, it may be more efficient to avoid
193     - * fragmenting write requests.
194     + * is up to date, it may be more efficient to extend the write
195     + * to cover the entire page in order to avoid fragmentation
196     + * inefficiencies.
197     */
198     - if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_mode & O_SYNC)) {
199     + if (nfs_write_pageuptodate(page, inode) &&
200     + inode->i_flock == NULL &&
201     + !(file->f_mode & O_SYNC)) {
202     count = max(count + offset, nfs_page_length(page));
203     offset = 0;
204     }
205     diff --git a/include/asm-powerpc/pmac_feature.h b/include/asm-powerpc/pmac_feature.h
206     index 26bcb0a..877c35a 100644
207     --- a/include/asm-powerpc/pmac_feature.h
208     +++ b/include/asm-powerpc/pmac_feature.h
209     @@ -392,6 +392,14 @@ extern u32 __iomem *uninorth_base;
210     #define UN_BIS(r,v) (UN_OUT((r), UN_IN(r) | (v)))
211     #define UN_BIC(r,v) (UN_OUT((r), UN_IN(r) & ~(v)))
212    
213     +/* Uninorth variant:
214     + *
215     + * 0 = not uninorth
216     + * 1 = U1.x or U2.x
217     + * 3 = U3
218     + * 4 = U4
219     + */
220     +extern int pmac_get_uninorth_variant(void);
221    
222     #endif /* __ASM_POWERPC_PMAC_FEATURE_H */
223     #endif /* __KERNEL__ */
224     diff --git a/include/linux/ktime.h b/include/linux/ktime.h
225     index dae7143..15a0229 100644
226     --- a/include/linux/ktime.h
227     +++ b/include/linux/ktime.h
228     @@ -289,6 +289,8 @@ static inline ktime_t ktime_add_us(const ktime_t kt, const u64 usec)
229     return ktime_add_ns(kt, usec * 1000);
230     }
231    
232     +extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs);
233     +
234     /*
235     * The resolution of the clocks. The resolution value is returned in
236     * the clock_getres() system call to give application programmers an
237     diff --git a/kernel/futex.c b/kernel/futex.c
238     index b658a9a..0c55a58 100644
239     --- a/kernel/futex.c
240     +++ b/kernel/futex.c
241     @@ -2063,7 +2063,7 @@ asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
242    
243     t = timespec_to_ktime(ts);
244     if (cmd == FUTEX_WAIT)
245     - t = ktime_add(ktime_get(), t);
246     + t = ktime_add_safe(ktime_get(), t);
247     tp = &t;
248     }
249     /*
250     diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
251     index f938c23..bba74b6 100644
252     --- a/kernel/futex_compat.c
253     +++ b/kernel/futex_compat.c
254     @@ -175,7 +175,7 @@ asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
255    
256     t = timespec_to_ktime(ts);
257     if (cmd == FUTEX_WAIT)
258     - t = ktime_add(ktime_get(), t);
259     + t = ktime_add_safe(ktime_get(), t);
260     tp = &t;
261     }
262     if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE)
263     diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
264     index ee8d0ac..2ee0497 100644
265     --- a/kernel/hrtimer.c
266     +++ b/kernel/hrtimer.c
267     @@ -301,6 +301,24 @@ unsigned long ktime_divns(const ktime_t kt, s64 div)
268     }
269     #endif /* BITS_PER_LONG >= 64 */
270    
271     +/*
272     + * Add two ktime values and do a safety check for overflow:
273     + */
274     +
275     +ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
276     +{
277     + ktime_t res = ktime_add(lhs, rhs);
278     +
279     + /*
280     + * We use KTIME_SEC_MAX here, the maximum timeout which we can
281     + * return to user space in a timespec:
282     + */
283     + if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64)
284     + res = ktime_set(KTIME_SEC_MAX, 0);
285     +
286     + return res;
287     +}
288     +
289     /* High resolution timer related functions */
290     #ifdef CONFIG_HIGH_RES_TIMERS
291    
292     @@ -658,13 +676,7 @@ hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
293     */
294     orun++;
295     }
296     - timer->expires = ktime_add(timer->expires, interval);
297     - /*
298     - * Make sure, that the result did not wrap with a very large
299     - * interval.
300     - */
301     - if (timer->expires.tv64 < 0)
302     - timer->expires = ktime_set(KTIME_SEC_MAX, 0);
303     + timer->expires = ktime_add_safe(timer->expires, interval);
304    
305     return orun;
306     }
307     @@ -815,7 +827,7 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
308     new_base = switch_hrtimer_base(timer, base);
309    
310     if (mode == HRTIMER_MODE_REL) {
311     - tim = ktime_add(tim, new_base->get_time());
312     + tim = ktime_add_safe(tim, new_base->get_time());
313     /*
314     * CONFIG_TIME_LOW_RES is a temporary way for architectures
315     * to signal that they simply return xtime in
316     @@ -824,16 +836,8 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
317     * timeouts. This will go away with the GTOD framework.
318     */
319     #ifdef CONFIG_TIME_LOW_RES
320     - tim = ktime_add(tim, base->resolution);
321     + tim = ktime_add_safe(tim, base->resolution);
322     #endif
323     - /*
324     - * Careful here: User space might have asked for a
325     - * very long sleep, so the add above might result in a
326     - * negative number, which enqueues the timer in front
327     - * of the queue.
328     - */
329     - if (tim.tv64 < 0)
330     - tim.tv64 = KTIME_MAX;
331     }
332     timer->expires = tim;
333    
334     diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
335     index f1a73f0..7279484 100644
336     --- a/kernel/irq/chip.c
337     +++ b/kernel/irq/chip.c
338     @@ -246,6 +246,17 @@ static unsigned int default_startup(unsigned int irq)
339     }
340    
341     /*
342     + * default shutdown function
343     + */
344     +static void default_shutdown(unsigned int irq)
345     +{
346     + struct irq_desc *desc = irq_desc + irq;
347     +
348     + desc->chip->mask(irq);
349     + desc->status |= IRQ_MASKED;
350     +}
351     +
352     +/*
353     * Fixup enable/disable function pointers
354     */
355     void irq_chip_set_defaults(struct irq_chip *chip)
356     @@ -256,8 +267,15 @@ void irq_chip_set_defaults(struct irq_chip *chip)
357     chip->disable = default_disable;
358     if (!chip->startup)
359     chip->startup = default_startup;
360     + /*
361     + * We use chip->disable, when the user provided its own. When
362     + * we have default_disable set for chip->disable, then we need
363     + * to use default_shutdown, otherwise the irq line is not
364     + * disabled on free_irq():
365     + */
366     if (!chip->shutdown)
367     - chip->shutdown = chip->disable;
368     + chip->shutdown = chip->disable != default_disable ?
369     + chip->disable : default_shutdown;
370     if (!chip->name)
371     chip->name = chip->typename;
372     if (!chip->end)
373     diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
374     index 7a15afb..00c9e25 100644
375     --- a/kernel/posix-timers.c
376     +++ b/kernel/posix-timers.c
377     @@ -765,9 +765,11 @@ common_timer_set(struct k_itimer *timr, int flags,
378     /* SIGEV_NONE timers are not queued ! See common_timer_get */
379     if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) {
380     /* Setup correct expiry time for relative timers */
381     - if (mode == HRTIMER_MODE_REL)
382     - timer->expires = ktime_add(timer->expires,
383     - timer->base->get_time());
384     + if (mode == HRTIMER_MODE_REL) {
385     + timer->expires =
386     + ktime_add_safe(timer->expires,
387     + timer->base->get_time());
388     + }
389     return 0;
390     }
391    
392     diff --git a/mm/memory.c b/mm/memory.c
393     index f82b359..51a8691 100644
394     --- a/mm/memory.c
395     +++ b/mm/memory.c
396     @@ -981,6 +981,8 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
397     int i;
398     unsigned int vm_flags;
399    
400     + if (len <= 0)
401     + return 0;
402     /*
403     * Require read or write permissions.
404     * If 'force' is set, we only require the "MAY" flags.
405     diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
406     index 70c5b7d..09b902d 100644
407     --- a/net/netfilter/nf_conntrack_proto_tcp.c
408     +++ b/net/netfilter/nf_conntrack_proto_tcp.c
409     @@ -135,7 +135,7 @@ enum tcp_bit_set {
410     * CLOSE_WAIT: ACK seen (after FIN)
411     * LAST_ACK: FIN seen (after FIN)
412     * TIME_WAIT: last ACK seen
413     - * CLOSE: closed connection
414     + * CLOSE: closed connection (RST)
415     *
416     * LISTEN state is not used.
417     *
418     @@ -834,8 +834,21 @@ static int tcp_packet(struct nf_conn *conntrack,
419     case TCP_CONNTRACK_SYN_SENT:
420     if (old_state < TCP_CONNTRACK_TIME_WAIT)
421     break;
422     - if ((conntrack->proto.tcp.seen[!dir].flags &
423     - IP_CT_TCP_FLAG_CLOSE_INIT)
424     + /* RFC 1122: "When a connection is closed actively,
425     + * it MUST linger in TIME-WAIT state for a time 2xMSL
426     + * (Maximum Segment Lifetime). However, it MAY accept
427     + * a new SYN from the remote TCP to reopen the connection
428     + * directly from TIME-WAIT state, if..."
429     + * We ignore the conditions because we are in the
430     + * TIME-WAIT state anyway.
431     + *
432     + * Handle aborted connections: we and the server
433     + * think there is an existing connection but the client
434     + * aborts it and starts a new one.
435     + */
436     + if (((conntrack->proto.tcp.seen[dir].flags
437     + | conntrack->proto.tcp.seen[!dir].flags)
438     + & IP_CT_TCP_FLAG_CLOSE_INIT)
439     || (conntrack->proto.tcp.last_dir == dir
440     && conntrack->proto.tcp.last_index == TCP_RST_SET)) {
441     /* Attempt to reopen a closed/aborted connection.
442     @@ -850,16 +863,23 @@ static int tcp_packet(struct nf_conn *conntrack,
443     case TCP_CONNTRACK_IGNORE:
444     /* Ignored packets:
445     *
446     + * Our connection entry may be out of sync, so ignore
447     + * packets which may signal the real connection between
448     + * the client and the server.
449     + *
450     * a) SYN in ORIGINAL
451     * b) SYN/ACK in REPLY
452     * c) ACK in reply direction after initial SYN in original.
453     + *
454     + * If the ignored packet is invalid, the receiver will send
455     + * a RST we'll catch below.
456     */
457     if (index == TCP_SYNACK_SET
458     && conntrack->proto.tcp.last_index == TCP_SYN_SET
459     && conntrack->proto.tcp.last_dir != dir
460     && ntohl(th->ack_seq) ==
461     conntrack->proto.tcp.last_end) {
462     - /* This SYN/ACK acknowledges a SYN that we earlier
463     + /* b) This SYN/ACK acknowledges a SYN that we earlier
464     * ignored as invalid. This means that the client and
465     * the server are both in sync, while the firewall is
466     * not. We kill this session and block the SYN/ACK so
467     @@ -884,7 +904,7 @@ static int tcp_packet(struct nf_conn *conntrack,
468     write_unlock_bh(&tcp_lock);
469     if (LOG_INVALID(IPPROTO_TCP))
470     nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
471     - "nf_ct_tcp: invalid packed ignored ");
472     + "nf_ct_tcp: invalid packet ignored ");
473     return NF_ACCEPT;
474     case TCP_CONNTRACK_MAX:
475     /* Invalid packet */
476     @@ -938,8 +958,7 @@ static int tcp_packet(struct nf_conn *conntrack,
477    
478     conntrack->proto.tcp.state = new_state;
479     if (old_state != new_state
480     - && (new_state == TCP_CONNTRACK_FIN_WAIT
481     - || new_state == TCP_CONNTRACK_CLOSE))
482     + && new_state == TCP_CONNTRACK_FIN_WAIT)
483     conntrack->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;
484     timeout = conntrack->proto.tcp.retrans >= nf_ct_tcp_max_retrans
485     && *tcp_timeouts[new_state] > nf_ct_tcp_timeout_max_retrans