Magellan Linux

Annotation of /trunk/kernel26-magellan/patches-2.6.21-r3/0101-2.6.21.2-all-fixes.patch



Revision 220
Sun Jun 10 22:40:30 2007 UTC by niro
File size: 83696 bytes
files for 2.6.21-magellan-r3

1 niro 220 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
2     index 2409560..7ed141f 100644
3     --- a/arch/arm/kernel/traps.c
4     +++ b/arch/arm/kernel/traps.c
5     @@ -273,6 +273,7 @@ asmlinkage void do_undefinstr(struct pt_regs *regs)
6     struct undef_hook *hook;
7     siginfo_t info;
8     void __user *pc;
9     + unsigned long flags;
10    
11     /*
12     * According to the ARM ARM, PC is 2 or 4 bytes ahead,
13     @@ -291,7 +292,7 @@ asmlinkage void do_undefinstr(struct pt_regs *regs)
14     get_user(instr, (u32 __user *)pc);
15     }
16    
17     - spin_lock_irq(&undef_lock);
18     + spin_lock_irqsave(&undef_lock, flags);
19     list_for_each_entry(hook, &undef_hook, node) {
20     if ((instr & hook->instr_mask) == hook->instr_val &&
21     (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val) {
22     @@ -301,7 +302,7 @@ asmlinkage void do_undefinstr(struct pt_regs *regs)
23     }
24     }
25     }
26     - spin_unlock_irq(&undef_lock);
27     + spin_unlock_irqrestore(&undef_lock, flags);
28    
29     #ifdef CONFIG_DEBUG_USER
30     if (user_debug & UDBG_UNDEFINED) {
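
The hunk above trades spin_lock_irq() for spin_lock_irqsave() because do_undefinstr() may already run with IRQs disabled; spin_unlock_irq() would unconditionally re-enable them behind the caller's back. A minimal sketch of the pattern, with an illustrative lock and helper that are not part of the patch:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static void demo_touch_shared_state(void)
{
	unsigned long flags;

	/* save the caller's IRQ state and take the lock with IRQs off */
	spin_lock_irqsave(&demo_lock, flags);
	/* ... touch the data guarded by demo_lock ... */
	spin_unlock_irqrestore(&demo_lock, flags);
	/* the caller's original IRQ state is restored, not forced on */
}
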
31     diff --git a/arch/arm/mach-iop13xx/pci.c b/arch/arm/mach-iop13xx/pci.c
32     index 89ec70e..d907a2a 100644
33     --- a/arch/arm/mach-iop13xx/pci.c
34     +++ b/arch/arm/mach-iop13xx/pci.c
35     @@ -1023,7 +1023,7 @@ int iop13xx_pci_setup(int nr, struct pci_sys_data *sys)
36     << IOP13XX_ATUX_PCIXSR_FUNC_NUM;
37     __raw_writel(pcixsr, IOP13XX_ATUX_PCIXSR);
38    
39     - res[0].start = IOP13XX_PCIX_LOWER_IO_PA;
40     + res[0].start = IOP13XX_PCIX_LOWER_IO_PA + IOP13XX_PCIX_IO_BUS_OFFSET;
41     res[0].end = IOP13XX_PCIX_UPPER_IO_PA;
42     res[0].name = "IQ81340 ATUX PCI I/O Space";
43     res[0].flags = IORESOURCE_IO;
44     @@ -1033,7 +1033,7 @@ int iop13xx_pci_setup(int nr, struct pci_sys_data *sys)
45     res[1].name = "IQ81340 ATUX PCI Memory Space";
46     res[1].flags = IORESOURCE_MEM;
47     sys->mem_offset = IOP13XX_PCIX_MEM_OFFSET;
48     - sys->io_offset = IOP13XX_PCIX_IO_OFFSET;
49     + sys->io_offset = IOP13XX_PCIX_LOWER_IO_PA;
50     break;
51     case IOP13XX_INIT_ATU_ATUE:
52     /* Note: the function number field in the PCSR is ro */
53     @@ -1044,7 +1044,7 @@ int iop13xx_pci_setup(int nr, struct pci_sys_data *sys)
54    
55     __raw_writel(pcsr, IOP13XX_ATUE_PCSR);
56    
57     - res[0].start = IOP13XX_PCIE_LOWER_IO_PA;
58     + res[0].start = IOP13XX_PCIE_LOWER_IO_PA + IOP13XX_PCIE_IO_BUS_OFFSET;
59     res[0].end = IOP13XX_PCIE_UPPER_IO_PA;
60     res[0].name = "IQ81340 ATUE PCI I/O Space";
61     res[0].flags = IORESOURCE_IO;
62     @@ -1054,7 +1054,7 @@ int iop13xx_pci_setup(int nr, struct pci_sys_data *sys)
63     res[1].name = "IQ81340 ATUE PCI Memory Space";
64     res[1].flags = IORESOURCE_MEM;
65     sys->mem_offset = IOP13XX_PCIE_MEM_OFFSET;
66     - sys->io_offset = IOP13XX_PCIE_IO_OFFSET;
67     + sys->io_offset = IOP13XX_PCIE_LOWER_IO_PA;
68     sys->map_irq = iop13xx_pcie_map_irq;
69     break;
70     default:
71     diff --git a/arch/arm/plat-iop/time.c b/arch/arm/plat-iop/time.c
72     index 16300ad..0cc26da 100644
73     --- a/arch/arm/plat-iop/time.c
74     +++ b/arch/arm/plat-iop/time.c
75     @@ -32,22 +32,22 @@ static unsigned long next_jiffy_time;
76    
77     unsigned long iop_gettimeoffset(void)
78     {
79     - unsigned long offset, temp1, temp2;
80     + unsigned long offset, temp;
81    
82     /* enable cp6, if necessary, to avoid taking the overhead of an
83     * undefined instruction trap
84     */
85     asm volatile (
86     "mrc p15, 0, %0, c15, c1, 0\n\t"
87     - "ands %1, %0, #(1 << 6)\n\t"
88     + "tst %0, #(1 << 6)\n\t"
89     "orreq %0, %0, #(1 << 6)\n\t"
90     "mcreq p15, 0, %0, c15, c1, 0\n\t"
91     -#ifdef CONFIG_XSCALE
92     +#ifdef CONFIG_CPU_XSCALE
93     "mrceq p15, 0, %0, c15, c1, 0\n\t"
94     "moveq %0, %0\n\t"
95     "subeq pc, pc, #4\n\t"
96     #endif
97     - : "=r"(temp1), "=r"(temp2) : : "cc");
98     + : "=r"(temp) : : "cc");
99    
100     offset = next_jiffy_time - read_tcr1();
101    
102     diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k7.c b/arch/i386/kernel/cpu/cpufreq/powernow-k7.c
103     index 837b041..ca3e1d3 100644
104     --- a/arch/i386/kernel/cpu/cpufreq/powernow-k7.c
105     +++ b/arch/i386/kernel/cpu/cpufreq/powernow-k7.c
106     @@ -341,15 +341,17 @@ static int powernow_acpi_init(void)
107     pc.val = (unsigned long) acpi_processor_perf->states[0].control;
108     for (i = 0; i < number_scales; i++) {
109     u8 fid, vid;
110     - unsigned int speed;
111     + struct acpi_processor_px *state =
112     + &acpi_processor_perf->states[i];
113     + unsigned int speed, speed_mhz;
114    
115     - pc.val = (unsigned long) acpi_processor_perf->states[i].control;
116     + pc.val = (unsigned long) state->control;
117     dprintk ("acpi: P%d: %d MHz %d mW %d uS control %08x SGTC %d\n",
118     i,
119     - (u32) acpi_processor_perf->states[i].core_frequency,
120     - (u32) acpi_processor_perf->states[i].power,
121     - (u32) acpi_processor_perf->states[i].transition_latency,
122     - (u32) acpi_processor_perf->states[i].control,
123     + (u32) state->core_frequency,
124     + (u32) state->power,
125     + (u32) state->transition_latency,
126     + (u32) state->control,
127     pc.bits.sgtc);
128    
129     vid = pc.bits.vid;
130     @@ -360,6 +362,18 @@ static int powernow_acpi_init(void)
131     powernow_table[i].index |= (vid << 8); /* upper 8 bits */
132    
133     speed = powernow_table[i].frequency;
134     + speed_mhz = speed / 1000;
135     +
136     + /* processor_perflib will multiply the MHz value by 1000 to
137     + * get a KHz value (e.g. 1266000). However, powernow-k7 works
138     + * with true KHz values (e.g. 1266768). To ensure that all
139     + * powernow frequencies are available, we must ensure that
140     + * ACPI doesn't restrict them, so we round up the MHz value
141     + * to ensure that perflib's computed KHz value is greater than
142     + * or equal to powernow's KHz value.
143     + */
144     + if (speed % 1000 > 0)
145     + speed_mhz++;
146    
147     if ((fid_codes[fid] % 10)==5) {
148     if (have_a0 == 1)
149     @@ -368,10 +382,16 @@ static int powernow_acpi_init(void)
150    
151     dprintk (" FID: 0x%x (%d.%dx [%dMHz]) "
152     "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10,
153     - fid_codes[fid] % 10, speed/1000, vid,
154     + fid_codes[fid] % 10, speed_mhz, vid,
155     mobile_vid_table[vid]/1000,
156     mobile_vid_table[vid]%1000);
157    
158     + if (state->core_frequency != speed_mhz) {
159     + state->core_frequency = speed_mhz;
160     + dprintk(" Corrected ACPI frequency to %d\n",
161     + speed_mhz);
162     + }
163     +
164     if (latency < pc.bits.sgtc)
165     latency = pc.bits.sgtc;
166    
167     @@ -602,7 +622,7 @@ static int __init powernow_cpu_init (struct cpufreq_policy *policy)
168     result = powernow_acpi_init();
169     if (result) {
170     printk (KERN_INFO PFX "ACPI and legacy methods failed\n");
171     - printk (KERN_INFO PFX "See http://www.codemonkey.org.uk/projects/cpufreq/powernow-k7.shtml\n");
172     + printk (KERN_INFO PFX "See http://www.codemonkey.org.uk/projects/cpufreq/powernow-k7.html\n");
173     }
174     } else {
175     /* SGTC use the bus clock as timer */
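
The comment added in this hunk carries the reasoning: perflib reports MHz and multiplies by 1000, while powernow-k7 tracks exact kHz, so the MHz value must be rounded up or ACPI would filter out valid P-states. The arithmetic in isolation, with example values:

	unsigned int khz = 1266768;     /* true powernow frequency in kHz */
	unsigned int mhz = khz / 1000;  /* truncates to 1266 */

	if (khz % 1000)                 /* a remainder means we truncated */
		mhz++;                  /* round up to 1267 */

	/* perflib computes 1267 * 1000 = 1267000 >= 1266768, so the
	 * powernow frequency is no longer excluded */
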
176     diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
177     index fe3b670..e295d87 100644
178     --- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
179     +++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
180     @@ -521,7 +521,7 @@ static int check_supported_cpu(unsigned int cpu)
181    
182     if ((eax & CPUID_XFAM) == CPUID_XFAM_K8) {
183     if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) ||
184     - ((eax & CPUID_XMOD) > CPUID_XMOD_REV_G)) {
185     + ((eax & CPUID_XMOD) > CPUID_XMOD_REV_MASK)) {
186     printk(KERN_INFO PFX "Processor cpuid %x not supported\n", eax);
187     goto out;
188     }
189     diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.h b/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
190     index 0fb2a30..575541f 100644
191     --- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
192     +++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
193     @@ -46,8 +46,8 @@ struct powernow_k8_data {
194     #define CPUID_XFAM 0x0ff00000 /* extended family */
195     #define CPUID_XFAM_K8 0
196     #define CPUID_XMOD 0x000f0000 /* extended model */
197     -#define CPUID_XMOD_REV_G 0x00060000
198     -#define CPUID_XFAM_10H 0x00100000 /* family 0x10 */
199     +#define CPUID_XMOD_REV_MASK 0x00080000
200     +#define CPUID_XFAM_10H 0x00100000 /* family 0x10 */
201     #define CPUID_USE_XFAM_XMOD 0x00000f00
202     #define CPUID_GET_MAX_CAPABILITIES 0x80000000
203     #define CPUID_FREQ_VOLT_CAPABILITIES 0x80000007
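
Replacing CPUID_XMOD_REV_G (0x00060000) with CPUID_XMOD_REV_MASK (0x00080000) widens the supported-CPU test: K8 parts with extended model 7 or 8 no longer hit the "cpuid not supported" bail-out. A sketch of the field being compared, with a made-up cpuid value:

	unsigned int eax  = 0x00070fb1;               /* hypothetical cpuid(1) output */
	unsigned int xmod = (eax & 0x000f0000) >> 16; /* extended model = 7 */

	/* old: (eax & CPUID_XMOD) > 0x00060000 rejected xmod 7 and 8 */
	/* new: (eax & CPUID_XMOD) > 0x00080000 accepts them          */
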
204     diff --git a/arch/sparc64/kernel/of_device.c b/arch/sparc64/kernel/of_device.c
205     index fb9bf1e..f56569f 100644
206     --- a/arch/sparc64/kernel/of_device.c
207     +++ b/arch/sparc64/kernel/of_device.c
208     @@ -508,6 +508,13 @@ static int __init build_one_resource(struct device_node *parent,
209     return 0;
210     }
211    
212     + /* When we miss an I/O space match on PCI, just pass it up
213     + * to the next PCI bridge and/or controller.
214     + */
215     + if (!strcmp(bus->name, "pci") &&
216     + (addr[0] & 0x03000000) == 0x01000000)
217     + return 0;
218     +
219     return 1;
220     }
221    
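
The constants test the address-space selector of an Open Firmware PCI address: bits 24-25 of phys.hi encode the space (0b00 config, 0b01 I/O, 0b10 32-bit memory, 0b11 64-bit memory), so 0x01000000 picks out I/O-space entries. A small decode sketch; the macro names are illustrative, not from the kernel:

#define OF_PCI_SPACE_MASK 0x03000000u
#define OF_PCI_SPACE_IO   0x01000000u

static int of_pci_addr_is_io(const unsigned int *addr)
{
	/* addr[0] is the phys.hi cell of the OF PCI address */
	return (addr[0] & OF_PCI_SPACE_MASK) == OF_PCI_SPACE_IO;
}
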
222     diff --git a/arch/sparc64/kernel/prom.c b/arch/sparc64/kernel/prom.c
223     index 0917c24..3494adf 100644
224     --- a/arch/sparc64/kernel/prom.c
225     +++ b/arch/sparc64/kernel/prom.c
226     @@ -1555,10 +1555,21 @@ static struct device_node * __init create_node(phandle node, struct device_node
227    
228     static struct device_node * __init build_tree(struct device_node *parent, phandle node, struct device_node ***nextp)
229     {
230     + struct device_node *ret = NULL, *prev_sibling = NULL;
231     struct device_node *dp;
232    
233     - dp = create_node(node, parent);
234     - if (dp) {
235     + while (1) {
236     + dp = create_node(node, parent);
237     + if (!dp)
238     + break;
239     +
240     + if (prev_sibling)
241     + prev_sibling->sibling = dp;
242     +
243     + if (!ret)
244     + ret = dp;
245     + prev_sibling = dp;
246     +
247     *(*nextp) = dp;
248     *nextp = &dp->allnext;
249    
250     @@ -1567,10 +1578,10 @@ static struct device_node * __init build_tree(struct device_node *parent, phandl
251    
252     dp->child = build_tree(dp, prom_getchild(node), nextp);
253    
254     - dp->sibling = build_tree(parent, prom_getsibling(node), nextp);
255     + node = prom_getsibling(node);
256     }
257    
258     - return dp;
259     + return ret;
260     }
261    
262     void __init prom_build_devicetree(void)
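
The build_tree() rewrite turns sibling recursion into iteration: recursing once per sibling makes kernel stack depth proportional to the longest sibling chain, which can overflow on large machines, whereas the loop leaves depth proportional only to tree depth. The transformation in generic form (struct node and the prom-style helpers are stand-ins, not the kernel's types):

struct node { struct node *child, *sibling; };

static struct node *build(struct node *parent, int handle)
{
	struct node *head = NULL, *prev = NULL, *n;

	while ((n = node_create(handle, parent)) != NULL) {
		if (prev)
			prev->sibling = n;  /* chain onto the previous sibling */
		else
			head = n;           /* remember the first child */
		prev = n;

		n->child = build(n, first_child_of(handle)); /* recurse down only */
		handle = next_sibling_of(handle);            /* iterate sideways */
	}
	return head;
}
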
263     diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
264     index fc99f7b..8ad7bdb 100644
265     --- a/arch/sparc64/kernel/smp.c
266     +++ b/arch/sparc64/kernel/smp.c
267     @@ -566,6 +566,9 @@ static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t
268     unsigned long flags, status;
269     int cnt, retries, this_cpu, prev_sent, i;
270    
271     + if (cpus_empty(mask))
272     + return;
273     +
274     /* We have to do this whole thing with interrupts fully disabled.
275     * Otherwise if we send an xcall from interrupt context it will
276     * corrupt both our mondo block and cpu list state.
277     diff --git a/arch/x86_64/kernel/vsyscall.c b/arch/x86_64/kernel/vsyscall.c
278     index b43c698..fc9f042 100644
279     --- a/arch/x86_64/kernel/vsyscall.c
280     +++ b/arch/x86_64/kernel/vsyscall.c
281     @@ -132,7 +132,7 @@ static __always_inline void do_vgettimeofday(struct timeval * tv)
282    
283     /* convert to usecs and add to timespec: */
284     tv->tv_usec += nsec_delta / NSEC_PER_USEC;
285     - while (tv->tv_usec > USEC_PER_SEC) {
286     + while (tv->tv_usec >= USEC_PER_SEC) {
287     tv->tv_sec += 1;
288     tv->tv_usec -= USEC_PER_SEC;
289     }
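
The one-character change is a boundary fix: with '>', tv_usec could be handed back as exactly 1000000, an out-of-range microsecond value; '>=' folds every whole second into tv_sec. Standalone:

#define USEC_PER_SEC 1000000L

static void normalize_tv(long *sec, long *usec)
{
	while (*usec >= USEC_PER_SEC) {  /* '>=', not '>' */
		*sec  += 1;
		*usec -= USEC_PER_SEC;
	}
	/* postcondition: 0 <= *usec <= 999999 */
}
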
290     diff --git a/crypto/api.c b/crypto/api.c
291     index 55af8bb..33734fd 100644
292     --- a/crypto/api.c
293     +++ b/crypto/api.c
294     @@ -48,8 +48,10 @@ EXPORT_SYMBOL_GPL(crypto_mod_get);
295    
296     void crypto_mod_put(struct crypto_alg *alg)
297     {
298     + struct module *module = alg->cra_module;
299     +
300     crypto_alg_put(alg);
301     - module_put(alg->cra_module);
302     + module_put(module);
303     }
304     EXPORT_SYMBOL_GPL(crypto_mod_put);
305    
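
crypto_alg_put() can drop the last reference and free *alg, after which reading alg->cra_module is a use-after-free; the fix caches the module pointer first. The same ordering rule, reduced to its essentials (the obj type and helpers are illustrative):

struct obj { struct module *owner; /* refcounted */ };

void obj_put(struct obj *o)
{
	struct module *owner = o->owner;  /* read while o is still live */

	obj_ref_put(o);                   /* may free o */
	module_put(owner);                /* safe: uses the cached copy */
}
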
306     diff --git a/drivers/acpi/tables/tbfadt.c b/drivers/acpi/tables/tbfadt.c
307     index 807c711..d341491 100644
308     --- a/drivers/acpi/tables/tbfadt.c
309     +++ b/drivers/acpi/tables/tbfadt.c
310     @@ -347,6 +347,20 @@ static void acpi_tb_convert_fadt(void)
311     acpi_gbl_xpm1b_enable.space_id = acpi_gbl_FADT.xpm1a_event_block.space_id;
312    
313     }
314     + /*
315     + * _CST object and C States change notification start with
316     + * ACPI 2.0 (FADT r3). Although the field should be Reserved
317     + * and 0 before then, some pre-r3 FADT set this field and
318     + * it results in SMM-related boot failures. For them, clear it.
319     + */
320     + if ((acpi_gbl_FADT.header.revision < 3) &&
321     + (acpi_gbl_FADT.cst_control != 0)) {
322     + ACPI_WARNING((AE_INFO,
323     + "Ignoring BIOS FADT r%u C-state control",
324     + acpi_gbl_FADT.header.revision));
325     + acpi_gbl_FADT.cst_control = 0;
326     + }
327     +
328     }
329    
330     /******************************************************************************
331     diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
332     index 2ffcca0..4d63974 100644
333     --- a/drivers/ata/libata-sff.c
334     +++ b/drivers/ata/libata-sff.c
335     @@ -557,12 +557,30 @@ ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int
336     int i, p = 0;
337     void __iomem * const *iomap;
338    
339     + /* Discard disabled ports. Some controllers show their
340     + unused channels this way */
341     + if (ata_resources_present(pdev, 0) == 0)
342     + ports &= ~ATA_PORT_PRIMARY;
343     + if (ata_resources_present(pdev, 1) == 0)
344     + ports &= ~ATA_PORT_SECONDARY;
345     +
346     /* iomap BARs */
347     - for (i = 0; i < 4; i++) {
348     - if (pcim_iomap(pdev, i, 0) == NULL) {
349     - dev_printk(KERN_ERR, &pdev->dev,
350     - "failed to iomap PCI BAR %d\n", i);
351     - return NULL;
352     + if (ports & ATA_PORT_PRIMARY) {
353     + for (i = 0; i <= 1; i++) {
354     + if (pcim_iomap(pdev, i, 0) == NULL) {
355     + dev_printk(KERN_ERR, &pdev->dev,
356     + "failed to iomap PCI BAR %d\n", i);
357     + return NULL;
358     + }
359     + }
360     + }
361     + if (ports & ATA_PORT_SECONDARY) {
362     + for (i = 2; i <= 3; i++) {
363     + if (pcim_iomap(pdev, i, 0) == NULL) {
364     + dev_printk(KERN_ERR, &pdev->dev,
365     + "failed to iomap PCI BAR %d\n", i);
366     + return NULL;
367     + }
368     }
369     }
370    
371     @@ -577,13 +595,6 @@ ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int
372     probe_ent->irq = pdev->irq;
373     probe_ent->irq_flags = IRQF_SHARED;
374    
375     - /* Discard disabled ports. Some controllers show their
376     - unused channels this way */
377     - if (ata_resources_present(pdev, 0) == 0)
378     - ports &= ~ATA_PORT_PRIMARY;
379     - if (ata_resources_present(pdev, 1) == 0)
380     - ports &= ~ATA_PORT_SECONDARY;
381     -
382     if (ports & ATA_PORT_PRIMARY) {
383     probe_ent->port[p].cmd_addr = iomap[0];
384     probe_ent->port[p].altstatus_addr =
385     diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
386     index 598e6a2..ea6efca 100644
387     --- a/drivers/ata/sata_via.c
388     +++ b/drivers/ata/sata_via.c
389     @@ -97,6 +97,10 @@ static struct pci_driver svia_pci_driver = {
390     .name = DRV_NAME,
391     .id_table = svia_pci_tbl,
392     .probe = svia_init_one,
393     +#ifdef CONFIG_PM
394     + .suspend = ata_pci_device_suspend,
395     + .resume = ata_pci_device_resume,
396     +#endif
397     .remove = ata_pci_remove_one,
398     };
399    
400     @@ -116,6 +120,10 @@ static struct scsi_host_template svia_sht = {
401     .slave_configure = ata_scsi_slave_config,
402     .slave_destroy = ata_scsi_slave_destroy,
403     .bios_param = ata_std_bios_param,
404     +#ifdef CONFIG_PM
405     + .suspend = ata_scsi_device_suspend,
406     + .resume = ata_scsi_device_resume,
407     +#endif
408     };
409    
410     static const struct ata_port_operations vt6420_sata_ops = {
411     diff --git a/drivers/base/core.c b/drivers/base/core.c
412     index d7fcf82..a8dfee2 100644
413     --- a/drivers/base/core.c
414     +++ b/drivers/base/core.c
415     @@ -93,6 +93,9 @@ static void device_release(struct kobject * kobj)
416     {
417     struct device * dev = to_dev(kobj);
418    
419     + kfree(dev->devt_attr);
420     + dev->devt_attr = NULL;
421     +
422     if (dev->release)
423     dev->release(dev);
424     else if (dev->type && dev->type->release)
425     @@ -765,10 +768,8 @@ void device_del(struct device * dev)
426    
427     if (parent)
428     klist_del(&dev->knode_parent);
429     - if (dev->devt_attr) {
430     + if (dev->devt_attr)
431     device_remove_file(dev, dev->devt_attr);
432     - kfree(dev->devt_attr);
433     - }
434     if (dev->class) {
435     sysfs_remove_link(&dev->kobj, "subsystem");
436     /* If this is not a "fake" compatible device, remove the
437     diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
438     index e221465..cc13ebc 100644
439     --- a/drivers/char/ipmi/ipmi_si_intf.c
440     +++ b/drivers/char/ipmi/ipmi_si_intf.c
441     @@ -1859,10 +1859,10 @@ static __devinit int try_init_acpi(struct SPMITable *spmi)
442    
443     if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
444     info->io_setup = mem_setup;
445     - info->io.addr_type = IPMI_IO_ADDR_SPACE;
446     + info->io.addr_type = IPMI_MEM_ADDR_SPACE;
447     } else if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
448     info->io_setup = port_setup;
449     - info->io.addr_type = IPMI_MEM_ADDR_SPACE;
450     + info->io.addr_type = IPMI_IO_ADDR_SPACE;
451     } else {
452     kfree(info);
453     printk("ipmi_si: Unknown ACPI I/O Address type\n");
454     diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
455     index 97ee870..3a95cc5 100644
456     --- a/drivers/md/raid1.c
457     +++ b/drivers/md/raid1.c
458     @@ -271,21 +271,25 @@ static int raid1_end_read_request(struct bio *bio, unsigned int bytes_done, int
459     */
460     update_head_pos(mirror, r1_bio);
461    
462     - if (uptodate || (conf->raid_disks - conf->mddev->degraded) <= 1) {
463     - /*
464     - * Set R1BIO_Uptodate in our master bio, so that
465     - * we will return a good error code for to the higher
466     - * levels even if IO on some other mirrored buffer fails.
467     - *
468     - * The 'master' represents the composite IO operation to
469     - * user-side. So if something waits for IO, then it will
470     - * wait for the 'master' bio.
471     + if (uptodate)
472     + set_bit(R1BIO_Uptodate, &r1_bio->state);
473     + else {
474     + /* If all other devices have failed, we want to return
475     + * the error upwards rather than fail the last device.
476     + * Here we redefine "uptodate" to mean "Don't want to retry"
477     */
478     - if (uptodate)
479     - set_bit(R1BIO_Uptodate, &r1_bio->state);
480     + unsigned long flags;
481     + spin_lock_irqsave(&conf->device_lock, flags);
482     + if (r1_bio->mddev->degraded == conf->raid_disks ||
483     + (r1_bio->mddev->degraded == conf->raid_disks-1 &&
484     + !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
485     + uptodate = 1;
486     + spin_unlock_irqrestore(&conf->device_lock, flags);
487     + }
488    
489     + if (uptodate)
490     raid_end_bio_io(r1_bio);
491     - } else {
492     + else {
493     /*
494     * oops, read error:
495     */
496     @@ -992,13 +996,14 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
497     unsigned long flags;
498     spin_lock_irqsave(&conf->device_lock, flags);
499     mddev->degraded++;
500     + set_bit(Faulty, &rdev->flags);
501     spin_unlock_irqrestore(&conf->device_lock, flags);
502     /*
503     * if recovery is running, make sure it aborts.
504     */
505     set_bit(MD_RECOVERY_ERR, &mddev->recovery);
506     - }
507     - set_bit(Faulty, &rdev->flags);
508     + } else
509     + set_bit(Faulty, &rdev->flags);
510     set_bit(MD_CHANGE_DEVS, &mddev->flags);
511     printk(KERN_ALERT "raid1: Disk failure on %s, disabling device. \n"
512     " Operation continuing on %d devices\n",
513     diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
514     index 85f21b5..2eb5741 100644
515     --- a/drivers/message/fusion/mptspi.c
516     +++ b/drivers/message/fusion/mptspi.c
517     @@ -726,13 +726,15 @@ static int mptspi_slave_configure(struct scsi_device *sdev)
518     struct _MPT_SCSI_HOST *hd =
519     (struct _MPT_SCSI_HOST *)sdev->host->hostdata;
520     VirtTarget *vtarget = scsi_target(sdev)->hostdata;
521     - int ret = mptscsih_slave_configure(sdev);
522     + int ret;
523     +
524     + mptspi_initTarget(hd, vtarget, sdev);
525     +
526     + ret = mptscsih_slave_configure(sdev);
527    
528     if (ret)
529     return ret;
530    
531     - mptspi_initTarget(hd, vtarget, sdev);
532     -
533     ddvprintk((MYIOC_s_INFO_FMT "id=%d min_period=0x%02x"
534     " max_offset=0x%02x max_width=%d\n", hd->ioc->name,
535     sdev->id, spi_min_period(scsi_target(sdev)),
536     diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
537     index a3d46ea..32a3003 100644
538     --- a/drivers/net/Kconfig
539     +++ b/drivers/net/Kconfig
540     @@ -2929,11 +2929,6 @@ endif #NETDEVICES
541     config NETPOLL
542     def_bool NETCONSOLE
543    
544     -config NETPOLL_RX
545     - bool "Netpoll support for trapping incoming packets"
546     - default n
547     - depends on NETPOLL
548     -
549     config NETPOLL_TRAP
550     bool "Netpoll traffic trapping"
551     default n
552     diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
553     index e85f5ec..5006c67 100644
554     --- a/drivers/net/bnx2.c
555     +++ b/drivers/net/bnx2.c
556     @@ -54,8 +54,8 @@
557    
558     #define DRV_MODULE_NAME "bnx2"
559     #define PFX DRV_MODULE_NAME ": "
560     -#define DRV_MODULE_VERSION "1.5.8"
561     -#define DRV_MODULE_RELDATE "April 24, 2007"
562     +#define DRV_MODULE_VERSION "1.5.8.1"
563     +#define DRV_MODULE_RELDATE "May 7, 2007"
564    
565     #define RUN_AT(x) (jiffies + (x))
566    
567     @@ -4510,8 +4510,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
568     vlan_tag_flags |=
569     (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
570     }
571     - if ((mss = skb_shinfo(skb)->gso_size) &&
572     - (skb->len > (bp->dev->mtu + ETH_HLEN))) {
573     + if ((mss = skb_shinfo(skb)->gso_size)) {
574     u32 tcp_opt_len, ip_tcp_len;
575    
576     if (skb_header_cloned(skb) &&
577     @@ -5565,6 +5564,9 @@ bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
578     case SIOCGMIIREG: {
579     u32 mii_regval;
580    
581     + if (!netif_running(dev))
582     + return -EAGAIN;
583     +
584     spin_lock_bh(&bp->phy_lock);
585     err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
586     spin_unlock_bh(&bp->phy_lock);
587     @@ -5578,6 +5580,9 @@ bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
588     if (!capable(CAP_NET_ADMIN))
589     return -EPERM;
590    
591     + if (!netif_running(dev))
592     + return -EAGAIN;
593     +
594     spin_lock_bh(&bp->phy_lock);
595     err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
596     spin_unlock_bh(&bp->phy_lock);
597     @@ -6143,6 +6148,7 @@ bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
598     reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
599     bnx2_reset_chip(bp, reset_code);
600     bnx2_free_skbs(bp);
601     + pci_save_state(pdev);
602     bnx2_set_power_state(bp, pci_choose_state(pdev, state));
603     return 0;
604     }
605     @@ -6156,6 +6162,7 @@ bnx2_resume(struct pci_dev *pdev)
606     if (!netif_running(dev))
607     return 0;
608    
609     + pci_restore_state(pdev);
610     bnx2_set_power_state(bp, PCI_D0);
611     netif_device_attach(dev);
612     bnx2_init_nic(bp);
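
Both power-management hunks pair pci_save_state() on suspend with pci_restore_state() on resume; without the save, the resume path replays stale config space (BARs, MSI setup) into the device. A hedged sketch of the usual ordering (bnx2 itself routes the power transition through its own bnx2_set_power_state() helper):

#include <linux/pci.h>

static int demo_suspend(struct pci_dev *pdev, pm_message_t state)
{
	/* quiesce the device first, then: */
	pci_save_state(pdev);              /* snapshot config space */
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int demo_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);           /* replay the snapshot */
	/* then re-initialise the device */
	return 0;
}
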
613     diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
614     index b2a3b19..ce547af 100644
615     --- a/drivers/net/sis900.c
616     +++ b/drivers/net/sis900.c
617     @@ -1754,6 +1754,7 @@ static int sis900_rx(struct net_device *net_dev)
618     sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
619     } else {
620     struct sk_buff * skb;
621     + struct sk_buff * rx_skb;
622    
623     pci_unmap_single(sis_priv->pci_dev,
624     sis_priv->rx_ring[entry].bufptr, RX_BUF_SIZE,
625     @@ -1787,10 +1788,10 @@ static int sis900_rx(struct net_device *net_dev)
626     }
627    
628     /* give the socket buffer to upper layers */
629     - skb = sis_priv->rx_skbuff[entry];
630     - skb_put(skb, rx_size);
631     - skb->protocol = eth_type_trans(skb, net_dev);
632     - netif_rx(skb);
633     + rx_skb = sis_priv->rx_skbuff[entry];
634     + skb_put(rx_skb, rx_size);
635     + rx_skb->protocol = eth_type_trans(rx_skb, net_dev);
636     + netif_rx(rx_skb);
637    
638     /* some network statistics */
639     if ((rx_status & BCAST) == MCAST)
640     diff --git a/drivers/net/skge.c b/drivers/net/skge.c
641     index d476a3c..5ef9023 100644
642     --- a/drivers/net/skge.c
643     +++ b/drivers/net/skge.c
644     @@ -135,10 +135,13 @@ static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
645     /* Wake on Lan only supported on Yukon chips with rev 1 or above */
646     static u32 wol_supported(const struct skge_hw *hw)
647     {
648     - if (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev != 0)
649     - return WAKE_MAGIC | WAKE_PHY;
650     - else
651     + if (hw->chip_id == CHIP_ID_GENESIS)
652     + return 0;
653     +
654     + if (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)
655     return 0;
656     +
657     + return WAKE_MAGIC | WAKE_PHY;
658     }
659    
660     static u32 pci_wake_enabled(struct pci_dev *dev)
661     @@ -3583,7 +3586,9 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
662     skge->duplex = -1;
663     skge->speed = -1;
664     skge->advertising = skge_supported_modes(hw);
665     - skge->wol = pci_wake_enabled(hw->pdev) ? wol_supported(hw) : 0;
666     +
667     + if (pci_wake_enabled(hw->pdev))
668     + skge->wol = wol_supported(hw) & WAKE_MAGIC;
669    
670     hw->dev[port] = dev;
671    
672     @@ -3789,6 +3794,9 @@ static int skge_suspend(struct pci_dev *pdev, pm_message_t state)
673     struct skge_hw *hw = pci_get_drvdata(pdev);
674     int i, err, wol = 0;
675    
676     + if (!hw)
677     + return 0;
678     +
679     err = pci_save_state(pdev);
680     if (err)
681     return err;
682     @@ -3817,6 +3825,9 @@ static int skge_resume(struct pci_dev *pdev)
683     struct skge_hw *hw = pci_get_drvdata(pdev);
684     int i, err;
685    
686     + if (!hw)
687     + return 0;
688     +
689     err = pci_set_power_state(pdev, PCI_D0);
690     if (err)
691     goto out;
692     @@ -3855,6 +3866,9 @@ static void skge_shutdown(struct pci_dev *pdev)
693     struct skge_hw *hw = pci_get_drvdata(pdev);
694     int i, wol = 0;
695    
696     + if (!hw)
697     + return;
698     +
699     for (i = 0; i < hw->ports; i++) {
700     struct net_device *dev = hw->dev[i];
701     struct skge_port *skge = netdev_priv(dev);
702     diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
703     index ac36152..b6b444b 100644
704     --- a/drivers/net/sky2.c
705     +++ b/drivers/net/sky2.c
706     @@ -123,16 +123,13 @@ static const struct pci_device_id sky2_id_table[] = {
707     { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, /* 88E8050 */
708     { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, /* 88E8053 */
709     { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) }, /* 88E8055 */
710     -#ifdef broken
711     - /* This device causes data corruption problems that are not resolved */
712     { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) }, /* 88E8056 */
713     -#endif
714     { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */
715     { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */
716     { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */
717     { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4369) }, /* 88EC042 */
718     { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436A) }, /* 88E8058 */
719     - { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436B) }, /* 88E8071 */
720     +// { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436B) }, /* 88E8071 */
721     { 0 }
722     };
723    
724     @@ -3722,6 +3719,7 @@ err_out_free_regions:
725     pci_release_regions(pdev);
726     pci_disable_device(pdev);
727     err_out:
728     + pci_set_drvdata(pdev, NULL);
729     return err;
730     }
731    
732     @@ -3774,6 +3772,9 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
733     struct sky2_hw *hw = pci_get_drvdata(pdev);
734     int i, wol = 0;
735    
736     + if (!hw)
737     + return 0;
738     +
739     del_timer_sync(&hw->idle_timer);
740     netif_poll_disable(hw->dev[0]);
741    
742     @@ -3805,6 +3806,9 @@ static int sky2_resume(struct pci_dev *pdev)
743     struct sky2_hw *hw = pci_get_drvdata(pdev);
744     int i, err;
745    
746     + if (!hw)
747     + return 0;
748     +
749     err = pci_set_power_state(pdev, PCI_D0);
750     if (err)
751     goto out;
752     @@ -3851,6 +3855,9 @@ static void sky2_shutdown(struct pci_dev *pdev)
753     struct sky2_hw *hw = pci_get_drvdata(pdev);
754     int i, wol = 0;
755    
756     + if (!hw)
757     + return;
758     +
759     del_timer_sync(&hw->idle_timer);
760     netif_poll_disable(hw->dev[0]);
761    
762     diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
763     index c956141..0b89812 100644
764     --- a/drivers/net/smc911x.c
765     +++ b/drivers/net/smc911x.c
766     @@ -499,7 +499,7 @@ static inline void smc911x_rcv(struct net_device *dev)
767     SMC_SET_RX_CFG(RX_CFG_RX_END_ALGN4_ | ((2<<8) & RX_CFG_RXDOFF_));
768     SMC_PULL_DATA(data, pkt_len+2+3);
769    
770     - DBG(SMC_DEBUG_PKTS, "%s: Received packet\n", dev->name,);
771     + DBG(SMC_DEBUG_PKTS, "%s: Received packet\n", dev->name);
772     PRINT_PKT(data, ((pkt_len - 4) <= 64) ? pkt_len - 4 : 64);
773     dev->last_rx = jiffies;
774     skb->dev = dev;
775     diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
776     index 256969e..3d20115 100644
777     --- a/drivers/net/tg3.c
778     +++ b/drivers/net/tg3.c
779     @@ -64,8 +64,8 @@
780    
781     #define DRV_MODULE_NAME "tg3"
782     #define PFX DRV_MODULE_NAME ": "
783     -#define DRV_MODULE_VERSION "3.75"
784     -#define DRV_MODULE_RELDATE "March 23, 2007"
785     +#define DRV_MODULE_VERSION "3.75.1"
786     +#define DRV_MODULE_RELDATE "May 7, 2007"
787    
788     #define TG3_DEF_MAC_MODE 0
789     #define TG3_DEF_RX_MODE 0
790     @@ -3895,8 +3895,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
791     entry = tp->tx_prod;
792     base_flags = 0;
793     mss = 0;
794     - if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
795     - (mss = skb_shinfo(skb)->gso_size) != 0) {
796     + if ((mss = skb_shinfo(skb)->gso_size) != 0) {
797     int tcp_opt_len, ip_tcp_len;
798    
799     if (skb_header_cloned(skb) &&
800     @@ -4053,8 +4052,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
801     if (skb->ip_summed == CHECKSUM_PARTIAL)
802     base_flags |= TXD_FLAG_TCPUDP_CSUM;
803     mss = 0;
804     - if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
805     - (mss = skb_shinfo(skb)->gso_size) != 0) {
806     + if ((mss = skb_shinfo(skb)->gso_size) != 0) {
807     int tcp_opt_len, ip_tcp_len, hdr_len;
808    
809     if (skb_header_cloned(skb) &&
810     @@ -5936,7 +5934,7 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
811    
812    
813     /* tp->lock is held. */
814     -static void __tg3_set_mac_addr(struct tg3 *tp)
815     +static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
816     {
817     u32 addr_high, addr_low;
818     int i;
819     @@ -5948,6 +5946,8 @@ static void __tg3_set_mac_addr(struct tg3 *tp)
820     (tp->dev->dev_addr[4] << 8) |
821     (tp->dev->dev_addr[5] << 0));
822     for (i = 0; i < 4; i++) {
823     + if (i == 1 && skip_mac_1)
824     + continue;
825     tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
826     tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
827     }
828     @@ -5974,7 +5974,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
829     {
830     struct tg3 *tp = netdev_priv(dev);
831     struct sockaddr *addr = p;
832     - int err = 0;
833     + int err = 0, skip_mac_1 = 0;
834    
835     if (!is_valid_ether_addr(addr->sa_data))
836     return -EINVAL;
837     @@ -5985,22 +5985,21 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
838     return 0;
839    
840     if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
841     - /* Reset chip so that ASF can re-init any MAC addresses it
842     - * needs.
843     - */
844     - tg3_netif_stop(tp);
845     - tg3_full_lock(tp, 1);
846     + u32 addr0_high, addr0_low, addr1_high, addr1_low;
847    
848     - tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
849     - err = tg3_restart_hw(tp, 0);
850     - if (!err)
851     - tg3_netif_start(tp);
852     - tg3_full_unlock(tp);
853     - } else {
854     - spin_lock_bh(&tp->lock);
855     - __tg3_set_mac_addr(tp);
856     - spin_unlock_bh(&tp->lock);
857     + addr0_high = tr32(MAC_ADDR_0_HIGH);
858     + addr0_low = tr32(MAC_ADDR_0_LOW);
859     + addr1_high = tr32(MAC_ADDR_1_HIGH);
860     + addr1_low = tr32(MAC_ADDR_1_LOW);
861     +
862     + /* Skip MAC addr 1 if ASF is using it. */
863     + if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
864     + !(addr1_high == 0 && addr1_low == 0))
865     + skip_mac_1 = 1;
866     }
867     + spin_lock_bh(&tp->lock);
868     + __tg3_set_mac_addr(tp, skip_mac_1);
869     + spin_unlock_bh(&tp->lock);
870    
871     return err;
872     }
873     @@ -6317,7 +6316,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
874     tp->rx_jumbo_ptr);
875    
876     /* Initialize MAC address and backoff seed. */
877     - __tg3_set_mac_addr(tp);
878     + __tg3_set_mac_addr(tp, 0);
879    
880     /* MTU + ethernet header + FCS + optional VLAN tag */
881     tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
882     @@ -6348,8 +6347,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
883     tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
884     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
885     if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
886     - (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
887     - tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
888     + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
889     rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
890     } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
891     !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
892     diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
893     index 65d6f23..5af9125 100644
894     --- a/drivers/pci/quirks.c
895     +++ b/drivers/pci/quirks.c
896     @@ -1737,18 +1737,20 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
897     quirk_nvidia_ck804_pcie_aer_ext_cap);
898    
899     #ifdef CONFIG_PCI_MSI
900     -/* The Serverworks PCI-X chipset does not support MSI. We cannot easily rely
901     - * on setting PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually
902     - * some other busses controlled by the chipset even if Linux is not aware of it.
903     - * Instead of setting the flag on all busses in the machine, simply disable MSI
904     - * globally.
905     +/* Some chipsets do not support MSI. We cannot easily rely on setting
906     + * PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually
907     + * some other busses controlled by the chipset even if Linux is not
908     + * aware of it. Instead of setting the flag on all busses in the
909     + * machine, simply disable MSI globally.
910     */
911     -static void __init quirk_svw_msi(struct pci_dev *dev)
912     +static void __init quirk_disable_all_msi(struct pci_dev *dev)
913     {
914     pci_no_msi();
915     printk(KERN_WARNING "PCI: MSI quirk detected. MSI deactivated.\n");
916     }
917     -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_GCNB_LE, quirk_svw_msi);
918     +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_GCNB_LE, quirk_disable_all_msi);
919     +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS400_200, quirk_disable_all_msi);
920     +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS480, quirk_disable_all_msi);
921    
922     /* Disable MSI on chipsets that are known to not support it */
923     static void __devinit quirk_disable_msi(struct pci_dev *dev)
924     diff --git a/drivers/serial/sunhv.c b/drivers/serial/sunhv.c
925     index 40d4856..c3a6bd2 100644
926     --- a/drivers/serial/sunhv.c
927     +++ b/drivers/serial/sunhv.c
928     @@ -493,6 +493,10 @@ static struct of_device_id hv_match[] = {
929     .name = "console",
930     .compatible = "qcn",
931     },
932     + {
933     + .name = "console",
934     + .compatible = "SUNW,sun4v-console",
935     + },
936     {},
937     };
938     MODULE_DEVICE_TABLE(of, hv_match);
939     diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
940     index 3dfa3e4..3257d94 100644
941     --- a/drivers/usb/atm/cxacru.c
942     +++ b/drivers/usb/atm/cxacru.c
943     @@ -146,6 +146,12 @@ enum cxacru_info_idx {
944     CXINF_MAX = 0x1c,
945     };
946    
947     +enum poll_state {
948     + CX_INIT,
949     + CX_POLLING,
950     + CX_ABORT
951     +};
952     +
953     struct cxacru_modem_type {
954     u32 pll_f_clk;
955     u32 pll_b_clk;
956     @@ -159,6 +165,8 @@ struct cxacru_data {
957    
958     int line_status;
959     struct delayed_work poll_work;
960     + struct mutex poll_state_serialize;
961     + enum poll_state poll_state;
962    
963     /* contol handles */
964     struct mutex cm_serialize;
965     @@ -356,7 +364,7 @@ static int cxacru_atm_start(struct usbatm_data *usbatm_instance,
966     /*
967     struct atm_dev *atm_dev = usbatm_instance->atm_dev;
968     */
969     - int ret;
970     + int ret, start_polling = 1;
971    
972     dbg("cxacru_atm_start");
973    
974     @@ -376,7 +384,15 @@ static int cxacru_atm_start(struct usbatm_data *usbatm_instance,
975     }
976    
977     /* Start status polling */
978     - cxacru_poll_status(&instance->poll_work.work);
979     + mutex_lock(&instance->poll_state_serialize);
980     + if (instance->poll_state == CX_INIT)
981     + instance->poll_state = CX_POLLING;
982     + else /* poll_state == CX_ABORT */
983     + start_polling = 0;
984     + mutex_unlock(&instance->poll_state_serialize);
985     +
986     + if (start_polling)
987     + cxacru_poll_status(&instance->poll_work.work);
988     return 0;
989     }
990    
991     @@ -685,6 +701,9 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
992     instance->usbatm = usbatm_instance;
993     instance->modem_type = (struct cxacru_modem_type *) id->driver_info;
994    
995     + mutex_init(&instance->poll_state_serialize);
996     + instance->poll_state = CX_INIT;
997     +
998     instance->rcv_buf = (u8 *) __get_free_page(GFP_KERNEL);
999     if (!instance->rcv_buf) {
1000     dbg("cxacru_bind: no memory for rcv_buf");
1001     @@ -744,6 +763,7 @@ static void cxacru_unbind(struct usbatm_data *usbatm_instance,
1002     struct usb_interface *intf)
1003     {
1004     struct cxacru_data *instance = usbatm_instance->driver_data;
1005     + int stop_polling = 1;
1006    
1007     dbg("cxacru_unbind entered");
1008    
1009     @@ -752,8 +772,20 @@ static void cxacru_unbind(struct usbatm_data *usbatm_instance,
1010     return;
1011     }
1012    
1013     - while (!cancel_delayed_work(&instance->poll_work))
1014     - flush_scheduled_work();
1015     + mutex_lock(&instance->poll_state_serialize);
1016     + if (instance->poll_state != CX_POLLING) {
1017     + /* Polling hasn't started yet and with
1018     + * the mutex locked it can be prevented
1019     + * from starting.
1020     + */
1021     + instance->poll_state = CX_ABORT;
1022     + stop_polling = 0;
1023     + }
1024     + mutex_unlock(&instance->poll_state_serialize);
1025     +
1026     + if (stop_polling)
1027     + while (!cancel_delayed_work(&instance->poll_work))
1028     + flush_scheduled_work();
1029    
1030     usb_kill_urb(instance->snd_urb);
1031     usb_kill_urb(instance->rcv_urb);
1032     diff --git a/drivers/usb/input/hiddev.c b/drivers/usb/input/hiddev.c
1033     index a8b3d66..488d61b 100644
1034     --- a/drivers/usb/input/hiddev.c
1035     +++ b/drivers/usb/input/hiddev.c
1036     @@ -51,6 +51,7 @@ struct hiddev {
1037     wait_queue_head_t wait;
1038     struct hid_device *hid;
1039     struct list_head list;
1040     + spinlock_t list_lock;
1041     };
1042    
1043     struct hiddev_list {
1044     @@ -161,7 +162,9 @@ static void hiddev_send_event(struct hid_device *hid,
1045     {
1046     struct hiddev *hiddev = hid->hiddev;
1047     struct hiddev_list *list;
1048     + unsigned long flags;
1049    
1050     + spin_lock_irqsave(&hiddev->list_lock, flags);
1051     list_for_each_entry(list, &hiddev->list, node) {
1052     if (uref->field_index != HID_FIELD_INDEX_NONE ||
1053     (list->flags & HIDDEV_FLAG_REPORT) != 0) {
1054     @@ -171,6 +174,7 @@ static void hiddev_send_event(struct hid_device *hid,
1055     kill_fasync(&list->fasync, SIGIO, POLL_IN);
1056     }
1057     }
1058     + spin_unlock_irqrestore(&hiddev->list_lock, flags);
1059    
1060     wake_up_interruptible(&hiddev->wait);
1061     }
1062     @@ -235,9 +239,13 @@ static int hiddev_fasync(int fd, struct file *file, int on)
1063     static int hiddev_release(struct inode * inode, struct file * file)
1064     {
1065     struct hiddev_list *list = file->private_data;
1066     + unsigned long flags;
1067    
1068     hiddev_fasync(-1, file, 0);
1069     +
1070     + spin_lock_irqsave(&list->hiddev->list_lock, flags);
1071     list_del(&list->node);
1072     + spin_unlock_irqrestore(&list->hiddev->list_lock, flags);
1073    
1074     if (!--list->hiddev->open) {
1075     if (list->hiddev->exist)
1076     @@ -257,6 +265,7 @@ static int hiddev_release(struct inode * inode, struct file * file)
1077     static int hiddev_open(struct inode *inode, struct file *file)
1078     {
1079     struct hiddev_list *list;
1080     + unsigned long flags;
1081    
1082     int i = iminor(inode) - HIDDEV_MINOR_BASE;
1083    
1084     @@ -267,7 +276,11 @@ static int hiddev_open(struct inode *inode, struct file *file)
1085     return -ENOMEM;
1086    
1087     list->hiddev = hiddev_table[i];
1088     +
1089     + spin_lock_irqsave(&list->hiddev->list_lock, flags);
1090     list_add_tail(&list->node, &hiddev_table[i]->list);
1091     + spin_unlock_irqrestore(&list->hiddev->list_lock, flags);
1092     +
1093     file->private_data = list;
1094    
1095     if (!list->hiddev->open++)
1096     @@ -773,6 +786,7 @@ int hiddev_connect(struct hid_device *hid)
1097    
1098     init_waitqueue_head(&hiddev->wait);
1099     INIT_LIST_HEAD(&hiddev->list);
1100     + spin_lock_init(&hiddev->list_lock);
1101     hiddev->hid = hid;
1102     hiddev->exist = 1;
1103    
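
The hiddev hunks add list_lock because hiddev_send_event() walks hiddev->list from interrupt context while open() and release() mutate it; unsynchronized, the walk can race with list_del(). The add/walk pairing under an IRQ-safe spinlock looks like this (names are illustrative):

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_entry { struct list_head node; };

static LIST_HEAD(demo_list);
static DEFINE_SPINLOCK(demo_list_lock);

static void demo_add(struct demo_entry *e)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_list_lock, flags);
	list_add_tail(&e->node, &demo_list);
	spin_unlock_irqrestore(&demo_list_lock, flags);
}

static void demo_walk(void)  /* safe even from IRQ context */
{
	struct demo_entry *e;
	unsigned long flags;

	spin_lock_irqsave(&demo_list_lock, flags);
	list_for_each_entry(e, &demo_list, node) {
		/* deliver an event to e */
	}
	spin_unlock_irqrestore(&demo_list_lock, flags);
}
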
1104     diff --git a/fs/fat/dir.c b/fs/fat/dir.c
1105     index c16af24..ccf161d 100644
1106     --- a/fs/fat/dir.c
1107     +++ b/fs/fat/dir.c
1108     @@ -422,7 +422,7 @@ EODir:
1109     EXPORT_SYMBOL_GPL(fat_search_long);
1110    
1111     struct fat_ioctl_filldir_callback {
1112     - struct dirent __user *dirent;
1113     + void __user *dirent;
1114     int result;
1115     /* for dir ioctl */
1116     const char *longname;
1117     @@ -647,62 +647,85 @@ static int fat_readdir(struct file *filp, void *dirent, filldir_t filldir)
1118     return __fat_readdir(inode, filp, dirent, filldir, 0, 0);
1119     }
1120    
1121     -static int fat_ioctl_filldir(void *__buf, const char *name, int name_len,
1122     - loff_t offset, u64 ino, unsigned int d_type)
1123     +#define FAT_IOCTL_FILLDIR_FUNC(func, dirent_type) \
1124     +static int func(void *__buf, const char *name, int name_len, \
1125     + loff_t offset, u64 ino, unsigned int d_type) \
1126     +{ \
1127     + struct fat_ioctl_filldir_callback *buf = __buf; \
1128     + struct dirent_type __user *d1 = buf->dirent; \
1129     + struct dirent_type __user *d2 = d1 + 1; \
1130     + \
1131     + if (buf->result) \
1132     + return -EINVAL; \
1133     + buf->result++; \
1134     + \
1135     + if (name != NULL) { \
1136     + /* dirent has only short name */ \
1137     + if (name_len >= sizeof(d1->d_name)) \
1138     + name_len = sizeof(d1->d_name) - 1; \
1139     + \
1140     + if (put_user(0, d2->d_name) || \
1141     + put_user(0, &d2->d_reclen) || \
1142     + copy_to_user(d1->d_name, name, name_len) || \
1143     + put_user(0, d1->d_name + name_len) || \
1144     + put_user(name_len, &d1->d_reclen)) \
1145     + goto efault; \
1146     + } else { \
1147     + /* dirent has short and long name */ \
1148     + const char *longname = buf->longname; \
1149     + int long_len = buf->long_len; \
1150     + const char *shortname = buf->shortname; \
1151     + int short_len = buf->short_len; \
1152     + \
1153     + if (long_len >= sizeof(d1->d_name)) \
1154     + long_len = sizeof(d1->d_name) - 1; \
1155     + if (short_len >= sizeof(d1->d_name)) \
1156     + short_len = sizeof(d1->d_name) - 1; \
1157     + \
1158     + if (copy_to_user(d2->d_name, longname, long_len) || \
1159     + put_user(0, d2->d_name + long_len) || \
1160     + put_user(long_len, &d2->d_reclen) || \
1161     + put_user(ino, &d2->d_ino) || \
1162     + put_user(offset, &d2->d_off) || \
1163     + copy_to_user(d1->d_name, shortname, short_len) || \
1164     + put_user(0, d1->d_name + short_len) || \
1165     + put_user(short_len, &d1->d_reclen)) \
1166     + goto efault; \
1167     + } \
1168     + return 0; \
1169     +efault: \
1170     + buf->result = -EFAULT; \
1171     + return -EFAULT; \
1172     +}
1173     +
1174     +FAT_IOCTL_FILLDIR_FUNC(fat_ioctl_filldir, dirent)
1175     +
1176     +static int fat_ioctl_readdir(struct inode *inode, struct file *filp,
1177     + void __user *dirent, filldir_t filldir,
1178     + int short_only, int both)
1179     {
1180     - struct fat_ioctl_filldir_callback *buf = __buf;
1181     - struct dirent __user *d1 = buf->dirent;
1182     - struct dirent __user *d2 = d1 + 1;
1183     -
1184     - if (buf->result)
1185     - return -EINVAL;
1186     - buf->result++;
1187     -
1188     - if (name != NULL) {
1189     - /* dirent has only short name */
1190     - if (name_len >= sizeof(d1->d_name))
1191     - name_len = sizeof(d1->d_name) - 1;
1192     -
1193     - if (put_user(0, d2->d_name) ||
1194     - put_user(0, &d2->d_reclen) ||
1195     - copy_to_user(d1->d_name, name, name_len) ||
1196     - put_user(0, d1->d_name + name_len) ||
1197     - put_user(name_len, &d1->d_reclen))
1198     - goto efault;
1199     - } else {
1200     - /* dirent has short and long name */
1201     - const char *longname = buf->longname;
1202     - int long_len = buf->long_len;
1203     - const char *shortname = buf->shortname;
1204     - int short_len = buf->short_len;
1205     -
1206     - if (long_len >= sizeof(d1->d_name))
1207     - long_len = sizeof(d1->d_name) - 1;
1208     - if (short_len >= sizeof(d1->d_name))
1209     - short_len = sizeof(d1->d_name) - 1;
1210     -
1211     - if (copy_to_user(d2->d_name, longname, long_len) ||
1212     - put_user(0, d2->d_name + long_len) ||
1213     - put_user(long_len, &d2->d_reclen) ||
1214     - put_user(ino, &d2->d_ino) ||
1215     - put_user(offset, &d2->d_off) ||
1216     - copy_to_user(d1->d_name, shortname, short_len) ||
1217     - put_user(0, d1->d_name + short_len) ||
1218     - put_user(short_len, &d1->d_reclen))
1219     - goto efault;
1220     + struct fat_ioctl_filldir_callback buf;
1221     + int ret;
1222     +
1223     + buf.dirent = dirent;
1224     + buf.result = 0;
1225     + mutex_lock(&inode->i_mutex);
1226     + ret = -ENOENT;
1227     + if (!IS_DEADDIR(inode)) {
1228     + ret = __fat_readdir(inode, filp, &buf, filldir,
1229     + short_only, both);
1230     }
1231     - return 0;
1232     -efault:
1233     - buf->result = -EFAULT;
1234     - return -EFAULT;
1235     + mutex_unlock(&inode->i_mutex);
1236     + if (ret >= 0)
1237     + ret = buf.result;
1238     + return ret;
1239     }
1240    
1241     -static int fat_dir_ioctl(struct inode * inode, struct file * filp,
1242     - unsigned int cmd, unsigned long arg)
1243     +static int fat_dir_ioctl(struct inode *inode, struct file *filp,
1244     + unsigned int cmd, unsigned long arg)
1245     {
1246     - struct fat_ioctl_filldir_callback buf;
1247     - struct dirent __user *d1;
1248     - int ret, short_only, both;
1249     + struct dirent __user *d1 = (struct dirent __user *)arg;
1250     + int short_only, both;
1251    
1252     switch (cmd) {
1253     case VFAT_IOCTL_READDIR_SHORT:
1254     @@ -717,7 +740,6 @@ static int fat_dir_ioctl(struct inode * inode, struct file * filp,
1255     return fat_generic_ioctl(inode, filp, cmd, arg);
1256     }
1257    
1258     - d1 = (struct dirent __user *)arg;
1259     if (!access_ok(VERIFY_WRITE, d1, sizeof(struct dirent[2])))
1260     return -EFAULT;
1261     /*
1262     @@ -728,69 +750,48 @@ static int fat_dir_ioctl(struct inode * inode, struct file * filp,
1263     if (put_user(0, &d1->d_reclen))
1264     return -EFAULT;
1265    
1266     - buf.dirent = d1;
1267     - buf.result = 0;
1268     - mutex_lock(&inode->i_mutex);
1269     - ret = -ENOENT;
1270     - if (!IS_DEADDIR(inode)) {
1271     - ret = __fat_readdir(inode, filp, &buf, fat_ioctl_filldir,
1272     - short_only, both);
1273     - }
1274     - mutex_unlock(&inode->i_mutex);
1275     - if (ret >= 0)
1276     - ret = buf.result;
1277     - return ret;
1278     + return fat_ioctl_readdir(inode, filp, d1, fat_ioctl_filldir,
1279     + short_only, both);
1280     }
1281    
1282     #ifdef CONFIG_COMPAT
1283     #define VFAT_IOCTL_READDIR_BOTH32 _IOR('r', 1, struct compat_dirent[2])
1284     #define VFAT_IOCTL_READDIR_SHORT32 _IOR('r', 2, struct compat_dirent[2])
1285    
1286     -static long fat_compat_put_dirent32(struct dirent *d,
1287     - struct compat_dirent __user *d32)
1288     -{
1289     - if (!access_ok(VERIFY_WRITE, d32, sizeof(struct compat_dirent)))
1290     - return -EFAULT;
1291     -
1292     - __put_user(d->d_ino, &d32->d_ino);
1293     - __put_user(d->d_off, &d32->d_off);
1294     - __put_user(d->d_reclen, &d32->d_reclen);
1295     - if (__copy_to_user(d32->d_name, d->d_name, d->d_reclen))
1296     - return -EFAULT;
1297     +FAT_IOCTL_FILLDIR_FUNC(fat_compat_ioctl_filldir, compat_dirent)
1298    
1299     - return 0;
1300     -}
1301     -
1302     -static long fat_compat_dir_ioctl(struct file *file, unsigned cmd,
1303     +static long fat_compat_dir_ioctl(struct file *filp, unsigned cmd,
1304     unsigned long arg)
1305     {
1306     - struct compat_dirent __user *p = compat_ptr(arg);
1307     - int ret;
1308     - mm_segment_t oldfs = get_fs();
1309     - struct dirent d[2];
1310     + struct inode *inode = filp->f_path.dentry->d_inode;
1311     + struct compat_dirent __user *d1 = compat_ptr(arg);
1312     + int short_only, both;
1313    
1314     switch (cmd) {
1315     - case VFAT_IOCTL_READDIR_BOTH32:
1316     - cmd = VFAT_IOCTL_READDIR_BOTH;
1317     - break;
1318     case VFAT_IOCTL_READDIR_SHORT32:
1319     - cmd = VFAT_IOCTL_READDIR_SHORT;
1320     + short_only = 1;
1321     + both = 0;
1322     + break;
1323     + case VFAT_IOCTL_READDIR_BOTH32:
1324     + short_only = 0;
1325     + both = 1;
1326     break;
1327     default:
1328     return -ENOIOCTLCMD;
1329     }
1330    
1331     - set_fs(KERNEL_DS);
1332     - lock_kernel();
1333     - ret = fat_dir_ioctl(file->f_path.dentry->d_inode, file,
1334     - cmd, (unsigned long) &d);
1335     - unlock_kernel();
1336     - set_fs(oldfs);
1337     - if (ret >= 0) {
1338     - ret |= fat_compat_put_dirent32(&d[0], p);
1339     - ret |= fat_compat_put_dirent32(&d[1], p + 1);
1340     - }
1341     - return ret;
1342     + if (!access_ok(VERIFY_WRITE, d1, sizeof(struct compat_dirent[2])))
1343     + return -EFAULT;
1344     + /*
1345     + * Yes, we don't need this put_user() absolutely. However old
1346     + * code didn't return the right value. So, app use this value,
1347     + * in order to check whether it is EOF.
1348     + */
1349     + if (put_user(0, &d1->d_reclen))
1350     + return -EFAULT;
1351     +
1352     + return fat_ioctl_readdir(inode, filp, d1, fat_compat_ioctl_filldir,
1353     + short_only, both);
1354     }
1355     #endif /* CONFIG_COMPAT */
1356    
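
FAT_IOCTL_FILLDIR_FUNC stamps out one filldir body per dirent layout, so the native and compat ioctl paths run identical logic instead of the old KERNEL_DS round-trip plus translation copy. The technique in a reduced, standalone form (simplified types, not the kernel's):

#include <string.h>

#define DEFINE_FILL_FUNC(func, dirent_type)            \
static int func(void *buf, const char *name, int len)  \
{                                                      \
	struct dirent_type *d = buf;                   \
	if (len >= (int)sizeof(d->d_name))             \
		len = sizeof(d->d_name) - 1;           \
	memcpy(d->d_name, name, len);                  \
	d->d_name[len] = '\0';                         \
	return len;                                    \
}

struct dirent_native { char d_name[256]; };
struct dirent_compat { char d_name[64]; };

/* one body, two instantiations, no chance of divergence */
DEFINE_FILL_FUNC(fill_native, dirent_native)
DEFINE_FILL_FUNC(fill_compat, dirent_compat)
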
1357     diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
1358     index 5065baa..3760d02 100644
1359     --- a/fs/jfs/jfs_logmgr.c
1360     +++ b/fs/jfs/jfs_logmgr.c
1361     @@ -2354,12 +2354,13 @@ int jfsIOWait(void *arg)
1362     lbmStartIO(bp);
1363     spin_lock_irq(&log_redrive_lock);
1364     }
1365     - spin_unlock_irq(&log_redrive_lock);
1366    
1367     if (freezing(current)) {
1368     + spin_unlock_irq(&log_redrive_lock);
1369     refrigerator();
1370     } else {
1371     set_current_state(TASK_INTERRUPTIBLE);
1372     + spin_unlock_irq(&log_redrive_lock);
1373     schedule();
1374     current->state = TASK_RUNNING;
1375     }
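
The jfsIOWait() change enforces two sleep rules: never enter a sleeping call (refrigerator()) while holding a spinlock, and set TASK_INTERRUPTIBLE before dropping the lock so a wakeup landing in the unlock-to-schedule() window cannot be lost. Condensed, the safe shape is (demo_lock stands in for log_redrive_lock):

	spin_lock_irq(&demo_lock);
	/* ... drain queued work under the lock ... */
	if (freezing(current)) {
		spin_unlock_irq(&demo_lock);  /* drop the lock, then sleep */
		refrigerator();
	} else {
		set_current_state(TASK_INTERRUPTIBLE);  /* mark sleepy first */
		spin_unlock_irq(&demo_lock);            /* then unlock */
		schedule();                             /* a wakeup now can't be missed */
		__set_current_state(TASK_RUNNING);
	}
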
1376     diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
1377     index 6f24768..79bd03b 100644
1378     --- a/fs/nfsd/export.c
1379     +++ b/fs/nfsd/export.c
1380     @@ -469,6 +469,13 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
1381     nd.dentry = NULL;
1382     exp.ex_path = NULL;
1383    
1384     + /* fs locations */
1385     + exp.ex_fslocs.locations = NULL;
1386     + exp.ex_fslocs.locations_count = 0;
1387     + exp.ex_fslocs.migrated = 0;
1388     +
1389     + exp.ex_uuid = NULL;
1390     +
1391     if (mesg[mlen-1] != '\n')
1392     return -EINVAL;
1393     mesg[mlen-1] = 0;
1394     @@ -509,13 +516,6 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
1395     if (exp.h.expiry_time == 0)
1396     goto out;
1397    
1398     - /* fs locations */
1399     - exp.ex_fslocs.locations = NULL;
1400     - exp.ex_fslocs.locations_count = 0;
1401     - exp.ex_fslocs.migrated = 0;
1402     -
1403     - exp.ex_uuid = NULL;
1404     -
1405     /* flags */
1406     err = get_int(&mesg, &an_int);
1407     if (err == -ENOENT)
1408     diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
1409     index c8178b7..2cac562 100644
1410     --- a/fs/reiserfs/xattr.c
1411     +++ b/fs/reiserfs/xattr.c
1412     @@ -68,7 +68,7 @@ static struct dentry *get_xa_root(struct super_block *sb, int flags)
1413     if (!privroot)
1414     return ERR_PTR(-ENODATA);
1415    
1416     - mutex_lock(&privroot->d_inode->i_mutex);
1417     + mutex_lock_nested(&privroot->d_inode->i_mutex, I_MUTEX_XATTR);
1418     if (REISERFS_SB(sb)->xattr_root) {
1419     xaroot = dget(REISERFS_SB(sb)->xattr_root);
1420     goto out;
1421     diff --git a/fs/udf/namei.c b/fs/udf/namei.c
1422     index fe361cd..b254375 100644
1423     --- a/fs/udf/namei.c
1424     +++ b/fs/udf/namei.c
1425     @@ -878,7 +878,7 @@ static int udf_rmdir(struct inode * dir, struct dentry * dentry)
1426     inode->i_nlink);
1427     clear_nlink(inode);
1428     inode->i_size = 0;
1429     - inode_dec_link_count(inode);
1430     + inode_dec_link_count(dir);
1431     inode->i_ctime = dir->i_ctime = dir->i_mtime = current_fs_time(dir->i_sb);
1432     mark_inode_dirty(dir);
1433    
1434     diff --git a/include/asm-arm/arch-iop13xx/iop13xx.h b/include/asm-arm/arch-iop13xx/iop13xx.h
1435     index d26b755..74d7498 100644
1436     --- a/include/asm-arm/arch-iop13xx/iop13xx.h
1437     +++ b/include/asm-arm/arch-iop13xx/iop13xx.h
1438     @@ -27,19 +27,24 @@ static inline int iop13xx_cpu_id(void)
1439     #define IOP13XX_PCI_OFFSET IOP13XX_MAX_RAM_SIZE
1440    
1441     /* PCI MAP
1442     - * 0x0000.0000 - 0x8000.0000 1:1 mapping with Physical RAM
1443     - * 0x8000.0000 - 0x8800.0000 PCIX/PCIE memory window (128MB)
1444     -*/
1445     + * bus range cpu phys cpu virt note
1446     + * 0x0000.0000 + 2GB (n/a) (n/a) inbound, 1:1 mapping with Physical RAM
1447     + * 0x8000.0000 + 928M 0x1.8000.0000 (ioremap) PCIX outbound memory window
1448     + * 0x8000.0000 + 928M 0x2.8000.0000 (ioremap) PCIE outbound memory window
1449     + *
1450     + * IO MAP
1451     + * 0x1000 + 64K 0x0.fffb.1000 0xfec6.1000 PCIX outbound i/o window
1452     + * 0x1000 + 64K 0x0.fffd.1000 0xfed7.1000 PCIE outbound i/o window
1453     + */
1454     #define IOP13XX_PCIX_IO_WINDOW_SIZE 0x10000UL
1455     #define IOP13XX_PCIX_LOWER_IO_PA 0xfffb0000UL
1456     #define IOP13XX_PCIX_LOWER_IO_VA 0xfec60000UL
1457     -#define IOP13XX_PCIX_LOWER_IO_BA 0x0fff0000UL
1458     +#define IOP13XX_PCIX_LOWER_IO_BA 0x0UL /* OIOTVR */
1459     +#define IOP13XX_PCIX_IO_BUS_OFFSET 0x1000UL
1460     #define IOP13XX_PCIX_UPPER_IO_PA (IOP13XX_PCIX_LOWER_IO_PA +\
1461     IOP13XX_PCIX_IO_WINDOW_SIZE - 1)
1462     #define IOP13XX_PCIX_UPPER_IO_VA (IOP13XX_PCIX_LOWER_IO_VA +\
1463     IOP13XX_PCIX_IO_WINDOW_SIZE - 1)
1464     -#define IOP13XX_PCIX_IO_OFFSET (IOP13XX_PCIX_LOWER_IO_VA -\
1465     - IOP13XX_PCIX_LOWER_IO_BA)
1466     #define IOP13XX_PCIX_IO_PHYS_TO_VIRT(addr) (u32) ((u32) addr -\
1467     (IOP13XX_PCIX_LOWER_IO_PA\
1468     - IOP13XX_PCIX_LOWER_IO_VA))
1469     @@ -65,15 +70,14 @@ static inline int iop13xx_cpu_id(void)
1470     #define IOP13XX_PCIE_IO_WINDOW_SIZE 0x10000UL
1471     #define IOP13XX_PCIE_LOWER_IO_PA 0xfffd0000UL
1472     #define IOP13XX_PCIE_LOWER_IO_VA 0xfed70000UL
1473     -#define IOP13XX_PCIE_LOWER_IO_BA 0x0fff0000UL
1474     +#define IOP13XX_PCIE_LOWER_IO_BA 0x0UL /* OIOTVR */
1475     +#define IOP13XX_PCIE_IO_BUS_OFFSET 0x1000UL
1476     #define IOP13XX_PCIE_UPPER_IO_PA (IOP13XX_PCIE_LOWER_IO_PA +\
1477     IOP13XX_PCIE_IO_WINDOW_SIZE - 1)
1478     #define IOP13XX_PCIE_UPPER_IO_VA (IOP13XX_PCIE_LOWER_IO_VA +\
1479     IOP13XX_PCIE_IO_WINDOW_SIZE - 1)
1480     #define IOP13XX_PCIE_UPPER_IO_BA (IOP13XX_PCIE_LOWER_IO_BA +\
1481     IOP13XX_PCIE_IO_WINDOW_SIZE - 1)
1482     -#define IOP13XX_PCIE_IO_OFFSET (IOP13XX_PCIE_LOWER_IO_VA -\
1483     - IOP13XX_PCIE_LOWER_IO_BA)
1484     #define IOP13XX_PCIE_IO_PHYS_TO_VIRT(addr) (u32) ((u32) addr -\
1485     (IOP13XX_PCIE_LOWER_IO_PA\
1486     - IOP13XX_PCIE_LOWER_IO_VA))
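
This header hunk and the pci.c hunks earlier in the patch are one consistent change: the OIOTVR bus base becomes 0, the 0x1000 bus offset is folded into the resource start, and io_offset becomes the window's physical base instead of a virtual-address delta. Assuming the usual ARM translation (bus I/O address = resource address minus sys->io_offset), the numbers line up as:

    /* Sketch of the assumed translation for the PCIX window: */
    bus_io = res[0].start - sys->io_offset;
    /*     = (LOWER_IO_PA + 0x1000) - LOWER_IO_PA = 0x1000,
     * i.e. bus I/O now starts at the documented bus offset rather than
     * at a value derived from the kernel virtual mapping. */
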
1487     diff --git a/include/asm-sparc64/openprom.h b/include/asm-sparc64/openprom.h
1488     index e01b805..26ec046 100644
1489     --- a/include/asm-sparc64/openprom.h
1490     +++ b/include/asm-sparc64/openprom.h
1491     @@ -177,7 +177,7 @@ struct linux_nodeops {
1492     /* More fun PROM structures for device probing. */
1493     #define PROMREG_MAX 24
1494     #define PROMVADDR_MAX 16
1495     -#define PROMINTR_MAX 15
1496     +#define PROMINTR_MAX 32
1497    
1498     struct linux_prom_registers {
1499     unsigned which_io; /* hi part of physical address */
1500     diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
1501     index daa4940..bf92c26 100644
1502     --- a/include/linux/clocksource.h
1503     +++ b/include/linux/clocksource.h
1504     @@ -48,6 +48,7 @@ struct clocksource;
1505     * @shift: cycle to nanosecond divisor (power of two)
1506     * @flags: flags describing special properties
1507     * @vread: vsyscall based read
1508     + * @resume: resume function for the clocksource, if necessary
1509     * @cycle_interval: Used internally by timekeeping core, please ignore.
1510     * @xtime_interval: Used internally by timekeeping core, please ignore.
1511     */
1512     @@ -61,6 +62,7 @@ struct clocksource {
1513     u32 shift;
1514     unsigned long flags;
1515     cycle_t (*vread)(void);
1516     + void (*resume)(void);
1517    
1518     /* timekeeping specific data, ignore */
1519     cycle_t cycle_last, cycle_interval;
1520     @@ -198,6 +200,7 @@ static inline void clocksource_calculate_interval(struct clocksource *c,
1521     extern int clocksource_register(struct clocksource*);
1522     extern struct clocksource* clocksource_get_next(void);
1523     extern void clocksource_change_rating(struct clocksource *cs, int rating);
1524     +extern void clocksource_resume(void);
1525    
1526     #ifdef CONFIG_GENERIC_TIME_VSYSCALL
1527     extern void update_vsyscall(struct timespec *ts, struct clocksource *c);
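
The new hook gives a clocksource a chance to repair its hardware state after suspend before the timekeeping core reads it again. A minimal sketch of wiring it up, against a hypothetical memory-mapped counter (mytimer_base and the TIMER_* registers are made up):

    static cycle_t mytimer_read(void)
    {
            return readl(mytimer_base + TIMER_COUNT);
    }

    static void mytimer_resume(void)
    {
            writel(TIMER_ENABLE, mytimer_base + TIMER_CTRL); /* restart counter */
    }

    static struct clocksource mytimer_cs = {
            .name   = "mytimer",
            .rating = 200,
            .read   = mytimer_read,
            .mask   = CLOCKSOURCE_MASK(32),
            .resume = mytimer_resume,   /* invoked by clocksource_resume() */
    };
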
1528     diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
1529     index 1a52854..b1b0f68 100644
1530     --- a/include/linux/netdevice.h
1531     +++ b/include/linux/netdevice.h
1532     @@ -647,8 +647,10 @@ static inline void netif_start_queue(struct net_device *dev)
1533     static inline void netif_wake_queue(struct net_device *dev)
1534     {
1535     #ifdef CONFIG_NETPOLL_TRAP
1536     - if (netpoll_trap())
1537     + if (netpoll_trap()) {
1538     + clear_bit(__LINK_STATE_XOFF, &dev->state);
1539     return;
1540     + }
1541     #endif
1542     if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state))
1543     __netif_schedule(dev);
1544     @@ -656,10 +658,6 @@ static inline void netif_wake_queue(struct net_device *dev)
1545    
1546     static inline void netif_stop_queue(struct net_device *dev)
1547     {
1548     -#ifdef CONFIG_NETPOLL_TRAP
1549     - if (netpoll_trap())
1550     - return;
1551     -#endif
1552     set_bit(__LINK_STATE_XOFF, &dev->state);
1553     }
1554    
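
Previously both stop and wake were skipped while netpoll was trapping, so __LINK_STATE_XOFF could be left set with nothing ever clearing it. After the hunk the bit is the single source of truth: netif_stop_queue() always records XOFF, and a trapped wake clears the bit without scheduling. The invariant, restated:

    /* XOFF set <=> queue stopped; every wake path must clear the bit. */
    set_bit(__LINK_STATE_XOFF, &dev->state);          /* stop: unconditional */

    if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state))
            __netif_schedule(dev);    /* normal wake; the trapped wake clears
                                         the bit but skips the schedule */
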
1555     diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h
1556     index 4e6bbce..535e421 100644
1557     --- a/include/linux/netfilter/nf_conntrack_proto_gre.h
1558     +++ b/include/linux/netfilter/nf_conntrack_proto_gre.h
1559     @@ -87,24 +87,6 @@ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
1560     /* delete keymap entries */
1561     void nf_ct_gre_keymap_destroy(struct nf_conn *ct);
1562    
1563     -/* get pointer to gre key, if present */
1564     -static inline __be32 *gre_key(struct gre_hdr *greh)
1565     -{
1566     - if (!greh->key)
1567     - return NULL;
1568     - if (greh->csum || greh->routing)
1569     - return (__be32 *)(greh+sizeof(*greh)+4);
1570     - return (__be32 *)(greh+sizeof(*greh));
1571     -}
1572     -
1573     -/* get pointer ot gre csum, if present */
1574     -static inline __sum16 *gre_csum(struct gre_hdr *greh)
1575     -{
1576     - if (!greh->csum)
1577     - return NULL;
1578     - return (__sum16 *)(greh+sizeof(*greh));
1579     -}
1580     -
1581     extern void nf_ct_gre_keymap_flush(void);
1582     extern void nf_nat_need_gre(void);
1583    
1584     diff --git a/include/linux/netfilter_ipv4/ip_conntrack_proto_gre.h b/include/linux/netfilter_ipv4/ip_conntrack_proto_gre.h
1585     index e371e0f..d0f36f5 100644
1586     --- a/include/linux/netfilter_ipv4/ip_conntrack_proto_gre.h
1587     +++ b/include/linux/netfilter_ipv4/ip_conntrack_proto_gre.h
1588     @@ -90,25 +90,6 @@ int ip_ct_gre_keymap_add(struct ip_conntrack *ct,
1589     /* delete keymap entries */
1590     void ip_ct_gre_keymap_destroy(struct ip_conntrack *ct);
1591    
1592     -
1593     -/* get pointer to gre key, if present */
1594     -static inline __be32 *gre_key(struct gre_hdr *greh)
1595     -{
1596     - if (!greh->key)
1597     - return NULL;
1598     - if (greh->csum || greh->routing)
1599     - return (__be32 *) (greh+sizeof(*greh)+4);
1600     - return (__be32 *) (greh+sizeof(*greh));
1601     -}
1602     -
1603     -/* get pointer ot gre csum, if present */
1604     -static inline __sum16 *gre_csum(struct gre_hdr *greh)
1605     -{
1606     - if (!greh->csum)
1607     - return NULL;
1608     - return (__sum16 *) (greh+sizeof(*greh));
1609     -}
1610     -
1611     #endif /* __KERNEL__ */
1612    
1613     #endif /* _CONNTRACK_PROTO_GRE_H */
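
Both copies of gre_key()/gre_csum() can go because their only callers, the GREv0 rewrite branches in ip_nat_proto_gre.c and nf_nat_proto_gre.c, are deleted later in this same patch. They are also a good example of why such helpers are fragile: arithmetic on a struct pointer scales by the struct size, so the offsets here were off by a factor of sizeof(struct gre_hdr). Byte offsets need a byte-typed pointer first:

    /* greh + sizeof(*greh) advances sizeof(*greh) ELEMENTS,
     * i.e. sizeof(*greh) * sizeof(*greh) bytes past greh.     */
    __be32 *key = (__be32 *)((u8 *)greh + sizeof(*greh));  /* byte-accurate */
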
1614     diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
1615     index fe5c7db..5baee91 100644
1616     --- a/kernel/time/clocksource.c
1617     +++ b/kernel/time/clocksource.c
1618     @@ -74,6 +74,8 @@ static struct clocksource *watchdog;
1619     static struct timer_list watchdog_timer;
1620     static DEFINE_SPINLOCK(watchdog_lock);
1621     static cycle_t watchdog_last;
1622     +static int watchdog_resumed;
1623     +
1624     /*
1625     * Interval: 0.5sec Treshold: 0.0625s
1626     */
1627     @@ -98,15 +100,26 @@ static void clocksource_watchdog(unsigned long data)
1628     struct clocksource *cs, *tmp;
1629     cycle_t csnow, wdnow;
1630     int64_t wd_nsec, cs_nsec;
1631     + int resumed;
1632    
1633     spin_lock(&watchdog_lock);
1634    
1635     + resumed = watchdog_resumed;
1636     + if (unlikely(resumed))
1637     + watchdog_resumed = 0;
1638     +
1639     wdnow = watchdog->read();
1640     wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask);
1641     watchdog_last = wdnow;
1642    
1643     list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
1644     csnow = cs->read();
1645     +
1646     + if (unlikely(resumed)) {
1647     + cs->wd_last = csnow;
1648     + continue;
1649     + }
1650     +
1651     /* Initialized ? */
1652     if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
1653     if ((cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
1654     @@ -136,6 +149,13 @@ static void clocksource_watchdog(unsigned long data)
1655     }
1656     spin_unlock(&watchdog_lock);
1657     }
1658     +static void clocksource_resume_watchdog(void)
1659     +{
1660     + spin_lock(&watchdog_lock);
1661     + watchdog_resumed = 1;
1662     + spin_unlock(&watchdog_lock);
1663     +}
1664     +
1665     static void clocksource_check_watchdog(struct clocksource *cs)
1666     {
1667     struct clocksource *cse;
1668     @@ -182,9 +202,34 @@ static void clocksource_check_watchdog(struct clocksource *cs)
1669     if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
1670     cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
1671     }
1672     +
1673     +static inline void clocksource_resume_watchdog(void) { }
1674     #endif
1675    
1676     /**
1677     + * clocksource_resume - resume the clocksource(s)
1678     + */
1679     +void clocksource_resume(void)
1680     +{
1681     + struct list_head *tmp;
1682     + unsigned long flags;
1683     +
1684     + spin_lock_irqsave(&clocksource_lock, flags);
1685     +
1686     + list_for_each(tmp, &clocksource_list) {
1687     + struct clocksource *cs;
1688     +
1689     + cs = list_entry(tmp, struct clocksource, list);
1690     + if (cs->resume)
1691     + cs->resume();
1692     + }
1693     +
1694     + clocksource_resume_watchdog();
1695     +
1696     + spin_unlock_irqrestore(&clocksource_lock, flags);
1697     +}
1698     +
1699     +/**
1700     * clocksource_get_next - Returns the selected clocksource
1701     *
1702     */
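
After a resume, the watchdog clock and the watched clocksources have each jumped by unrelated amounts, so the first post-resume comparison would compute a nonsense delta and could demote a perfectly good clocksource. The watchdog_resumed flag makes that first pass re-baseline cs->wd_last instead of passing judgment. The guard in isolation:

    spin_lock(&watchdog_lock);
    resumed = watchdog_resumed;      /* snapshot and clear under the lock */
    watchdog_resumed = 0;
    /* ... */
    if (resumed)
            cs->wd_last = csnow;     /* re-baseline only, skip the verdict */
    else
            cs_nsec = cyc2ns(cs, (csnow - cs->wd_last) & cs->mask);
    spin_unlock(&watchdog_lock);
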
1703     diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
1704     index bfda3f7..a96ec9a 100644
1705     --- a/kernel/time/tick-common.c
1706     +++ b/kernel/time/tick-common.c
1707     @@ -31,7 +31,7 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
1708     */
1709     ktime_t tick_next_period;
1710     ktime_t tick_period;
1711     -static int tick_do_timer_cpu = -1;
1712     +int tick_do_timer_cpu __read_mostly = -1;
1713     DEFINE_SPINLOCK(tick_device_lock);
1714    
1715     /*
1716     @@ -295,6 +295,12 @@ static void tick_shutdown(unsigned int *cpup)
1717     clockevents_exchange_device(dev, NULL);
1718     td->evtdev = NULL;
1719     }
1720     + /* Transfer the do_timer job away from this cpu */
1721     + if (*cpup == tick_do_timer_cpu) {
1722     + int cpu = first_cpu(cpu_online_map);
1723     +
1724     + tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu : -1;
1725     + }
1726     spin_unlock_irqrestore(&tick_device_lock, flags);
1727     }
1728    
1729     diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
1730     index c9d203b..bb13f27 100644
1731     --- a/kernel/time/tick-internal.h
1732     +++ b/kernel/time/tick-internal.h
1733     @@ -5,6 +5,7 @@ DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
1734     extern spinlock_t tick_device_lock;
1735     extern ktime_t tick_next_period;
1736     extern ktime_t tick_period;
1737     +extern int tick_do_timer_cpu __read_mostly;
1738    
1739     extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast);
1740     extern void tick_handle_periodic(struct clock_event_device *dev);
1741     diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
1742     index 51556b9..f4fc867 100644
1743     --- a/kernel/time/tick-sched.c
1744     +++ b/kernel/time/tick-sched.c
1745     @@ -221,6 +221,18 @@ void tick_nohz_stop_sched_tick(void)
1746     ts->tick_stopped = 1;
1747     ts->idle_jiffies = last_jiffies;
1748     }
1749     +
1750     + /*
1751     + * If this cpu is the one which updates jiffies, then
1752     + * give up the assignment and let it be taken by the
1753     + * cpu which runs the tick timer next, which might be
1754     + * this cpu as well. If we don't drop it here, the
1755     + * jiffies might be stale and do_timer() might never
1756     + * be invoked.
1757     + */
1758     + if (cpu == tick_do_timer_cpu)
1759     + tick_do_timer_cpu = -1;
1760     +
1761     /*
1762     * calculate the expiry time for the next timer wheel
1763     * timer
1764     @@ -338,12 +350,24 @@ static void tick_nohz_handler(struct clock_event_device *dev)
1765     {
1766     struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
1767     struct pt_regs *regs = get_irq_regs();
1768     + int cpu = smp_processor_id();
1769     ktime_t now = ktime_get();
1770    
1771     dev->next_event.tv64 = KTIME_MAX;
1772    
1773     + /*
1774     + * Check if the do_timer duty was dropped. We don't care about
1775     + * concurrency: This happens only when the cpu in charge went
1776     + * into a long sleep. If two cpus happen to assign themselves to
1777     + * this duty, then the jiffies update is still serialized by
1778     + * xtime_lock.
1779     + */
1780     + if (unlikely(tick_do_timer_cpu == -1))
1781     + tick_do_timer_cpu = cpu;
1782     +
1783     /* Check, if the jiffies need an update */
1784     - tick_do_update_jiffies64(now);
1785     + if (tick_do_timer_cpu == cpu)
1786     + tick_do_update_jiffies64(now);
1787    
1788     /*
1789     * When we are idle and the tick is stopped, we have to touch
1790     @@ -431,9 +455,23 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
1791     struct hrtimer_cpu_base *base = timer->base->cpu_base;
1792     struct pt_regs *regs = get_irq_regs();
1793     ktime_t now = ktime_get();
1794     + int cpu = smp_processor_id();
1795     +
1796     +#ifdef CONFIG_NO_HZ
1797     + /*
1798     + * Check if the do_timer duty was dropped. We don't care about
1799     + * concurrency: This happens only when the cpu in charge went
1800     + * into a long sleep. If two cpus happen to assign themselves to
1801     + * this duty, then the jiffies update is still serialized by
1802     + * xtime_lock.
1803     + */
1804     + if (unlikely(tick_do_timer_cpu == -1))
1805     + tick_do_timer_cpu = cpu;
1806     +#endif
1807    
1808     /* Check, if the jiffies need an update */
1809     - tick_do_update_jiffies64(now);
1810     + if (tick_do_timer_cpu == cpu)
1811     + tick_do_update_jiffies64(now);
1812    
1813     /*
1814     * Do not call, when we are not in irq context and have
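
Taken together, the tick-common and tick-sched hunks implement duty handoff for the jiffies update: exactly one CPU owns tick_do_timer_cpu; an idle CPU going tickless (or a CPU going offline) drops or re-homes the duty, and the next CPU whose tick handler fires adopts it. Racing adopters are harmless, since tick_do_update_jiffies64() is serialized by xtime_lock. The handler-side protocol, condensed:

    /* Sketch: per-cpu tick handler side of the do_timer handoff. */
    int cpu = smp_processor_id();

    if (unlikely(tick_do_timer_cpu == -1))
            tick_do_timer_cpu = cpu;       /* orphaned duty: adopt it */

    if (tick_do_timer_cpu == cpu)
            tick_do_update_jiffies64(now); /* only the owner advances jiffies */
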
1815     diff --git a/kernel/timer.c b/kernel/timer.c
1816     index dd6c2c1..e045774 100644
1817     --- a/kernel/timer.c
1818     +++ b/kernel/timer.c
1819     @@ -1903,6 +1903,8 @@ unregister_time_interpolator(struct time_interpolator *ti)
1820     prev = &curr->next;
1821     }
1822    
1823     + clocksource_resume();
1824     +
1825     write_seqlock_irqsave(&xtime_lock, flags);
1826     if (ti == time_interpolator) {
1827     /* we lost the best time-interpolator: */
1828     diff --git a/lib/zlib_inflate/inflate.c b/lib/zlib_inflate/inflate.c
1829     index fceb97c..7e1e311 100644
1830     --- a/lib/zlib_inflate/inflate.c
1831     +++ b/lib/zlib_inflate/inflate.c
1832     @@ -743,12 +743,14 @@ int zlib_inflate(z_streamp strm, int flush)
1833    
1834     strm->data_type = state->bits + (state->last ? 64 : 0) +
1835     (state->mode == TYPE ? 128 : 0);
1836     - if (((in == 0 && out == 0) || flush == Z_FINISH) && ret == Z_OK)
1837     - ret = Z_BUF_ERROR;
1838    
1839     if (flush == Z_PACKET_FLUSH && ret == Z_OK &&
1840     - (strm->avail_out != 0 || strm->avail_in == 0))
1841     + strm->avail_out != 0 && strm->avail_in == 0)
1842     return zlib_inflateSyncPacket(strm);
1843     +
1844     + if (((in == 0 && out == 0) || flush == Z_FINISH) && ret == Z_OK)
1845     + ret = Z_BUF_ERROR;
1846     +
1847     return ret;
1848     }
1849    
1850     diff --git a/mm/hugetlb.c b/mm/hugetlb.c
1851     index 36db012..88e708b 100644
1852     --- a/mm/hugetlb.c
1853     +++ b/mm/hugetlb.c
1854     @@ -140,6 +140,8 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
1855     return page;
1856    
1857     fail:
1858     + if (vma->vm_flags & VM_MAYSHARE)
1859     + resv_huge_pages++;
1860     spin_unlock(&hugetlb_lock);
1861     return NULL;
1862     }
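
alloc_huge_page() charges resv_huge_pages up front for shared mappings; when the dequeue then fails, the charge has to be handed back or the reserve pool leaks one page per failure. The generic reserve-then-rollback shape, names hypothetical:

    spin_lock(&pool_lock);
    if (charged)
            reserved--;               /* take the reservation up front */
    page = dequeue_free_page();
    if (!page) {
            if (charged)
                    reserved++;       /* failure path: return the charge */
            spin_unlock(&pool_lock);
            return NULL;
    }
    spin_unlock(&pool_lock);
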
1863     diff --git a/mm/oom_kill.c b/mm/oom_kill.c
1864     index 3791edf..b3a3dd6 100644
1865     --- a/mm/oom_kill.c
1866     +++ b/mm/oom_kill.c
1867     @@ -397,6 +397,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
1868     struct task_struct *p;
1869     unsigned long points = 0;
1870     unsigned long freed = 0;
1871     + int constraint;
1872    
1873     blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
1874     if (freed > 0)
1875     @@ -411,14 +412,15 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
1876     show_mem();
1877     }
1878    
1879     - cpuset_lock();
1880     - read_lock(&tasklist_lock);
1881     -
1882     /*
1883     * Check if there were limitations on the allocation (only relevant for
1884     * NUMA) that may require different handling.
1885     */
1886     - switch (constrained_alloc(zonelist, gfp_mask)) {
1887     + constraint = constrained_alloc(zonelist, gfp_mask);
1888     + cpuset_lock();
1889     + read_lock(&tasklist_lock);
1890     +
1891     + switch (constraint) {
1892     case CONSTRAINT_MEMORY_POLICY:
1893     oom_kill_process(current, points,
1894     "No available memory (MPOL_BIND)");
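
The hunk hoists the constraint classification out of cpuset_lock()/tasklist_lock and switches on a saved value, the usual "compute first, lock only around the action" split, which also keeps the lock hold time minimal. In outline:

    int constraint = constrained_alloc(zonelist, gfp_mask); /* outside locks */

    cpuset_lock();
    read_lock(&tasklist_lock);
    switch (constraint) {
    /* ... select and kill a task under the locks ... */
    }
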
1895     diff --git a/mm/slob.c b/mm/slob.c
1896     index 5adc29c..c683d35 100644
1897     --- a/mm/slob.c
1898     +++ b/mm/slob.c
1899     @@ -150,15 +150,6 @@ static void slob_free(void *block, int size)
1900     spin_unlock_irqrestore(&slob_lock, flags);
1901     }
1902    
1903     -static int FASTCALL(find_order(int size));
1904     -static int fastcall find_order(int size)
1905     -{
1906     - int order = 0;
1907     - for ( ; size > 4096 ; size >>=1)
1908     - order++;
1909     - return order;
1910     -}
1911     -
1912     void *__kmalloc(size_t size, gfp_t gfp)
1913     {
1914     slob_t *m;
1915     @@ -174,7 +165,7 @@ void *__kmalloc(size_t size, gfp_t gfp)
1916     if (!bb)
1917     return 0;
1918    
1919     - bb->order = find_order(size);
1920     + bb->order = get_order(size);
1921     bb->pages = (void *)__get_free_pages(gfp, bb->order);
1922    
1923     if (bb->pages) {
1924     @@ -284,7 +275,7 @@ void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags)
1925     if (c->size < PAGE_SIZE)
1926     b = slob_alloc(c->size, flags, c->align);
1927     else
1928     - b = (void *)__get_free_pages(flags, find_order(c->size));
1929     + b = (void *)__get_free_pages(flags, get_order(c->size));
1930    
1931     if (c->ctor)
1932     c->ctor(b, c, SLAB_CTOR_CONSTRUCTOR);
1933     @@ -311,7 +302,7 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
1934     if (c->size < PAGE_SIZE)
1935     slob_free(b, c->size);
1936     else
1937     - free_pages((unsigned long)b, find_order(c->size));
1938     + free_pages((unsigned long)b, get_order(c->size));
1939     }
1940     EXPORT_SYMBOL(kmem_cache_free);
1941    
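
get_order() rounds a byte count up to the covering page order; the removed find_order() rounded down whenever the size was just past a power of two, so __kmalloc() could hand out an undersized block. Worked through with 4 KiB pages:

    /* find_order(8193): 8193 > 4096 -> order = 1, size = 4096; loop ends.
     *   order 1 == 8192 bytes  -> 1 byte short of the request.
     * get_order(8193) == 2     -> 16384 bytes, correctly rounded up.   */
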
1942     diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
1943     index 23b99ae..75bd597 100644
1944     --- a/net/ipv4/netfilter/ip_conntrack_core.c
1945     +++ b/net/ipv4/netfilter/ip_conntrack_core.c
1946     @@ -302,7 +302,6 @@ destroy_conntrack(struct nf_conntrack *nfct)
1947     {
1948     struct ip_conntrack *ct = (struct ip_conntrack *)nfct;
1949     struct ip_conntrack_protocol *proto;
1950     - struct ip_conntrack_helper *helper;
1951     typeof(ip_conntrack_destroyed) destroyed;
1952    
1953     DEBUGP("destroy_conntrack(%p)\n", ct);
1954     @@ -312,10 +311,6 @@ destroy_conntrack(struct nf_conntrack *nfct)
1955     ip_conntrack_event(IPCT_DESTROY, ct);
1956     set_bit(IPS_DYING_BIT, &ct->status);
1957    
1958     - helper = ct->helper;
1959     - if (helper && helper->destroy)
1960     - helper->destroy(ct);
1961     -
1962     /* To make sure we don't get any weird locking issues here:
1963     * destroy_conntrack() MUST NOT be called with a write lock
1964     * to ip_conntrack_lock!!! -HW */
1965     @@ -356,6 +351,11 @@ destroy_conntrack(struct nf_conntrack *nfct)
1966     static void death_by_timeout(unsigned long ul_conntrack)
1967     {
1968     struct ip_conntrack *ct = (void *)ul_conntrack;
1969     + struct ip_conntrack_helper *helper;
1970     +
1971     + helper = ct->helper;
1972     + if (helper && helper->destroy)
1973     + helper->destroy(ct);
1974    
1975     write_lock_bh(&ip_conntrack_lock);
1976     /* Inside lock so preempt is disabled on module removal path.
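
Moving the helper's destroy callback from destroy_conntrack() to death_by_timeout() means it now runs when the timeout fires, while the conntrack is still hashed and before ip_conntrack_lock is taken, rather than at last-reference drop time from an unpredictable context. The resulting shape (matching the hunk above):

    static void death_by_timeout(unsigned long ul_conntrack)
    {
            struct ip_conntrack *ct = (void *)ul_conntrack;

            if (ct->helper && ct->helper->destroy)
                    ct->helper->destroy(ct);  /* runs outside the table lock */

            write_lock_bh(&ip_conntrack_lock);
            /* ... unhash and release as before ... */
    }
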
1977     diff --git a/net/ipv4/netfilter/ip_nat_proto_gre.c b/net/ipv4/netfilter/ip_nat_proto_gre.c
1978     index 9581020..e3146a3 100644
1979     --- a/net/ipv4/netfilter/ip_nat_proto_gre.c
1980     +++ b/net/ipv4/netfilter/ip_nat_proto_gre.c
1981     @@ -70,6 +70,11 @@ gre_unique_tuple(struct ip_conntrack_tuple *tuple,
1982     __be16 *keyptr;
1983     unsigned int min, i, range_size;
1984    
1985     + /* If there is no master conntrack we are not PPTP,
1986     + do not change tuples */
1987     + if (!conntrack->master)
1988     + return 0;
1989     +
1990     if (maniptype == IP_NAT_MANIP_SRC)
1991     keyptr = &tuple->src.u.gre.key;
1992     else
1993     @@ -122,18 +127,9 @@ gre_manip_pkt(struct sk_buff **pskb,
1994     if (maniptype == IP_NAT_MANIP_DST) {
1995     /* key manipulation is always dest */
1996     switch (greh->version) {
1997     - case 0:
1998     - if (!greh->key) {
1999     - DEBUGP("can't nat GRE w/o key\n");
2000     - break;
2001     - }
2002     - if (greh->csum) {
2003     - /* FIXME: Never tested this code... */
2004     - nf_proto_csum_replace4(gre_csum(greh), *pskb,
2005     - *(gre_key(greh)),
2006     - tuple->dst.u.gre.key, 0);
2007     - }
2008     - *(gre_key(greh)) = tuple->dst.u.gre.key;
2009     + case GRE_VERSION_1701:
2010     + /* We do not currently NAT any GREv0 packets.
2011     + * Try to behave like "ip_nat_proto_unknown" */
2012     break;
2013     case GRE_VERSION_PPTP:
2014     DEBUGP("call_id -> 0x%04x\n",
2015     diff --git a/net/ipv4/netfilter/nf_nat_proto_gre.c b/net/ipv4/netfilter/nf_nat_proto_gre.c
2016     index e5a34c1..ca3ff84 100644
2017     --- a/net/ipv4/netfilter/nf_nat_proto_gre.c
2018     +++ b/net/ipv4/netfilter/nf_nat_proto_gre.c
2019     @@ -72,6 +72,11 @@ gre_unique_tuple(struct nf_conntrack_tuple *tuple,
2020     __be16 *keyptr;
2021     unsigned int min, i, range_size;
2022    
2023     + /* If there is no master conntrack we are not PPTP,
2024     + do not change tuples */
2025     + if (!conntrack->master)
2026     + return 0;
2027     +
2028     if (maniptype == IP_NAT_MANIP_SRC)
2029     keyptr = &tuple->src.u.gre.key;
2030     else
2031     @@ -122,18 +127,9 @@ gre_manip_pkt(struct sk_buff **pskb, unsigned int iphdroff,
2032     if (maniptype != IP_NAT_MANIP_DST)
2033     return 1;
2034     switch (greh->version) {
2035     - case 0:
2036     - if (!greh->key) {
2037     - DEBUGP("can't nat GRE w/o key\n");
2038     - break;
2039     - }
2040     - if (greh->csum) {
2041     - /* FIXME: Never tested this code... */
2042     - nf_proto_csum_replace4(gre_csum(greh), *pskb,
2043     - *(gre_key(greh)),
2044     - tuple->dst.u.gre.key, 0);
2045     - }
2046     - *(gre_key(greh)) = tuple->dst.u.gre.key;
2047     + case GRE_VERSION_1701:
2048     + /* We do not currently NAT any GREv0 packets.
2049     + * Try to behave like "nf_nat_proto_unknown" */
2050     break;
2051     case GRE_VERSION_PPTP:
2052     DEBUGP("call_id -> 0x%04x\n", ntohs(tuple->dst.u.gre.key));
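
With the version-0 branch reduced to a no-op, only PPTP's enhanced GRE (the one variant for which the PPTP helper creates a master conntrack) gets its call-id rewritten, and gre_unique_tuple() now refuses to invent tuples when no master exists. For reference, the version values as defined in the conntrack GRE headers:

    /* 3-bit version field of the GRE header:
     *   GRE_VERSION_1701 == 0  -> classic GRE, now passed through untouched
     *   GRE_VERSION_PPTP == 1  -> enhanced GRE, call-id (key) is NATed    */
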
2053     diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
2054     index 3834b10..824c6b9 100644
2055     --- a/net/ipv4/tcp.c
2056     +++ b/net/ipv4/tcp.c
2057     @@ -1759,8 +1759,7 @@ int tcp_disconnect(struct sock *sk, int flags)
2058     tcp_clear_retrans(tp);
2059     inet_csk_delack_init(sk);
2060     sk->sk_send_head = NULL;
2061     - tp->rx_opt.saw_tstamp = 0;
2062     - tcp_sack_reset(&tp->rx_opt);
2063     + memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
2064     __sk_dst_reset(sk);
2065    
2066     BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
2067     diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
2068     index 452a82c..a541137 100644
2069     --- a/net/ipv6/addrconf.c
2070     +++ b/net/ipv6/addrconf.c
2071     @@ -2281,8 +2281,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
2072     break;
2073    
2074     case NETDEV_CHANGENAME:
2075     -#ifdef CONFIG_SYSCTL
2076     if (idev) {
2077     + snmp6_unregister_dev(idev);
2078     +#ifdef CONFIG_SYSCTL
2079     addrconf_sysctl_unregister(&idev->cnf);
2080     neigh_sysctl_unregister(idev->nd_parms);
2081     neigh_sysctl_register(dev, idev->nd_parms,
2082     @@ -2290,8 +2291,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
2083     &ndisc_ifinfo_sysctl_change,
2084     NULL);
2085     addrconf_sysctl_register(idev, &idev->cnf);
2086     - }
2087     #endif
2088     + snmp6_register_dev(idev);
2089     + }
2090     break;
2091     };
2092    
2093     @@ -4060,6 +4062,10 @@ int __init addrconf_init(void)
2094     return err;
2095    
2096     ip6_null_entry.rt6i_idev = in6_dev_get(&loopback_dev);
2097     +#ifdef CONFIG_IPV6_MULTIPLE_TABLES
2098     + ip6_prohibit_entry.rt6i_idev = in6_dev_get(&loopback_dev);
2099     + ip6_blk_hole_entry.rt6i_idev = in6_dev_get(&loopback_dev);
2100     +#endif
2101    
2102     register_netdevice_notifier(&ipv6_dev_notf);
2103    
2104     diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
2105     index 3205ec9..794b930 100644
2106     --- a/net/ipv6/exthdrs.c
2107     +++ b/net/ipv6/exthdrs.c
2108     @@ -652,6 +652,14 @@ EXPORT_SYMBOL_GPL(ipv6_invert_rthdr);
2109     Hop-by-hop options.
2110     **********************************/
2111    
2112     +/*
2113     + * Note: we cannot rely on skb->dst before we assign it in ip6_route_input().
2114     + */
2115     +static inline struct inet6_dev *ipv6_skb_idev(struct sk_buff *skb)
2116     +{
2117     + return skb->dst ? ip6_dst_idev(skb->dst) : __in6_dev_get(skb->dev);
2118     +}
2119     +
2120     /* Router Alert as of RFC 2711 */
2121    
2122     static int ipv6_hop_ra(struct sk_buff **skbp, int optoff)
2123     @@ -678,25 +686,25 @@ static int ipv6_hop_jumbo(struct sk_buff **skbp, int optoff)
2124     if (skb->nh.raw[optoff+1] != 4 || (optoff&3) != 2) {
2125     LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
2126     skb->nh.raw[optoff+1]);
2127     - IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
2128     + IP6_INC_STATS_BH(ipv6_skb_idev(skb),
2129     IPSTATS_MIB_INHDRERRORS);
2130     goto drop;
2131     }
2132    
2133     pkt_len = ntohl(*(__be32*)(skb->nh.raw+optoff+2));
2134     if (pkt_len <= IPV6_MAXPLEN) {
2135     - IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
2136     + IP6_INC_STATS_BH(ipv6_skb_idev(skb), IPSTATS_MIB_INHDRERRORS);
2137     icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2);
2138     return 0;
2139     }
2140     if (skb->nh.ipv6h->payload_len) {
2141     - IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
2142     + IP6_INC_STATS_BH(ipv6_skb_idev(skb), IPSTATS_MIB_INHDRERRORS);
2143     icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff);
2144     return 0;
2145     }
2146    
2147     if (pkt_len > skb->len - sizeof(struct ipv6hdr)) {
2148     - IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INTRUNCATEDPKTS);
2149     + IP6_INC_STATS_BH(ipv6_skb_idev(skb), IPSTATS_MIB_INTRUNCATEDPKTS);
2150     goto drop;
2151     }
2152    
2153     diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
2154     index 61e7a6c..1b34ee5 100644
2155     --- a/net/ipv6/ip6_input.c
2156     +++ b/net/ipv6/ip6_input.c
2157     @@ -235,7 +235,7 @@ int ip6_mc_input(struct sk_buff *skb)
2158     IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INMCASTPKTS);
2159    
2160     hdr = skb->nh.ipv6h;
2161     - deliver = likely(!(skb->dev->flags & (IFF_PROMISC|IFF_ALLMULTI))) ||
2162     + deliver = unlikely(skb->dev->flags & (IFF_PROMISC|IFF_ALLMULTI)) ||
2163     ipv6_chk_mcast_addr(skb->dev, &hdr->daddr, NULL);
2164    
2165     /*
2166     diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
2167     index 3055169..9fa3ffb 100644
2168     --- a/net/ipv6/ip6_output.c
2169     +++ b/net/ipv6/ip6_output.c
2170     @@ -449,10 +449,17 @@ int ip6_forward(struct sk_buff *skb)
2171     */
2172     if (xrlim_allow(dst, 1*HZ))
2173     ndisc_send_redirect(skb, n, target);
2174     - } else if (ipv6_addr_type(&hdr->saddr)&(IPV6_ADDR_MULTICAST|IPV6_ADDR_LOOPBACK
2175     - |IPV6_ADDR_LINKLOCAL)) {
2176     + } else {
2177     + int addrtype = ipv6_addr_type(&hdr->saddr);
2178     +
2179     /* This check is security critical. */
2180     - goto error;
2181     + if (addrtype & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LOOPBACK))
2182     + goto error;
2183     + if (addrtype & IPV6_ADDR_LINKLOCAL) {
2184     + icmpv6_send(skb, ICMPV6_DEST_UNREACH,
2185     + ICMPV6_NOT_NEIGHBOUR, 0, skb->dev);
2186     + goto error;
2187     + }
2188     }
2189    
2190     if (skb->len > dst_mtu(dst)) {
2191     diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
2192     index fa3fb50..d57853d 100644
2193     --- a/net/ipv6/proc.c
2194     +++ b/net/ipv6/proc.c
2195     @@ -236,6 +236,7 @@ int snmp6_unregister_dev(struct inet6_dev *idev)
2196     return -EINVAL;
2197     remove_proc_entry(idev->stats.proc_dir_entry->name,
2198     proc_net_devsnmp6);
2199     + idev->stats.proc_dir_entry = NULL;
2200     return 0;
2201     }
2202    
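
Clearing the saved proc_dir_entry after remove_proc_entry() is what lets the NETDEV_CHANGENAME path above do unregister-then-register safely: a stale pointer would be dereferenced on a second unregister and would also masquerade as "already registered". The idempotent-teardown shape:

    if (idev->stats.proc_dir_entry) {
            remove_proc_entry(idev->stats.proc_dir_entry->name,
                              proc_net_devsnmp6);
            idev->stats.proc_dir_entry = NULL;   /* safe to run again */
    }
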
2203     diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
2204     index 93c4223..dff33cc 100644
2205     --- a/net/ipv6/xfrm6_tunnel.c
2206     +++ b/net/ipv6/xfrm6_tunnel.c
2207     @@ -261,7 +261,7 @@ static int xfrm6_tunnel_rcv(struct sk_buff *skb)
2208     __be32 spi;
2209    
2210     spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr);
2211     - return xfrm6_rcv_spi(skb, spi);
2212     + return xfrm6_rcv_spi(skb, spi) > 0 ? : 0;
2213     }
2214    
2215     static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
2216     diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
2217     index b3a70eb..ce28fdd 100644
2218     --- a/net/netfilter/nf_conntrack_core.c
2219     +++ b/net/netfilter/nf_conntrack_core.c
2220     @@ -315,7 +315,6 @@ static void
2221     destroy_conntrack(struct nf_conntrack *nfct)
2222     {
2223     struct nf_conn *ct = (struct nf_conn *)nfct;
2224     - struct nf_conn_help *help = nfct_help(ct);
2225     struct nf_conntrack_l3proto *l3proto;
2226     struct nf_conntrack_l4proto *l4proto;
2227     typeof(nf_conntrack_destroyed) destroyed;
2228     @@ -327,9 +326,6 @@ destroy_conntrack(struct nf_conntrack *nfct)
2229     nf_conntrack_event(IPCT_DESTROY, ct);
2230     set_bit(IPS_DYING_BIT, &ct->status);
2231    
2232     - if (help && help->helper && help->helper->destroy)
2233     - help->helper->destroy(ct);
2234     -
2235     /* To make sure we don't get any weird locking issues here:
2236     * destroy_conntrack() MUST NOT be called with a write lock
2237     * to nf_conntrack_lock!!! -HW */
2238     @@ -375,6 +371,10 @@ destroy_conntrack(struct nf_conntrack *nfct)
2239     static void death_by_timeout(unsigned long ul_conntrack)
2240     {
2241     struct nf_conn *ct = (void *)ul_conntrack;
2242     + struct nf_conn_help *help = nfct_help(ct);
2243     +
2244     + if (help && help->helper && help->helper->destroy)
2245     + help->helper->destroy(ct);
2246    
2247     write_lock_bh(&nf_conntrack_lock);
2248     /* Inside lock so preempt is disabled on module removal path.
2249     diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
2250     index de889f2..a86f36b 100644
2251     --- a/net/sched/sch_prio.c
2252     +++ b/net/sched/sch_prio.c
2253     @@ -74,7 +74,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
2254     band = res.classid;
2255     }
2256     band = TC_H_MIN(band) - 1;
2257     - if (band > q->bands)
2258     + if (band >= q->bands)
2259     return q->queues[q->prio2band[0]];
2260    
2261     return q->queues[band];
2262     diff --git a/net/sctp/socket.c b/net/sctp/socket.c
2263     index a1d026f..843c928 100644
2264     --- a/net/sctp/socket.c
2265     +++ b/net/sctp/socket.c
2266     @@ -3847,7 +3847,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
2267     memcpy(&temp, &from->ipaddr, sizeof(temp));
2268     sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
2269     addrlen = sctp_get_af_specific(sk->sk_family)->sockaddr_len;
2270     - if(space_left < addrlen)
2271     + if (space_left < addrlen)
2272     return -ENOMEM;
2273     if (copy_to_user(to, &temp, addrlen))
2274     return -EFAULT;
2275     @@ -3936,8 +3936,9 @@ done:
2276     /* Helper function that copies local addresses to user and returns the number
2277     * of addresses copied.
2278     */
2279     -static int sctp_copy_laddrs_to_user_old(struct sock *sk, __u16 port, int max_addrs,
2280     - void __user *to)
2281     +static int sctp_copy_laddrs_old(struct sock *sk, __u16 port,
2282     + int max_addrs, void *to,
2283     + int *bytes_copied)
2284     {
2285     struct list_head *pos, *next;
2286     struct sctp_sockaddr_entry *addr;
2287     @@ -3954,10 +3955,10 @@ static int sctp_copy_laddrs_to_user_old(struct sock *sk, __u16 port, int max_add
2288     sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
2289     &temp);
2290     addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
2291     - if (copy_to_user(to, &temp, addrlen))
2292     - return -EFAULT;
2293     + memcpy(to, &temp, addrlen);
2294    
2295     to += addrlen;
2296     + *bytes_copied += addrlen;
2297     cnt ++;
2298     if (cnt >= max_addrs) break;
2299     }
2300     @@ -3965,8 +3966,8 @@ static int sctp_copy_laddrs_to_user_old(struct sock *sk, __u16 port, int max_add
2301     return cnt;
2302     }
2303    
2304     -static int sctp_copy_laddrs_to_user(struct sock *sk, __u16 port,
2305     - void __user **to, size_t space_left)
2306     +static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
2307     + size_t space_left, int *bytes_copied)
2308     {
2309     struct list_head *pos, *next;
2310     struct sctp_sockaddr_entry *addr;
2311     @@ -3983,14 +3984,14 @@ static int sctp_copy_laddrs_to_user(struct sock *sk, __u16 port,
2312     sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
2313     &temp);
2314     addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
2315     - if(space_left<addrlen)
2316     + if (space_left < addrlen)
2317     return -ENOMEM;
2318     - if (copy_to_user(*to, &temp, addrlen))
2319     - return -EFAULT;
2320     + memcpy(to, &temp, addrlen);
2321    
2322     - *to += addrlen;
2323     + to += addrlen;
2324     cnt ++;
2325     space_left -= addrlen;
2326     + *bytes_copied += addrlen;
2327     }
2328    
2329     return cnt;
2330     @@ -4014,6 +4015,9 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
2331     int addrlen;
2332     rwlock_t *addr_lock;
2333     int err = 0;
2334     + void *addrs;
2335     + void *buf;
2336     + int bytes_copied = 0;
2337    
2338     if (len != sizeof(struct sctp_getaddrs_old))
2339     return -EINVAL;
2340     @@ -4041,6 +4045,15 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
2341    
2342     to = getaddrs.addrs;
2343    
2344     + /* Allocate space for a local packed array to hold all
2345     + * the data. We store the addresses here first and then write them
2346     + * to the user in one shot.
2347     + */
2348     + addrs = kmalloc(sizeof(union sctp_addr) * getaddrs.addr_num,
2349     + GFP_KERNEL);
2350     + if (!addrs)
2351     + return -ENOMEM;
2352     +
2353     sctp_read_lock(addr_lock);
2354    
2355     /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid
2356     @@ -4050,38 +4063,42 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
2357     addr = list_entry(bp->address_list.next,
2358     struct sctp_sockaddr_entry, list);
2359     if (sctp_is_any(&addr->a)) {
2360     - cnt = sctp_copy_laddrs_to_user_old(sk, bp->port,
2361     - getaddrs.addr_num,
2362     - to);
2363     - if (cnt < 0) {
2364     - err = cnt;
2365     - goto unlock;
2366     - }
2367     + cnt = sctp_copy_laddrs_old(sk, bp->port,
2368     + getaddrs.addr_num,
2369     + addrs, &bytes_copied);
2370     goto copy_getaddrs;
2371     }
2372     }
2373    
2374     + buf = addrs;
2375     list_for_each(pos, &bp->address_list) {
2376     addr = list_entry(pos, struct sctp_sockaddr_entry, list);
2377     memcpy(&temp, &addr->a, sizeof(temp));
2378     sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
2379     addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
2380     - if (copy_to_user(to, &temp, addrlen)) {
2381     - err = -EFAULT;
2382     - goto unlock;
2383     - }
2384     - to += addrlen;
2385     + memcpy(buf, &temp, addrlen);
2386     + buf += addrlen;
2387     + bytes_copied += addrlen;
2388     cnt ++;
2389     if (cnt >= getaddrs.addr_num) break;
2390     }
2391    
2392     copy_getaddrs:
2393     + sctp_read_unlock(addr_lock);
2394     +
2395     + /* copy the entire address list into the user provided space */
2396     + if (copy_to_user(to, addrs, bytes_copied)) {
2397     + err = -EFAULT;
2398     + goto error;
2399     + }
2400     +
2401     + /* copy the leading structure back to user */
2402     getaddrs.addr_num = cnt;
2403     if (copy_to_user(optval, &getaddrs, sizeof(struct sctp_getaddrs_old)))
2404     err = -EFAULT;
2405    
2406     -unlock:
2407     - sctp_read_unlock(addr_lock);
2408     +error:
2409     + kfree(addrs);
2410     return err;
2411     }
2412    
2413     @@ -4101,7 +4118,9 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
2414     rwlock_t *addr_lock;
2415     int err = 0;
2416     size_t space_left;
2417     - int bytes_copied;
2418     + int bytes_copied = 0;
2419     + void *addrs;
2420     + void *buf;
2421    
2422     if (len <= sizeof(struct sctp_getaddrs))
2423     return -EINVAL;
2424     @@ -4129,6 +4148,9 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
2425     to = optval + offsetof(struct sctp_getaddrs,addrs);
2426     space_left = len - sizeof(struct sctp_getaddrs) -
2427     offsetof(struct sctp_getaddrs,addrs);
2428     + addrs = kmalloc(space_left, GFP_KERNEL);
2429     + if (!addrs)
2430     + return -ENOMEM;
2431    
2432     sctp_read_lock(addr_lock);
2433    
2434     @@ -4139,41 +4161,47 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
2435     addr = list_entry(bp->address_list.next,
2436     struct sctp_sockaddr_entry, list);
2437     if (sctp_is_any(&addr->a)) {
2438     - cnt = sctp_copy_laddrs_to_user(sk, bp->port,
2439     - &to, space_left);
2440     + cnt = sctp_copy_laddrs(sk, bp->port, addrs,
2441     + space_left, &bytes_copied);
2442     if (cnt < 0) {
2443     err = cnt;
2444     - goto unlock;
2445     + goto error;
2446     }
2447     goto copy_getaddrs;
2448     }
2449     }
2450    
2451     + buf = addrs;
2452     list_for_each(pos, &bp->address_list) {
2453     addr = list_entry(pos, struct sctp_sockaddr_entry, list);
2454     memcpy(&temp, &addr->a, sizeof(temp));
2455     sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
2456     addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
2457     - if(space_left < addrlen)
2458     - return -ENOMEM; /*fixme: right error?*/
2459     - if (copy_to_user(to, &temp, addrlen)) {
2460     - err = -EFAULT;
2461     - goto unlock;
2462     + if (space_left < addrlen) {
2463     + err = -ENOMEM; /*fixme: right error?*/
2464     + goto error;
2465     }
2466     - to += addrlen;
2467     + memcpy(buf, &temp, addrlen);
2468     + buf += addrlen;
2469     + bytes_copied += addrlen;
2470     cnt ++;
2471     space_left -= addrlen;
2472     }
2473    
2474     copy_getaddrs:
2475     + sctp_read_unlock(addr_lock);
2476     +
2477     + if (copy_to_user(to, addrs, bytes_copied)) {
2478     + err = -EFAULT;
2479     + goto error;
2480     + }
2481     if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num))
2482     return -EFAULT;
2483     - bytes_copied = ((char __user *)to) - optval;
2484     if (put_user(bytes_copied, optlen))
2485     return -EFAULT;
2486    
2487     -unlock:
2488     - sctp_read_unlock(addr_lock);
2489     +error:
2490     + kfree(addrs);
2491     return err;
2492     }
2493    
2494     @@ -4961,7 +4989,12 @@ int sctp_inet_listen(struct socket *sock, int backlog)
2495     /* Allocate HMAC for generating cookie. */
2496     if (sctp_hmac_alg) {
2497     tfm = crypto_alloc_hash(sctp_hmac_alg, 0, CRYPTO_ALG_ASYNC);
2498     - if (!tfm) {
2499     + if (IS_ERR(tfm)) {
2500     + if (net_ratelimit()) {
2501     + printk(KERN_INFO
2502     + "SCTP: failed to load transform for %s: %ld\n",
2503     + sctp_hmac_alg, PTR_ERR(tfm));
2504     + }
2505     err = -ENOSYS;
2506     goto out;
2507     }
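
The big getsockopt rework fixes a sleep-under-lock bug: copy_to_user() may fault and sleep, which is forbidden under the read-held SCTP address rwlock, so both paths now snapshot the address list into a kmalloc'd buffer under the lock, drop the lock, and copy to userspace once. (The listen hunk at the end is independent: crypto_alloc_hash() reports failure via ERR_PTR() cookies, not NULL, hence the IS_ERR() test.) The copy pattern in miniature, with hypothetical names (serialize_entries, table_lock):

    void *snap = kmalloc(space, GFP_KERNEL);
    int len, err = 0;

    if (!snap)
            return -ENOMEM;

    read_lock(&table_lock);
    len = serialize_entries(snap, space);   /* memcpy only: cannot sleep */
    read_unlock(&table_lock);

    if (copy_to_user(uptr, snap, len))      /* may fault and sleep: fine now */
            err = -EFAULT;
    kfree(snap);
    return err;
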
2508     diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
2509     index db298b5..c678f5f 100644
2510     --- a/net/sunrpc/auth_gss/svcauth_gss.c
2511     +++ b/net/sunrpc/auth_gss/svcauth_gss.c
2512     @@ -1196,13 +1196,7 @@ svcauth_gss_wrap_resp_integ(struct svc_rqst *rqstp)
2513     if (xdr_buf_subsegment(resbuf, &integ_buf, integ_offset,
2514     integ_len))
2515     BUG();
2516     - if (resbuf->page_len == 0
2517     - && resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE
2518     - < PAGE_SIZE) {
2519     - BUG_ON(resbuf->tail[0].iov_len);
2520     - /* Use head for everything */
2521     - resv = &resbuf->head[0];
2522     - } else if (resbuf->tail[0].iov_base == NULL) {
2523     + if (resbuf->tail[0].iov_base == NULL) {
2524     if (resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE > PAGE_SIZE)
2525     goto out_err;
2526     resbuf->tail[0].iov_base = resbuf->head[0].iov_base
2527     diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
2528     index 785c3e3..ba89293 100644
2529     --- a/net/xfrm/xfrm_policy.c
2530     +++ b/net/xfrm/xfrm_policy.c
2531     @@ -782,6 +782,10 @@ struct xfrm_policy *xfrm_policy_byid(u8 type, int dir, u32 id, int delete,
2532     struct hlist_head *chain;
2533     struct hlist_node *entry;
2534    
2535     + *err = -ENOENT;
2536     + if (xfrm_policy_id2dir(id) != dir)
2537     + return NULL;
2538     +
2539     *err = 0;
2540     write_lock_bh(&xfrm_policy_lock);
2541     chain = xfrm_policy_byidx + idx_hash(id);
2542     diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
2543     index 6bc7e7c..8912c0f 100644
2544     --- a/scripts/basic/fixdep.c
2545     +++ b/scripts/basic/fixdep.c
2546     @@ -249,6 +249,8 @@ void parse_config_file(char *map, size_t len)
2547     found:
2548     if (!memcmp(q - 7, "_MODULE", 7))
2549     q -= 7;
2550     + if ((q - p - 7) < 0)
2551     + continue;
2552     use_config(p+7, q-p-7);
2553     }
2554     }
2555     diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
2556     index c94291b..a6f8992 100644
2557     --- a/sound/pci/hda/patch_sigmatel.c
2558     +++ b/sound/pci/hda/patch_sigmatel.c
2559     @@ -1751,6 +1751,7 @@ static int stac92xx_resume(struct hda_codec *codec)
2560    
2561     stac92xx_init(codec);
2562     stac92xx_set_config_regs(codec);
2563     + snd_hda_resume_ctls(codec, spec->mixer);
2564     for (i = 0; i < spec->num_mixers; i++)
2565     snd_hda_resume_ctls(codec, spec->mixers[i]);
2566     if (spec->multiout.dig_out_nid)