Magellan Linux

Annotation of /trunk/kernel26-alx/patches-2.6.20-r6/0117-2.6.20.17-all-fixes.patch



Revision 1175
Thu Oct 14 12:15:46 2010 UTC by niro
File size: 53880 byte(s)
-2.6.20-alx-r6 new magellan 0.5.2 kernel
1 niro 1175 diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
2     index 10baa35..18c8b67 100644
3     --- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
4     +++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
5     @@ -167,11 +167,13 @@ static void do_drv_read(struct drv_cmd *cmd)
6    
7     static void do_drv_write(struct drv_cmd *cmd)
8     {
9     - u32 h = 0;
10     + u32 lo, hi;
11    
12     switch (cmd->type) {
13     case SYSTEM_INTEL_MSR_CAPABLE:
14     - wrmsr(cmd->addr.msr.reg, cmd->val, h);
15     + rdmsr(cmd->addr.msr.reg, lo, hi);
16     + lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
17     + wrmsr(cmd->addr.msr.reg, lo, hi);
18     break;
19     case SYSTEM_IO_CAPABLE:
20     acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
21     @@ -372,7 +374,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
22     struct cpufreq_freqs freqs;
23     cpumask_t online_policy_cpus;
24     struct drv_cmd cmd;
25     - unsigned int msr;
26     unsigned int next_state = 0; /* Index into freq_table */
27     unsigned int next_perf_state = 0; /* Index into perf table */
28     unsigned int i;
29     @@ -417,11 +418,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
30     case SYSTEM_INTEL_MSR_CAPABLE:
31     cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
32     cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
33     - msr =
34     - (u32) perf->states[next_perf_state].
35     - control & INTEL_MSR_RANGE;
36     - cmd.val = get_cur_val(online_policy_cpus);
37     - cmd.val = (cmd.val & ~INTEL_MSR_RANGE) | msr;
38     + cmd.val = (u32) perf->states[next_perf_state].control;
39     break;
40     case SYSTEM_IO_CAPABLE:
41     cmd.type = SYSTEM_IO_CAPABLE;
42     diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
43     index 831f540..eac3838 100644
44     --- a/arch/sparc/kernel/entry.S
45     +++ b/arch/sparc/kernel/entry.S
46     @@ -1749,8 +1749,8 @@ fpload:
47     __ndelay:
48     save %sp, -STACKFRAME_SZ, %sp
49     mov %i0, %o0
50     - call .umul
51     - mov 0x1ad, %o1 ! 2**32 / (1 000 000 000 / HZ)
52     + call .umul ! round multiplier up so large ns ok
53     + mov 0x1ae, %o1 ! 2**32 / (1 000 000 000 / HZ)
54     call .umul
55     mov %i1, %o1 ! udelay_val
56     ba delay_continue
57     @@ -1760,11 +1760,17 @@ __ndelay:
58     __udelay:
59     save %sp, -STACKFRAME_SZ, %sp
60     mov %i0, %o0
61     - sethi %hi(0x10c6), %o1
62     + sethi %hi(0x10c7), %o1 ! round multiplier up so large us ok
63     call .umul
64     - or %o1, %lo(0x10c6), %o1 ! 2**32 / 1 000 000
65     + or %o1, %lo(0x10c7), %o1 ! 2**32 / 1 000 000
66     call .umul
67     mov %i1, %o1 ! udelay_val
68     + sethi %hi(0x028f4b62), %l0 ! Add in rounding constant * 2**32,
69     + or %g0, %lo(0x028f4b62), %l0
70     + addcc %o0, %l0, %o0 ! 2**32 * 0.009 999
71     + bcs,a 3f
72     + add %o1, 0x01, %o1
73     +3:
74     call .umul
75     mov HZ, %o0 ! >>32 earlier for wider range
76    
77     diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S
78     index a65eba4..1c37ea8 100644
79     --- a/arch/sparc/lib/memset.S
80     +++ b/arch/sparc/lib/memset.S
81     @@ -162,7 +162,7 @@ __bzero:
82     8:
83     add %o0, 1, %o0
84     subcc %o1, 1, %o1
85     - bne,a 8b
86     + bne 8b
87     EX(stb %g3, [%o0 - 1], add %o1, 1)
88     0:
89     retl
90     diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
91     index 06459ae..0e19369 100644
92     --- a/arch/sparc64/kernel/head.S
93     +++ b/arch/sparc64/kernel/head.S
94     @@ -458,7 +458,6 @@ tlb_fixup_done:
95     or %g6, %lo(init_thread_union), %g6
96     ldx [%g6 + TI_TASK], %g4
97     mov %sp, %l6
98     - mov %o4, %l7
99    
100     wr %g0, ASI_P, %asi
101     mov 1, %g1
102     diff --git a/arch/um/os-Linux/user_syms.c b/arch/um/os-Linux/user_syms.c
103     index 3f33165..419b2d5 100644
104     --- a/arch/um/os-Linux/user_syms.c
105     +++ b/arch/um/os-Linux/user_syms.c
106     @@ -5,7 +5,8 @@
107     * so I *must* declare good prototypes for them and then EXPORT them.
108     * The kernel code uses the macro defined by include/linux/string.h,
109     * so I undef macros; the userspace code does not include that and I
110     - * add an EXPORT for the glibc one.*/
111     + * add an EXPORT for the glibc one.
112     + */
113    
114     #undef strlen
115     #undef strstr
116     @@ -61,12 +62,18 @@ EXPORT_SYMBOL_PROTO(dup2);
117     EXPORT_SYMBOL_PROTO(__xstat);
118     EXPORT_SYMBOL_PROTO(__lxstat);
119     EXPORT_SYMBOL_PROTO(__lxstat64);
120     +EXPORT_SYMBOL_PROTO(__fxstat64);
121     EXPORT_SYMBOL_PROTO(lseek);
122     EXPORT_SYMBOL_PROTO(lseek64);
123     EXPORT_SYMBOL_PROTO(chown);
124     +EXPORT_SYMBOL_PROTO(fchown);
125     EXPORT_SYMBOL_PROTO(truncate);
126     +EXPORT_SYMBOL_PROTO(ftruncate64);
127     EXPORT_SYMBOL_PROTO(utime);
128     +EXPORT_SYMBOL_PROTO(utimes);
129     +EXPORT_SYMBOL_PROTO(futimes);
130     EXPORT_SYMBOL_PROTO(chmod);
131     +EXPORT_SYMBOL_PROTO(fchmod);
132     EXPORT_SYMBOL_PROTO(rename);
133     EXPORT_SYMBOL_PROTO(__xmknod);
134    
135     @@ -102,14 +109,3 @@ EXPORT_SYMBOL(__stack_smash_handler);
136    
137     extern long __guard __attribute__((weak));
138     EXPORT_SYMBOL(__guard);
139     -
140     -/*
141     - * Overrides for Emacs so that we follow Linus's tabbing style.
142     - * Emacs will notice this stuff at the end of the file and automatically
143     - * adjust the settings for this buffer only. This must remain at the end
144     - * of the file.
145     - * ---------------------------------------------------------------------------
146     - * Local variables:
147     - * c-file-style: "linux"
148     - * End:
149     - */
150     diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
151     index 57c0db3..0c646dc 100644
152     --- a/drivers/ata/ata_piix.c
153     +++ b/drivers/ata/ata_piix.c
154     @@ -431,7 +431,7 @@ static const struct piix_map_db ich8_map_db = {
155     /* PM PS SM SS MAP */
156     { P0, P2, P1, P3 }, /* 00b (hardwired when in AHCI) */
157     { RV, RV, RV, RV },
158     - { IDE, IDE, NA, NA }, /* 10b (IDE mode) */
159     + { P0, P2, IDE, IDE }, /* 10b (IDE mode) */
160     { RV, RV, RV, RV },
161     },
162     };
163     diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
164     index b50ebb6..17d75ec 100644
165     --- a/drivers/ata/pata_atiixp.c
166     +++ b/drivers/ata/pata_atiixp.c
167     @@ -283,6 +283,7 @@ static const struct pci_device_id atiixp[] = {
168     { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP300_IDE), },
169     { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), },
170     { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), },
171     + { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), },
172    
173     { },
174     };
175     diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
176     index 7fd095e..be31b43 100644
177     --- a/drivers/base/cpu.c
178     +++ b/drivers/base/cpu.c
179     @@ -53,7 +53,7 @@ static ssize_t store_online(struct sys_device *dev, const char *buf,
180     ret = count;
181     return ret;
182     }
183     -static SYSDEV_ATTR(online, 0600, show_online, store_online);
184     +static SYSDEV_ATTR(online, 0644, show_online, store_online);
185    
186     static void __devinit register_cpu_control(struct cpu *cpu)
187     {
188     diff --git a/drivers/char/drm/i915_dma.c b/drivers/char/drm/i915_dma.c
189     index 9354ce3..ab1a1d0 100644
190     --- a/drivers/char/drm/i915_dma.c
191     +++ b/drivers/char/drm/i915_dma.c
192     @@ -172,6 +172,8 @@ static int i915_initialize(drm_device_t * dev,
193     * private backbuffer/depthbuffer usage.
194     */
195     dev_priv->use_mi_batchbuffer_start = 0;
196     + if (IS_I965G(dev)) /* 965 doesn't support older method */
197     + dev_priv->use_mi_batchbuffer_start = 1;
198    
199     /* Allow hardware batchbuffers unless told otherwise.
200     */
201     @@ -504,8 +506,13 @@ static int i915_dispatch_batchbuffer(drm_device_t * dev,
202    
203     if (dev_priv->use_mi_batchbuffer_start) {
204     BEGIN_LP_RING(2);
205     - OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
206     - OUT_RING(batch->start | MI_BATCH_NON_SECURE);
207     + if (IS_I965G(dev)) {
208     + OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
209     + OUT_RING(batch->start);
210     + } else {
211     + OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
212     + OUT_RING(batch->start | MI_BATCH_NON_SECURE);
213     + }
214     ADVANCE_LP_RING();
215     } else {
216     BEGIN_LP_RING(4);
217     @@ -722,7 +729,8 @@ static int i915_setparam(DRM_IOCTL_ARGS)
218    
219     switch (param.param) {
220     case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
221     - dev_priv->use_mi_batchbuffer_start = param.value;
222     + if (!IS_I965G(dev))
223     + dev_priv->use_mi_batchbuffer_start = param.value;
224     break;
225     case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
226     dev_priv->tex_lru_log_granularity = param.value;
227     diff --git a/drivers/char/drm/i915_drv.h b/drivers/char/drm/i915_drv.h
228     index 93cdcfe..30022c8 100644
229     --- a/drivers/char/drm/i915_drv.h
230     +++ b/drivers/char/drm/i915_drv.h
231     @@ -280,6 +280,7 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);
232     #define MI_BATCH_BUFFER_START (0x31<<23)
233     #define MI_BATCH_BUFFER_END (0xA<<23)
234     #define MI_BATCH_NON_SECURE (1)
235     +#define MI_BATCH_NON_SECURE_I965 (1<<8)
236    
237     #define MI_WAIT_FOR_EVENT ((0x3<<23))
238     #define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
239     diff --git a/drivers/char/random.c b/drivers/char/random.c
240     index 263e5e5..96561c8 100644
241     --- a/drivers/char/random.c
242     +++ b/drivers/char/random.c
243     @@ -693,9 +693,14 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
244    
245     if (r->pull && r->entropy_count < nbytes * 8 &&
246     r->entropy_count < r->poolinfo->POOLBITS) {
247     - int bytes = max_t(int, random_read_wakeup_thresh / 8,
248     - min_t(int, nbytes, sizeof(tmp)));
249     + /* If we're limited, always leave two wakeup worth's BITS */
250     int rsvd = r->limit ? 0 : random_read_wakeup_thresh/4;
251     + int bytes = nbytes;
252     +
253     + /* pull at least as many as BYTES as wakeup BITS */
254     + bytes = max_t(int, bytes, random_read_wakeup_thresh / 8);
255     + /* but never more than the buffer size */
256     + bytes = min_t(int, bytes, sizeof(tmp));
257    
258     DEBUG_ENT("going to reseed %s with %d bits "
259     "(%d of %d requested)\n",
260     diff --git a/drivers/char/sx.c b/drivers/char/sx.c
261     index 1da92a6..85a2328 100644
262     --- a/drivers/char/sx.c
263     +++ b/drivers/char/sx.c
264     @@ -2721,9 +2721,9 @@ static void __devexit sx_pci_remove(struct pci_dev *pdev)
265     its because the standard requires it. So check for SUBVENDOR_ID. */
266     static struct pci_device_id sx_pci_tbl[] = {
267     { PCI_VENDOR_ID_SPECIALIX, PCI_DEVICE_ID_SPECIALIX_SX_XIO_IO8,
268     - .subvendor = 0x0200,.subdevice = PCI_ANY_ID },
269     + .subvendor = PCI_ANY_ID, .subdevice = 0x0200 },
270     { PCI_VENDOR_ID_SPECIALIX, PCI_DEVICE_ID_SPECIALIX_SX_XIO_IO8,
271     - .subvendor = 0x0300,.subdevice = PCI_ANY_ID },
272     + .subvendor = PCI_ANY_ID, .subdevice = 0x0300 },
273     { 0 }
274     };
275    
276     diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
277     index f697449..1d11d13 100644
278     --- a/drivers/cpufreq/cpufreq_ondemand.c
279     +++ b/drivers/cpufreq/cpufreq_ondemand.c
280     @@ -95,15 +95,25 @@ static struct dbs_tuners {
281    
282     static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
283     {
284     - cputime64_t retval;
285     + cputime64_t idle_time;
286     + cputime64_t cur_jiffies;
287     + cputime64_t busy_time;
288    
289     - retval = cputime64_add(kstat_cpu(cpu).cpustat.idle,
290     - kstat_cpu(cpu).cpustat.iowait);
291     + cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
292     + busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
293     + kstat_cpu(cpu).cpustat.system);
294    
295     - if (dbs_tuners_ins.ignore_nice)
296     - retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice);
297     + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
298     + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
299     + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
300    
301     - return retval;
302     + if (!dbs_tuners_ins.ignore_nice) {
303     + busy_time = cputime64_add(busy_time,
304     + kstat_cpu(cpu).cpustat.nice);
305     + }
306     +
307     + idle_time = cputime64_sub(cur_jiffies, busy_time);
308     + return idle_time;
309     }
310    
311     /*
312     @@ -324,7 +334,7 @@ static struct attribute_group dbs_attr_group = {
313     static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
314     {
315     unsigned int idle_ticks, total_ticks;
316     - unsigned int load;
317     + unsigned int load = 0;
318     cputime64_t cur_jiffies;
319    
320     struct cpufreq_policy *policy;
321     @@ -338,7 +348,8 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
322     cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
323     total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
324     this_dbs_info->prev_cpu_wall);
325     - this_dbs_info->prev_cpu_wall = cur_jiffies;
326     + this_dbs_info->prev_cpu_wall = get_jiffies_64();
327     +
328     if (!total_ticks)
329     return;
330     /*
331     @@ -369,7 +380,8 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
332     if (tmp_idle_ticks < idle_ticks)
333     idle_ticks = tmp_idle_ticks;
334     }
335     - load = (100 * (total_ticks - idle_ticks)) / total_ticks;
336     + if (likely(total_ticks > idle_ticks))
337     + load = (100 * (total_ticks - idle_ticks)) / total_ticks;
338    
339     /* Check for frequency increase */
340     if (load > dbs_tuners_ins.up_threshold) {
341     diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
342     index c79df79..1e90401 100644
343     --- a/drivers/kvm/svm.c
344     +++ b/drivers/kvm/svm.c
345     @@ -1655,6 +1655,12 @@ static void svm_inject_page_fault(struct kvm_vcpu *vcpu,
346    
347     static int is_disabled(void)
348     {
349     + u64 vm_cr;
350     +
351     + rdmsrl(MSR_VM_CR, vm_cr);
352     + if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
353     + return 1;
354     +
355     return 0;
356     }
357    
358     diff --git a/drivers/kvm/svm.h b/drivers/kvm/svm.h
359     index df731c3..e8f0349 100644
360     --- a/drivers/kvm/svm.h
361     +++ b/drivers/kvm/svm.h
362     @@ -172,8 +172,11 @@ struct __attribute__ ((__packed__)) vmcb {
363     #define SVM_CPUID_FUNC 0x8000000a
364    
365     #define MSR_EFER_SVME_MASK (1ULL << 12)
366     +#define MSR_VM_CR 0xc0010114
367     #define MSR_VM_HSAVE_PA 0xc0010117ULL
368    
369     +#define SVM_VM_CR_SVM_DISABLE 4
370     +
371     #define SVM_SELECTOR_S_SHIFT 4
372     #define SVM_SELECTOR_DPL_SHIFT 5
373     #define SVM_SELECTOR_P_SHIFT 7
374     diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
375     index b9ff4e3..7989dac 100644
376     --- a/drivers/md/dm-crypt.c
377     +++ b/drivers/md/dm-crypt.c
378     @@ -917,6 +917,8 @@ static void crypt_dtr(struct dm_target *ti)
379     {
380     struct crypt_config *cc = (struct crypt_config *) ti->private;
381    
382     + flush_workqueue(_kcryptd_workqueue);
383     +
384     bioset_free(cc->bs);
385     mempool_destroy(cc->page_pool);
386     mempool_destroy(cc->io_pool);
387     @@ -938,9 +940,6 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
388     struct crypt_config *cc = ti->private;
389     struct crypt_io *io;
390    
391     - if (bio_barrier(bio))
392     - return -EOPNOTSUPP;
393     -
394     io = mempool_alloc(cc->io_pool, GFP_NOIO);
395     io->target = ti;
396     io->base_bio = bio;
397     diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
398     index 99cdffa..fc496fc 100644
399     --- a/drivers/md/dm-exception-store.c
400     +++ b/drivers/md/dm-exception-store.c
401     @@ -453,11 +453,6 @@ static int persistent_read_metadata(struct exception_store *store)
402     /*
403     * Sanity checks.
404     */
405     - if (!ps->valid) {
406     - DMWARN("snapshot is marked invalid");
407     - return -EINVAL;
408     - }
409     -
410     if (ps->version != SNAPSHOT_DISK_VERSION) {
411     DMWARN("unable to handle snapshot disk version %d",
412     ps->version);
413     @@ -465,6 +460,12 @@ static int persistent_read_metadata(struct exception_store *store)
414     }
415    
416     /*
417     + * Metadata are valid, but snapshot is invalidated
418     + */
419     + if (!ps->valid)
420     + return 1;
421     +
422     + /*
423     * Read the metadata.
424     */
425     r = read_exceptions(ps);
426     diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
427     index 3aa0135..af27464 100644
428     --- a/drivers/md/dm-mpath.c
429     +++ b/drivers/md/dm-mpath.c
430     @@ -795,9 +795,6 @@ static int multipath_map(struct dm_target *ti, struct bio *bio,
431     struct mpath_io *mpio;
432     struct multipath *m = (struct multipath *) ti->private;
433    
434     - if (bio_barrier(bio))
435     - return -EOPNOTSUPP;
436     -
437     mpio = mempool_alloc(m->mpio_pool, GFP_NOIO);
438     dm_bio_record(&mpio->details, bio);
439    
440     diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
441     index 0821a2b..3955621 100644
442     --- a/drivers/md/dm-snap.c
443     +++ b/drivers/md/dm-snap.c
444     @@ -522,9 +522,12 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
445    
446     /* Metadata must only be loaded into one table at once */
447     r = s->store.read_metadata(&s->store);
448     - if (r) {
449     + if (r < 0) {
450     ti->error = "Failed to read snapshot metadata";
451     goto bad6;
452     + } else if (r > 0) {
453     + s->valid = 0;
454     + DMWARN("Snapshot is marked invalid.");
455     }
456    
457     bio_list_init(&s->queued_bios);
458     @@ -884,9 +887,6 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
459     if (!s->valid)
460     return -EIO;
461    
462     - if (unlikely(bio_barrier(bio)))
463     - return -EOPNOTSUPP;
464     -
465     /* FIXME: should only take write lock if we need
466     * to copy an exception */
467     down_write(&s->lock);
468     @@ -1157,9 +1157,6 @@ static int origin_map(struct dm_target *ti, struct bio *bio,
469     struct dm_dev *dev = (struct dm_dev *) ti->private;
470     bio->bi_bdev = dev->bdev;
471    
472     - if (unlikely(bio_barrier(bio)))
473     - return -EOPNOTSUPP;
474     -
475     /* Only tell snapshots if this is a write */
476     return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
477     }
478     diff --git a/drivers/md/dm.c b/drivers/md/dm.c
479     index 3668b17..d531f0c 100644
480     --- a/drivers/md/dm.c
481     +++ b/drivers/md/dm.c
482     @@ -802,6 +802,15 @@ static int dm_request(request_queue_t *q, struct bio *bio)
483     int rw = bio_data_dir(bio);
484     struct mapped_device *md = q->queuedata;
485    
486     + /*
487     + * There is no use in forwarding any barrier request since we can't
488     + * guarantee it is (or can be) handled by the targets correctly.
489     + */
490     + if (unlikely(bio_barrier(bio))) {
491     + bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
492     + return 0;
493     + }
494     +
495     down_read(&md->io_lock);
496    
497     disk_stat_inc(dm_disk(md), ios[rw]);
498     diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
499     index 9eb66c1..e0029ea 100644
500     --- a/drivers/md/raid10.c
501     +++ b/drivers/md/raid10.c
502     @@ -917,6 +917,13 @@ static int make_request(request_queue_t *q, struct bio * bio)
503     bio_list_add(&bl, mbio);
504     }
505    
506     + if (unlikely(!atomic_read(&r10_bio->remaining))) {
507     + /* the array is dead */
508     + md_write_end(mddev);
509     + raid_end_bio_io(r10_bio);
510     + return 0;
511     + }
512     +
513     bitmap_startwrite(mddev->bitmap, bio->bi_sector, r10_bio->sectors, 0);
514     spin_lock_irqsave(&conf->device_lock, flags);
515     bio_list_merge(&conf->pending_bio_list, &bl);
516     @@ -1558,7 +1565,6 @@ static void raid10d(mddev_t *mddev)
517     bio = r10_bio->devs[r10_bio->read_slot].bio;
518     r10_bio->devs[r10_bio->read_slot].bio =
519     mddev->ro ? IO_BLOCKED : NULL;
520     - bio_put(bio);
521     mirror = read_balance(conf, r10_bio);
522     if (mirror == -1) {
523     printk(KERN_ALERT "raid10: %s: unrecoverable I/O"
524     @@ -1566,8 +1572,10 @@ static void raid10d(mddev_t *mddev)
525     bdevname(bio->bi_bdev,b),
526     (unsigned long long)r10_bio->sector);
527     raid_end_bio_io(r10_bio);
528     + bio_put(bio);
529     } else {
530     const int do_sync = bio_sync(r10_bio->master_bio);
531     + bio_put(bio);
532     rdev = conf->mirrors[mirror].rdev;
533     if (printk_ratelimit())
534     printk(KERN_ERR "raid10: %s: redirecting sector %llu to"
535     diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
536     index b87d571..31807ba 100644
537     --- a/drivers/media/video/v4l2-common.c
538     +++ b/drivers/media/video/v4l2-common.c
539     @@ -1499,16 +1499,25 @@ int v4l2_ctrl_query_menu(struct v4l2_querymenu *qmenu, struct v4l2_queryctrl *qc
540     When no more controls are available 0 is returned. */
541     u32 v4l2_ctrl_next(const u32 * const * ctrl_classes, u32 id)
542     {
543     - u32 ctrl_class;
544     + u32 ctrl_class = V4L2_CTRL_ID2CLASS(id);
545     const u32 *pctrl;
546    
547     - /* if no query is desired, then just return the control ID */
548     - if ((id & V4L2_CTRL_FLAG_NEXT_CTRL) == 0)
549     - return id;
550     if (ctrl_classes == NULL)
551     return 0;
552     +
553     + /* if no query is desired, then check if the ID is part of ctrl_classes */
554     + if ((id & V4L2_CTRL_FLAG_NEXT_CTRL) == 0) {
555     + /* find class */
556     + while (*ctrl_classes && V4L2_CTRL_ID2CLASS(**ctrl_classes) != ctrl_class)
557     + ctrl_classes++;
558     + if (*ctrl_classes == NULL)
559     + return 0;
560     + pctrl = *ctrl_classes;
561     + /* find control ID */
562     + while (*pctrl && *pctrl != id) pctrl++;
563     + return *pctrl ? id : 0;
564     + }
565     id &= V4L2_CTRL_ID_MASK;
566     - ctrl_class = V4L2_CTRL_ID2CLASS(id);
567     id++; /* select next control */
568     /* find first class that matches (or is greater than) the class of
569     the ID */
570     diff --git a/drivers/media/video/wm8739.c b/drivers/media/video/wm8739.c
571     index a9b59c3..009941b 100644
572     --- a/drivers/media/video/wm8739.c
573     +++ b/drivers/media/video/wm8739.c
574     @@ -317,12 +317,14 @@ static int wm8739_probe(struct i2c_adapter *adapter)
575    
576     static int wm8739_detach(struct i2c_client *client)
577     {
578     + struct wm8739_state *state = i2c_get_clientdata(client);
579     int err;
580    
581     err = i2c_detach_client(client);
582     if (err)
583     return err;
584    
585     + kfree(state);
586     kfree(client);
587     return 0;
588     }
589     diff --git a/drivers/media/video/wm8775.c b/drivers/media/video/wm8775.c
590     index d81a88b..e555b3a 100644
591     --- a/drivers/media/video/wm8775.c
592     +++ b/drivers/media/video/wm8775.c
593     @@ -218,12 +218,14 @@ static int wm8775_probe(struct i2c_adapter *adapter)
594    
595     static int wm8775_detach(struct i2c_client *client)
596     {
597     + struct wm8775_state *state = i2c_get_clientdata(client);
598     int err;
599    
600     err = i2c_detach_client(client);
601     if (err) {
602     return err;
603     }
604     + kfree(state);
605     kfree(client);
606    
607     return 0;
608     diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
609     index 952a6bd..c383dc3 100644
610     --- a/drivers/net/forcedeth.c
611     +++ b/drivers/net/forcedeth.c
612     @@ -553,6 +553,7 @@ union ring_type {
613     /* PHY defines */
614     #define PHY_OUI_MARVELL 0x5043
615     #define PHY_OUI_CICADA 0x03f1
616     +#define PHY_OUI_VITESSE 0x01c1
617     #define PHYID1_OUI_MASK 0x03ff
618     #define PHYID1_OUI_SHFT 6
619     #define PHYID2_OUI_MASK 0xfc00
620     @@ -560,12 +561,29 @@ union ring_type {
621     #define PHYID2_MODEL_MASK 0x03f0
622     #define PHY_MODEL_MARVELL_E3016 0x220
623     #define PHY_MARVELL_E3016_INITMASK 0x0300
624     -#define PHY_INIT1 0x0f000
625     -#define PHY_INIT2 0x0e00
626     -#define PHY_INIT3 0x01000
627     -#define PHY_INIT4 0x0200
628     -#define PHY_INIT5 0x0004
629     -#define PHY_INIT6 0x02000
630     +#define PHY_CICADA_INIT1 0x0f000
631     +#define PHY_CICADA_INIT2 0x0e00
632     +#define PHY_CICADA_INIT3 0x01000
633     +#define PHY_CICADA_INIT4 0x0200
634     +#define PHY_CICADA_INIT5 0x0004
635     +#define PHY_CICADA_INIT6 0x02000
636     +#define PHY_VITESSE_INIT_REG1 0x1f
637     +#define PHY_VITESSE_INIT_REG2 0x10
638     +#define PHY_VITESSE_INIT_REG3 0x11
639     +#define PHY_VITESSE_INIT_REG4 0x12
640     +#define PHY_VITESSE_INIT_MSK1 0xc
641     +#define PHY_VITESSE_INIT_MSK2 0x0180
642     +#define PHY_VITESSE_INIT1 0x52b5
643     +#define PHY_VITESSE_INIT2 0xaf8a
644     +#define PHY_VITESSE_INIT3 0x8
645     +#define PHY_VITESSE_INIT4 0x8f8a
646     +#define PHY_VITESSE_INIT5 0xaf86
647     +#define PHY_VITESSE_INIT6 0x8f86
648     +#define PHY_VITESSE_INIT7 0xaf82
649     +#define PHY_VITESSE_INIT8 0x0100
650     +#define PHY_VITESSE_INIT9 0x8f82
651     +#define PHY_VITESSE_INIT10 0x0
652     +
653     #define PHY_GIGABIT 0x0100
654    
655     #define PHY_TIMEOUT 0x1
656     @@ -1133,14 +1151,14 @@ static int phy_init(struct net_device *dev)
657     /* phy vendor specific configuration */
658     if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) {
659     phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
660     - phy_reserved &= ~(PHY_INIT1 | PHY_INIT2);
661     - phy_reserved |= (PHY_INIT3 | PHY_INIT4);
662     + phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
663     + phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
664     if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
665     printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
666     return PHY_ERROR;
667     }
668     phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
669     - phy_reserved |= PHY_INIT5;
670     + phy_reserved |= PHY_CICADA_INIT5;
671     if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
672     printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
673     return PHY_ERROR;
674     @@ -1148,12 +1166,82 @@ static int phy_init(struct net_device *dev)
675     }
676     if (np->phy_oui == PHY_OUI_CICADA) {
677     phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
678     - phy_reserved |= PHY_INIT6;
679     + phy_reserved |= PHY_CICADA_INIT6;
680     if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
681     printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
682     return PHY_ERROR;
683     }
684     }
685     + if (np->phy_oui == PHY_OUI_VITESSE) {
686     + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) {
687     + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
688     + return PHY_ERROR;
689     + }
690     + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) {
691     + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
692     + return PHY_ERROR;
693     + }
694     + phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
695     + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
696     + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
697     + return PHY_ERROR;
698     + }
699     + phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
700     + phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
701     + phy_reserved |= PHY_VITESSE_INIT3;
702     + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
703     + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
704     + return PHY_ERROR;
705     + }
706     + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) {
707     + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
708     + return PHY_ERROR;
709     + }
710     + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) {
711     + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
712     + return PHY_ERROR;
713     + }
714     + phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
715     + phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
716     + phy_reserved |= PHY_VITESSE_INIT3;
717     + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
718     + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
719     + return PHY_ERROR;
720     + }
721     + phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
722     + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
723     + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
724     + return PHY_ERROR;
725     + }
726     + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) {
727     + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
728     + return PHY_ERROR;
729     + }
730     + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) {
731     + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
732     + return PHY_ERROR;
733     + }
734     + phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
735     + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
736     + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
737     + return PHY_ERROR;
738     + }
739     + phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
740     + phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
741     + phy_reserved |= PHY_VITESSE_INIT8;
742     + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
743     + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
744     + return PHY_ERROR;
745     + }
746     + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) {
747     + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
748     + return PHY_ERROR;
749     + }
750     + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) {
751     + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
752     + return PHY_ERROR;
753     + }
754     + }
755     /* some phys clear out pause advertisment on reset, set it back */
756     mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);
757    
758     diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
759     index 606a467..2785d83 100644
760     --- a/drivers/pcmcia/cs.c
761     +++ b/drivers/pcmcia/cs.c
762     @@ -410,6 +410,9 @@ static void socket_shutdown(struct pcmcia_socket *s)
763     #endif
764     s->functions = 0;
765    
766     + /* give socket some time to power down */
767     + msleep(100);
768     +
769     s->ops->get_status(s, &status);
770     if (status & SS_POWERON) {
771     printk(KERN_ERR "PCMCIA: socket %p: *** DANGER *** unable to remove socket power\n", s);
772     diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
773     index d2cf875..3d5cff7 100644
774     --- a/drivers/scsi/aacraid/linit.c
775     +++ b/drivers/scsi/aacraid/linit.c
776     @@ -539,6 +539,8 @@ static int aac_cfg_open(struct inode *inode, struct file *file)
777     static int aac_cfg_ioctl(struct inode *inode, struct file *file,
778     unsigned int cmd, unsigned long arg)
779     {
780     + if (!capable(CAP_SYS_ADMIN))
781     + return -EPERM;
782     return aac_do_ioctl(file->private_data, cmd, (void __user *)arg);
783     }
784    
785     @@ -592,6 +594,8 @@ static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
786    
787     static long aac_compat_cfg_ioctl(struct file *file, unsigned cmd, unsigned long arg)
788     {
789     + if (!capable(CAP_SYS_ADMIN))
790     + return -EPERM;
791     return aac_compat_do_ioctl((struct aac_dev *)file->private_data, cmd, arg);
792     }
793     #endif
794     diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
795     index 8ddeed3..d5890d5 100644
796     --- a/drivers/usb/core/hub.c
797     +++ b/drivers/usb/core/hub.c
798     @@ -1356,6 +1356,10 @@ static int __usb_new_device(void *void_data)
799     }
800     #endif
801    
802     + /* Increment the parent's count of unsuspended children */
803     + if (udev->parent)
804     + usb_autoresume_device(udev->parent);
805     +
806     /* Register the device. The device driver is responsible
807     * for adding the device files to usbfs and sysfs and for
808     * configuring the device.
809     @@ -1363,13 +1367,11 @@ static int __usb_new_device(void *void_data)
810     err = device_add (&udev->dev);
811     if (err) {
812     dev_err(&udev->dev, "can't device_add, error %d\n", err);
813     + if (udev->parent)
814     + usb_autosuspend_device(udev->parent);
815     goto fail;
816     }
817    
818     - /* Increment the parent's count of unsuspended children */
819     - if (udev->parent)
820     - usb_autoresume_device(udev->parent);
821     -
822     exit:
823     module_put(THIS_MODULE);
824     return err;
825     diff --git a/drivers/video/macmodes.c b/drivers/video/macmodes.c
826     index ab21495..083f603 100644
827     --- a/drivers/video/macmodes.c
828     +++ b/drivers/video/macmodes.c
829     @@ -369,9 +369,8 @@ EXPORT_SYMBOL(mac_map_monitor_sense);
830     *
831     */
832    
833     -int __devinit mac_find_mode(struct fb_var_screeninfo *var,
834     - struct fb_info *info, const char *mode_option,
835     - unsigned int default_bpp)
836     +int mac_find_mode(struct fb_var_screeninfo *var, struct fb_info *info,
837     + const char *mode_option, unsigned int default_bpp)
838     {
839     const struct fb_videomode *db = NULL;
840     unsigned int dbsize = 0;
841     diff --git a/drivers/video/macmodes.h b/drivers/video/macmodes.h
842     index babeb81..b86ba08 100644
843     --- a/drivers/video/macmodes.h
844     +++ b/drivers/video/macmodes.h
845     @@ -55,10 +55,10 @@ extern int mac_vmode_to_var(int vmode, int cmode,
846     extern int mac_var_to_vmode(const struct fb_var_screeninfo *var, int *vmode,
847     int *cmode);
848     extern int mac_map_monitor_sense(int sense);
849     -extern int __devinit mac_find_mode(struct fb_var_screeninfo *var,
850     - struct fb_info *info,
851     - const char *mode_option,
852     - unsigned int default_bpp);
853     +extern int mac_find_mode(struct fb_var_screeninfo *var,
854     + struct fb_info *info,
855     + const char *mode_option,
856     + unsigned int default_bpp);
857    
858    
859     /*
860     diff --git a/drivers/video/stifb.c b/drivers/video/stifb.c
861     index 69f3b26..6074ae9 100644
862     --- a/drivers/video/stifb.c
863     +++ b/drivers/video/stifb.c
864     @@ -1101,13 +1101,18 @@ stifb_init_fb(struct sti_struct *sti, int bpp_pref)
865     /* only supported cards are allowed */
866     switch (fb->id) {
867     case CRT_ID_VISUALIZE_EG:
868     - /* look for a double buffering device like e.g. the
869     - "INTERNAL_EG_DX1024" in the RDI precisionbook laptop
870     - which won't work. The same device in non-double
871     - buffering mode returns "INTERNAL_EG_X1024". */
872     - if (strstr(sti->outptr.dev_name, "EG_DX")) {
873     - printk(KERN_WARNING
874     - "stifb: ignoring '%s'. Disable double buffering in IPL menu.\n",
875     + /* Visualize cards can run either in "double buffer" or
876     + "standard" mode. Depending on the mode, the card reports
877     + a different device name, e.g. "INTERNAL_EG_DX1024" in double
878     + buffer mode and "INTERNAL_EG_X1024" in standard mode.
879     + Since this driver only supports standard mode, we check
880     + if the device name contains the string "DX" and tell the
881     + user how to reconfigure the card. */
882     + if (strstr(sti->outptr.dev_name, "DX")) {
883     + printk(KERN_WARNING "WARNING: stifb framebuffer driver does not "
884     + "support '%s' in double-buffer mode.\n"
885     + KERN_WARNING "WARNING: Please disable the double-buffer mode "
886     + "in IPL menu (the PARISC-BIOS).\n",
887     sti->outptr.dev_name);
888     goto out_err0;
889     }
890     diff --git a/fs/9p/conv.c b/fs/9p/conv.c
891     index a3ed571..923d75c 100644
892     --- a/fs/9p/conv.c
893     +++ b/fs/9p/conv.c
894     @@ -742,6 +742,7 @@ struct v9fs_fcall *v9fs_create_twrite(u32 fid, u64 offset, u32 count,
895     if (err) {
896     kfree(fc);
897     fc = ERR_PTR(err);
898     + goto error;
899     }
900    
901     if (buf_check_overflow(bufp)) {
902     diff --git a/fs/direct-io.c b/fs/direct-io.c
903     index d9d0833..0286993 100644
904     --- a/fs/direct-io.c
905     +++ b/fs/direct-io.c
906     @@ -978,6 +978,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
907     dio->get_block = get_block;
908     dio->end_io = end_io;
909     dio->map_bh.b_private = NULL;
910     + dio->map_bh.b_state = 0;
911     dio->final_block_in_bio = -1;
912     dio->next_block_for_io = -1;
913    
914     diff --git a/fs/exec.c b/fs/exec.c
915     index 0f8573a..bd1ab3f 100644
916     --- a/fs/exec.c
917     +++ b/fs/exec.c
918     @@ -881,9 +881,12 @@ int flush_old_exec(struct linux_binprm * bprm)
919     */
920     current->mm->task_size = TASK_SIZE;
921    
922     - if (bprm->e_uid != current->euid || bprm->e_gid != current->egid ||
923     - file_permission(bprm->file, MAY_READ) ||
924     - (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) {
925     + if (bprm->e_uid != current->euid || bprm->e_gid != current->egid) {
926     + suid_keys(current);
927     + current->mm->dumpable = suid_dumpable;
928     + current->pdeath_signal = 0;
929     + } else if (file_permission(bprm->file, MAY_READ) ||
930     + (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) {
931     suid_keys(current);
932     current->mm->dumpable = suid_dumpable;
933     }
934     @@ -974,8 +977,10 @@ void compute_creds(struct linux_binprm *bprm)
935     {
936     int unsafe;
937    
938     - if (bprm->e_uid != current->uid)
939     + if (bprm->e_uid != current->uid) {
940     suid_keys(current);
941     + current->pdeath_signal = 0;
942     + }
943     exec_keys(current);
944    
945     task_lock(current);
946     diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
947     index dc2724f..14b0b49 100644
948     --- a/fs/ext4/extents.c
949     +++ b/fs/ext4/extents.c
950     @@ -1397,7 +1397,7 @@ int ext4_ext_walk_space(struct inode *inode, unsigned long block,
951    
952     static void
953     ext4_ext_put_in_cache(struct inode *inode, __u32 block,
954     - __u32 len, __u32 start, int type)
955     + __u32 len, ext4_fsblk_t start, int type)
956     {
957     struct ext4_ext_cache *cex;
958     BUG_ON(len == 0);
959     diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
960     index be4648b..84436cb 100644
961     --- a/fs/jbd/commit.c
962     +++ b/fs/jbd/commit.c
963     @@ -888,7 +888,8 @@ restart_loop:
964     journal->j_committing_transaction = NULL;
965     spin_unlock(&journal->j_state_lock);
966    
967     - if (commit_transaction->t_checkpoint_list == NULL) {
968     + if (commit_transaction->t_checkpoint_list == NULL &&
969     + commit_transaction->t_checkpoint_io_list == NULL) {
970     __journal_drop_transaction(journal, commit_transaction);
971     } else {
972     if (journal->j_checkpoint_transactions == NULL) {
973     diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
974     index 6bd8005..7d29549 100644
975     --- a/fs/jbd2/commit.c
976     +++ b/fs/jbd2/commit.c
977     @@ -897,7 +897,8 @@ restart_loop:
978     journal->j_committing_transaction = NULL;
979     spin_unlock(&journal->j_state_lock);
980    
981     - if (commit_transaction->t_checkpoint_list == NULL) {
982     + if (commit_transaction->t_checkpoint_list == NULL &&
983     + commit_transaction->t_checkpoint_io_list == NULL) {
984     __jbd2_journal_drop_transaction(journal, commit_transaction);
985     } else {
986     if (journal->j_checkpoint_transactions == NULL) {
987     diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
988     index 8283236..d8b773a 100644
989     --- a/fs/nfsd/vfs.c
990     +++ b/fs/nfsd/vfs.c
991     @@ -1887,7 +1887,7 @@ nfsd_racache_init(int cache_size)
992     raparm_hash[i].pb_head = NULL;
993     spin_lock_init(&raparm_hash[i].pb_lock);
994     }
995     - nperbucket = cache_size >> RAPARM_HASH_BITS;
996     + nperbucket = DIV_ROUND_UP(cache_size, RAPARM_HASH_SIZE);
997     for (i = 0; i < cache_size - 1; i++) {
998     if (i % nperbucket == 0)
999     raparm_hash[j++].pb_head = raparml + i;
1000     diff --git a/fs/splice.c b/fs/splice.c
1001     index 2fca6eb..b657217 100644
1002     --- a/fs/splice.c
1003     +++ b/fs/splice.c
1004     @@ -614,7 +614,7 @@ find_page:
1005     ret = add_to_page_cache_lru(page, mapping, index,
1006     GFP_KERNEL);
1007     if (unlikely(ret))
1008     - goto out;
1009     + goto out_release;
1010     }
1011    
1012     /*
1013     @@ -695,8 +695,9 @@ find_page:
1014     goto find_page;
1015     }
1016     out:
1017     - page_cache_release(page);
1018     unlock_page(page);
1019     +out_release:
1020     + page_cache_release(page);
1021     out_ret:
1022     return ret;
1023     }
1024     diff --git a/include/linux/Kbuild b/include/linux/Kbuild
1025     index 157db77..199fd71 100644
1026     --- a/include/linux/Kbuild
1027     +++ b/include/linux/Kbuild
1028     @@ -134,6 +134,7 @@ header-y += radeonfb.h
1029     header-y += raw.h
1030     header-y += resource.h
1031     header-y += rose.h
1032     +header-y += serial_reg.h
1033     header-y += smbno.h
1034     header-y += snmp.h
1035     header-y += sockios.h
1036     diff --git a/include/linux/netfilter_ipv4/ipt_iprange.h b/include/linux/netfilter_ipv4/ipt_iprange.h
1037     index 34ab0fb..a92fefc 100644
1038     --- a/include/linux/netfilter_ipv4/ipt_iprange.h
1039     +++ b/include/linux/netfilter_ipv4/ipt_iprange.h
1040     @@ -1,6 +1,8 @@
1041     #ifndef _IPT_IPRANGE_H
1042     #define _IPT_IPRANGE_H
1043    
1044     +#include <linux/types.h>
1045     +
1046     #define IPRANGE_SRC 0x01 /* Match source IP address */
1047     #define IPRANGE_DST 0x02 /* Match destination IP address */
1048     #define IPRANGE_SRC_INV 0x10 /* Negate the condition */
1049     diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h
1050     index 3c563f0..25aa575 100644
1051     --- a/include/net/bluetooth/rfcomm.h
1052     +++ b/include/net/bluetooth/rfcomm.h
1053     @@ -323,6 +323,7 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc
1054     #define RFCOMM_RELEASE_ONHUP 1
1055     #define RFCOMM_HANGUP_NOW 2
1056     #define RFCOMM_TTY_ATTACHED 3
1057     +#define RFCOMM_TTY_RELEASED 4
1058    
1059     struct rfcomm_dev_req {
1060     s16 dev_id;
1061     diff --git a/include/net/xfrm.h b/include/net/xfrm.h
1062     index e476541..4fc943b 100644
1063     --- a/include/net/xfrm.h
1064     +++ b/include/net/xfrm.h
1065     @@ -561,7 +561,6 @@ static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ct
1066     struct xfrm_dst
1067     {
1068     union {
1069     - struct xfrm_dst *next;
1070     struct dst_entry dst;
1071     struct rtable rt;
1072     struct rt6_info rt6;
1073     diff --git a/ipc/shm.c b/ipc/shm.c
1074     index f8e10a2..10b7a2c 100644
1075     --- a/ipc/shm.c
1076     +++ b/ipc/shm.c
1077     @@ -652,7 +652,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
1078     struct user_struct * user = current->user;
1079     if (!is_file_hugepages(shp->shm_file)) {
1080     err = shmem_lock(shp->shm_file, 1, user);
1081     - if (!err) {
1082     + if (!err && !(shp->shm_perm.mode & SHM_LOCKED)){
1083     shp->shm_perm.mode |= SHM_LOCKED;
1084     shp->mlock_user = user;
1085     }
1086     diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
1087     index b554b40..d787db5 100644
1088     --- a/kernel/lockdep_proc.c
1089     +++ b/kernel/lockdep_proc.c
1090     @@ -323,7 +323,7 @@ static const struct file_operations proc_lockdep_stats_operations = {
1091     .open = lockdep_stats_open,
1092     .read = seq_read,
1093     .llseek = seq_lseek,
1094     - .release = seq_release,
1095     + .release = single_release,
1096     };
1097    
1098     static int __init lockdep_proc_init(void)
1099     diff --git a/mm/hugetlb.c b/mm/hugetlb.c
1100     index 36db012..7c07eb2 100644
1101     --- a/mm/hugetlb.c
1102     +++ b/mm/hugetlb.c
1103     @@ -101,13 +101,20 @@ static void free_huge_page(struct page *page)
1104    
1105     static int alloc_fresh_huge_page(void)
1106     {
1107     - static int nid = 0;
1108     + static int prev_nid;
1109     struct page *page;
1110     - page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN,
1111     - HUGETLB_PAGE_ORDER);
1112     - nid = next_node(nid, node_online_map);
1113     + static DEFINE_SPINLOCK(nid_lock);
1114     + int nid;
1115     +
1116     + spin_lock(&nid_lock);
1117     + nid = next_node(prev_nid, node_online_map);
1118     if (nid == MAX_NUMNODES)
1119     nid = first_node(node_online_map);
1120     + prev_nid = nid;
1121     + spin_unlock(&nid_lock);
1122     +
1123     + page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN,
1124     + HUGETLB_PAGE_ORDER);
1125     if (page) {
1126     set_compound_page_dtor(page, free_huge_page);
1127     spin_lock(&hugetlb_lock);
1128     diff --git a/mm/mlock.c b/mm/mlock.c
1129     index 3446b7e..ef8fc94 100644
1130     --- a/mm/mlock.c
1131     +++ b/mm/mlock.c
1132     @@ -233,9 +233,12 @@ int user_shm_lock(size_t size, struct user_struct *user)
1133    
1134     locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1135     lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
1136     + if (lock_limit == RLIM_INFINITY)
1137     + allowed = 1;
1138     lock_limit >>= PAGE_SHIFT;
1139     spin_lock(&shmlock_user_lock);
1140     - if (locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
1141     + if (!allowed &&
1142     + locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
1143     goto out;
1144     get_uid(user);
1145     user->locked_shm += locked;
1146     diff --git a/mm/readahead.c b/mm/readahead.c
1147     index 0f539e8..f26cdea 100644
1148     --- a/mm/readahead.c
1149     +++ b/mm/readahead.c
1150     @@ -21,8 +21,16 @@ void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
1151     }
1152     EXPORT_SYMBOL(default_unplug_io_fn);
1153    
1154     +/*
1155     + * Convienent macros for min/max read-ahead pages.
1156     + * Note that MAX_RA_PAGES is rounded down, while MIN_RA_PAGES is rounded up.
1157     + * The latter is necessary for systems with large page size(i.e. 64k).
1158     + */
1159     +#define MAX_RA_PAGES (VM_MAX_READAHEAD*1024 / PAGE_CACHE_SIZE)
1160     +#define MIN_RA_PAGES DIV_ROUND_UP(VM_MIN_READAHEAD*1024, PAGE_CACHE_SIZE)
1161     +
1162     struct backing_dev_info default_backing_dev_info = {
1163     - .ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE,
1164     + .ra_pages = MAX_RA_PAGES,
1165     .state = 0,
1166     .capabilities = BDI_CAP_MAP_COPY,
1167     .unplug_io_fn = default_unplug_io_fn,
1168     @@ -51,7 +59,7 @@ static inline unsigned long get_max_readahead(struct file_ra_state *ra)
1169    
1170     static inline unsigned long get_min_readahead(struct file_ra_state *ra)
1171     {
1172     - return (VM_MIN_READAHEAD * 1024) / PAGE_CACHE_SIZE;
1173     + return MIN_RA_PAGES;
1174     }
1175    
1176     static inline void reset_ahead_window(struct file_ra_state *ra)
1177     diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
1178     index eb2b524..26e8c02 100644
1179     --- a/net/bluetooth/rfcomm/tty.c
1180     +++ b/net/bluetooth/rfcomm/tty.c
1181     @@ -93,6 +93,10 @@ static void rfcomm_dev_destruct(struct rfcomm_dev *dev)
1182    
1183     BT_DBG("dev %p dlc %p", dev, dlc);
1184    
1185     + write_lock_bh(&rfcomm_dev_lock);
1186     + list_del_init(&dev->list);
1187     + write_unlock_bh(&rfcomm_dev_lock);
1188     +
1189     rfcomm_dlc_lock(dlc);
1190     /* Detach DLC if it's owned by this dev */
1191     if (dlc->owner == dev)
1192     @@ -154,8 +158,13 @@ static inline struct rfcomm_dev *rfcomm_dev_get(int id)
1193     read_lock(&rfcomm_dev_lock);
1194    
1195     dev = __rfcomm_dev_get(id);
1196     - if (dev)
1197     - rfcomm_dev_hold(dev);
1198     +
1199     + if (dev) {
1200     + if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags))
1201     + dev = NULL;
1202     + else
1203     + rfcomm_dev_hold(dev);
1204     + }
1205    
1206     read_unlock(&rfcomm_dev_lock);
1207    
1208     @@ -263,6 +272,12 @@ out:
1209    
1210     tty_register_device(rfcomm_tty_driver, dev->id, rfcomm_get_device(dev));
1211    
1212     + if (IS_ERR(dev->tty_dev)) {
1213     + list_del(&dev->list);
1214     + kfree(dev);
1215     + return PTR_ERR(dev->tty_dev);
1216     + }
1217     +
1218     return dev->id;
1219     }
1220    
1221     @@ -270,10 +285,7 @@ static void rfcomm_dev_del(struct rfcomm_dev *dev)
1222     {
1223     BT_DBG("dev %p", dev);
1224    
1225     - write_lock_bh(&rfcomm_dev_lock);
1226     - list_del_init(&dev->list);
1227     - write_unlock_bh(&rfcomm_dev_lock);
1228     -
1229     + set_bit(RFCOMM_TTY_RELEASED, &dev->flags);
1230     rfcomm_dev_put(dev);
1231     }
1232    
1233     @@ -327,7 +339,7 @@ static int rfcomm_create_dev(struct sock *sk, void __user *arg)
1234     if (copy_from_user(&req, arg, sizeof(req)))
1235     return -EFAULT;
1236    
1237     - BT_DBG("sk %p dev_id %id flags 0x%x", sk, req.dev_id, req.flags);
1238     + BT_DBG("sk %p dev_id %d flags 0x%x", sk, req.dev_id, req.flags);
1239    
1240     if (req.flags != NOCAP_FLAGS && !capable(CAP_NET_ADMIN))
1241     return -EPERM;
1242     @@ -368,7 +380,7 @@ static int rfcomm_release_dev(void __user *arg)
1243     if (copy_from_user(&req, arg, sizeof(req)))
1244     return -EFAULT;
1245    
1246     - BT_DBG("dev_id %id flags 0x%x", req.dev_id, req.flags);
1247     + BT_DBG("dev_id %d flags 0x%x", req.dev_id, req.flags);
1248    
1249     if (!(dev = rfcomm_dev_get(req.dev_id)))
1250     return -ENODEV;
1251     @@ -381,6 +393,10 @@ static int rfcomm_release_dev(void __user *arg)
1252     if (req.flags & (1 << RFCOMM_HANGUP_NOW))
1253     rfcomm_dlc_close(dev->dlc, 0);
1254    
1255     + /* Shut down TTY synchronously before freeing rfcomm_dev */
1256     + if (dev->tty)
1257     + tty_vhangup(dev->tty);
1258     +
1259     rfcomm_dev_del(dev);
1260     rfcomm_dev_put(dev);
1261     return 0;
1262     @@ -413,6 +429,8 @@ static int rfcomm_get_dev_list(void __user *arg)
1263    
1264     list_for_each(p, &rfcomm_dev_list) {
1265     struct rfcomm_dev *dev = list_entry(p, struct rfcomm_dev, list);
1266     + if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags))
1267     + continue;
1268     (di + n)->id = dev->id;
1269     (di + n)->flags = dev->flags;
1270     (di + n)->state = dev->dlc->state;
1271     diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
1272     index 3cad026..acc1ee0 100644
1273     --- a/net/core/gen_estimator.c
1274     +++ b/net/core/gen_estimator.c
1275     @@ -79,27 +79,27 @@
1276    
1277     struct gen_estimator
1278     {
1279     - struct gen_estimator *next;
1280     + struct list_head list;
1281     struct gnet_stats_basic *bstats;
1282     struct gnet_stats_rate_est *rate_est;
1283     spinlock_t *stats_lock;
1284     - unsigned interval;
1285     int ewma_log;
1286     u64 last_bytes;
1287     u32 last_packets;
1288     u32 avpps;
1289     u32 avbps;
1290     + struct rcu_head e_rcu;
1291     };
1292    
1293     struct gen_estimator_head
1294     {
1295     struct timer_list timer;
1296     - struct gen_estimator *list;
1297     + struct list_head list;
1298     };
1299    
1300     static struct gen_estimator_head elist[EST_MAX_INTERVAL+1];
1301    
1302     -/* Estimator array lock */
1303     +/* Protects against NULL dereference */
1304     static DEFINE_RWLOCK(est_lock);
1305    
1306     static void est_timer(unsigned long arg)
1307     @@ -107,13 +107,17 @@ static void est_timer(unsigned long arg)
1308     int idx = (int)arg;
1309     struct gen_estimator *e;
1310    
1311     - read_lock(&est_lock);
1312     - for (e = elist[idx].list; e; e = e->next) {
1313     + rcu_read_lock();
1314     + list_for_each_entry_rcu(e, &elist[idx].list, list) {
1315     u64 nbytes;
1316     u32 npackets;
1317     u32 rate;
1318    
1319     spin_lock(e->stats_lock);
1320     + read_lock(&est_lock);
1321     + if (e->bstats == NULL)
1322     + goto skip;
1323     +
1324     nbytes = e->bstats->bytes;
1325     npackets = e->bstats->packets;
1326     rate = (nbytes - e->last_bytes)<<(7 - idx);
1327     @@ -125,11 +129,14 @@ static void est_timer(unsigned long arg)
1328     e->last_packets = npackets;
1329     e->avpps += ((long)rate - (long)e->avpps) >> e->ewma_log;
1330     e->rate_est->pps = (e->avpps+0x1FF)>>10;
1331     +skip:
1332     + read_unlock(&est_lock);
1333     spin_unlock(e->stats_lock);
1334     }
1335    
1336     - mod_timer(&elist[idx].timer, jiffies + ((HZ<<idx)/4));
1337     - read_unlock(&est_lock);
1338     + if (!list_empty(&elist[idx].list))
1339     + mod_timer(&elist[idx].timer, jiffies + ((HZ<<idx)/4));
1340     + rcu_read_unlock();
1341     }
1342    
1343     /**
1344     @@ -146,12 +153,17 @@ static void est_timer(unsigned long arg)
1345     * &rate_est with the statistics lock grabed during this period.
1346     *
1347     * Returns 0 on success or a negative error code.
1348     + *
1349     + * NOTE: Called under rtnl_mutex
1350     */
1351     int gen_new_estimator(struct gnet_stats_basic *bstats,
1352     - struct gnet_stats_rate_est *rate_est, spinlock_t *stats_lock, struct rtattr *opt)
1353     + struct gnet_stats_rate_est *rate_est,
1354     + spinlock_t *stats_lock,
1355     + struct rtattr *opt)
1356     {
1357     struct gen_estimator *est;
1358     struct gnet_estimator *parm = RTA_DATA(opt);
1359     + int idx;
1360    
1361     if (RTA_PAYLOAD(opt) < sizeof(*parm))
1362     return -EINVAL;
1363     @@ -163,7 +175,7 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
1364     if (est == NULL)
1365     return -ENOBUFS;
1366    
1367     - est->interval = parm->interval + 2;
1368     + idx = parm->interval + 2;
1369     est->bstats = bstats;
1370     est->rate_est = rate_est;
1371     est->stats_lock = stats_lock;
1372     @@ -173,20 +185,25 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
1373     est->last_packets = bstats->packets;
1374     est->avpps = rate_est->pps<<10;
1375    
1376     - est->next = elist[est->interval].list;
1377     - if (est->next == NULL) {
1378     - init_timer(&elist[est->interval].timer);
1379     - elist[est->interval].timer.data = est->interval;
1380     - elist[est->interval].timer.expires = jiffies + ((HZ<<est->interval)/4);
1381     - elist[est->interval].timer.function = est_timer;
1382     - add_timer(&elist[est->interval].timer);
1383     + if (!elist[idx].timer.function) {
1384     + INIT_LIST_HEAD(&elist[idx].list);
1385     + setup_timer(&elist[idx].timer, est_timer, idx);
1386     }
1387     - write_lock_bh(&est_lock);
1388     - elist[est->interval].list = est;
1389     - write_unlock_bh(&est_lock);
1390     +
1391     + if (list_empty(&elist[idx].list))
1392     + mod_timer(&elist[idx].timer, jiffies + ((HZ<<idx)/4));
1393     +
1394     + list_add_rcu(&est->list, &elist[idx].list);
1395     return 0;
1396     }
1397    
1398     +static void __gen_kill_estimator(struct rcu_head *head)
1399     +{
1400     + struct gen_estimator *e = container_of(head,
1401     + struct gen_estimator, e_rcu);
1402     + kfree(e);
1403     +}
1404     +
1405     /**
1406     * gen_kill_estimator - remove a rate estimator
1407     * @bstats: basic statistics
1408     @@ -194,31 +211,32 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
1409     *
1410     * Removes the rate estimator specified by &bstats and &rate_est
1411     * and deletes the timer.
1412     + *
1413     + * NOTE: Called under rtnl_mutex
1414     */
1415     void gen_kill_estimator(struct gnet_stats_basic *bstats,
1416     struct gnet_stats_rate_est *rate_est)
1417     {
1418     int idx;
1419     - struct gen_estimator *est, **pest;
1420     + struct gen_estimator *e, *n;
1421    
1422     for (idx=0; idx <= EST_MAX_INTERVAL; idx++) {
1423     - int killed = 0;
1424     - pest = &elist[idx].list;
1425     - while ((est=*pest) != NULL) {
1426     - if (est->rate_est != rate_est || est->bstats != bstats) {
1427     - pest = &est->next;
1428     +
1429     + /* Skip non initialized indexes */
1430     + if (!elist[idx].timer.function)
1431     + continue;
1432     +
1433     + list_for_each_entry_safe(e, n, &elist[idx].list, list) {
1434     + if (e->rate_est != rate_est || e->bstats != bstats)
1435     continue;
1436     - }
1437    
1438     write_lock_bh(&est_lock);
1439     - *pest = est->next;
1440     + e->bstats = NULL;
1441     write_unlock_bh(&est_lock);
1442    
1443     - kfree(est);
1444     - killed++;
1445     + list_del_rcu(&e->list);
1446     + call_rcu(&e->e_rcu, __gen_kill_estimator);
1447     }
1448     - if (killed && elist[idx].list == NULL)
1449     - del_timer(&elist[idx].timer);
1450     }
1451     }
1452    
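The hunk above converts the per-interval estimator chain from a hand-rolled singly linked list into an RCU-protected list: the timer walks it with list_for_each_entry_rcu() and only takes est_lock to check e->bstats, while removal NULLs bstats under the write lock, unlinks with list_del_rcu() and defers the kfree() to call_rcu(). A minimal sketch of that reader/writer split, with simplified types and hypothetical _ex names (not code from the patch):

    #include <linux/list.h>
    #include <linux/rculist.h>     /* the RCU list macros lived in <linux/list.h> on trees this old */
    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>
    #include <linux/slab.h>

    struct est_ex {
        struct list_head list;
        struct rcu_head  rcu;
        int             *stats;    /* NULLed by the writer before removal */
    };

    static LIST_HEAD(est_list_ex);
    static DEFINE_RWLOCK(est_lock_ex);

    /* Reader (timer context): traverse under RCU, skip entries already killed. */
    static int est_sample_ex(void)
    {
        struct est_ex *e;
        int sum = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(e, &est_list_ex, list) {
            read_lock(&est_lock_ex);
            if (e->stats)
                sum += *e->stats;
            read_unlock(&est_lock_ex);
        }
        rcu_read_unlock();
        return sum;
    }

    static void est_free_ex(struct rcu_head *head)
    {
        kfree(container_of(head, struct est_ex, rcu));
    }

    /* Writer (serialized, e.g. under rtnl): detach, free after a grace period. */
    static void est_kill_ex(struct est_ex *e)
    {
        write_lock_bh(&est_lock_ex);
        e->stats = NULL;           /* readers now skip this entry */
        write_unlock_bh(&est_lock_ex);

        list_del_rcu(&e->list);
        call_rcu(&e->rcu, est_free_ex);
    }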
1453     diff --git a/net/core/netpoll.c b/net/core/netpoll.c
1454     index 522e441..3431d48 100644
1455     --- a/net/core/netpoll.c
1456     +++ b/net/core/netpoll.c
1457     @@ -776,7 +776,6 @@ void netpoll_cleanup(struct netpoll *np)
1458     spin_unlock_irqrestore(&npinfo->rx_lock, flags);
1459     }
1460    
1461     - np->dev->npinfo = NULL;
1462     if (atomic_dec_and_test(&npinfo->refcnt)) {
1463     skb_queue_purge(&npinfo->arp_tx);
1464     skb_queue_purge(&npinfo->txq);
1465     @@ -784,6 +783,7 @@ void netpoll_cleanup(struct netpoll *np)
1466     flush_scheduled_work();
1467    
1468     kfree(npinfo);
1469     + np->dev->npinfo = NULL;
1470     }
1471     }
1472    
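The netpoll change stops clearing np->dev->npinfo unconditionally and only detaches the pointer once the refcount actually drops to zero, i.e. when the shared structure is freed. A simplified sketch of that put-side ordering (illustrative types only, not the real netpoll layout):

    #include <linux/atomic.h>      /* <asm/atomic.h> on trees this old */
    #include <linux/slab.h>

    struct info_ex {
        atomic_t refcnt;
        /* queues, locks, ... */
    };

    struct dev_ex {
        struct info_ex *info;      /* shared by every netpoll client on the device */
    };

    static void info_put_ex(struct dev_ex *dev)
    {
        struct info_ex *info = dev->info;

        if (!info)
            return;

        if (atomic_dec_and_test(&info->refcnt)) {
            /* last user: tear down, then unhook the device pointer */
            kfree(info);
            dev->info = NULL;
        }
        /* Clearing dev->info before the refcount check (the old ordering)
         * takes the pointer away from other clients that still hold a
         * reference to the same info block. */
    }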
1473     diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c
1474     index a824852..dadb65c 100644
1475     --- a/net/ieee80211/softmac/ieee80211softmac_assoc.c
1476     +++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c
1477     @@ -271,8 +271,11 @@ ieee80211softmac_assoc_work(struct work_struct *work)
1478     */
1479     dprintk(KERN_INFO PFX "Associate: Scanning for networks first.\n");
1480     ieee80211softmac_notify(mac->dev, IEEE80211SOFTMAC_EVENT_SCAN_FINISHED, ieee80211softmac_assoc_notify_scan, NULL);
1481     - if (ieee80211softmac_start_scan(mac))
1482     + if (ieee80211softmac_start_scan(mac)) {
1483     dprintk(KERN_INFO PFX "Associate: failed to initiate scan. Is device up?\n");
1484     + mac->associnfo.associating = 0;
1485     + mac->associnfo.associated = 0;
1486     + }
1487     goto out;
1488     } else {
1489     mac->associnfo.associating = 0;
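In the association worker above, a failed ieee80211softmac_start_scan() now also clears the associating/associated flags, so the state machine is not left permanently "in progress" when the scan cannot even start. A tiny sketch of that error-path idea, using hypothetical _ex names rather than the softmac structures:

    struct mac_ex {
        int associating;
        int associated;
    };

    /* Hypothetical helper: returns non-zero if the scan could not be started. */
    static int start_scan_ex(struct mac_ex *mac)
    {
        return -1;                  /* pretend the device is down */
    }

    static void assoc_work_ex(struct mac_ex *mac)
    {
        mac->associating = 1;
        if (start_scan_ex(mac)) {
            /* could not kick off the asynchronous step: roll the state back */
            mac->associating = 0;
            mac->associated = 0;
            return;
        }
        /* otherwise the scan-finished notification resumes association */
    }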
1490     diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c
1491     index fb58e03..c3c39ed 100644
1492     --- a/net/ieee80211/softmac/ieee80211softmac_wx.c
1493     +++ b/net/ieee80211/softmac/ieee80211softmac_wx.c
1494     @@ -74,8 +74,8 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev,
1495     struct ieee80211softmac_auth_queue_item *authptr;
1496     int length = 0;
1497    
1498     +check_assoc_again:
1499     mutex_lock(&sm->associnfo.mutex);
1500     -
1501     /* Check if we're already associating to this or another network
1502     * If it's another network, cancel and start over with our new network
1503     * If it's our network, ignore the change, we're already doing it!
1504     @@ -98,13 +98,18 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev,
1505     cancel_delayed_work(&authptr->work);
1506     sm->associnfo.bssvalid = 0;
1507     sm->associnfo.bssfixed = 0;
1508     - flush_scheduled_work();
1509     sm->associnfo.associating = 0;
1510     sm->associnfo.associated = 0;
1511     + /* We must unlock to avoid deadlocks with the assoc workqueue
1512     + * on the associnfo.mutex */
1513     + mutex_unlock(&sm->associnfo.mutex);
1514     + flush_scheduled_work();
1515     + /* Avoid race! Check assoc status again. Maybe someone started an
1516     + * association while we flushed. */
1517     + goto check_assoc_again;
1518     }
1519     }
1520    
1521     -
1522     sm->associnfo.static_essid = 0;
1523     sm->associnfo.assoc_wait = 0;
1524    
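The set_essid fix avoids a deadlock: flush_scheduled_work() can end up waiting for the association worker, which itself takes associnfo.mutex, so the mutex must be dropped before flushing and the association state rechecked afterwards (hence the check_assoc_again label). The general shape of that pattern, sketched with generic names and era-appropriate flush_scheduled_work():

    #include <linux/mutex.h>
    #include <linux/workqueue.h>

    struct assoc_ex {
        struct mutex lock;
        int busy;                   /* an association attempt is in flight */
    };

    static void cancel_and_restart_ex(struct assoc_ex *a)
    {
    again:
        mutex_lock(&a->lock);
        if (a->busy) {
            a->busy = 0;
            /* The worker also takes a->lock, so flushing while holding it
             * can deadlock: drop the lock first, then flush, then recheck,
             * because another attempt may have started in the meantime. */
            mutex_unlock(&a->lock);
            flush_scheduled_work();
            goto again;
        }
        /* ... proceed with the new request under the lock, then ... */
        mutex_unlock(&a->lock);
    }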
1525     diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
1526     index e383ac8..0d21d96 100644
1527     --- a/net/ipv6/addrconf.c
1528     +++ b/net/ipv6/addrconf.c
1529     @@ -2451,6 +2451,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
1530     write_unlock_bh(&idev->lock);
1531    
1532     __ipv6_ifa_notify(RTM_DELADDR, ifa);
1533     + atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa);
1534     in6_ifa_put(ifa);
1535    
1536     write_lock_bh(&idev->lock);
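The addrconf change makes interface shutdown run the inet6addr notifier chain for every address it deletes, so subscribers also see NETDEV_DOWN for addresses removed via ifdown. For reference, the atomic notifier API used by that one-liner looks roughly like this (illustrative subscriber and chain, not code from the patch):

    #include <linux/notifier.h>
    #include <linux/netdevice.h>    /* NETDEV_DOWN */

    static ATOMIC_NOTIFIER_HEAD(example_chain_ex);

    static int example_event_ex(struct notifier_block *nb,
                                unsigned long event, void *ptr)
    {
        if (event != NETDEV_DOWN)
            return NOTIFY_DONE;
        /* ptr would carry the address entry being removed */
        return NOTIFY_OK;
    }

    static struct notifier_block example_nb_ex = {
        .notifier_call = example_event_ex,
    };

    static void example_ex(void *addr)
    {
        atomic_notifier_chain_register(&example_chain_ex, &example_nb_ex);
        /* publisher side, as the hunk does on address deletion: */
        atomic_notifier_call_chain(&example_chain_ex, NETDEV_DOWN, addr);
    }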
1537     diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
1538     index fe0c895..7cf5248 100644
1539     --- a/net/ipv6/anycast.c
1540     +++ b/net/ipv6/anycast.c
1541     @@ -67,6 +67,7 @@ ip6_onlink(struct in6_addr *addr, struct net_device *dev)
1542     break;
1543     }
1544     read_unlock_bh(&idev->lock);
1545     + in6_dev_put(idev);
1546     }
1547     rcu_read_unlock();
1548     return onlink;
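The one-liner in ip6_onlink() fixes a reference leak: the reference taken on the device's inet6 state earlier in the loop (outside this hunk) was never dropped, so every device inspected leaked a count. The rule it restores, sketched generically in plain C: whatever a loop body acquires must be released on every path before the next iteration.

    /* Illustrative only: the *_ex counters stand in for real refcounting. */
    struct obj_ex {
        int refcnt;
        int match;
    };

    static void get_ex(struct obj_ex *o) { o->refcnt++; }
    static void put_ex(struct obj_ex *o) { o->refcnt--; }

    static int find_ex(struct obj_ex **tab, int n)
    {
        int i, found = 0;

        for (i = 0; i < n && !found; i++) {
            struct obj_ex *o = tab[i];

            get_ex(o);
            if (o->match)
                found = 1;
            put_ex(o);              /* the missing put in the old code */
        }
        return found;
    }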
1549     diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
1550     index 3dcc4b7..4437a70 100644
1551     --- a/net/ipv6/icmp.c
1552     +++ b/net/ipv6/icmp.c
1553     @@ -600,7 +600,7 @@ static void icmpv6_notify(struct sk_buff *skb, int type, int code, __be32 info)
1554    
1555     read_lock(&raw_v6_lock);
1556     if ((sk = sk_head(&raw_v6_htable[hash])) != NULL) {
1557     - while((sk = __raw_v6_lookup(sk, nexthdr, daddr, saddr,
1558     + while ((sk = __raw_v6_lookup(sk, nexthdr, saddr, daddr,
1559     IP6CB(skb)->iif))) {
1560     rawv6_err(sk, skb, NULL, type, code, inner_offset, info);
1561     sk = sk_next(sk);
1562     diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
1563     index a2d41ba..03f53f5 100644
1564     --- a/net/ipv6/tcp_ipv6.c
1565     +++ b/net/ipv6/tcp_ipv6.c
1566     @@ -637,6 +637,7 @@ static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
1567     if (tp->md5sig_info->entries6 == 0) {
1568     kfree(tp->md5sig_info->keys6);
1569     tp->md5sig_info->keys6 = NULL;
1570     + tp->md5sig_info->alloced6 = 0;
1571    
1572     tcp_free_md5sig_pool();
1573    
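When the last IPv6 MD5 key is removed, the patch now resets alloced6 along with freeing keys6; otherwise a later key add could believe capacity is still allocated and write through the freed (now NULL) pointer. The invariant, sketched with a generic growable array rather than the tcp md5sig structures:

    #include <linux/slab.h>

    struct keyvec_ex {
        int *keys;
        int  entries;               /* slots in use */
        int  alloced;               /* slots allocated */
    };

    static void del_last_key_ex(struct keyvec_ex *kv)
    {
        if (--kv->entries == 0) {
            kfree(kv->keys);
            kv->keys = NULL;
            kv->alloced = 0;        /* keep capacity in sync with the pointer */
        }
    }

    /* The add path relies on that invariant: it only reallocates when
     * entries == alloced, so a stale non-zero alloced combined with
     * keys == NULL leads straight to a write through a bad pointer. */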
1574     diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
1575     index ef36be0..c00c73c 100644
1576     --- a/net/sctp/ipv6.c
1577     +++ b/net/sctp/ipv6.c
1578     @@ -874,6 +874,10 @@ static int sctp_inet6_send_verify(struct sctp_sock *opt, union sctp_addr *addr)
1579     dev = dev_get_by_index(addr->v6.sin6_scope_id);
1580     if (!dev)
1581     return 0;
1582     + if (!ipv6_chk_addr(&addr->v6.sin6_addr, dev, 0)) {
1583     + dev_put(dev);
1584     + return 0;
1585     + }
1586     dev_put(dev);
1587     }
1588     af = opt->pf->af;
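The SCTP hunk tightens sctp_inet6_send_verify(): for an address carrying a sin6_scope_id it is no longer enough that the referenced interface exists, the address must actually be configured on that interface, and the device reference is dropped on the new early-return path as well. The shape of that check, with hypothetical _ex helpers standing in for dev_get_by_index()/ipv6_chk_addr():

    struct dev_ex;
    struct addr_ex;

    struct dev_ex *dev_get_ex(int ifindex);   /* hypothetical: takes a reference */
    void dev_put_ex(struct dev_ex *dev);      /* hypothetical: drops it          */
    int addr_on_dev_ex(const struct dev_ex *dev,
                       const struct addr_ex *addr); /* hypothetical check        */

    static int verify_scoped_addr_ex(int ifindex, const struct addr_ex *addr)
    {
        struct dev_ex *dev = dev_get_ex(ifindex);

        if (!dev)
            return 0;
        if (!addr_on_dev_ex(dev, addr)) {
            dev_put_ex(dev);        /* do not leak the reference on failure */
            return 0;
        }
        dev_put_ex(dev);
        return 1;
    }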
1589     diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
1590     index 066c64a..6179767 100644
1591     --- a/net/sunrpc/auth_gss/svcauth_gss.c
1592     +++ b/net/sunrpc/auth_gss/svcauth_gss.c
1593     @@ -760,11 +760,12 @@ svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name)
1594     new->h.flavour = &svcauthops_gss;
1595     new->pseudoflavor = pseudoflavor;
1596    
1597     + stat = 0;
1598     test = auth_domain_lookup(name, &new->h);
1599     - if (test != &new->h) { /* XXX Duplicate registration? */
1600     - auth_domain_put(&new->h);
1601     - /* dangling ref-count... */
1602     - goto out;
1603     + if (test != &new->h) { /* Duplicate registration */
1604     + auth_domain_put(test);
1605     + kfree(new->h.name);
1606     + goto out_free_dom;
1607     }
1608     return 0;
1609    
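The svcauth_gss change repairs the lookup-or-insert error path: auth_domain_lookup() either inserts the caller's new domain or returns the existing one with a reference taken, and on a duplicate the old code put the wrong object (the new, never-registered entry) while leaking both the reference on the existing domain and the name copy. The fix puts the returned entry, frees the name, releases the unused new entry via out_free_dom, and presets stat to 0 so a duplicate registration is not reported as an error. The get-or-insert idiom it follows, sketched with hypothetical _ex helpers:

    #include <linux/slab.h>

    struct entry_ex {
        int   refcnt;
        char *name;
    };

    struct entry_ex *lookup_or_insert_ex(struct entry_ex *candidate); /* hypothetical */
    void entry_put_ex(struct entry_ex *e);                            /* hypothetical */

    static int register_ex(struct entry_ex *candidate)
    {
        struct entry_ex *found = lookup_or_insert_ex(candidate);

        if (found != candidate) {
            /* Duplicate: drop the reference on the existing entry and
             * dispose of the never-inserted candidate ourselves. */
            entry_put_ex(found);
            kfree(candidate->name);
            kfree(candidate);
            return 0;               /* duplicate registration is not an error */
        }
        return 0;                   /* candidate is now registered */
    }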
1610     diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
1611     index 9704e05..c208a8a 100644
1612     --- a/net/xfrm/xfrm_policy.c
1613     +++ b/net/xfrm/xfrm_policy.c
1614     @@ -1971,7 +1971,7 @@ int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
1615     if (last == first)
1616     break;
1617    
1618     - last = last->u.next;
1619     + last = (struct xfrm_dst *)last->u.dst.next;
1620     last->child_mtu_cached = mtu;
1621     }
1622
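The final hunk fixes the bundle walk in xfrm_bundle_ok(): the link to the next bundle member lives in the embedded dst_entry (u.dst.next), not in a field of xfrm_dst itself, so the generic pointer has to be followed and cast back to the containing type. A generic sketch of walking a chain whose link sits in an embedded base struct (plain C, illustrative names):

    struct base_ex {
        struct base_ex *next;       /* the chain is linked through the base */
    };

    struct wrapper_ex {
        struct base_ex base;        /* first member, so the cast below is valid */
        int mtu;
    };

    static void walk_ex(struct wrapper_ex *first)
    {
        struct wrapper_ex *w = first;

        while (w->base.next) {
            /* follow the base link, then recover the containing type */
            w = (struct wrapper_ex *)w->base.next;
            w->mtu = 1500;          /* stand-in for the cached-mtu update */
        }
    }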