Magellan Linux

Contents of /trunk/kernel26-magellan/patches-2.6.24-r4/0103-2.6.24.4-all-fixes.patch



Revision 530
Tue Mar 25 11:20:51 2008 UTC by niro
File size: 116124 bytes
2.6.24-magellan-r4:
- updated to linux-2.6.24.4
- use the modular IDE framework to fix UDMA issues with nForce boards

1 diff --git a/arch/arm/mach-pxa/clock.c b/arch/arm/mach-pxa/clock.c
2 index 83ef5ec..df5ae27 100644
3 --- a/arch/arm/mach-pxa/clock.c
4 +++ b/arch/arm/mach-pxa/clock.c
5 @@ -23,18 +23,27 @@ static LIST_HEAD(clocks);
6 static DEFINE_MUTEX(clocks_mutex);
7 static DEFINE_SPINLOCK(clocks_lock);
8
9 +static struct clk *clk_lookup(struct device *dev, const char *id)
10 +{
11 + struct clk *p;
12 +
13 + list_for_each_entry(p, &clocks, node)
14 + if (strcmp(id, p->name) == 0 && p->dev == dev)
15 + return p;
16 +
17 + return NULL;
18 +}
19 +
20 struct clk *clk_get(struct device *dev, const char *id)
21 {
22 struct clk *p, *clk = ERR_PTR(-ENOENT);
23
24 mutex_lock(&clocks_mutex);
25 - list_for_each_entry(p, &clocks, node) {
26 - if (strcmp(id, p->name) == 0 &&
27 - (p->dev == NULL || p->dev == dev)) {
28 - clk = p;
29 - break;
30 - }
31 - }
32 + p = clk_lookup(dev, id);
33 + if (!p)
34 + p = clk_lookup(NULL, id);
35 + if (p)
36 + clk = p;
37 mutex_unlock(&clocks_mutex);
38
39 return clk;
40 diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
41 index 4710135..9158dd8 100644
42 --- a/arch/mips/kernel/i8259.c
43 +++ b/arch/mips/kernel/i8259.c
44 @@ -338,8 +338,10 @@ void __init init_i8259_irqs(void)
45
46 init_8259A(0);
47
48 - for (i = I8259A_IRQ_BASE; i < I8259A_IRQ_BASE + 16; i++)
49 + for (i = I8259A_IRQ_BASE; i < I8259A_IRQ_BASE + 16; i++) {
50 set_irq_chip_and_handler(i, &i8259A_chip, handle_level_irq);
51 + set_irq_probe(i);
52 + }
53
54 setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2);
55 }
56 diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
57 index d06e9c9..e3309ff 100644
58 --- a/arch/mips/kernel/irq.c
59 +++ b/arch/mips/kernel/irq.c
60 @@ -145,6 +145,11 @@ __setup("nokgdb", nokgdb);
61
62 void __init init_IRQ(void)
63 {
64 + int i;
65 +
66 + for (i = 0; i < NR_IRQS; i++)
67 + set_irq_noprobe(i);
68 +
69 arch_init_irq();
70
71 #ifdef CONFIG_KGDB
72 diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
73 index 7e8efaa..5efdfe9 100644
74 --- a/arch/s390/lib/uaccess_pt.c
75 +++ b/arch/s390/lib/uaccess_pt.c
76 @@ -406,6 +406,8 @@ int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
77 {
78 int ret;
79
80 + if (!current->mm)
81 + return -EFAULT;
82 spin_lock(&current->mm->page_table_lock);
83 uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
84 if (!uaddr) {
85 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
86 index e795f28..bf1b15d 100644
87 --- a/arch/sparc/kernel/Makefile
88 +++ b/arch/sparc/kernel/Makefile
89 @@ -1,4 +1,4 @@
90 -# $Id: 0103-2.6.24.4-all-fixes.patch,v 1.1 2008-03-25 11:20:50 niro Exp $
91 +#
92 # Makefile for the linux kernel.
93 #
94
95 @@ -12,7 +12,8 @@ obj-y := entry.o wof.o wuf.o etrap.o rtrap.o traps.o $(IRQ_OBJS) \
96 sys_sparc.o sunos_asm.o systbls.o \
97 time.o windows.o cpu.o devices.o sclow.o \
98 tadpole.o tick14.o ptrace.o sys_solaris.o \
99 - unaligned.o muldiv.o semaphore.o prom.o of_device.o devres.o
100 + unaligned.o una_asm.o muldiv.o semaphore.o \
101 + prom.o of_device.o devres.o
102
103 devres-y = ../../../kernel/irq/devres.o
104
105 diff --git a/arch/sparc/kernel/una_asm.S b/arch/sparc/kernel/una_asm.S
106 new file mode 100644
107 index 0000000..8cc0345
108 --- /dev/null
109 +++ b/arch/sparc/kernel/una_asm.S
110 @@ -0,0 +1,153 @@
111 +/* una_asm.S: Kernel unaligned trap assembler helpers.
112 + *
113 + * Copyright (C) 1996,2005,2008 David S. Miller (davem@davemloft.net)
114 + * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
115 + */
116 +
117 +#include <linux/errno.h>
118 +
119 + .text
120 +
121 +retl_efault:
122 + retl
123 + mov -EFAULT, %o0
124 +
125 + /* int __do_int_store(unsigned long *dst_addr, int size,
126 + * unsigned long *src_val)
127 + *
128 + * %o0 = dest_addr
129 + * %o1 = size
130 + * %o2 = src_val
131 + *
132 + * Return '0' on success, -EFAULT on failure.
133 + */
134 + .globl __do_int_store
135 +__do_int_store:
136 + ld [%o2], %g1
137 + cmp %o1, 2
138 + be 2f
139 + cmp %o1, 4
140 + be 1f
141 + srl %g1, 24, %g2
142 + srl %g1, 16, %g7
143 +4: stb %g2, [%o0]
144 + srl %g1, 8, %g2
145 +5: stb %g7, [%o0 + 1]
146 + ld [%o2 + 4], %g7
147 +6: stb %g2, [%o0 + 2]
148 + srl %g7, 24, %g2
149 +7: stb %g1, [%o0 + 3]
150 + srl %g7, 16, %g1
151 +8: stb %g2, [%o0 + 4]
152 + srl %g7, 8, %g2
153 +9: stb %g1, [%o0 + 5]
154 +10: stb %g2, [%o0 + 6]
155 + b 0f
156 +11: stb %g7, [%o0 + 7]
157 +1: srl %g1, 16, %g7
158 +12: stb %g2, [%o0]
159 + srl %g1, 8, %g2
160 +13: stb %g7, [%o0 + 1]
161 +14: stb %g2, [%o0 + 2]
162 + b 0f
163 +15: stb %g1, [%o0 + 3]
164 +2: srl %g1, 8, %g2
165 +16: stb %g2, [%o0]
166 +17: stb %g1, [%o0 + 1]
167 +0: retl
168 + mov 0, %o0
169 +
170 + .section __ex_table,#alloc
171 + .word 4b, retl_efault
172 + .word 5b, retl_efault
173 + .word 6b, retl_efault
174 + .word 7b, retl_efault
175 + .word 8b, retl_efault
176 + .word 9b, retl_efault
177 + .word 10b, retl_efault
178 + .word 11b, retl_efault
179 + .word 12b, retl_efault
180 + .word 13b, retl_efault
181 + .word 14b, retl_efault
182 + .word 15b, retl_efault
183 + .word 16b, retl_efault
184 + .word 17b, retl_efault
185 + .previous
186 +
187 + /* int do_int_load(unsigned long *dest_reg, int size,
188 + * unsigned long *saddr, int is_signed)
189 + *
190 + * %o0 = dest_reg
191 + * %o1 = size
192 + * %o2 = saddr
193 + * %o3 = is_signed
194 + *
195 + * Return '0' on success, -EFAULT on failure.
196 + */
197 + .globl do_int_load
198 +do_int_load:
199 + cmp %o1, 8
200 + be 9f
201 + cmp %o1, 4
202 + be 6f
203 +4: ldub [%o2], %g1
204 +5: ldub [%o2 + 1], %g2
205 + sll %g1, 8, %g1
206 + tst %o3
207 + be 3f
208 + or %g1, %g2, %g1
209 + sll %g1, 16, %g1
210 + sra %g1, 16, %g1
211 +3: b 0f
212 + st %g1, [%o0]
213 +6: ldub [%o2 + 1], %g2
214 + sll %g1, 24, %g1
215 +7: ldub [%o2 + 2], %g7
216 + sll %g2, 16, %g2
217 +8: ldub [%o2 + 3], %g3
218 + sll %g7, 8, %g7
219 + or %g3, %g2, %g3
220 + or %g7, %g3, %g7
221 + or %g1, %g7, %g1
222 + b 0f
223 + st %g1, [%o0]
224 +9: ldub [%o2], %g1
225 +10: ldub [%o2 + 1], %g2
226 + sll %g1, 24, %g1
227 +11: ldub [%o2 + 2], %g7
228 + sll %g2, 16, %g2
229 +12: ldub [%o2 + 3], %g3
230 + sll %g7, 8, %g7
231 + or %g1, %g2, %g1
232 + or %g7, %g3, %g7
233 + or %g1, %g7, %g7
234 +13: ldub [%o2 + 4], %g1
235 + st %g7, [%o0]
236 +14: ldub [%o2 + 5], %g2
237 + sll %g1, 24, %g1
238 +15: ldub [%o2 + 6], %g7
239 + sll %g2, 16, %g2
240 +16: ldub [%o2 + 7], %g3
241 + sll %g7, 8, %g7
242 + or %g1, %g2, %g1
243 + or %g7, %g3, %g7
244 + or %g1, %g7, %g7
245 + st %g7, [%o0 + 4]
246 +0: retl
247 + mov 0, %o0
248 +
249 + .section __ex_table,#alloc
250 + .word 4b, retl_efault
251 + .word 5b, retl_efault
252 + .word 6b, retl_efault
253 + .word 7b, retl_efault
254 + .word 8b, retl_efault
255 + .word 9b, retl_efault
256 + .word 10b, retl_efault
257 + .word 11b, retl_efault
258 + .word 12b, retl_efault
259 + .word 13b, retl_efault
260 + .word 14b, retl_efault
261 + .word 15b, retl_efault
262 + .word 16b, retl_efault
263 + .previous
264 diff --git a/arch/sparc/kernel/unaligned.c b/arch/sparc/kernel/unaligned.c
265 index a6330fb..33857be 100644
266 --- a/arch/sparc/kernel/unaligned.c
267 +++ b/arch/sparc/kernel/unaligned.c
268 @@ -175,157 +175,31 @@ static void unaligned_panic(char *str)
269 panic(str);
270 }
271
272 -#define do_integer_load(dest_reg, size, saddr, is_signed, errh) ({ \
273 -__asm__ __volatile__ ( \
274 - "cmp %1, 8\n\t" \
275 - "be 9f\n\t" \
276 - " cmp %1, 4\n\t" \
277 - "be 6f\n" \
278 -"4:\t" " ldub [%2], %%l1\n" \
279 -"5:\t" "ldub [%2 + 1], %%l2\n\t" \
280 - "sll %%l1, 8, %%l1\n\t" \
281 - "tst %3\n\t" \
282 - "be 3f\n\t" \
283 - " add %%l1, %%l2, %%l1\n\t" \
284 - "sll %%l1, 16, %%l1\n\t" \
285 - "sra %%l1, 16, %%l1\n" \
286 -"3:\t" "b 0f\n\t" \
287 - " st %%l1, [%0]\n" \
288 -"6:\t" "ldub [%2 + 1], %%l2\n\t" \
289 - "sll %%l1, 24, %%l1\n" \
290 -"7:\t" "ldub [%2 + 2], %%g7\n\t" \
291 - "sll %%l2, 16, %%l2\n" \
292 -"8:\t" "ldub [%2 + 3], %%g1\n\t" \
293 - "sll %%g7, 8, %%g7\n\t" \
294 - "or %%l1, %%l2, %%l1\n\t" \
295 - "or %%g7, %%g1, %%g7\n\t" \
296 - "or %%l1, %%g7, %%l1\n\t" \
297 - "b 0f\n\t" \
298 - " st %%l1, [%0]\n" \
299 -"9:\t" "ldub [%2], %%l1\n" \
300 -"10:\t" "ldub [%2 + 1], %%l2\n\t" \
301 - "sll %%l1, 24, %%l1\n" \
302 -"11:\t" "ldub [%2 + 2], %%g7\n\t" \
303 - "sll %%l2, 16, %%l2\n" \
304 -"12:\t" "ldub [%2 + 3], %%g1\n\t" \
305 - "sll %%g7, 8, %%g7\n\t" \
306 - "or %%l1, %%l2, %%l1\n\t" \
307 - "or %%g7, %%g1, %%g7\n\t" \
308 - "or %%l1, %%g7, %%g7\n" \
309 -"13:\t" "ldub [%2 + 4], %%l1\n\t" \
310 - "st %%g7, [%0]\n" \
311 -"14:\t" "ldub [%2 + 5], %%l2\n\t" \
312 - "sll %%l1, 24, %%l1\n" \
313 -"15:\t" "ldub [%2 + 6], %%g7\n\t" \
314 - "sll %%l2, 16, %%l2\n" \
315 -"16:\t" "ldub [%2 + 7], %%g1\n\t" \
316 - "sll %%g7, 8, %%g7\n\t" \
317 - "or %%l1, %%l2, %%l1\n\t" \
318 - "or %%g7, %%g1, %%g7\n\t" \
319 - "or %%l1, %%g7, %%g7\n\t" \
320 - "st %%g7, [%0 + 4]\n" \
321 -"0:\n\n\t" \
322 - ".section __ex_table,#alloc\n\t" \
323 - ".word 4b, " #errh "\n\t" \
324 - ".word 5b, " #errh "\n\t" \
325 - ".word 6b, " #errh "\n\t" \
326 - ".word 7b, " #errh "\n\t" \
327 - ".word 8b, " #errh "\n\t" \
328 - ".word 9b, " #errh "\n\t" \
329 - ".word 10b, " #errh "\n\t" \
330 - ".word 11b, " #errh "\n\t" \
331 - ".word 12b, " #errh "\n\t" \
332 - ".word 13b, " #errh "\n\t" \
333 - ".word 14b, " #errh "\n\t" \
334 - ".word 15b, " #errh "\n\t" \
335 - ".word 16b, " #errh "\n\n\t" \
336 - ".previous\n\t" \
337 - : : "r" (dest_reg), "r" (size), "r" (saddr), "r" (is_signed) \
338 - : "l1", "l2", "g7", "g1", "cc"); \
339 -})
340 -
341 -#define store_common(dst_addr, size, src_val, errh) ({ \
342 -__asm__ __volatile__ ( \
343 - "ld [%2], %%l1\n" \
344 - "cmp %1, 2\n\t" \
345 - "be 2f\n\t" \
346 - " cmp %1, 4\n\t" \
347 - "be 1f\n\t" \
348 - " srl %%l1, 24, %%l2\n\t" \
349 - "srl %%l1, 16, %%g7\n" \
350 -"4:\t" "stb %%l2, [%0]\n\t" \
351 - "srl %%l1, 8, %%l2\n" \
352 -"5:\t" "stb %%g7, [%0 + 1]\n\t" \
353 - "ld [%2 + 4], %%g7\n" \
354 -"6:\t" "stb %%l2, [%0 + 2]\n\t" \
355 - "srl %%g7, 24, %%l2\n" \
356 -"7:\t" "stb %%l1, [%0 + 3]\n\t" \
357 - "srl %%g7, 16, %%l1\n" \
358 -"8:\t" "stb %%l2, [%0 + 4]\n\t" \
359 - "srl %%g7, 8, %%l2\n" \
360 -"9:\t" "stb %%l1, [%0 + 5]\n" \
361 -"10:\t" "stb %%l2, [%0 + 6]\n\t" \
362 - "b 0f\n" \
363 -"11:\t" " stb %%g7, [%0 + 7]\n" \
364 -"1:\t" "srl %%l1, 16, %%g7\n" \
365 -"12:\t" "stb %%l2, [%0]\n\t" \
366 - "srl %%l1, 8, %%l2\n" \
367 -"13:\t" "stb %%g7, [%0 + 1]\n" \
368 -"14:\t" "stb %%l2, [%0 + 2]\n\t" \
369 - "b 0f\n" \
370 -"15:\t" " stb %%l1, [%0 + 3]\n" \
371 -"2:\t" "srl %%l1, 8, %%l2\n" \
372 -"16:\t" "stb %%l2, [%0]\n" \
373 -"17:\t" "stb %%l1, [%0 + 1]\n" \
374 -"0:\n\n\t" \
375 - ".section __ex_table,#alloc\n\t" \
376 - ".word 4b, " #errh "\n\t" \
377 - ".word 5b, " #errh "\n\t" \
378 - ".word 6b, " #errh "\n\t" \
379 - ".word 7b, " #errh "\n\t" \
380 - ".word 8b, " #errh "\n\t" \
381 - ".word 9b, " #errh "\n\t" \
382 - ".word 10b, " #errh "\n\t" \
383 - ".word 11b, " #errh "\n\t" \
384 - ".word 12b, " #errh "\n\t" \
385 - ".word 13b, " #errh "\n\t" \
386 - ".word 14b, " #errh "\n\t" \
387 - ".word 15b, " #errh "\n\t" \
388 - ".word 16b, " #errh "\n\t" \
389 - ".word 17b, " #errh "\n\n\t" \
390 - ".previous\n\t" \
391 - : : "r" (dst_addr), "r" (size), "r" (src_val) \
392 - : "l1", "l2", "g7", "g1", "cc"); \
393 -})
394 -
395 -#define do_integer_store(reg_num, size, dst_addr, regs, errh) ({ \
396 - unsigned long *src_val; \
397 - static unsigned long zero[2] = { 0, }; \
398 - \
399 - if (reg_num) src_val = fetch_reg_addr(reg_num, regs); \
400 - else { \
401 - src_val = &zero[0]; \
402 - if (size == 8) \
403 - zero[1] = fetch_reg(1, regs); \
404 - } \
405 - store_common(dst_addr, size, src_val, errh); \
406 -})
407 +/* una_asm.S */
408 +extern int do_int_load(unsigned long *dest_reg, int size,
409 + unsigned long *saddr, int is_signed);
410 +extern int __do_int_store(unsigned long *dst_addr, int size,
411 + unsigned long *src_val);
412 +
413 +static int do_int_store(int reg_num, int size, unsigned long *dst_addr,
414 + struct pt_regs *regs)
415 +{
416 + unsigned long zero[2] = { 0, 0 };
417 + unsigned long *src_val;
418 +
419 + if (reg_num)
420 + src_val = fetch_reg_addr(reg_num, regs);
421 + else {
422 + src_val = &zero[0];
423 + if (size == 8)
424 + zero[1] = fetch_reg(1, regs);
425 + }
426 + return __do_int_store(dst_addr, size, src_val);
427 +}
428
429 extern void smp_capture(void);
430 extern void smp_release(void);
431
432 -#define do_atomic(srcdest_reg, mem, errh) ({ \
433 - unsigned long flags, tmp; \
434 - \
435 - smp_capture(); \
436 - local_irq_save(flags); \
437 - tmp = *srcdest_reg; \
438 - do_integer_load(srcdest_reg, 4, mem, 0, errh); \
439 - store_common(mem, 4, &tmp, errh); \
440 - local_irq_restore(flags); \
441 - smp_release(); \
442 -})
443 -
444 static inline void advance(struct pt_regs *regs)
445 {
446 regs->pc = regs->npc;
447 @@ -342,9 +216,7 @@ static inline int ok_for_kernel(unsigned int insn)
448 return !floating_point_load_or_store_p(insn);
449 }
450
451 -void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn) __asm__ ("kernel_mna_trap_fault");
452 -
453 -void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
454 +static void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
455 {
456 unsigned long g2 = regs->u_regs [UREG_G2];
457 unsigned long fixup = search_extables_range(regs->pc, &g2);
458 @@ -379,48 +251,34 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
459 printk("Unsupported unaligned load/store trap for kernel at <%08lx>.\n",
460 regs->pc);
461 unaligned_panic("Wheee. Kernel does fpu/atomic unaligned load/store.");
462 -
463 - __asm__ __volatile__ ("\n"
464 -"kernel_unaligned_trap_fault:\n\t"
465 - "mov %0, %%o0\n\t"
466 - "call kernel_mna_trap_fault\n\t"
467 - " mov %1, %%o1\n\t"
468 - :
469 - : "r" (regs), "r" (insn)
470 - : "o0", "o1", "o2", "o3", "o4", "o5", "o7",
471 - "g1", "g2", "g3", "g4", "g5", "g7", "cc");
472 } else {
473 unsigned long addr = compute_effective_address(regs, insn);
474 + int err;
475
476 #ifdef DEBUG_MNA
477 printk("KMNA: pc=%08lx [dir=%s addr=%08lx size=%d] retpc[%08lx]\n",
478 regs->pc, dirstrings[dir], addr, size, regs->u_regs[UREG_RETPC]);
479 #endif
480 - switch(dir) {
481 + switch (dir) {
482 case load:
483 - do_integer_load(fetch_reg_addr(((insn>>25)&0x1f), regs),
484 - size, (unsigned long *) addr,
485 - decode_signedness(insn),
486 - kernel_unaligned_trap_fault);
487 + err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
488 + regs),
489 + size, (unsigned long *) addr,
490 + decode_signedness(insn));
491 break;
492
493 case store:
494 - do_integer_store(((insn>>25)&0x1f), size,
495 - (unsigned long *) addr, regs,
496 - kernel_unaligned_trap_fault);
497 + err = do_int_store(((insn>>25)&0x1f), size,
498 + (unsigned long *) addr, regs);
499 break;
500 -#if 0 /* unsupported */
501 - case both:
502 - do_atomic(fetch_reg_addr(((insn>>25)&0x1f), regs),
503 - (unsigned long *) addr,
504 - kernel_unaligned_trap_fault);
505 - break;
506 -#endif
507 default:
508 panic("Impossible kernel unaligned trap.");
509 /* Not reached... */
510 }
511 - advance(regs);
512 + if (err)
513 + kernel_mna_trap_fault(regs, insn);
514 + else
515 + advance(regs);
516 }
517 }
518
519 @@ -459,9 +317,7 @@ static inline int ok_for_user(struct pt_regs *regs, unsigned int insn,
520 return 0;
521 }
522
523 -void user_mna_trap_fault(struct pt_regs *regs, unsigned int insn) __asm__ ("user_mna_trap_fault");
524 -
525 -void user_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
526 +static void user_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
527 {
528 siginfo_t info;
529
530 @@ -485,7 +341,7 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
531 if(!ok_for_user(regs, insn, dir)) {
532 goto kill_user;
533 } else {
534 - int size = decode_access_size(insn);
535 + int err, size = decode_access_size(insn);
536 unsigned long addr;
537
538 if(floating_point_load_or_store_p(insn)) {
539 @@ -496,48 +352,34 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
540 addr = compute_effective_address(regs, insn);
541 switch(dir) {
542 case load:
543 - do_integer_load(fetch_reg_addr(((insn>>25)&0x1f), regs),
544 - size, (unsigned long *) addr,
545 - decode_signedness(insn),
546 - user_unaligned_trap_fault);
547 + err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
548 + regs),
549 + size, (unsigned long *) addr,
550 + decode_signedness(insn));
551 break;
552
553 case store:
554 - do_integer_store(((insn>>25)&0x1f), size,
555 - (unsigned long *) addr, regs,
556 - user_unaligned_trap_fault);
557 + err = do_int_store(((insn>>25)&0x1f), size,
558 + (unsigned long *) addr, regs);
559 break;
560
561 case both:
562 -#if 0 /* unsupported */
563 - do_atomic(fetch_reg_addr(((insn>>25)&0x1f), regs),
564 - (unsigned long *) addr,
565 - user_unaligned_trap_fault);
566 -#else
567 /*
568 * This was supported in 2.4. However, we question
569 * the value of SWAP instruction across word boundaries.
570 */
571 printk("Unaligned SWAP unsupported.\n");
572 - goto kill_user;
573 -#endif
574 + err = -EFAULT;
575 break;
576
577 default:
578 unaligned_panic("Impossible user unaligned trap.");
579 -
580 - __asm__ __volatile__ ("\n"
581 -"user_unaligned_trap_fault:\n\t"
582 - "mov %0, %%o0\n\t"
583 - "call user_mna_trap_fault\n\t"
584 - " mov %1, %%o1\n\t"
585 - :
586 - : "r" (regs), "r" (insn)
587 - : "o0", "o1", "o2", "o3", "o4", "o5", "o7",
588 - "g1", "g2", "g3", "g4", "g5", "g7", "cc");
589 goto out;
590 }
591 - advance(regs);
592 + if (err)
593 + goto kill_user;
594 + else
595 + advance(regs);
596 goto out;
597 }
598
599 diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
600 index e2027f2..2650d0d 100644
601 --- a/arch/sparc64/mm/fault.c
602 +++ b/arch/sparc64/mm/fault.c
603 @@ -244,16 +244,8 @@ static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code,
604 if (regs->tstate & TSTATE_PRIV) {
605 const struct exception_table_entry *entry;
606
607 - if (asi == ASI_P && (insn & 0xc0800000) == 0xc0800000) {
608 - if (insn & 0x2000)
609 - asi = (regs->tstate >> 24);
610 - else
611 - asi = (insn >> 5);
612 - }
613 -
614 - /* Look in asi.h: All _S asis have LS bit set */
615 - if ((asi & 0x1) &&
616 - (entry = search_exception_tables(regs->tpc))) {
617 + entry = search_exception_tables(regs->tpc);
618 + if (entry) {
619 regs->tpc = entry->fixup;
620 regs->tnpc = regs->tpc + 4;
621 return;
622 @@ -294,7 +286,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
623 unsigned long tpc = regs->tpc;
624
625 /* Sanity check the PC. */
626 - if ((tpc >= KERNBASE && tpc < (unsigned long) _etext) ||
627 + if ((tpc >= KERNBASE && tpc < (unsigned long) __init_end) ||
628 (tpc >= MODULES_VADDR && tpc < MODULES_END)) {
629 /* Valid, no problems... */
630 } else {
631 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
632 index 6ea19c2..4eaaf78 100644
633 --- a/arch/x86/ia32/ia32_signal.c
634 +++ b/arch/x86/ia32/ia32_signal.c
635 @@ -494,7 +494,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
636 regs->ss = __USER32_DS;
637
638 set_fs(USER_DS);
639 - regs->eflags &= ~TF_MASK;
640 + regs->eflags &= ~(TF_MASK | X86_EFLAGS_DF);
641 if (test_thread_flag(TIF_SINGLESTEP))
642 ptrace_notify(SIGTRAP);
643
644 @@ -600,7 +600,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
645 regs->ss = __USER32_DS;
646
647 set_fs(USER_DS);
648 - regs->eflags &= ~TF_MASK;
649 + regs->eflags &= ~(TF_MASK | X86_EFLAGS_DF);
650 if (test_thread_flag(TIF_SINGLESTEP))
651 ptrace_notify(SIGTRAP);
652
653 diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
654 index edb5108..c48fbb1 100644
655 --- a/arch/x86/kernel/apic_32.c
656 +++ b/arch/x86/kernel/apic_32.c
657 @@ -154,7 +154,7 @@ unsigned long safe_apic_wait_icr_idle(void)
658 /**
659 * enable_NMI_through_LVT0 - enable NMI through local vector table 0
660 */
661 -void enable_NMI_through_LVT0 (void * dummy)
662 +void __cpuinit enable_NMI_through_LVT0(void)
663 {
664 unsigned int v = APIC_DM_NMI;
665
666 diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
667 index f28ccb5..0173007 100644
668 --- a/arch/x86/kernel/apic_64.c
669 +++ b/arch/x86/kernel/apic_64.c
670 @@ -151,7 +151,7 @@ unsigned int safe_apic_wait_icr_idle(void)
671 return send_status;
672 }
673
674 -void enable_NMI_through_LVT0 (void * dummy)
675 +void enable_NMI_through_LVT0(void)
676 {
677 unsigned int v;
678
679 diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
680 index a6b1490..232fdeb 100644
681 --- a/arch/x86/kernel/io_apic_32.c
682 +++ b/arch/x86/kernel/io_apic_32.c
683 @@ -2080,7 +2080,7 @@ static struct irq_chip lapic_chip __read_mostly = {
684 .eoi = ack_apic,
685 };
686
687 -static void setup_nmi (void)
688 +static void __init setup_nmi(void)
689 {
690 /*
691 * Dirty trick to enable the NMI watchdog ...
692 @@ -2093,7 +2093,7 @@ static void setup_nmi (void)
693 */
694 apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
695
696 - on_each_cpu(enable_NMI_through_LVT0, NULL, 1, 1);
697 + enable_NMI_through_LVT0();
698
699 apic_printk(APIC_VERBOSE, " done.\n");
700 }
701 diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
702 index cbac167..7119cb7 100644
703 --- a/arch/x86/kernel/io_apic_64.c
704 +++ b/arch/x86/kernel/io_apic_64.c
705 @@ -1565,7 +1565,7 @@ static struct hw_interrupt_type lapic_irq_type __read_mostly = {
706 .end = end_lapic_irq,
707 };
708
709 -static void setup_nmi (void)
710 +static void __init setup_nmi(void)
711 {
712 /*
713 * Dirty trick to enable the NMI watchdog ...
714 @@ -1578,7 +1578,7 @@ static void setup_nmi (void)
715 */
716 printk(KERN_INFO "activating NMI Watchdog ...");
717
718 - enable_NMI_through_LVT0(NULL);
719 + enable_NMI_through_LVT0();
720
721 printk(" done.\n");
722 }
723 @@ -1654,7 +1654,7 @@ static inline void unlock_ExtINT_logic(void)
724 *
725 * FIXME: really need to revamp this for modern platforms only.
726 */
727 -static inline void check_timer(void)
728 +static inline void __init check_timer(void)
729 {
730 struct irq_cfg *cfg = irq_cfg + 0;
731 int apic1, pin1, apic2, pin2;
732 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
733 index ab79e1d..d7f7132 100644
734 --- a/arch/x86/kernel/process_64.c
735 +++ b/arch/x86/kernel/process_64.c
736 @@ -212,14 +212,13 @@ void cpu_idle (void)
737 current_thread_info()->status |= TS_POLLING;
738 /* endless idle loop with no priority at all */
739 while (1) {
740 + tick_nohz_stop_sched_tick();
741 while (!need_resched()) {
742 void (*idle)(void);
743
744 if (__get_cpu_var(cpu_idle_state))
745 __get_cpu_var(cpu_idle_state) = 0;
746
747 - tick_nohz_stop_sched_tick();
748 -
749 rmb();
750 idle = pm_idle;
751 if (!idle)
752 diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c
753 index 9bdd830..20056db 100644
754 --- a/arch/x86/kernel/signal_32.c
755 +++ b/arch/x86/kernel/signal_32.c
756 @@ -396,7 +396,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
757 * The tracer may want to single-step inside the
758 * handler too.
759 */
760 - regs->eflags &= ~TF_MASK;
761 + regs->eflags &= ~(TF_MASK | X86_EFLAGS_DF);
762 if (test_thread_flag(TIF_SINGLESTEP))
763 ptrace_notify(SIGTRAP);
764
765 @@ -489,7 +489,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
766 * The tracer may want to single-step inside the
767 * handler too.
768 */
769 - regs->eflags &= ~TF_MASK;
770 + regs->eflags &= ~(TF_MASK | X86_EFLAGS_DF);
771 if (test_thread_flag(TIF_SINGLESTEP))
772 ptrace_notify(SIGTRAP);
773
774 diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c
775 index ab086b0..62964c5 100644
776 --- a/arch/x86/kernel/signal_64.c
777 +++ b/arch/x86/kernel/signal_64.c
778 @@ -295,7 +295,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
779 see include/asm-x86_64/uaccess.h for details. */
780 set_fs(USER_DS);
781
782 - regs->eflags &= ~TF_MASK;
783 + regs->eflags &= ~(TF_MASK | X86_EFLAGS_DF);
784 if (test_thread_flag(TIF_SINGLESTEP))
785 ptrace_notify(SIGTRAP);
786 #ifdef DEBUG_SIG
787 diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
788 index 4ea80cb..fe200cf 100644
789 --- a/arch/x86/kernel/smpboot_32.c
790 +++ b/arch/x86/kernel/smpboot_32.c
791 @@ -405,7 +405,7 @@ static void __cpuinit start_secondary(void *unused)
792 setup_secondary_clock();
793 if (nmi_watchdog == NMI_IO_APIC) {
794 disable_8259A_irq(0);
795 - enable_NMI_through_LVT0(NULL);
796 + enable_NMI_through_LVT0();
797 enable_8259A_irq(0);
798 }
799 /*
800 diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
801 index aaf4e12..eca8026 100644
802 --- a/arch/x86/kernel/smpboot_64.c
803 +++ b/arch/x86/kernel/smpboot_64.c
804 @@ -338,7 +338,7 @@ void __cpuinit start_secondary(void)
805
806 if (nmi_watchdog == NMI_IO_APIC) {
807 disable_8259A_irq(0);
808 - enable_NMI_through_LVT0(NULL);
809 + enable_NMI_through_LVT0();
810 enable_8259A_irq(0);
811 }
812
813 diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
814 index 4df637e..6b521d3 100644
815 --- a/arch/x86/pci/mmconfig-shared.c
816 +++ b/arch/x86/pci/mmconfig-shared.c
817 @@ -22,42 +22,9 @@
818 #define MMCONFIG_APER_MIN (2 * 1024*1024)
819 #define MMCONFIG_APER_MAX (256 * 1024*1024)
820
821 -DECLARE_BITMAP(pci_mmcfg_fallback_slots, 32*PCI_MMCFG_MAX_CHECK_BUS);
822 -
823 /* Indicate if the mmcfg resources have been placed into the resource table. */
824 static int __initdata pci_mmcfg_resources_inserted;
825
826 -/* K8 systems have some devices (typically in the builtin northbridge)
827 - that are only accessible using type1
828 - Normally this can be expressed in the MCFG by not listing them
829 - and assigning suitable _SEGs, but this isn't implemented in some BIOS.
830 - Instead try to discover all devices on bus 0 that are unreachable using MM
831 - and fallback for them. */
832 -static void __init unreachable_devices(void)
833 -{
834 - int i, bus;
835 - /* Use the max bus number from ACPI here? */
836 - for (bus = 0; bus < PCI_MMCFG_MAX_CHECK_BUS; bus++) {
837 - for (i = 0; i < 32; i++) {
838 - unsigned int devfn = PCI_DEVFN(i, 0);
839 - u32 val1, val2;
840 -
841 - pci_conf1_read(0, bus, devfn, 0, 4, &val1);
842 - if (val1 == 0xffffffff)
843 - continue;
844 -
845 - if (pci_mmcfg_arch_reachable(0, bus, devfn)) {
846 - raw_pci_ops->read(0, bus, devfn, 0, 4, &val2);
847 - if (val1 == val2)
848 - continue;
849 - }
850 - set_bit(i + 32 * bus, pci_mmcfg_fallback_slots);
851 - printk(KERN_NOTICE "PCI: No mmconfig possible on device"
852 - " %02x:%02x\n", bus, i);
853 - }
854 - }
855 -}
856 -
857 static const char __init *pci_mmcfg_e7520(void)
858 {
859 u32 win;
860 @@ -270,8 +237,6 @@ void __init pci_mmcfg_init(int type)
861 return;
862
863 if (pci_mmcfg_arch_init()) {
864 - if (type == 1)
865 - unreachable_devices();
866 if (known_bridge)
867 pci_mmcfg_insert_resources(IORESOURCE_BUSY);
868 pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF;
869 diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c
870 index 1bf5816..7b75e65 100644
871 --- a/arch/x86/pci/mmconfig_32.c
872 +++ b/arch/x86/pci/mmconfig_32.c
873 @@ -30,10 +30,6 @@ static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
874 struct acpi_mcfg_allocation *cfg;
875 int cfg_num;
876
877 - if (seg == 0 && bus < PCI_MMCFG_MAX_CHECK_BUS &&
878 - test_bit(PCI_SLOT(devfn) + 32*bus, pci_mmcfg_fallback_slots))
879 - return 0;
880 -
881 for (cfg_num = 0; cfg_num < pci_mmcfg_config_num; cfg_num++) {
882 cfg = &pci_mmcfg_config[cfg_num];
883 if (cfg->pci_segment == seg &&
884 @@ -68,13 +64,16 @@ static int pci_mmcfg_read(unsigned int seg, unsigned int bus,
885 u32 base;
886
887 if ((bus > 255) || (devfn > 255) || (reg > 4095)) {
888 - *value = -1;
889 +err: *value = -1;
890 return -EINVAL;
891 }
892
893 + if (reg < 256)
894 + return pci_conf1_read(seg,bus,devfn,reg,len,value);
895 +
896 base = get_base_addr(seg, bus, devfn);
897 if (!base)
898 - return pci_conf1_read(seg,bus,devfn,reg,len,value);
899 + goto err;
900
901 spin_lock_irqsave(&pci_config_lock, flags);
902
903 @@ -105,9 +104,12 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
904 if ((bus > 255) || (devfn > 255) || (reg > 4095))
905 return -EINVAL;
906
907 + if (reg < 256)
908 + return pci_conf1_write(seg,bus,devfn,reg,len,value);
909 +
910 base = get_base_addr(seg, bus, devfn);
911 if (!base)
912 - return pci_conf1_write(seg,bus,devfn,reg,len,value);
913 + return -EINVAL;
914
915 spin_lock_irqsave(&pci_config_lock, flags);
916
917 @@ -134,12 +136,6 @@ static struct pci_raw_ops pci_mmcfg = {
918 .write = pci_mmcfg_write,
919 };
920
921 -int __init pci_mmcfg_arch_reachable(unsigned int seg, unsigned int bus,
922 - unsigned int devfn)
923 -{
924 - return get_base_addr(seg, bus, devfn) != 0;
925 -}
926 -
927 int __init pci_mmcfg_arch_init(void)
928 {
929 printk(KERN_INFO "PCI: Using MMCONFIG\n");
930 diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c
931 index 4095e4d..c4cf318 100644
932 --- a/arch/x86/pci/mmconfig_64.c
933 +++ b/arch/x86/pci/mmconfig_64.c
934 @@ -40,9 +40,7 @@ static char __iomem *get_virt(unsigned int seg, unsigned bus)
935 static char __iomem *pci_dev_base(unsigned int seg, unsigned int bus, unsigned int devfn)
936 {
937 char __iomem *addr;
938 - if (seg == 0 && bus < PCI_MMCFG_MAX_CHECK_BUS &&
939 - test_bit(32*bus + PCI_SLOT(devfn), pci_mmcfg_fallback_slots))
940 - return NULL;
941 +
942 addr = get_virt(seg, bus);
943 if (!addr)
944 return NULL;
945 @@ -56,13 +54,16 @@ static int pci_mmcfg_read(unsigned int seg, unsigned int bus,
946
947 /* Why do we have this when nobody checks it. How about a BUG()!? -AK */
948 if (unlikely((bus > 255) || (devfn > 255) || (reg > 4095))) {
949 - *value = -1;
950 +err: *value = -1;
951 return -EINVAL;
952 }
953
954 + if (reg < 256)
955 + return pci_conf1_read(seg,bus,devfn,reg,len,value);
956 +
957 addr = pci_dev_base(seg, bus, devfn);
958 if (!addr)
959 - return pci_conf1_read(seg,bus,devfn,reg,len,value);
960 + goto err;
961
962 switch (len) {
963 case 1:
964 @@ -88,9 +89,12 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
965 if (unlikely((bus > 255) || (devfn > 255) || (reg > 4095)))
966 return -EINVAL;
967
968 + if (reg < 256)
969 + return pci_conf1_write(seg,bus,devfn,reg,len,value);
970 +
971 addr = pci_dev_base(seg, bus, devfn);
972 if (!addr)
973 - return pci_conf1_write(seg,bus,devfn,reg,len,value);
974 + return -EINVAL;
975
976 switch (len) {
977 case 1:
978 @@ -126,12 +130,6 @@ static void __iomem * __init mcfg_ioremap(struct acpi_mcfg_allocation *cfg)
979 return addr;
980 }
981
982 -int __init pci_mmcfg_arch_reachable(unsigned int seg, unsigned int bus,
983 - unsigned int devfn)
984 -{
985 - return pci_dev_base(seg, bus, devfn) != NULL;
986 -}
987 -
988 int __init pci_mmcfg_arch_init(void)
989 {
990 int i;
991 diff --git a/arch/x86/pci/pci.h b/arch/x86/pci/pci.h
992 index ac56d39..36cb44c 100644
993 --- a/arch/x86/pci/pci.h
994 +++ b/arch/x86/pci/pci.h
995 @@ -98,13 +98,6 @@ extern void pcibios_sort(void);
996
997 /* pci-mmconfig.c */
998
999 -/* Verify the first 16 busses. We assume that systems with more busses
1000 - get MCFG right. */
1001 -#define PCI_MMCFG_MAX_CHECK_BUS 16
1002 -extern DECLARE_BITMAP(pci_mmcfg_fallback_slots, 32*PCI_MMCFG_MAX_CHECK_BUS);
1003 -
1004 -extern int __init pci_mmcfg_arch_reachable(unsigned int seg, unsigned int bus,
1005 - unsigned int devfn);
1006 extern int __init pci_mmcfg_arch_init(void);
1007
1008 /*
1009 diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
1010 index 2575f67..5c579d2 100644
1011 --- a/crypto/async_tx/async_xor.c
1012 +++ b/crypto/async_tx/async_xor.c
1013 @@ -264,7 +264,7 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
1014
1015 BUG_ON(src_cnt <= 1);
1016
1017 - if (tx) {
1018 + if (tx && src_cnt <= device->max_xor) {
1019 dma_addr_t dma_addr;
1020 enum dma_data_direction dir;
1021
1022 diff --git a/crypto/xcbc.c b/crypto/xcbc.c
1023 index ac68f3b..a957373 100644
1024 --- a/crypto/xcbc.c
1025 +++ b/crypto/xcbc.c
1026 @@ -124,6 +124,11 @@ static int crypto_xcbc_digest_update2(struct hash_desc *pdesc,
1027 unsigned int offset = sg[i].offset;
1028 unsigned int slen = sg[i].length;
1029
1030 + if (unlikely(slen > nbytes))
1031 + slen = nbytes;
1032 +
1033 + nbytes -= slen;
1034 +
1035 while (slen > 0) {
1036 unsigned int len = min(slen, ((unsigned int)(PAGE_SIZE)) - offset);
1037 char *p = crypto_kmap(pg, 0) + offset;
1038 @@ -177,7 +182,6 @@ static int crypto_xcbc_digest_update2(struct hash_desc *pdesc,
1039 offset = 0;
1040 pg++;
1041 }
1042 - nbytes-=sg[i].length;
1043 i++;
1044 } while (nbytes>0);
1045
1046 diff --git a/crypto/xts.c b/crypto/xts.c
1047 index 8eb08bf..d87b0f3 100644
1048 --- a/crypto/xts.c
1049 +++ b/crypto/xts.c
1050 @@ -77,16 +77,16 @@ static int setkey(struct crypto_tfm *parent, const u8 *key,
1051 }
1052
1053 struct sinfo {
1054 - be128 t;
1055 + be128 *t;
1056 struct crypto_tfm *tfm;
1057 void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
1058 };
1059
1060 static inline void xts_round(struct sinfo *s, void *dst, const void *src)
1061 {
1062 - be128_xor(dst, &s->t, src); /* PP <- T xor P */
1063 + be128_xor(dst, s->t, src); /* PP <- T xor P */
1064 s->fn(s->tfm, dst, dst); /* CC <- E(Key1,PP) */
1065 - be128_xor(dst, dst, &s->t); /* C <- T xor CC */
1066 + be128_xor(dst, dst, s->t); /* C <- T xor CC */
1067 }
1068
1069 static int crypt(struct blkcipher_desc *d,
1070 @@ -101,7 +101,6 @@ static int crypt(struct blkcipher_desc *d,
1071 .tfm = crypto_cipher_tfm(ctx->child),
1072 .fn = fn
1073 };
1074 - be128 *iv;
1075 u8 *wsrc;
1076 u8 *wdst;
1077
1078 @@ -109,20 +108,20 @@ static int crypt(struct blkcipher_desc *d,
1079 if (!w->nbytes)
1080 return err;
1081
1082 + s.t = (be128 *)w->iv;
1083 avail = w->nbytes;
1084
1085 wsrc = w->src.virt.addr;
1086 wdst = w->dst.virt.addr;
1087
1088 /* calculate first value of T */
1089 - iv = (be128 *)w->iv;
1090 - tw(crypto_cipher_tfm(ctx->tweak), (void *)&s.t, w->iv);
1091 + tw(crypto_cipher_tfm(ctx->tweak), w->iv, w->iv);
1092
1093 goto first;
1094
1095 for (;;) {
1096 do {
1097 - gf128mul_x_ble(&s.t, &s.t);
1098 + gf128mul_x_ble(s.t, s.t);
1099
1100 first:
1101 xts_round(&s, wdst, wsrc);
1102 diff --git a/drivers/acorn/char/defkeymap-l7200.c b/drivers/acorn/char/defkeymap-l7200.c
1103 index 28a5fbc..93d80a1 100644
1104 --- a/drivers/acorn/char/defkeymap-l7200.c
1105 +++ b/drivers/acorn/char/defkeymap-l7200.c
1106 @@ -347,40 +347,40 @@ char *func_table[MAX_NR_FUNC] = {
1107 };
1108
1109 struct kbdiacruc accent_table[MAX_DIACR] = {
1110 - {'`', 'A', '\300'}, {'`', 'a', '\340'},
1111 - {'\'', 'A', '\301'}, {'\'', 'a', '\341'},
1112 - {'^', 'A', '\302'}, {'^', 'a', '\342'},
1113 - {'~', 'A', '\303'}, {'~', 'a', '\343'},
1114 - {'"', 'A', '\304'}, {'"', 'a', '\344'},
1115 - {'O', 'A', '\305'}, {'o', 'a', '\345'},
1116 - {'0', 'A', '\305'}, {'0', 'a', '\345'},
1117 - {'A', 'A', '\305'}, {'a', 'a', '\345'},
1118 - {'A', 'E', '\306'}, {'a', 'e', '\346'},
1119 - {',', 'C', '\307'}, {',', 'c', '\347'},
1120 - {'`', 'E', '\310'}, {'`', 'e', '\350'},
1121 - {'\'', 'E', '\311'}, {'\'', 'e', '\351'},
1122 - {'^', 'E', '\312'}, {'^', 'e', '\352'},
1123 - {'"', 'E', '\313'}, {'"', 'e', '\353'},
1124 - {'`', 'I', '\314'}, {'`', 'i', '\354'},
1125 - {'\'', 'I', '\315'}, {'\'', 'i', '\355'},
1126 - {'^', 'I', '\316'}, {'^', 'i', '\356'},
1127 - {'"', 'I', '\317'}, {'"', 'i', '\357'},
1128 - {'-', 'D', '\320'}, {'-', 'd', '\360'},
1129 - {'~', 'N', '\321'}, {'~', 'n', '\361'},
1130 - {'`', 'O', '\322'}, {'`', 'o', '\362'},
1131 - {'\'', 'O', '\323'}, {'\'', 'o', '\363'},
1132 - {'^', 'O', '\324'}, {'^', 'o', '\364'},
1133 - {'~', 'O', '\325'}, {'~', 'o', '\365'},
1134 - {'"', 'O', '\326'}, {'"', 'o', '\366'},
1135 - {'/', 'O', '\330'}, {'/', 'o', '\370'},
1136 - {'`', 'U', '\331'}, {'`', 'u', '\371'},
1137 - {'\'', 'U', '\332'}, {'\'', 'u', '\372'},
1138 - {'^', 'U', '\333'}, {'^', 'u', '\373'},
1139 - {'"', 'U', '\334'}, {'"', 'u', '\374'},
1140 - {'\'', 'Y', '\335'}, {'\'', 'y', '\375'},
1141 - {'T', 'H', '\336'}, {'t', 'h', '\376'},
1142 - {'s', 's', '\337'}, {'"', 'y', '\377'},
1143 - {'s', 'z', '\337'}, {'i', 'j', '\377'},
1144 + {'`', 'A', 0300}, {'`', 'a', 0340},
1145 + {'\'', 'A', 0301}, {'\'', 'a', 0341},
1146 + {'^', 'A', 0302}, {'^', 'a', 0342},
1147 + {'~', 'A', 0303}, {'~', 'a', 0343},
1148 + {'"', 'A', 0304}, {'"', 'a', 0344},
1149 + {'O', 'A', 0305}, {'o', 'a', 0345},
1150 + {'0', 'A', 0305}, {'0', 'a', 0345},
1151 + {'A', 'A', 0305}, {'a', 'a', 0345},
1152 + {'A', 'E', 0306}, {'a', 'e', 0346},
1153 + {',', 'C', 0307}, {',', 'c', 0347},
1154 + {'`', 'E', 0310}, {'`', 'e', 0350},
1155 + {'\'', 'E', 0311}, {'\'', 'e', 0351},
1156 + {'^', 'E', 0312}, {'^', 'e', 0352},
1157 + {'"', 'E', 0313}, {'"', 'e', 0353},
1158 + {'`', 'I', 0314}, {'`', 'i', 0354},
1159 + {'\'', 'I', 0315}, {'\'', 'i', 0355},
1160 + {'^', 'I', 0316}, {'^', 'i', 0356},
1161 + {'"', 'I', 0317}, {'"', 'i', 0357},
1162 + {'-', 'D', 0320}, {'-', 'd', 0360},
1163 + {'~', 'N', 0321}, {'~', 'n', 0361},
1164 + {'`', 'O', 0322}, {'`', 'o', 0362},
1165 + {'\'', 'O', 0323}, {'\'', 'o', 0363},
1166 + {'^', 'O', 0324}, {'^', 'o', 0364},
1167 + {'~', 'O', 0325}, {'~', 'o', 0365},
1168 + {'"', 'O', 0326}, {'"', 'o', 0366},
1169 + {'/', 'O', 0330}, {'/', 'o', 0370},
1170 + {'`', 'U', 0331}, {'`', 'u', 0371},
1171 + {'\'', 'U', 0332}, {'\'', 'u', 0372},
1172 + {'^', 'U', 0333}, {'^', 'u', 0373},
1173 + {'"', 'U', 0334}, {'"', 'u', 0374},
1174 + {'\'', 'Y', 0335}, {'\'', 'y', 0375},
1175 + {'T', 'H', 0336}, {'t', 'h', 0376},
1176 + {'s', 's', 0337}, {'"', 'y', 0377},
1177 + {'s', 'z', 0337}, {'i', 'j', 0377},
1178 };
1179
1180 unsigned int accent_table_size = 68;
1181 diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
1182 index 0713872..a742efa 100644
1183 --- a/drivers/ata/pata_hpt366.c
1184 +++ b/drivers/ata/pata_hpt366.c
1185 @@ -27,7 +27,7 @@
1186 #include <linux/libata.h>
1187
1188 #define DRV_NAME "pata_hpt366"
1189 -#define DRV_VERSION "0.6.1"
1190 +#define DRV_VERSION "0.6.2"
1191
1192 struct hpt_clock {
1193 u8 xfer_speed;
1194 @@ -180,9 +180,9 @@ static unsigned long hpt366_filter(struct ata_device *adev, unsigned long mask)
1195 if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33))
1196 mask &= ~ATA_MASK_UDMA;
1197 if (hpt_dma_blacklisted(adev, "UDMA3", bad_ata66_3))
1198 - mask &= ~(0x07 << ATA_SHIFT_UDMA);
1199 + mask &= ~(0xF8 << ATA_SHIFT_UDMA);
1200 if (hpt_dma_blacklisted(adev, "UDMA4", bad_ata66_4))
1201 - mask &= ~(0x0F << ATA_SHIFT_UDMA);
1202 + mask &= ~(0xF0 << ATA_SHIFT_UDMA);
1203 }
1204 return ata_pci_default_filter(adev, mask);
1205 }
1206 diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
1207 index c79f066..eac6a2b 100644
1208 --- a/drivers/ata/pata_hpt37x.c
1209 +++ b/drivers/ata/pata_hpt37x.c
1210 @@ -24,7 +24,7 @@
1211 #include <linux/libata.h>
1212
1213 #define DRV_NAME "pata_hpt37x"
1214 -#define DRV_VERSION "0.6.9"
1215 +#define DRV_VERSION "0.6.11"
1216
1217 struct hpt_clock {
1218 u8 xfer_speed;
1219 @@ -281,7 +281,7 @@ static unsigned long hpt370_filter(struct ata_device *adev, unsigned long mask)
1220 if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33))
1221 mask &= ~ATA_MASK_UDMA;
1222 if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
1223 - mask &= ~(0x1F << ATA_SHIFT_UDMA);
1224 + mask &= ~(0xE0 << ATA_SHIFT_UDMA);
1225 }
1226 return ata_pci_default_filter(adev, mask);
1227 }
1228 @@ -297,7 +297,7 @@ static unsigned long hpt370a_filter(struct ata_device *adev, unsigned long mask)
1229 {
1230 if (adev->class == ATA_DEV_ATA) {
1231 if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
1232 - mask &= ~ (0x1F << ATA_SHIFT_UDMA);
1233 + mask &= ~(0xE0 << ATA_SHIFT_UDMA);
1234 }
1235 return ata_pci_default_filter(adev, mask);
1236 }
1237 diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
1238 index 8bed888..004cac7 100644
1239 --- a/drivers/ata/pata_serverworks.c
1240 +++ b/drivers/ata/pata_serverworks.c
1241 @@ -226,7 +226,7 @@ static unsigned long serverworks_csb_filter(struct ata_device *adev, unsigned lo
1242
1243 for (i = 0; (p = csb_bad_ata100[i]) != NULL; i++) {
1244 if (!strcmp(p, model_num))
1245 - mask &= ~(0x1F << ATA_SHIFT_UDMA);
1246 + mask &= ~(0xE0 << ATA_SHIFT_UDMA);
1247 }
1248 return ata_pci_default_filter(adev, mask);
1249 }
1250 diff --git a/drivers/base/platform.c b/drivers/base/platform.c
1251 index fb56092..39d8b7b 100644
1252 --- a/drivers/base/platform.c
1253 +++ b/drivers/base/platform.c
1254 @@ -647,7 +647,7 @@ u64 dma_get_required_mask(struct device *dev)
1255 high_totalram += high_totalram - 1;
1256 mask = (((u64)high_totalram) << 32) + 0xffffffff;
1257 }
1258 - return mask & *dev->dma_mask;
1259 + return mask;
1260 }
1261 EXPORT_SYMBOL_GPL(dma_get_required_mask);
1262 #endif
1263 diff --git a/drivers/block/ub.c b/drivers/block/ub.c
1264 index 08e909d..7aca466 100644
1265 --- a/drivers/block/ub.c
1266 +++ b/drivers/block/ub.c
1267 @@ -657,7 +657,6 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
1268 if ((cmd = ub_get_cmd(lun)) == NULL)
1269 return -1;
1270 memset(cmd, 0, sizeof(struct ub_scsi_cmd));
1271 - sg_init_table(cmd->sgv, UB_MAX_REQ_SG);
1272
1273 blkdev_dequeue_request(rq);
1274
1275 @@ -668,6 +667,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
1276 /*
1277 * get scatterlist from block layer
1278 */
1279 + sg_init_table(&urq->sgv[0], UB_MAX_REQ_SG);
1280 n_elem = blk_rq_map_sg(lun->disk->queue, rq, &urq->sgv[0]);
1281 if (n_elem < 0) {
1282 /* Impossible, because blk_rq_map_sg should not hit ENOMEM. */
1283 diff --git a/drivers/char/defkeymap.c_shipped b/drivers/char/defkeymap.c_shipped
1284 index 0aa419a..d2208df 100644
1285 --- a/drivers/char/defkeymap.c_shipped
1286 +++ b/drivers/char/defkeymap.c_shipped
1287 @@ -223,40 +223,40 @@ char *func_table[MAX_NR_FUNC] = {
1288 };
1289
1290 struct kbdiacruc accent_table[MAX_DIACR] = {
1291 - {'`', 'A', '\300'}, {'`', 'a', '\340'},
1292 - {'\'', 'A', '\301'}, {'\'', 'a', '\341'},
1293 - {'^', 'A', '\302'}, {'^', 'a', '\342'},
1294 - {'~', 'A', '\303'}, {'~', 'a', '\343'},
1295 - {'"', 'A', '\304'}, {'"', 'a', '\344'},
1296 - {'O', 'A', '\305'}, {'o', 'a', '\345'},
1297 - {'0', 'A', '\305'}, {'0', 'a', '\345'},
1298 - {'A', 'A', '\305'}, {'a', 'a', '\345'},
1299 - {'A', 'E', '\306'}, {'a', 'e', '\346'},
1300 - {',', 'C', '\307'}, {',', 'c', '\347'},
1301 - {'`', 'E', '\310'}, {'`', 'e', '\350'},
1302 - {'\'', 'E', '\311'}, {'\'', 'e', '\351'},
1303 - {'^', 'E', '\312'}, {'^', 'e', '\352'},
1304 - {'"', 'E', '\313'}, {'"', 'e', '\353'},
1305 - {'`', 'I', '\314'}, {'`', 'i', '\354'},
1306 - {'\'', 'I', '\315'}, {'\'', 'i', '\355'},
1307 - {'^', 'I', '\316'}, {'^', 'i', '\356'},
1308 - {'"', 'I', '\317'}, {'"', 'i', '\357'},
1309 - {'-', 'D', '\320'}, {'-', 'd', '\360'},
1310 - {'~', 'N', '\321'}, {'~', 'n', '\361'},
1311 - {'`', 'O', '\322'}, {'`', 'o', '\362'},
1312 - {'\'', 'O', '\323'}, {'\'', 'o', '\363'},
1313 - {'^', 'O', '\324'}, {'^', 'o', '\364'},
1314 - {'~', 'O', '\325'}, {'~', 'o', '\365'},
1315 - {'"', 'O', '\326'}, {'"', 'o', '\366'},
1316 - {'/', 'O', '\330'}, {'/', 'o', '\370'},
1317 - {'`', 'U', '\331'}, {'`', 'u', '\371'},
1318 - {'\'', 'U', '\332'}, {'\'', 'u', '\372'},
1319 - {'^', 'U', '\333'}, {'^', 'u', '\373'},
1320 - {'"', 'U', '\334'}, {'"', 'u', '\374'},
1321 - {'\'', 'Y', '\335'}, {'\'', 'y', '\375'},
1322 - {'T', 'H', '\336'}, {'t', 'h', '\376'},
1323 - {'s', 's', '\337'}, {'"', 'y', '\377'},
1324 - {'s', 'z', '\337'}, {'i', 'j', '\377'},
1325 + {'`', 'A', 0300}, {'`', 'a', 0340},
1326 + {'\'', 'A', 0301}, {'\'', 'a', 0341},
1327 + {'^', 'A', 0302}, {'^', 'a', 0342},
1328 + {'~', 'A', 0303}, {'~', 'a', 0343},
1329 + {'"', 'A', 0304}, {'"', 'a', 0344},
1330 + {'O', 'A', 0305}, {'o', 'a', 0345},
1331 + {'0', 'A', 0305}, {'0', 'a', 0345},
1332 + {'A', 'A', 0305}, {'a', 'a', 0345},
1333 + {'A', 'E', 0306}, {'a', 'e', 0346},
1334 + {',', 'C', 0307}, {',', 'c', 0347},
1335 + {'`', 'E', 0310}, {'`', 'e', 0350},
1336 + {'\'', 'E', 0311}, {'\'', 'e', 0351},
1337 + {'^', 'E', 0312}, {'^', 'e', 0352},
1338 + {'"', 'E', 0313}, {'"', 'e', 0353},
1339 + {'`', 'I', 0314}, {'`', 'i', 0354},
1340 + {'\'', 'I', 0315}, {'\'', 'i', 0355},
1341 + {'^', 'I', 0316}, {'^', 'i', 0356},
1342 + {'"', 'I', 0317}, {'"', 'i', 0357},
1343 + {'-', 'D', 0320}, {'-', 'd', 0360},
1344 + {'~', 'N', 0321}, {'~', 'n', 0361},
1345 + {'`', 'O', 0322}, {'`', 'o', 0362},
1346 + {'\'', 'O', 0323}, {'\'', 'o', 0363},
1347 + {'^', 'O', 0324}, {'^', 'o', 0364},
1348 + {'~', 'O', 0325}, {'~', 'o', 0365},
1349 + {'"', 'O', 0326}, {'"', 'o', 0366},
1350 + {'/', 'O', 0330}, {'/', 'o', 0370},
1351 + {'`', 'U', 0331}, {'`', 'u', 0371},
1352 + {'\'', 'U', 0332}, {'\'', 'u', 0372},
1353 + {'^', 'U', 0333}, {'^', 'u', 0373},
1354 + {'"', 'U', 0334}, {'"', 'u', 0374},
1355 + {'\'', 'Y', 0335}, {'\'', 'y', 0375},
1356 + {'T', 'H', 0336}, {'t', 'h', 0376},
1357 + {'s', 's', 0337}, {'"', 'y', 0377},
1358 + {'s', 'z', 0337}, {'i', 'j', 0377},
1359 };
1360
1361 unsigned int accent_table_size = 68;
1362 diff --git a/drivers/char/vt.c b/drivers/char/vt.c
1363 index 7a5badf..93cfe4a 100644
1364 --- a/drivers/char/vt.c
1365 +++ b/drivers/char/vt.c
1366 @@ -702,6 +702,7 @@ void redraw_screen(struct vc_data *vc, int is_switch)
1367 if (is_switch) {
1368 set_leds();
1369 compute_shiftstate();
1370 + notify_update(vc);
1371 }
1372 }
1373
1374 diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
1375 index 45e7b46..8cf542b 100644
1376 --- a/drivers/dma/ioat_dma.c
1377 +++ b/drivers/dma/ioat_dma.c
1378 @@ -726,6 +726,7 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
1379
1380 if (new) {
1381 new->len = len;
1382 + new->async_tx.ack = 0;
1383 return &new->async_tx;
1384 } else
1385 return NULL;
1386 @@ -749,6 +750,7 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
1387
1388 if (new) {
1389 new->len = len;
1390 + new->async_tx.ack = 0;
1391 return &new->async_tx;
1392 } else
1393 return NULL;
1394 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
1395 index e4c94f9..c8d3ffb 100644
1396 --- a/drivers/message/fusion/mptsas.c
1397 +++ b/drivers/message/fusion/mptsas.c
1398 @@ -1699,6 +1699,11 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info,
1399 if (error)
1400 goto out_free_consistent;
1401
1402 + if (!buffer->NumPhys) {
1403 + error = -ENODEV;
1404 + goto out_free_consistent;
1405 + }
1406 +
1407 /* save config data */
1408 port_info->num_phys = buffer->NumPhys;
1409 port_info->phy_info = kcalloc(port_info->num_phys,
1410 diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
1411 index 9cc5a6b..55584ee 100644
1412 --- a/drivers/net/e1000e/netdev.c
1413 +++ b/drivers/net/e1000e/netdev.c
1414 @@ -1686,6 +1686,9 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
1415 else
1416 rctl |= E1000_RCTL_LPE;
1417
1418 + /* Enable hardware CRC frame stripping */
1419 + rctl |= E1000_RCTL_SECRC;
1420 +
1421 /* Setup buffer sizes */
1422 rctl &= ~E1000_RCTL_SZ_4096;
1423 rctl |= E1000_RCTL_BSEX;
1424 @@ -1751,9 +1754,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
1425
1426 /* Enable Packet split descriptors */
1427 rctl |= E1000_RCTL_DTYP_PS;
1428 -
1429 - /* Enable hardware CRC frame stripping */
1430 - rctl |= E1000_RCTL_SECRC;
1431
1432 psrctl |= adapter->rx_ps_bsize0 >>
1433 E1000_PSRCTL_BSIZE0_SHIFT;
1434 diff --git a/drivers/net/macb.c b/drivers/net/macb.c
1435 index e10528e..c796948 100644
1436 --- a/drivers/net/macb.c
1437 +++ b/drivers/net/macb.c
1438 @@ -148,7 +148,7 @@ static void macb_handle_link_change(struct net_device *dev)
1439
1440 if (phydev->duplex)
1441 reg |= MACB_BIT(FD);
1442 - if (phydev->speed)
1443 + if (phydev->speed == SPEED_100)
1444 reg |= MACB_BIT(SPD);
1445
1446 macb_writel(bp, NCFGR, reg);
1447 diff --git a/drivers/net/niu.c b/drivers/net/niu.c
1448 index 5f6beab..226dc54 100644
1449 --- a/drivers/net/niu.c
1450 +++ b/drivers/net/niu.c
1451 @@ -33,8 +33,8 @@
1452
1453 #define DRV_MODULE_NAME "niu"
1454 #define PFX DRV_MODULE_NAME ": "
1455 -#define DRV_MODULE_VERSION "0.6"
1456 -#define DRV_MODULE_RELDATE "January 5, 2008"
1457 +#define DRV_MODULE_VERSION "0.7"
1458 +#define DRV_MODULE_RELDATE "February 18, 2008"
1459
1460 static char version[] __devinitdata =
1461 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
1462 @@ -1616,12 +1616,13 @@ static int niu_enable_alt_mac(struct niu *np, int index, int on)
1463 if (index >= niu_num_alt_addr(np))
1464 return -EINVAL;
1465
1466 - if (np->flags & NIU_FLAGS_XMAC)
1467 + if (np->flags & NIU_FLAGS_XMAC) {
1468 reg = XMAC_ADDR_CMPEN;
1469 - else
1470 + mask = 1 << index;
1471 + } else {
1472 reg = BMAC_ADDR_CMPEN;
1473 -
1474 - mask = 1 << index;
1475 + mask = 1 << (index + 1);
1476 + }
1477
1478 val = nr64_mac(reg);
1479 if (on)
1480 @@ -5147,7 +5148,12 @@ static void niu_set_rx_mode(struct net_device *dev)
1481 index++;
1482 }
1483 } else {
1484 - for (i = 0; i < niu_num_alt_addr(np); i++) {
1485 + int alt_start;
1486 + if (np->flags & NIU_FLAGS_XMAC)
1487 + alt_start = 0;
1488 + else
1489 + alt_start = 1;
1490 + for (i = alt_start; i < niu_num_alt_addr(np); i++) {
1491 err = niu_enable_alt_mac(np, i, 0);
1492 if (err)
1493 printk(KERN_WARNING PFX "%s: Error %d "
1494 diff --git a/drivers/net/niu.h b/drivers/net/niu.h
1495 index 0e8626a..59dc05f 100644
1496 --- a/drivers/net/niu.h
1497 +++ b/drivers/net/niu.h
1498 @@ -499,7 +499,7 @@
1499 #define BMAC_ADDR2 0x00110UL
1500 #define BMAC_ADDR2_ADDR2 0x000000000000ffffULL
1501
1502 -#define BMAC_NUM_ALT_ADDR 7
1503 +#define BMAC_NUM_ALT_ADDR 6
1504
1505 #define BMAC_ALT_ADDR0(NUM) (0x00118UL + (NUM)*0x18UL)
1506 #define BMAC_ALT_ADDR0_ADDR0 0x000000000000ffffULL
1507 diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
1508 index 559a9a9..ddcc0c4 100644
1509 --- a/drivers/net/wireless/b43/dma.c
1510 +++ b/drivers/net/wireless/b43/dma.c
1511 @@ -165,7 +165,7 @@ static void op64_fill_descriptor(struct b43_dmaring *ring,
1512 addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
1513 addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
1514 >> SSB_DMA_TRANSLATION_SHIFT;
1515 - addrhi |= ssb_dma_translation(ring->dev->dev);
1516 + addrhi |= (ssb_dma_translation(ring->dev->dev) << 1);
1517 if (slot == ring->nr_slots - 1)
1518 ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
1519 if (start)
1520 @@ -426,9 +426,21 @@ static inline
1521 static int alloc_ringmemory(struct b43_dmaring *ring)
1522 {
1523 struct device *dev = ring->dev->dev->dev;
1524 -
1525 + gfp_t flags = GFP_KERNEL;
1526 +
1527 + /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
1528 + * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
1529 + * has shown that 4K is sufficient for the latter as long as the buffer
1530 + * does not cross an 8K boundary.
1531 + *
1532 + * For unknown reasons - possibly a hardware error - the BCM4311 rev
1533 + * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
1534 + * which accounts for the GFP_DMA flag below.
1535 + */
1536 + if (ring->dma64)
1537 + flags |= GFP_DMA;
1538 ring->descbase = dma_alloc_coherent(dev, B43_DMA_RINGMEMSIZE,
1539 - &(ring->dmabase), GFP_KERNEL);
1540 + &(ring->dmabase), flags);
1541 if (!ring->descbase) {
1542 b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
1543 return -ENOMEM;
1544 @@ -483,7 +495,7 @@ int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
1545 return 0;
1546 }
1547
1548 -/* Reset the RX DMA channel */
1549 +/* Reset the TX DMA channel */
1550 int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
1551 {
1552 int i;
1553 @@ -647,7 +659,7 @@ static int dmacontroller_setup(struct b43_dmaring *ring)
1554 b43_dma_write(ring, B43_DMA64_TXRINGHI,
1555 ((ringbase >> 32) &
1556 ~SSB_DMA_TRANSLATION_MASK)
1557 - | trans);
1558 + | (trans << 1));
1559 } else {
1560 u32 ringbase = (u32) (ring->dmabase);
1561
1562 @@ -680,8 +692,9 @@ static int dmacontroller_setup(struct b43_dmaring *ring)
1563 b43_dma_write(ring, B43_DMA64_RXRINGHI,
1564 ((ringbase >> 32) &
1565 ~SSB_DMA_TRANSLATION_MASK)
1566 - | trans);
1567 - b43_dma_write(ring, B43_DMA64_RXINDEX, 200);
1568 + | (trans << 1));
1569 + b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
1570 + sizeof(struct b43_dmadesc64));
1571 } else {
1572 u32 ringbase = (u32) (ring->dmabase);
1573
1574 @@ -695,11 +708,12 @@ static int dmacontroller_setup(struct b43_dmaring *ring)
1575 b43_dma_write(ring, B43_DMA32_RXRING,
1576 (ringbase & ~SSB_DMA_TRANSLATION_MASK)
1577 | trans);
1578 - b43_dma_write(ring, B43_DMA32_RXINDEX, 200);
1579 + b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
1580 + sizeof(struct b43_dmadesc32));
1581 }
1582 }
1583
1584 - out:
1585 +out:
1586 return err;
1587 }
1588
1589 diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
1590 index 69795fd..36a1de2 100644
1591 --- a/drivers/net/wireless/b43/main.c
1592 +++ b/drivers/net/wireless/b43/main.c
1593 @@ -101,6 +101,7 @@ static const struct ssb_device_id b43_ssb_tbl[] = {
1594 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 7),
1595 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 9),
1596 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 10),
1597 + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 13),
1598 SSB_DEVTABLE_END
1599 };
1600
1601 @@ -3079,7 +3080,7 @@ static int b43_phy_versioning(struct b43_wldev *dev)
1602 unsupported = 1;
1603 break;
1604 case B43_PHYTYPE_G:
1605 - if (phy_rev > 8)
1606 + if (phy_rev > 9)
1607 unsupported = 1;
1608 break;
1609 default:
1610 diff --git a/drivers/s390/char/defkeymap.c b/drivers/s390/char/defkeymap.c
1611 index 389346c..07c7f31 100644
1612 --- a/drivers/s390/char/defkeymap.c
1613 +++ b/drivers/s390/char/defkeymap.c
1614 @@ -151,8 +151,8 @@ char *func_table[MAX_NR_FUNC] = {
1615 };
1616
1617 struct kbdiacruc accent_table[MAX_DIACR] = {
1618 - {'^', 'c', '\003'}, {'^', 'd', '\004'},
1619 - {'^', 'z', '\032'}, {'^', '\012', '\000'},
1620 + {'^', 'c', 0003}, {'^', 'd', 0004},
1621 + {'^', 'z', 0032}, {'^', 0012, 0000},
1622 };
1623
1624 unsigned int accent_table_size = 4;
1625 diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
1626 index 38a1ee2..f40417b 100644
1627 --- a/drivers/scsi/advansys.c
1628 +++ b/drivers/scsi/advansys.c
1629 @@ -566,7 +566,7 @@ typedef struct asc_dvc_var {
1630 ASC_SCSI_BIT_ID_TYPE unit_not_ready;
1631 ASC_SCSI_BIT_ID_TYPE queue_full_or_busy;
1632 ASC_SCSI_BIT_ID_TYPE start_motor;
1633 - uchar overrun_buf[ASC_OVERRUN_BSIZE] __aligned(8);
1634 + uchar *overrun_buf;
1635 dma_addr_t overrun_dma;
1636 uchar scsi_reset_wait;
1637 uchar chip_no;
1638 @@ -6439,7 +6439,7 @@ static int AdvLoadMicrocode(AdvPortAddr iop_base, unsigned char *buf, int size,
1639 i += 2;
1640 len += 2;
1641 } else {
1642 - unsigned char off = buf[i] * 2;
1643 + unsigned int off = buf[i] * 2;
1644 unsigned short word = (buf[off + 1] << 8) | buf[off];
1645 AdvWriteWordAutoIncLram(iop_base, word);
1646 len += 2;
1647 @@ -13833,6 +13833,12 @@ static int __devinit advansys_board_found(struct Scsi_Host *shost,
1648 */
1649 if (ASC_NARROW_BOARD(boardp)) {
1650 ASC_DBG(2, "AscInitAsc1000Driver()\n");
1651 +
1652 + asc_dvc_varp->overrun_buf = kzalloc(ASC_OVERRUN_BSIZE, GFP_KERNEL);
1653 + if (!asc_dvc_varp->overrun_buf) {
1654 + ret = -ENOMEM;
1655 + goto err_free_wide_mem;
1656 + }
1657 warn_code = AscInitAsc1000Driver(asc_dvc_varp);
1658
1659 if (warn_code || asc_dvc_varp->err_code) {
1660 @@ -13840,8 +13846,10 @@ static int __devinit advansys_board_found(struct Scsi_Host *shost,
1661 "warn 0x%x, error 0x%x\n",
1662 asc_dvc_varp->init_state, warn_code,
1663 asc_dvc_varp->err_code);
1664 - if (asc_dvc_varp->err_code)
1665 + if (asc_dvc_varp->err_code) {
1666 ret = -ENODEV;
1667 + kfree(asc_dvc_varp->overrun_buf);
1668 + }
1669 }
1670 } else {
1671 if (advansys_wide_init_chip(shost))
1672 @@ -13894,6 +13902,7 @@ static int advansys_release(struct Scsi_Host *shost)
1673 dma_unmap_single(board->dev,
1674 board->dvc_var.asc_dvc_var.overrun_dma,
1675 ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE);
1676 + kfree(board->dvc_var.asc_dvc_var.overrun_buf);
1677 } else {
1678 iounmap(board->ioremap_addr);
1679 advansys_wide_free_mem(board);
1680 diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
1681 index db6ab1a..eae2d97 100644
1682 --- a/drivers/scsi/aic94xx/aic94xx_scb.c
1683 +++ b/drivers/scsi/aic94xx/aic94xx_scb.c
1684 @@ -458,13 +458,19 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
1685 tc_abort = le16_to_cpu(tc_abort);
1686
1687 list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list) {
1688 - struct sas_task *task = ascb->uldd_task;
1689 + struct sas_task *task = a->uldd_task;
1690 +
1691 + if (a->tc_index != tc_abort)
1692 + continue;
1693
1694 - if (task && a->tc_index == tc_abort) {
1695 + if (task) {
1696 failed_dev = task->dev;
1697 sas_task_abort(task);
1698 - break;
1699 + } else {
1700 + ASD_DPRINTK("R_T_A for non TASK scb 0x%x\n",
1701 + a->scb->header.opcode);
1702 }
1703 + break;
1704 }
1705
1706 if (!failed_dev) {
1707 @@ -478,7 +484,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
1708 * that the EH will wake up and do something.
1709 */
1710 list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list) {
1711 - struct sas_task *task = ascb->uldd_task;
1712 + struct sas_task *task = a->uldd_task;
1713
1714 if (task &&
1715 task->dev == failed_dev &&
1716 diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
1717 index d466a2d..dcd6c9a 100644
1718 --- a/drivers/scsi/arcmsr/arcmsr_hba.c
1719 +++ b/drivers/scsi/arcmsr/arcmsr_hba.c
1720 @@ -1380,17 +1380,16 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
1721 switch(controlcode) {
1722
1723 case ARCMSR_MESSAGE_READ_RQBUFFER: {
1724 - unsigned long *ver_addr;
1725 - dma_addr_t buf_handle;
1726 + unsigned char *ver_addr;
1727 uint8_t *pQbuffer, *ptmpQbuffer;
1728 int32_t allxfer_len = 0;
1729
1730 - ver_addr = pci_alloc_consistent(acb->pdev, 1032, &buf_handle);
1731 + ver_addr = kmalloc(1032, GFP_ATOMIC);
1732 if (!ver_addr) {
1733 retvalue = ARCMSR_MESSAGE_FAIL;
1734 goto message_out;
1735 }
1736 - ptmpQbuffer = (uint8_t *) ver_addr;
1737 + ptmpQbuffer = ver_addr;
1738 while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
1739 && (allxfer_len < 1031)) {
1740 pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
1741 @@ -1419,25 +1418,24 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
1742 }
1743 arcmsr_iop_message_read(acb);
1744 }
1745 - memcpy(pcmdmessagefld->messagedatabuffer, (uint8_t *)ver_addr, allxfer_len);
1746 + memcpy(pcmdmessagefld->messagedatabuffer, ver_addr, allxfer_len);
1747 pcmdmessagefld->cmdmessage.Length = allxfer_len;
1748 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
1749 - pci_free_consistent(acb->pdev, 1032, ver_addr, buf_handle);
1750 + kfree(ver_addr);
1751 }
1752 break;
1753
1754 case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
1755 - unsigned long *ver_addr;
1756 - dma_addr_t buf_handle;
1757 + unsigned char *ver_addr;
1758 int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
1759 uint8_t *pQbuffer, *ptmpuserbuffer;
1760
1761 - ver_addr = pci_alloc_consistent(acb->pdev, 1032, &buf_handle);
1762 + ver_addr = kmalloc(1032, GFP_ATOMIC);
1763 if (!ver_addr) {
1764 retvalue = ARCMSR_MESSAGE_FAIL;
1765 goto message_out;
1766 }
1767 - ptmpuserbuffer = (uint8_t *)ver_addr;
1768 + ptmpuserbuffer = ver_addr;
1769 user_len = pcmdmessagefld->cmdmessage.Length;
1770 memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len);
1771 wqbuf_lastindex = acb->wqbuf_lastindex;
1772 @@ -1483,7 +1481,7 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
1773 retvalue = ARCMSR_MESSAGE_FAIL;
1774 }
1775 }
1776 - pci_free_consistent(acb->pdev, 1032, ver_addr, buf_handle);
1777 + kfree(ver_addr);
1778 }
1779 break;
1780
1781 diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
1782 index 8eb78be..b8b67f6 100644
1783 --- a/drivers/scsi/gdth.c
1784 +++ b/drivers/scsi/gdth.c
1785 @@ -160,7 +160,7 @@ static void gdth_readapp_event(gdth_ha_str *ha, unchar application,
1786 static void gdth_clear_events(void);
1787
1788 static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
1789 - char *buffer, ushort count, int to_buffer);
1790 + char *buffer, ushort count);
1791 static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp);
1792 static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive);
1793
1794 @@ -183,7 +183,6 @@ static int gdth_ioctl(struct inode *inode, struct file *filep,
1795 unsigned int cmd, unsigned long arg);
1796
1797 static void gdth_flush(gdth_ha_str *ha);
1798 -static int gdth_halt(struct notifier_block *nb, ulong event, void *buf);
1799 static int gdth_queuecommand(Scsi_Cmnd *scp,void (*done)(Scsi_Cmnd *));
1800 static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp,
1801 struct gdth_cmndinfo *cmndinfo);
1802 @@ -418,12 +417,6 @@ static inline void gdth_set_sglist(struct scsi_cmnd *cmd,
1803 #include "gdth_proc.h"
1804 #include "gdth_proc.c"
1805
1806 -/* notifier block to get a notify on system shutdown/halt/reboot */
1807 -static struct notifier_block gdth_notifier = {
1808 - gdth_halt, NULL, 0
1809 -};
1810 -static int notifier_disabled = 0;
1811 -
1812 static gdth_ha_str *gdth_find_ha(int hanum)
1813 {
1814 gdth_ha_str *ha;
1815 @@ -446,8 +439,8 @@ static struct gdth_cmndinfo *gdth_get_cmndinfo(gdth_ha_str *ha)
1816 for (i=0; i<GDTH_MAXCMDS; ++i) {
1817 if (ha->cmndinfo[i].index == 0) {
1818 priv = &ha->cmndinfo[i];
1819 - priv->index = i+1;
1820 memset(priv, 0, sizeof(*priv));
1821 + priv->index = i+1;
1822 break;
1823 }
1824 }
1825 @@ -494,7 +487,6 @@ int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
1826 gdth_ha_str *ha = shost_priv(sdev->host);
1827 Scsi_Cmnd *scp;
1828 struct gdth_cmndinfo cmndinfo;
1829 - struct scatterlist one_sg;
1830 DECLARE_COMPLETION_ONSTACK(wait);
1831 int rval;
1832
1833 @@ -508,13 +500,10 @@ int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
1834 /* use request field to save the ptr. to completion struct. */
1835 scp->request = (struct request *)&wait;
1836 scp->timeout_per_command = timeout*HZ;
1837 - sg_init_one(&one_sg, gdtcmd, sizeof(*gdtcmd));
1838 - gdth_set_sglist(scp, &one_sg);
1839 - gdth_set_sg_count(scp, 1);
1840 - gdth_set_bufflen(scp, sizeof(*gdtcmd));
1841 scp->cmd_len = 12;
1842 memcpy(scp->cmnd, cmnd, 12);
1843 cmndinfo.priority = IOCTL_PRI;
1844 + cmndinfo.internal_cmd_str = gdtcmd;
1845 cmndinfo.internal_command = 1;
1846
1847 TRACE(("__gdth_execute() cmd 0x%x\n", scp->cmnd[0]));
1848 @@ -2355,7 +2344,7 @@ static void gdth_next(gdth_ha_str *ha)
1849 * buffers, kmap_atomic() as needed.
1850 */
1851 static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
1852 - char *buffer, ushort count, int to_buffer)
1853 + char *buffer, ushort count)
1854 {
1855 ushort cpcount,i, max_sg = gdth_sg_count(scp);
1856 ushort cpsum,cpnow;
1857 @@ -2381,10 +2370,7 @@ static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
1858 }
1859 local_irq_save(flags);
1860 address = kmap_atomic(sg_page(sl), KM_BIO_SRC_IRQ) + sl->offset;
1861 - if (to_buffer)
1862 - memcpy(buffer, address, cpnow);
1863 - else
1864 - memcpy(address, buffer, cpnow);
1865 + memcpy(address, buffer, cpnow);
1866 flush_dcache_page(sg_page(sl));
1867 kunmap_atomic(address, KM_BIO_SRC_IRQ);
1868 local_irq_restore(flags);
1869 @@ -2438,7 +2424,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
1870 strcpy(inq.vendor,ha->oem_name);
1871 sprintf(inq.product,"Host Drive #%02d",t);
1872 strcpy(inq.revision," ");
1873 - gdth_copy_internal_data(ha, scp, (char*)&inq, sizeof(gdth_inq_data), 0);
1874 + gdth_copy_internal_data(ha, scp, (char*)&inq, sizeof(gdth_inq_data));
1875 break;
1876
1877 case REQUEST_SENSE:
1878 @@ -2448,7 +2434,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
1879 sd.key = NO_SENSE;
1880 sd.info = 0;
1881 sd.add_length= 0;
1882 - gdth_copy_internal_data(ha, scp, (char*)&sd, sizeof(gdth_sense_data), 0);
1883 + gdth_copy_internal_data(ha, scp, (char*)&sd, sizeof(gdth_sense_data));
1884 break;
1885
1886 case MODE_SENSE:
1887 @@ -2460,7 +2446,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
1888 mpd.bd.block_length[0] = (SECTOR_SIZE & 0x00ff0000) >> 16;
1889 mpd.bd.block_length[1] = (SECTOR_SIZE & 0x0000ff00) >> 8;
1890 mpd.bd.block_length[2] = (SECTOR_SIZE & 0x000000ff);
1891 - gdth_copy_internal_data(ha, scp, (char*)&mpd, sizeof(gdth_modep_data), 0);
1892 + gdth_copy_internal_data(ha, scp, (char*)&mpd, sizeof(gdth_modep_data));
1893 break;
1894
1895 case READ_CAPACITY:
1896 @@ -2470,7 +2456,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
1897 else
1898 rdc.last_block_no = cpu_to_be32(ha->hdr[t].size-1);
1899 rdc.block_length = cpu_to_be32(SECTOR_SIZE);
1900 - gdth_copy_internal_data(ha, scp, (char*)&rdc, sizeof(gdth_rdcap_data), 0);
1901 + gdth_copy_internal_data(ha, scp, (char*)&rdc, sizeof(gdth_rdcap_data));
1902 break;
1903
1904 case SERVICE_ACTION_IN:
1905 @@ -2482,7 +2468,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
1906 rdc16.last_block_no = cpu_to_be64(ha->hdr[t].size-1);
1907 rdc16.block_length = cpu_to_be32(SECTOR_SIZE);
1908 gdth_copy_internal_data(ha, scp, (char*)&rdc16,
1909 - sizeof(gdth_rdcap16_data), 0);
1910 + sizeof(gdth_rdcap16_data));
1911 } else {
1912 scp->result = DID_ABORT << 16;
1913 }
1914 @@ -2852,6 +2838,7 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b)
1915 static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
1916 {
1917 register gdth_cmd_str *cmdp;
1918 + struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
1919 int cmd_index;
1920
1921 cmdp= ha->pccb;
1922 @@ -2860,7 +2847,7 @@ static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
1923 if (ha->type==GDT_EISA && ha->cmd_cnt>0)
1924 return 0;
1925
1926 - gdth_copy_internal_data(ha, scp, (char *)cmdp, sizeof(gdth_cmd_str), 1);
1927 + *cmdp = *cmndinfo->internal_cmd_str;
1928 cmdp->RequestBuffer = scp;
1929
1930 /* search free command index */
1931 @@ -3793,6 +3780,8 @@ static void gdth_timeout(ulong data)
1932 gdth_ha_str *ha;
1933 ulong flags;
1934
1935 + BUG_ON(list_empty(&gdth_instances));
1936 +
1937 ha = list_first_entry(&gdth_instances, gdth_ha_str, list);
1938 spin_lock_irqsave(&ha->smp_lock, flags);
1939
1940 @@ -4668,45 +4657,6 @@ static void gdth_flush(gdth_ha_str *ha)
1941 }
1942 }
1943
1944 -/* shutdown routine */
1945 -static int gdth_halt(struct notifier_block *nb, ulong event, void *buf)
1946 -{
1947 - gdth_ha_str *ha;
1948 -#ifndef __alpha__
1949 - gdth_cmd_str gdtcmd;
1950 - char cmnd[MAX_COMMAND_SIZE];
1951 -#endif
1952 -
1953 - if (notifier_disabled)
1954 - return NOTIFY_OK;
1955 -
1956 - TRACE2(("gdth_halt() event %d\n",(int)event));
1957 - if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
1958 - return NOTIFY_DONE;
1959 -
1960 - notifier_disabled = 1;
1961 - printk("GDT-HA: Flushing all host drives .. ");
1962 - list_for_each_entry(ha, &gdth_instances, list) {
1963 - gdth_flush(ha);
1964 -
1965 -#ifndef __alpha__
1966 - /* controller reset */
1967 - memset(cmnd, 0xff, MAX_COMMAND_SIZE);
1968 - gdtcmd.BoardNode = LOCALBOARD;
1969 - gdtcmd.Service = CACHESERVICE;
1970 - gdtcmd.OpCode = GDT_RESET;
1971 - TRACE2(("gdth_halt(): reset controller %d\n", ha->hanum));
1972 - gdth_execute(ha->shost, &gdtcmd, cmnd, 10, NULL);
1973 -#endif
1974 - }
1975 - printk("Done.\n");
1976 -
1977 -#ifdef GDTH_STATISTICS
1978 - del_timer(&gdth_timer);
1979 -#endif
1980 - return NOTIFY_OK;
1981 -}
1982 -
1983 /* configure lun */
1984 static int gdth_slave_configure(struct scsi_device *sdev)
1985 {
1986 @@ -5141,13 +5091,13 @@ static void gdth_remove_one(gdth_ha_str *ha)
1987
1988 scsi_remove_host(shp);
1989
1990 + gdth_flush(ha);
1991 +
1992 if (ha->sdev) {
1993 scsi_free_host_dev(ha->sdev);
1994 ha->sdev = NULL;
1995 }
1996
1997 - gdth_flush(ha);
1998 -
1999 if (shp->irq)
2000 free_irq(shp->irq,ha);
2001
2002 @@ -5173,6 +5123,24 @@ static void gdth_remove_one(gdth_ha_str *ha)
2003 scsi_host_put(shp);
2004 }
2005
2006 +static int gdth_halt(struct notifier_block *nb, ulong event, void *buf)
2007 +{
2008 + gdth_ha_str *ha;
2009 +
2010 + TRACE2(("gdth_halt() event %d\n", (int)event));
2011 + if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
2012 + return NOTIFY_DONE;
2013 +
2014 + list_for_each_entry(ha, &gdth_instances, list)
2015 + gdth_flush(ha);
2016 +
2017 + return NOTIFY_OK;
2018 +}
2019 +
2020 +static struct notifier_block gdth_notifier = {
2021 + gdth_halt, NULL, 0
2022 +};
2023 +
2024 static int __init gdth_init(void)
2025 {
2026 if (disable) {
2027 @@ -5235,7 +5203,6 @@ static int __init gdth_init(void)
2028 add_timer(&gdth_timer);
2029 #endif
2030 major = register_chrdev(0,"gdth", &gdth_fops);
2031 - notifier_disabled = 0;
2032 register_reboot_notifier(&gdth_notifier);
2033 gdth_polling = FALSE;
2034 return 0;
2035 @@ -5245,14 +5212,15 @@ static void __exit gdth_exit(void)
2036 {
2037 gdth_ha_str *ha;
2038
2039 - list_for_each_entry(ha, &gdth_instances, list)
2040 - gdth_remove_one(ha);
2041 + unregister_chrdev(major, "gdth");
2042 + unregister_reboot_notifier(&gdth_notifier);
2043
2044 #ifdef GDTH_STATISTICS
2045 - del_timer(&gdth_timer);
2046 + del_timer_sync(&gdth_timer);
2047 #endif
2048 - unregister_chrdev(major,"gdth");
2049 - unregister_reboot_notifier(&gdth_notifier);
2050 +
2051 + list_for_each_entry(ha, &gdth_instances, list)
2052 + gdth_remove_one(ha);
2053 }
2054
2055 module_init(gdth_init);
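In the gdth_get_cmndinfo hunk above, the memset of the per-command slot is moved before priv->index = i+1; in the old order the memset wiped out the index that had just been written, so the slot was never marked as reserved. A tiny standalone sketch of the difference (the struct fields here are illustrative):

    #include <stdio.h>
    #include <string.h>

    struct cmndinfo { int index; int priority; };   /* illustrative fields */

    int main(void)
    {
            struct cmndinfo old_order, new_order;

            old_order.index = 5;                       /* mark slot as taken ... */
            memset(&old_order, 0, sizeof(old_order));  /* ... then wipe it again */

            memset(&new_order, 0, sizeof(new_order));  /* clear first */
            new_order.index = 5;                       /* then mark as taken */

            printf("old order: index=%d, fixed order: index=%d\n",
                   old_order.index, new_order.index);  /* 0 vs 5 */
            return 0;
    }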
2056 diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
2057 index 1434c6b..26e4e92 100644
2058 --- a/drivers/scsi/gdth.h
2059 +++ b/drivers/scsi/gdth.h
2060 @@ -915,6 +915,7 @@ typedef struct {
2061 struct gdth_cmndinfo { /* per-command private info */
2062 int index;
2063 int internal_command; /* don't call scsi_done */
2064 + gdth_cmd_str *internal_cmd_str; /* crier for internal messages*/
2065 dma_addr_t sense_paddr; /* sense dma-addr */
2066 unchar priority;
2067 int timeout;
2068 diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
2069 index de57734..ce0228e 100644
2070 --- a/drivers/scsi/gdth_proc.c
2071 +++ b/drivers/scsi/gdth_proc.c
2072 @@ -694,15 +694,13 @@ static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr)
2073 {
2074 ulong flags;
2075
2076 - spin_lock_irqsave(&ha->smp_lock, flags);
2077 -
2078 if (buf == ha->pscratch) {
2079 + spin_lock_irqsave(&ha->smp_lock, flags);
2080 ha->scratch_busy = FALSE;
2081 + spin_unlock_irqrestore(&ha->smp_lock, flags);
2082 } else {
2083 pci_free_consistent(ha->pdev, size, buf, paddr);
2084 }
2085 -
2086 - spin_unlock_irqrestore(&ha->smp_lock, flags);
2087 }
2088
2089 #ifdef GDTH_IOCTL_PROC
2090 diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
2091 index 5c5a9b2..f4e9c8d 100644
2092 --- a/drivers/scsi/ips.c
2093 +++ b/drivers/scsi/ips.c
2094 @@ -1580,7 +1580,7 @@ ips_make_passthru(ips_ha_t *ha, struct scsi_cmnd *SC, ips_scb_t *scb, int intr)
2095 METHOD_TRACE("ips_make_passthru", 1);
2096
2097 scsi_for_each_sg(SC, sg, scsi_sg_count(SC), i)
2098 - length += sg[i].length;
2099 + length += sg->length;
2100
2101 if (length < sizeof (ips_passthru_t)) {
2102 /* wrong size */
2103 @@ -6842,13 +6842,10 @@ ips_register_scsi(int index)
2104 if (request_irq(ha->irq, do_ipsintr, IRQF_SHARED, ips_name, ha)) {
2105 IPS_PRINTK(KERN_WARNING, ha->pcidev,
2106 "Unable to install interrupt handler\n");
2107 - scsi_host_put(sh);
2108 - return -1;
2109 + goto err_out_sh;
2110 }
2111
2112 kfree(oldha);
2113 - ips_sh[index] = sh;
2114 - ips_ha[index] = ha;
2115
2116 /* Store away needed values for later use */
2117 sh->io_port = ha->io_addr;
2118 @@ -6867,10 +6864,21 @@ ips_register_scsi(int index)
2119 sh->max_channel = ha->nbus - 1;
2120 sh->can_queue = ha->max_cmds - 1;
2121
2122 - scsi_add_host(sh, NULL);
2123 + if (scsi_add_host(sh, &ha->pcidev->dev))
2124 + goto err_out;
2125 +
2126 + ips_sh[index] = sh;
2127 + ips_ha[index] = ha;
2128 +
2129 scsi_scan_host(sh);
2130
2131 return 0;
2132 +
2133 +err_out:
2134 + free_irq(ha->pcidev->irq, ha);
2135 +err_out_sh:
2136 + scsi_host_put(sh);
2137 + return -1;
2138 }
2139
2140 /*---------------------------------------------------------------------------*/
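The ips_register_scsi change above reworks the failure paths into the usual goto-unwind shape: a failed request_irq or scsi_add_host jumps to a label that releases only what was already acquired, in reverse order, and ips_sh[index]/ips_ha[index] are published only after scsi_add_host has succeeded. A small userspace sketch of that unwind idiom, with made-up resources standing in for the IRQ and the host:

    #include <stdio.h>
    #include <stdlib.h>

    static int register_example(void)
    {
            char *buf;
            FILE *log;

            buf = malloc(4096);                   /* first resource */
            if (!buf)
                    goto err_out;

            log = fopen("/tmp/example.log", "w"); /* second resource */
            if (!log)
                    goto err_free_buf;

            /* success: only now would the object be published to users */
            fclose(log);
            free(buf);
            return 0;

    err_free_buf:
            free(buf);                            /* undo in reverse order */
    err_out:
            return -1;
    }

    int main(void)
    {
            return register_example() ? 1 : 0;
    }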
2141 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
2142 index a9ac5b1..273728e 100644
2143 --- a/drivers/scsi/scsi_lib.c
2144 +++ b/drivers/scsi/scsi_lib.c
2145 @@ -298,7 +298,6 @@ static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
2146 page = sg_page(sg);
2147 off = sg->offset;
2148 len = sg->length;
2149 - data_len += len;
2150
2151 while (len > 0 && data_len > 0) {
2152 /*
2153 diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
2154 index ff10808..e9d7959 100644
2155 --- a/drivers/spi/atmel_spi.c
2156 +++ b/drivers/spi/atmel_spi.c
2157 @@ -85,6 +85,16 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
2158 unsigned gpio = (unsigned) spi->controller_data;
2159 unsigned active = spi->mode & SPI_CS_HIGH;
2160 u32 mr;
2161 + int i;
2162 + u32 csr;
2163 + u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0;
2164 +
2165 + /* Make sure clock polarity is correct */
2166 + for (i = 0; i < spi->master->num_chipselect; i++) {
2167 + csr = spi_readl(as, CSR0 + 4 * i);
2168 + if ((csr ^ cpol) & SPI_BIT(CPOL))
2169 + spi_writel(as, CSR0 + 4 * i, csr ^ SPI_BIT(CPOL));
2170 + }
2171
2172 mr = spi_readl(as, MR);
2173 mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr);
2174 diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
2175 index 1c2ab54..840e682 100644
2176 --- a/drivers/spi/pxa2xx_spi.c
2177 +++ b/drivers/spi/pxa2xx_spi.c
2178 @@ -48,13 +48,19 @@ MODULE_LICENSE("GPL");
2179 #define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK)
2180 #define IS_DMA_ALIGNED(x) (((u32)(x)&0x07)==0)
2181
2182 -/* for testing SSCR1 changes that require SSP restart, basically
2183 - * everything except the service and interrupt enables */
2184 -#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_EBCEI | SSCR1_SCFR \
2185 +/*
2186 + * for testing SSCR1 changes that require SSP restart, basically
2187 + * everything except the service and interrupt enables, the pxa270 developer
2188 + * manual says only SSCR1_SCFR, SSCR1_SPH, SSCR1_SPO need to be in this
2189 + * list, but the PXA255 dev man says all bits without really meaning the
2190 + * service and interrupt enables
2191 + */
2192 +#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
2193 | SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
2194 - | SSCR1_RWOT | SSCR1_TRAIL | SSCR1_PINTE \
2195 - | SSCR1_STRF | SSCR1_EFWR |SSCR1_RFT \
2196 - | SSCR1_TFT | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
2197 + | SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
2198 + | SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
2199 + | SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
2200 + | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
2201
2202 #define DEFINE_SSP_REG(reg, off) \
2203 static inline u32 read_##reg(void *p) { return __raw_readl(p + (off)); } \
2204 @@ -961,9 +967,6 @@ static void pump_transfers(unsigned long data)
2205 if (drv_data->ssp_type == PXA25x_SSP)
2206 DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN;
2207
2208 - /* Fix me, need to handle cs polarity */
2209 - drv_data->cs_control(PXA2XX_CS_ASSERT);
2210 -
2211 /* Clear status and start DMA engine */
2212 cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
2213 write_SSSR(drv_data->clear_sr, reg);
2214 @@ -973,9 +976,6 @@ static void pump_transfers(unsigned long data)
2215 /* Ensure we have the correct interrupt handler */
2216 drv_data->transfer_handler = interrupt_transfer;
2217
2218 - /* Fix me, need to handle cs polarity */
2219 - drv_data->cs_control(PXA2XX_CS_ASSERT);
2220 -
2221 /* Clear status */
2222 cr1 = chip->cr1 | chip->threshold | drv_data->int_cr1;
2223 write_SSSR(drv_data->clear_sr, reg);
2224 @@ -986,16 +986,29 @@ static void pump_transfers(unsigned long data)
2225 || (read_SSCR1(reg) & SSCR1_CHANGE_MASK) !=
2226 (cr1 & SSCR1_CHANGE_MASK)) {
2227
2228 + /* stop the SSP, and update the other bits */
2229 write_SSCR0(cr0 & ~SSCR0_SSE, reg);
2230 if (drv_data->ssp_type != PXA25x_SSP)
2231 write_SSTO(chip->timeout, reg);
2232 - write_SSCR1(cr1, reg);
2233 + /* first set CR1 without interrupt and service enables */
2234 + write_SSCR1(cr1 & SSCR1_CHANGE_MASK, reg);
2235 + /* restart the SSP */
2236 write_SSCR0(cr0, reg);
2237 +
2238 } else {
2239 if (drv_data->ssp_type != PXA25x_SSP)
2240 write_SSTO(chip->timeout, reg);
2241 - write_SSCR1(cr1, reg);
2242 }
2243 +
2244 + /* FIXME, need to handle cs polarity,
2245 + * this driver uses struct pxa2xx_spi_chip.cs_control to
2246 + * specify a CS handling function, and it ignores most
2247 + * struct spi_device.mode[s], including SPI_CS_HIGH */
2248 + drv_data->cs_control(PXA2XX_CS_ASSERT);
2249 +
2250 + /* after chip select, release the data by enabling service
2251 + * requests and interrupts, without changing any mode bits */
2252 + write_SSCR1(cr1, reg);
2253 }
2254
2255 static void pump_messages(struct work_struct *work)
2256 diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
2257 index b10f39c..d1df9e9 100644
2258 --- a/drivers/usb/host/ehci-q.c
2259 +++ b/drivers/usb/host/ehci-q.c
2260 @@ -315,10 +315,10 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
2261 if (likely (last->urb != urb)) {
2262 ehci_urb_done(ehci, last->urb, last_status);
2263 count++;
2264 + last_status = -EINPROGRESS;
2265 }
2266 ehci_qtd_free (ehci, last);
2267 last = NULL;
2268 - last_status = -EINPROGRESS;
2269 }
2270
2271 /* ignore urbs submitted during completions we reported */
2272 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
2273 index 1382af9..c04beac 100644
2274 --- a/drivers/usb/serial/ftdi_sio.c
2275 +++ b/drivers/usb/serial/ftdi_sio.c
2276 @@ -310,6 +310,7 @@ struct ftdi_sio_quirk {
2277 };
2278
2279 static int ftdi_olimex_probe (struct usb_serial *serial);
2280 +static int ftdi_mtxorb_hack_setup (struct usb_serial *serial);
2281 static void ftdi_USB_UIRT_setup (struct ftdi_private *priv);
2282 static void ftdi_HE_TIRA1_setup (struct ftdi_private *priv);
2283
2284 @@ -317,6 +318,10 @@ static struct ftdi_sio_quirk ftdi_olimex_quirk = {
2285 .probe = ftdi_olimex_probe,
2286 };
2287
2288 +static struct ftdi_sio_quirk ftdi_mtxorb_hack_quirk = {
2289 + .probe = ftdi_mtxorb_hack_setup,
2290 +};
2291 +
2292 static struct ftdi_sio_quirk ftdi_USB_UIRT_quirk = {
2293 .port_probe = ftdi_USB_UIRT_setup,
2294 };
2295 @@ -379,6 +384,8 @@ static struct usb_device_id id_table_combined [] = {
2296 { USB_DEVICE(FTDI_VID, FTDI_MTXORB_4_PID) },
2297 { USB_DEVICE(FTDI_VID, FTDI_MTXORB_5_PID) },
2298 { USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) },
2299 + { USB_DEVICE(MTXORB_VK_VID, MTXORB_VK_PID),
2300 + .driver_info = (kernel_ulong_t)&ftdi_mtxorb_hack_quirk },
2301 { USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) },
2302 { USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) },
2303 { USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) },
2304 @@ -492,6 +499,7 @@ static struct usb_device_id id_table_combined [] = {
2305 { USB_DEVICE(FTDI_VID, FTDI_ELV_FS20SIG_PID) },
2306 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS300PC_PID) },
2307 { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) },
2308 + { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) },
2309 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) },
2310 { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) },
2311 { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) },
2312 @@ -1301,6 +1309,23 @@ static int ftdi_olimex_probe(struct usb_serial *serial)
2313 return 0;
2314 }
2315
2316 +/*
2317 + * The Matrix Orbital VK204-25-USB has an invalid IN endpoint.
2318 + * We have to correct it if we want to read from it.
2319 + */
2320 +static int ftdi_mtxorb_hack_setup(struct usb_serial *serial)
2321 +{
2322 + struct usb_host_endpoint *ep = serial->dev->ep_in[1];
2323 + struct usb_endpoint_descriptor *ep_desc = &ep->desc;
2324 +
2325 + if (ep->enabled && ep_desc->wMaxPacketSize == 0) {
2326 + ep_desc->wMaxPacketSize = 0x40;
2327 + info("Fixing invalid wMaxPacketSize on read pipe");
2328 + }
2329 +
2330 + return 0;
2331 +}
2332 +
2333 /* ftdi_shutdown is called from usbserial:usb_serial_disconnect
2334 * it is called when the usb device is disconnected
2335 *
2336 diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
2337 index f6053da..893b429 100644
2338 --- a/drivers/usb/serial/ftdi_sio.h
2339 +++ b/drivers/usb/serial/ftdi_sio.h
2340 @@ -98,6 +98,13 @@
2341 #define FTDI_MTXORB_5_PID 0xFA05 /* Matrix Orbital Product Id */
2342 #define FTDI_MTXORB_6_PID 0xFA06 /* Matrix Orbital Product Id */
2343
2344 +/*
2345 + * The following are the values for the Matrix Orbital VK204-25-USB
2346 + * display, which use the FT232RL.
2347 + */
2348 +#define MTXORB_VK_VID 0x1b3d
2349 +#define MTXORB_VK_PID 0x0158
2350 +
2351 /* Interbiometrics USB I/O Board */
2352 /* Developed for Interbiometrics by Rudolf Gugler */
2353 #define INTERBIOMETRICS_VID 0x1209
2354 diff --git a/drivers/usb/storage/protocol.c b/drivers/usb/storage/protocol.c
2355 index 889622b..45262f3 100644
2356 --- a/drivers/usb/storage/protocol.c
2357 +++ b/drivers/usb/storage/protocol.c
2358 @@ -194,7 +194,7 @@ unsigned int usb_stor_access_xfer_buf(unsigned char *buffer,
2359 * and the starting offset within the page, and update
2360 * the *offset and *index values for the next loop. */
2361 cnt = 0;
2362 - while (cnt < buflen) {
2363 + while (cnt < buflen && sg) {
2364 struct page *page = sg_page(sg) +
2365 ((sg->offset + *offset) >> PAGE_SHIFT);
2366 unsigned int poff =
2367 @@ -249,7 +249,8 @@ void usb_stor_set_xfer_buf(unsigned char *buffer,
2368 unsigned int offset = 0;
2369 struct scatterlist *sg = NULL;
2370
2371 - usb_stor_access_xfer_buf(buffer, buflen, srb, &sg, &offset,
2372 + buflen = min(buflen, srb->request_bufflen);
2373 + buflen = usb_stor_access_xfer_buf(buffer, buflen, srb, &sg, &offset,
2374 TO_XFER_BUF);
2375 if (buflen < srb->request_bufflen)
2376 srb->resid = srb->request_bufflen - buflen;
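The usb-storage change above clamps the number of bytes to copy to srb->request_bufflen before walking the scatterlist, and the walk itself now also stops when sg runs out, so a buffer longer than the actual request can no longer be copied past the end of the transfer buffer. A tiny standalone sketch of the clamp-before-copy pattern (buffer sizes are illustrative):

    #include <stdio.h>
    #include <string.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
            unsigned char data[64];               /* what the caller wants to store */
            unsigned char request_buf[16];        /* stand-in for the transfer buffer */
            size_t buflen = sizeof(data);
            size_t request_bufflen = sizeof(request_buf);

            memset(data, 0xab, sizeof(data));

            buflen = MIN(buflen, request_bufflen); /* clamp, as the patch does */
            memcpy(request_buf, data, buflen);

            printf("copied %zu of %zu bytes\n", buflen, sizeof(data));
            return 0;
    }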
2377 diff --git a/fs/aio.c b/fs/aio.c
2378 index 9dec7d2..758f911 100644
2379 --- a/fs/aio.c
2380 +++ b/fs/aio.c
2381 @@ -997,6 +997,14 @@ put_rq:
2382 /* everything turned out well, dispose of the aiocb. */
2383 ret = __aio_put_req(ctx, iocb);
2384
2385 + /*
2386 + * We have to order our ring_info tail store above and test
2387 + * of the wait list below outside the wait lock. This is
2388 + * like in wake_up_bit() where clearing a bit has to be
2389 + * ordered with the unlocked test.
2390 + */
2391 + smp_mb();
2392 +
2393 if (waitqueue_active(&ctx->wait))
2394 wake_up(&ctx->wait);
2395
2396 diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
2397 index 32c5711..a985a8f 100644
2398 --- a/fs/ecryptfs/mmap.c
2399 +++ b/fs/ecryptfs/mmap.c
2400 @@ -263,52 +263,102 @@ out:
2401 return 0;
2402 }
2403
2404 -/* This function must zero any hole we create */
2405 +/**
2406 + * ecryptfs_prepare_write
2407 + * @file: The eCryptfs file
2408 + * @page: The eCryptfs page
2409 + * @from: The start byte from which we will write
2410 + * @to: The end byte to which we will write
2411 + *
2412 + * This function must zero any hole we create
2413 + *
2414 + * Returns zero on success; non-zero otherwise
2415 + */
2416 static int ecryptfs_prepare_write(struct file *file, struct page *page,
2417 unsigned from, unsigned to)
2418 {
2419 - int rc = 0;
2420 loff_t prev_page_end_size;
2421 + int rc = 0;
2422
2423 if (!PageUptodate(page)) {
2424 - rc = ecryptfs_read_lower_page_segment(page, page->index, 0,
2425 - PAGE_CACHE_SIZE,
2426 - page->mapping->host);
2427 - if (rc) {
2428 - printk(KERN_ERR "%s: Error attemping to read lower "
2429 - "page segment; rc = [%d]\n", __FUNCTION__, rc);
2430 - ClearPageUptodate(page);
2431 - goto out;
2432 - } else
2433 + struct ecryptfs_crypt_stat *crypt_stat =
2434 + &ecryptfs_inode_to_private(
2435 + file->f_path.dentry->d_inode)->crypt_stat;
2436 +
2437 + if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)
2438 + || (crypt_stat->flags & ECRYPTFS_NEW_FILE)) {
2439 + rc = ecryptfs_read_lower_page_segment(
2440 + page, page->index, 0, PAGE_CACHE_SIZE,
2441 + page->mapping->host);
2442 + if (rc) {
2443 + printk(KERN_ERR "%s: Error attemping to read "
2444 + "lower page segment; rc = [%d]\n",
2445 + __FUNCTION__, rc);
2446 + ClearPageUptodate(page);
2447 + goto out;
2448 + } else
2449 + SetPageUptodate(page);
2450 + } else if (crypt_stat->flags & ECRYPTFS_VIEW_AS_ENCRYPTED) {
2451 + if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) {
2452 + rc = ecryptfs_copy_up_encrypted_with_header(
2453 + page, crypt_stat);
2454 + if (rc) {
2455 + printk(KERN_ERR "%s: Error attempting "
2456 + "to copy the encrypted content "
2457 + "from the lower file whilst "
2458 + "inserting the metadata from "
2459 + "the xattr into the header; rc "
2460 + "= [%d]\n", __FUNCTION__, rc);
2461 + ClearPageUptodate(page);
2462 + goto out;
2463 + }
2464 + SetPageUptodate(page);
2465 + } else {
2466 + rc = ecryptfs_read_lower_page_segment(
2467 + page, page->index, 0, PAGE_CACHE_SIZE,
2468 + page->mapping->host);
2469 + if (rc) {
2470 + printk(KERN_ERR "%s: Error reading "
2471 + "page; rc = [%d]\n",
2472 + __FUNCTION__, rc);
2473 + ClearPageUptodate(page);
2474 + goto out;
2475 + }
2476 + SetPageUptodate(page);
2477 + }
2478 + } else {
2479 + rc = ecryptfs_decrypt_page(page);
2480 + if (rc) {
2481 + printk(KERN_ERR "%s: Error decrypting page "
2482 + "at index [%ld]; rc = [%d]\n",
2483 + __FUNCTION__, page->index, rc);
2484 + ClearPageUptodate(page);
2485 + goto out;
2486 + }
2487 SetPageUptodate(page);
2488 + }
2489 }
2490 -
2491 prev_page_end_size = ((loff_t)page->index << PAGE_CACHE_SHIFT);
2492 -
2493 - /*
2494 - * If creating a page or more of holes, zero them out via truncate.
2495 - * Note, this will increase i_size.
2496 - */
2497 + /* If creating a page or more of holes, zero them out via truncate.
2498 + * Note, this will increase i_size. */
2499 if (page->index != 0) {
2500 if (prev_page_end_size > i_size_read(page->mapping->host)) {
2501 rc = ecryptfs_truncate(file->f_path.dentry,
2502 prev_page_end_size);
2503 if (rc) {
2504 - printk(KERN_ERR "Error on attempt to "
2505 + printk(KERN_ERR "%s: Error on attempt to "
2506 "truncate to (higher) offset [%lld];"
2507 - " rc = [%d]\n", prev_page_end_size, rc);
2508 + " rc = [%d]\n", __FUNCTION__,
2509 + prev_page_end_size, rc);
2510 goto out;
2511 }
2512 }
2513 }
2514 - /*
2515 - * Writing to a new page, and creating a small hole from start of page?
2516 - * Zero it out.
2517 - */
2518 - if ((i_size_read(page->mapping->host) == prev_page_end_size) &&
2519 - (from != 0)) {
2520 + /* Writing to a new page, and creating a small hole from start
2521 + * of page? Zero it out. */
2522 + if ((i_size_read(page->mapping->host) == prev_page_end_size)
2523 + && (from != 0))
2524 zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
2525 - }
2526 out:
2527 return rc;
2528 }
2529 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
2530 index 80d2f52..dcac591 100644
2531 --- a/fs/fuse/dir.c
2532 +++ b/fs/fuse/dir.c
2533 @@ -905,7 +905,7 @@ static int fuse_permission(struct inode *inode, int mask, struct nameidata *nd)
2534 }
2535
2536 if (fc->flags & FUSE_DEFAULT_PERMISSIONS) {
2537 - int err = generic_permission(inode, mask, NULL);
2538 + err = generic_permission(inode, mask, NULL);
2539
2540 /* If permission is denied, try to refresh file
2541 attributes. This is also needed, because the root
2542 diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
2543 index 37dbd64..defb932 100644
2544 --- a/fs/isofs/compress.c
2545 +++ b/fs/isofs/compress.c
2546 @@ -72,6 +72,17 @@ static int zisofs_readpage(struct file *file, struct page *page)
2547 offset = index & ~zisofs_block_page_mask;
2548 blockindex = offset >> zisofs_block_page_shift;
2549 maxpage = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
2550 +
2551 + /*
2552 + * If this page is wholly outside i_size we just return zero;
2553 + * do_generic_file_read() will handle this for us
2554 + */
2555 + if (page->index >= maxpage) {
2556 + SetPageUptodate(page);
2557 + unlock_page(page);
2558 + return 0;
2559 + }
2560 +
2561 maxpage = min(zisofs_block_pages, maxpage-offset);
2562
2563 for ( i = 0 ; i < maxpage ; i++, offset++ ) {
2564 diff --git a/fs/jbd/recovery.c b/fs/jbd/recovery.c
2565 index c5d9694..9aaa4fa 100644
2566 --- a/fs/jbd/recovery.c
2567 +++ b/fs/jbd/recovery.c
2568 @@ -478,7 +478,7 @@ static int do_one_pass(journal_t *journal,
2569 memcpy(nbh->b_data, obh->b_data,
2570 journal->j_blocksize);
2571 if (flags & JFS_FLAG_ESCAPE) {
2572 - *((__be32 *)bh->b_data) =
2573 + *((__be32 *)nbh->b_data) =
2574 cpu_to_be32(JFS_MAGIC_NUMBER);
2575 }
2576
2577 diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
2578 index d0ce627..fa0d4e9 100644
2579 --- a/fs/jbd2/recovery.c
2580 +++ b/fs/jbd2/recovery.c
2581 @@ -488,7 +488,7 @@ static int do_one_pass(journal_t *journal,
2582 memcpy(nbh->b_data, obh->b_data,
2583 journal->j_blocksize);
2584 if (flags & JBD2_FLAG_ESCAPE) {
2585 - *((__be32 *)bh->b_data) =
2586 + *((__be32 *)nbh->b_data) =
2587 cpu_to_be32(JBD2_MAGIC_NUMBER);
2588 }
2589
2590 diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
2591 index 468f17a..429cec2 100644
2592 --- a/fs/nfsd/nfsfh.c
2593 +++ b/fs/nfsd/nfsfh.c
2594 @@ -231,6 +231,7 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
2595 fhp->fh_dentry = dentry;
2596 fhp->fh_export = exp;
2597 nfsd_nr_verified++;
2598 + cache_get(&exp->h);
2599 } else {
2600 /*
2601 * just rechecking permissions
2602 @@ -240,6 +241,7 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
2603 dprintk("nfsd: fh_verify - just checking\n");
2604 dentry = fhp->fh_dentry;
2605 exp = fhp->fh_export;
2606 + cache_get(&exp->h);
2607 /*
2608 * Set user creds for this exportpoint; necessary even
2609 * in the "just checking" case because this may be a
2610 @@ -251,8 +253,6 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
2611 if (error)
2612 goto out;
2613 }
2614 - cache_get(&exp->h);
2615 -
2616
2617 error = nfsd_mode_check(rqstp, dentry->d_inode->i_mode, type);
2618 if (error)
2619 diff --git a/fs/ufs/util.h b/fs/ufs/util.h
2620 index b26fc4d..23ceed8 100644
2621 --- a/fs/ufs/util.h
2622 +++ b/fs/ufs/util.h
2623 @@ -58,7 +58,7 @@ ufs_set_fs_state(struct super_block *sb, struct ufs_super_block_first *usb1,
2624 {
2625 switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
2626 case UFS_ST_SUNOS:
2627 - if (fs32_to_cpu(sb, usb3->fs_postblformat == UFS_42POSTBLFMT)) {
2628 + if (fs32_to_cpu(sb, usb3->fs_postblformat) == UFS_42POSTBLFMT) {
2629 usb1->fs_u0.fs_sun.fs_state = cpu_to_fs32(sb, value);
2630 break;
2631 }
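The ufs hunk above fixes a misplaced closing parenthesis: the old code compared the raw on-disk fs_postblformat value against the host-order constant and then byte-swapped the 0/1 result, while the fix converts the field with fs32_to_cpu first and compares afterwards. A standalone sketch of why the parenthesis placement matters on an opposite-endian filesystem (the constant and swap helper below are stand-ins, not the real UFS definitions):

    #include <stdio.h>
    #include <stdint.h>

    #define MAGIC 0x1234u                       /* stand-in for UFS_42POSTBLFMT */

    static uint32_t swap32(uint32_t x)          /* stand-in for fs32_to_cpu() */
    {
            return (x >> 24) | ((x >> 8) & 0xff00) |
                   ((x << 8) & 0xff0000) | (x << 24);
    }

    int main(void)
    {
            uint32_t on_disk = swap32(MAGIC);   /* field stored in foreign byte order */

            int old_way = swap32(on_disk == MAGIC) != 0;  /* swap the comparison result */
            int new_way = swap32(on_disk) == MAGIC;       /* swap the field, then compare */

            printf("old: %d, new: %d\n", old_way, new_way);  /* old: 0, new: 1 */
            return 0;
    }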
2632 diff --git a/include/asm-arm/arch-pxa/pxa-regs.h b/include/asm-arm/arch-pxa/pxa-regs.h
2633 index 1bd398d..c12c294 100644
2634 --- a/include/asm-arm/arch-pxa/pxa-regs.h
2635 +++ b/include/asm-arm/arch-pxa/pxa-regs.h
2636 @@ -1669,6 +1669,7 @@
2637 #define SSCR1_RSRE (1 << 20) /* Receive Service Request Enable */
2638 #define SSCR1_TINTE (1 << 19) /* Receiver Time-out Interrupt enable */
2639 #define SSCR1_PINTE (1 << 18) /* Peripheral Trailing Byte Interupt Enable */
2640 +#define SSCR1_IFS (1 << 16) /* Invert Frame Signal */
2641 #define SSCR1_STRF (1 << 15) /* Select FIFO or EFWR */
2642 #define SSCR1_EFWR (1 << 14) /* Enable FIFO Write/Read */
2643
2644 diff --git a/include/asm-x86/apic_32.h b/include/asm-x86/apic_32.h
2645 index be158b2..04fbe7f 100644
2646 --- a/include/asm-x86/apic_32.h
2647 +++ b/include/asm-x86/apic_32.h
2648 @@ -109,7 +109,7 @@ extern void setup_boot_APIC_clock (void);
2649 extern void setup_secondary_APIC_clock (void);
2650 extern int APIC_init_uniprocessor (void);
2651
2652 -extern void enable_NMI_through_LVT0 (void * dummy);
2653 +extern void enable_NMI_through_LVT0(void);
2654
2655 #define ARCH_APICTIMER_STOPS_ON_C3 1
2656
2657 diff --git a/include/asm-x86/futex_32.h b/include/asm-x86/futex_32.h
2658 index 438ef0e..80964fd 100644
2659 --- a/include/asm-x86/futex_32.h
2660 +++ b/include/asm-x86/futex_32.h
2661 @@ -28,7 +28,7 @@
2662 "1: movl %2, %0\n\
2663 movl %0, %3\n" \
2664 insn "\n" \
2665 -"2: " LOCK_PREFIX "cmpxchgl %3, %2\n\
2666 +"2: lock ; cmpxchgl %3, %2\n\
2667 jnz 1b\n\
2668 3: .section .fixup,\"ax\"\n\
2669 4: mov %5, %1\n\
2670 @@ -68,7 +68,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
2671 #endif
2672 switch (op) {
2673 case FUTEX_OP_ADD:
2674 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret,
2675 + __futex_atomic_op1("lock ; xaddl %0, %2", ret,
2676 oldval, uaddr, oparg);
2677 break;
2678 case FUTEX_OP_OR:
2679 @@ -111,7 +111,7 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
2680 return -EFAULT;
2681
2682 __asm__ __volatile__(
2683 - "1: " LOCK_PREFIX "cmpxchgl %3, %1 \n"
2684 + "1: lock ; cmpxchgl %3, %1 \n"
2685
2686 "2: .section .fixup, \"ax\" \n"
2687 "3: mov %2, %0 \n"
2688 diff --git a/include/asm-x86/futex_64.h b/include/asm-x86/futex_64.h
2689 index 5cdfb08..423c051 100644
2690 --- a/include/asm-x86/futex_64.h
2691 +++ b/include/asm-x86/futex_64.h
2692 @@ -27,7 +27,7 @@
2693 "1: movl %2, %0\n\
2694 movl %0, %3\n" \
2695 insn "\n" \
2696 -"2: " LOCK_PREFIX "cmpxchgl %3, %2\n\
2697 +"2: lock ; cmpxchgl %3, %2\n\
2698 jnz 1b\n\
2699 3: .section .fixup,\"ax\"\n\
2700 4: mov %5, %1\n\
2701 @@ -62,7 +62,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
2702 __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
2703 break;
2704 case FUTEX_OP_ADD:
2705 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
2706 + __futex_atomic_op1("lock ; xaddl %0, %2", ret, oldval,
2707 uaddr, oparg);
2708 break;
2709 case FUTEX_OP_OR:
2710 @@ -101,7 +101,7 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
2711 return -EFAULT;
2712
2713 __asm__ __volatile__(
2714 - "1: " LOCK_PREFIX "cmpxchgl %3, %1 \n"
2715 + "1: lock ; cmpxchgl %3, %1 \n"
2716
2717 "2: .section .fixup, \"ax\" \n"
2718 "3: mov %2, %0 \n"
2719 diff --git a/include/asm-x86/io_apic_64.h b/include/asm-x86/io_apic_64.h
2720 index e2c1367..1913ad0 100644
2721 --- a/include/asm-x86/io_apic_64.h
2722 +++ b/include/asm-x86/io_apic_64.h
2723 @@ -129,7 +129,7 @@ extern int io_apic_set_pci_routing (int ioapic, int pin, int irq, int, int);
2724
2725 extern int sis_apic_bug; /* dummy */
2726
2727 -void enable_NMI_through_LVT0 (void * dummy);
2728 +void enable_NMI_through_LVT0(void);
2729
2730 extern spinlock_t i8259A_lock;
2731
2732 diff --git a/include/asm-x86/processor_32.h b/include/asm-x86/processor_32.h
2733 index 13976b0..787ee2e 100644
2734 --- a/include/asm-x86/processor_32.h
2735 +++ b/include/asm-x86/processor_32.h
2736 @@ -712,9 +712,10 @@ static inline unsigned int cpuid_edx(unsigned int op)
2737 #define ASM_NOP6 K7_NOP6
2738 #define ASM_NOP7 K7_NOP7
2739 #define ASM_NOP8 K7_NOP8
2740 -#elif defined(CONFIG_M686) || defined(CONFIG_MPENTIUMII) || \
2741 +#elif (defined(CONFIG_M686) || defined(CONFIG_MPENTIUMII) || \
2742 defined(CONFIG_MPENTIUMIII) || defined(CONFIG_MPENTIUMM) || \
2743 - defined(CONFIG_MCORE2) || defined(CONFIG_PENTIUM4)
2744 + defined(CONFIG_MCORE2) || defined(CONFIG_PENTIUM4)) && \
2745 + !defined(CONFIG_X86_GENERIC)
2746 #define ASM_NOP1 P6_NOP1
2747 #define ASM_NOP2 P6_NOP2
2748 #define ASM_NOP3 P6_NOP3
2749 diff --git a/include/linux/Kbuild b/include/linux/Kbuild
2750 index 4b32bb1..f30fa92 100644
2751 --- a/include/linux/Kbuild
2752 +++ b/include/linux/Kbuild
2753 @@ -217,7 +217,6 @@ unifdef-y += i2o-dev.h
2754 unifdef-y += icmp.h
2755 unifdef-y += icmpv6.h
2756 unifdef-y += if_addr.h
2757 -unifdef-y += if_addrlabel.h
2758 unifdef-y += if_arp.h
2759 unifdef-y += if_bridge.h
2760 unifdef-y += if_ec.h
2761 diff --git a/include/linux/futex.h b/include/linux/futex.h
2762 index 92d420f..e5f3b84 100644
2763 --- a/include/linux/futex.h
2764 +++ b/include/linux/futex.h
2765 @@ -153,6 +153,7 @@ union futex_key {
2766 #ifdef CONFIG_FUTEX
2767 extern void exit_robust_list(struct task_struct *curr);
2768 extern void exit_pi_state_list(struct task_struct *curr);
2769 +extern int futex_cmpxchg_enabled;
2770 #else
2771 static inline void exit_robust_list(struct task_struct *curr)
2772 {
2773 diff --git a/include/linux/irq.h b/include/linux/irq.h
2774 index 4669be0..1fc1cb8 100644
2775 --- a/include/linux/irq.h
2776 +++ b/include/linux/irq.h
2777 @@ -367,6 +367,9 @@ set_irq_chained_handler(unsigned int irq,
2778 __set_irq_handler(irq, handle, 1, NULL);
2779 }
2780
2781 +extern void set_irq_noprobe(unsigned int irq);
2782 +extern void set_irq_probe(unsigned int irq);
2783 +
2784 /* Handle dynamic irq creation and destruction */
2785 extern int create_irq(void);
2786 extern void destroy_irq(unsigned int irq);
2787 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
2788 index 13410b2..c1d64c2 100644
2789 --- a/include/linux/moduleparam.h
2790 +++ b/include/linux/moduleparam.h
2791 @@ -62,6 +62,16 @@ struct kparam_array
2792 void *elem;
2793 };
2794
2795 +/* On alpha, ia64 and ppc64 relocations to global data cannot go into
2796 + read-only sections (which is part of respective UNIX ABI on these
2797 + platforms). So 'const' makes no sense and even causes compile failures
2798 + with some compilers. */
2799 +#if defined(CONFIG_ALPHA) || defined(CONFIG_IA64) || defined(CONFIG_PPC64)
2800 +#define __moduleparam_const
2801 +#else
2802 +#define __moduleparam_const const
2803 +#endif
2804 +
2805 /* This is the fundamental function for registering boot/module
2806 parameters. perm sets the visibility in sysfs: 000 means it's
2807 not there, read bits mean it's readable, write bits mean it's
2808 @@ -71,7 +81,7 @@ struct kparam_array
2809 static int __param_perm_check_##name __attribute__((unused)) = \
2810 BUILD_BUG_ON_ZERO((perm) < 0 || (perm) > 0777 || ((perm) & 2)); \
2811 static const char __param_str_##name[] = prefix #name; \
2812 - static struct kernel_param const __param_##name \
2813 + static struct kernel_param __moduleparam_const __param_##name \
2814 __attribute_used__ \
2815 __attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \
2816 = { __param_str_##name, perm, set, get, { arg } }
2817 diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
2818 index 70013c5..89cd011 100644
2819 --- a/include/net/inet_sock.h
2820 +++ b/include/net/inet_sock.h
2821 @@ -175,7 +175,8 @@ extern void build_ehash_secret(void);
2822 static inline unsigned int inet_ehashfn(const __be32 laddr, const __u16 lport,
2823 const __be32 faddr, const __be16 fport)
2824 {
2825 - return jhash_2words((__force __u32) laddr ^ (__force __u32) faddr,
2826 + return jhash_3words((__force __u32) laddr,
2827 + (__force __u32) faddr,
2828 ((__u32) lport) << 16 | (__force __u32)fport,
2829 inet_ehash_secret);
2830 }
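The inet_sock.h change above stops pre-folding the two addresses with XOR before hashing: with jhash_2words the address contribution collapsed to laddr ^ faddr, so any two address pairs with the same XOR (including the same pair in the reverse direction) fed the hash identical input, whereas jhash_3words takes the addresses as separate words. A small standalone check of the folding collision (the addresses are illustrative, and jhash itself is not reproduced here, since any hash of identical input collides):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t laddr = 0xc0a80001;   /* 192.168.0.1, illustrative */
            uint32_t faddr = 0x0a000002;   /* 10.0.0.2,    illustrative */

            /* old scheme: the hash only ever saw this single folded word */
            uint32_t fold_ab = laddr ^ faddr;
            uint32_t fold_ba = faddr ^ laddr;

            printf("folded (laddr,faddr) = %08x\n", (unsigned)fold_ab);
            printf("folded (faddr,laddr) = %08x\n", (unsigned)fold_ba);
            printf("identical hash input: %s\n", fold_ab == fold_ba ? "yes" : "no");
            return 0;
    }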
2831 diff --git a/kernel/futex.c b/kernel/futex.c
2832 index 55d78b5..d166080 100644
2833 --- a/kernel/futex.c
2834 +++ b/kernel/futex.c
2835 @@ -60,6 +60,8 @@
2836
2837 #include "rtmutex_common.h"
2838
2839 +int __read_mostly futex_cmpxchg_enabled;
2840 +
2841 #define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
2842
2843 /*
2844 @@ -466,6 +468,8 @@ void exit_pi_state_list(struct task_struct *curr)
2845 struct futex_hash_bucket *hb;
2846 union futex_key key;
2847
2848 + if (!futex_cmpxchg_enabled)
2849 + return;
2850 /*
2851 * We are a ZOMBIE and nobody can enqueue itself on
2852 * pi_state_list anymore, but we have to be careful
2853 @@ -1854,6 +1858,8 @@ asmlinkage long
2854 sys_set_robust_list(struct robust_list_head __user *head,
2855 size_t len)
2856 {
2857 + if (!futex_cmpxchg_enabled)
2858 + return -ENOSYS;
2859 /*
2860 * The kernel knows only one size for now:
2861 */
2862 @@ -1878,6 +1884,9 @@ sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr,
2863 struct robust_list_head __user *head;
2864 unsigned long ret;
2865
2866 + if (!futex_cmpxchg_enabled)
2867 + return -ENOSYS;
2868 +
2869 if (!pid)
2870 head = current->robust_list;
2871 else {
2872 @@ -1980,6 +1989,9 @@ void exit_robust_list(struct task_struct *curr)
2873 unsigned long futex_offset;
2874 int rc;
2875
2876 + if (!futex_cmpxchg_enabled)
2877 + return;
2878 +
2879 /*
2880 * Fetch the list head (which was registered earlier, via
2881 * sys_set_robust_list()):
2882 @@ -2034,7 +2046,7 @@ void exit_robust_list(struct task_struct *curr)
2883 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
2884 u32 __user *uaddr2, u32 val2, u32 val3)
2885 {
2886 - int ret;
2887 + int ret = -ENOSYS;
2888 int cmd = op & FUTEX_CMD_MASK;
2889 struct rw_semaphore *fshared = NULL;
2890
2891 @@ -2062,13 +2074,16 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
2892 ret = futex_wake_op(uaddr, fshared, uaddr2, val, val2, val3);
2893 break;
2894 case FUTEX_LOCK_PI:
2895 - ret = futex_lock_pi(uaddr, fshared, val, timeout, 0);
2896 + if (futex_cmpxchg_enabled)
2897 + ret = futex_lock_pi(uaddr, fshared, val, timeout, 0);
2898 break;
2899 case FUTEX_UNLOCK_PI:
2900 - ret = futex_unlock_pi(uaddr, fshared);
2901 + if (futex_cmpxchg_enabled)
2902 + ret = futex_unlock_pi(uaddr, fshared);
2903 break;
2904 case FUTEX_TRYLOCK_PI:
2905 - ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1);
2906 + if (futex_cmpxchg_enabled)
2907 + ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1);
2908 break;
2909 default:
2910 ret = -ENOSYS;
2911 @@ -2123,8 +2138,29 @@ static struct file_system_type futex_fs_type = {
2912
2913 static int __init init(void)
2914 {
2915 - int i = register_filesystem(&futex_fs_type);
2916 + u32 curval;
2917 + int i;
2918 +
2919 + /*
2920 + * This will fail and we want it. Some arch implementations do
2921 + * runtime detection of the futex_atomic_cmpxchg_inatomic()
2922 + * functionality. We want to know that before we call in any
2923 + * of the complex code paths. Also we want to prevent
2924 + * registration of robust lists in that case. NULL is
2925 + * guaranteed to fault and we get -EFAULT on functional
2926 + * implementation, the non functional ones will return
2927 + * -ENOSYS.
2928 + */
2929 + curval = cmpxchg_futex_value_locked(NULL, 0, 0);
2930 + if (curval == -EFAULT)
2931 + futex_cmpxchg_enabled = 1;
2932
2933 + for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
2934 + plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
2935 + spin_lock_init(&futex_queues[i].lock);
2936 + }
2937 +
2938 + i = register_filesystem(&futex_fs_type);
2939 if (i)
2940 return i;
2941
2942 @@ -2134,10 +2170,6 @@ static int __init init(void)
2943 return PTR_ERR(futex_mnt);
2944 }
2945
2946 - for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
2947 - plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
2948 - spin_lock_init(&futex_queues[i].lock);
2949 - }
2950 return 0;
2951 }
2952 __initcall(init);
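The futex.c change above makes init() probe, once at boot, whether the architecture really implements futex_atomic_cmpxchg_inatomic: cmpxchg_futex_value_locked(NULL, 0, 0) must fault, so a working implementation reports -EFAULT while a stub reports -ENOSYS, and the resulting futex_cmpxchg_enabled flag then gates the PI and robust-list operations. A userspace sketch of the same detect-once-then-gate pattern; the probe function below is a stand-in, not the kernel helper:

    #include <stdio.h>
    #include <errno.h>

    static int futex_cmpxchg_enabled;

    /* stand-in probe: a functional implementation faults on the NULL
     * user pointer (-EFAULT); a stubbed one would return -ENOSYS */
    static int probe_cmpxchg(int *uaddr, int oldval, int newval)
    {
            (void)oldval;
            (void)newval;
            if (uaddr == NULL)
                    return -EFAULT;
            return 0;
    }

    static void detect_once(void)
    {
            if (probe_cmpxchg(NULL, 0, 0) == -EFAULT)
                    futex_cmpxchg_enabled = 1;
    }

    static int lock_pi(void)
    {
            if (!futex_cmpxchg_enabled)
                    return -ENOSYS;     /* gated like FUTEX_LOCK_PI in the patch */
            return 0;
    }

    int main(void)
    {
            detect_once();
            printf("cmpxchg enabled=%d, lock_pi=%d\n",
                   futex_cmpxchg_enabled, lock_pi());
            return 0;
    }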
2953 diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
2954 index 8682c79..d95f79b 100644
2955 --- a/kernel/futex_compat.c
2956 +++ b/kernel/futex_compat.c
2957 @@ -54,6 +54,9 @@ void compat_exit_robust_list(struct task_struct *curr)
2958 compat_long_t futex_offset;
2959 int rc;
2960
2961 + if (!futex_cmpxchg_enabled)
2962 + return;
2963 +
2964 /*
2965 * Fetch the list head (which was registered earlier, via
2966 * sys_set_robust_list()):
2967 @@ -115,6 +118,9 @@ asmlinkage long
2968 compat_sys_set_robust_list(struct compat_robust_list_head __user *head,
2969 compat_size_t len)
2970 {
2971 + if (!futex_cmpxchg_enabled)
2972 + return -ENOSYS;
2973 +
2974 if (unlikely(len != sizeof(*head)))
2975 return -EINVAL;
2976
2977 @@ -130,6 +136,9 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
2978 struct compat_robust_list_head __user *head;
2979 unsigned long ret;
2980
2981 + if (!futex_cmpxchg_enabled)
2982 + return -ENOSYS;
2983 +
2984 if (!pid)
2985 head = current->compat_robust_list;
2986 else {
2987 diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
2988 index 465c69c..e4e1c99 100644
2989 --- a/kernel/irq/chip.c
2990 +++ b/kernel/irq/chip.c
2991 @@ -607,3 +607,39 @@ set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
2992 set_irq_chip(irq, chip);
2993 __set_irq_handler(irq, handle, 0, name);
2994 }
2995 +
2996 +void __init set_irq_noprobe(unsigned int irq)
2997 +{
2998 + struct irq_desc *desc;
2999 + unsigned long flags;
3000 +
3001 + if (irq >= NR_IRQS) {
3002 + printk(KERN_ERR "Trying to mark IRQ%d non-probeable\n", irq);
3003 +
3004 + return;
3005 + }
3006 +
3007 + desc = irq_desc + irq;
3008 +
3009 + spin_lock_irqsave(&desc->lock, flags);
3010 + desc->status |= IRQ_NOPROBE;
3011 + spin_unlock_irqrestore(&desc->lock, flags);
3012 +}
3013 +
3014 +void __init set_irq_probe(unsigned int irq)
3015 +{
3016 + struct irq_desc *desc;
3017 + unsigned long flags;
3018 +
3019 + if (irq >= NR_IRQS) {
3020 + printk(KERN_ERR "Trying to mark IRQ%d probeable\n", irq);
3021 +
3022 + return;
3023 + }
3024 +
3025 + desc = irq_desc + irq;
3026 +
3027 + spin_lock_irqsave(&desc->lock, flags);
3028 + desc->status &= ~IRQ_NOPROBE;
3029 + spin_unlock_irqrestore(&desc->lock, flags);
3030 +}
3031 diff --git a/kernel/relay.c b/kernel/relay.c
3032 index 7c03733..889102a 100644
3033 --- a/kernel/relay.c
3034 +++ b/kernel/relay.c
3035 @@ -1072,7 +1072,7 @@ static int subbuf_splice_actor(struct file *in,
3036 unsigned int flags,
3037 int *nonpad_ret)
3038 {
3039 - unsigned int pidx, poff, total_len, subbuf_pages, ret;
3040 + unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
3041 struct rchan_buf *rbuf = in->private_data;
3042 unsigned int subbuf_size = rbuf->chan->subbuf_size;
3043 uint64_t pos = (uint64_t) *ppos;
3044 @@ -1103,8 +1103,9 @@ static int subbuf_splice_actor(struct file *in,
3045 subbuf_pages = rbuf->chan->alloc_size >> PAGE_SHIFT;
3046 pidx = (read_start / PAGE_SIZE) % subbuf_pages;
3047 poff = read_start & ~PAGE_MASK;
3048 + nr_pages = min_t(unsigned int, subbuf_pages, PIPE_BUFFERS);
3049
3050 - for (total_len = 0; spd.nr_pages < subbuf_pages; spd.nr_pages++) {
3051 + for (total_len = 0; spd.nr_pages < nr_pages; spd.nr_pages++) {
3052 unsigned int this_len, this_end, private;
3053 unsigned int cur_pos = read_start + total_len;
3054
3055 diff --git a/kernel/sched.c b/kernel/sched.c
3056 index e76b11c..5ba5db9 100644
3057 --- a/kernel/sched.c
3058 +++ b/kernel/sched.c
3059 @@ -4028,11 +4028,10 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
3060 oldprio = p->prio;
3061 on_rq = p->se.on_rq;
3062 running = task_current(rq, p);
3063 - if (on_rq) {
3064 + if (on_rq)
3065 dequeue_task(rq, p, 0);
3066 - if (running)
3067 - p->sched_class->put_prev_task(rq, p);
3068 - }
3069 + if (running)
3070 + p->sched_class->put_prev_task(rq, p);
3071
3072 if (rt_prio(prio))
3073 p->sched_class = &rt_sched_class;
3074 @@ -4041,9 +4040,9 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
3075
3076 p->prio = prio;
3077
3078 + if (running)
3079 + p->sched_class->set_curr_task(rq);
3080 if (on_rq) {
3081 - if (running)
3082 - p->sched_class->set_curr_task(rq);
3083 enqueue_task(rq, p, 0);
3084 /*
3085 * Reschedule if we are currently running on this runqueue and
3086 @@ -4339,18 +4338,17 @@ recheck:
3087 update_rq_clock(rq);
3088 on_rq = p->se.on_rq;
3089 running = task_current(rq, p);
3090 - if (on_rq) {
3091 + if (on_rq)
3092 deactivate_task(rq, p, 0);
3093 - if (running)
3094 - p->sched_class->put_prev_task(rq, p);
3095 - }
3096 + if (running)
3097 + p->sched_class->put_prev_task(rq, p);
3098
3099 oldprio = p->prio;
3100 __setscheduler(rq, p, policy, param->sched_priority);
3101
3102 + if (running)
3103 + p->sched_class->set_curr_task(rq);
3104 if (on_rq) {
3105 - if (running)
3106 - p->sched_class->set_curr_task(rq);
3107 activate_task(rq, p, 0);
3108 /*
3109 * Reschedule if we are currently running on this runqueue and
3110 @@ -7110,19 +7108,17 @@ void sched_move_task(struct task_struct *tsk)
3111 running = task_current(rq, tsk);
3112 on_rq = tsk->se.on_rq;
3113
3114 - if (on_rq) {
3115 + if (on_rq)
3116 dequeue_task(rq, tsk, 0);
3117 - if (unlikely(running))
3118 - tsk->sched_class->put_prev_task(rq, tsk);
3119 - }
3120 + if (unlikely(running))
3121 + tsk->sched_class->put_prev_task(rq, tsk);
3122
3123 set_task_cfs_rq(tsk, task_cpu(tsk));
3124
3125 - if (on_rq) {
3126 - if (unlikely(running))
3127 - tsk->sched_class->set_curr_task(rq);
3128 + if (unlikely(running))
3129 + tsk->sched_class->set_curr_task(rq);
3130 + if (on_rq)
3131 enqueue_task(rq, tsk, 0);
3132 - }
3133
3134 done:
3135 task_rq_unlock(rq, &flags);
3136 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
3137 index e3e0ee3..397ff8c 100644
3138 --- a/kernel/sysctl.c
3139 +++ b/kernel/sysctl.c
3140 @@ -306,7 +306,7 @@ static struct ctl_table kern_table[] = {
3141 .procname = "sched_nr_migrate",
3142 .data = &sysctl_sched_nr_migrate,
3143 .maxlen = sizeof(unsigned int),
3144 - .mode = 644,
3145 + .mode = 0644,
3146 .proc_handler = &proc_dointvec,
3147 },
3148 #endif
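The sysctl change above replaces .mode = 644 with .mode = 0644: the mode field is an octal permission mask, and decimal 644 is octal 1204, which is a completely different bit pattern from the intended rw-r--r--. A two-line standalone check:

    #include <stdio.h>

    int main(void)
    {
            printf("decimal 644 in octal: %o\n", 644);    /* 1204: not the intended bits */
            printf("literal 0644 in octal: %o\n", 0644);  /* 644:  rw-r--r-- */
            return 0;
    }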
3149 diff --git a/mm/filemap.c b/mm/filemap.c
3150 index 69430d2..76b036f 100644
3151 --- a/mm/filemap.c
3152 +++ b/mm/filemap.c
3153 @@ -1725,21 +1725,27 @@ size_t iov_iter_copy_from_user(struct page *page,
3154 }
3155 EXPORT_SYMBOL(iov_iter_copy_from_user);
3156
3157 -static void __iov_iter_advance_iov(struct iov_iter *i, size_t bytes)
3158 +void iov_iter_advance(struct iov_iter *i, size_t bytes)
3159 {
3160 + BUG_ON(i->count < bytes);
3161 +
3162 if (likely(i->nr_segs == 1)) {
3163 i->iov_offset += bytes;
3164 + i->count -= bytes;
3165 } else {
3166 const struct iovec *iov = i->iov;
3167 size_t base = i->iov_offset;
3168
3169 /*
3170 * The !iov->iov_len check ensures we skip over unlikely
3171 - * zero-length segments.
3172 + * zero-length segments (without overruning the iovec).
3173 */
3174 - while (bytes || !iov->iov_len) {
3175 - int copy = min(bytes, iov->iov_len - base);
3176 + while (bytes || unlikely(!iov->iov_len && i->count)) {
3177 + int copy;
3178
3179 + copy = min(bytes, iov->iov_len - base);
3180 + BUG_ON(!i->count || i->count < copy);
3181 + i->count -= copy;
3182 bytes -= copy;
3183 base += copy;
3184 if (iov->iov_len == base) {
3185 @@ -1751,14 +1757,6 @@ static void __iov_iter_advance_iov(struct iov_iter *i, size_t bytes)
3186 i->iov_offset = base;
3187 }
3188 }
3189 -
3190 -void iov_iter_advance(struct iov_iter *i, size_t bytes)
3191 -{
3192 - BUG_ON(i->count < bytes);
3193 -
3194 - __iov_iter_advance_iov(i, bytes);
3195 - i->count -= bytes;
3196 -}
3197 EXPORT_SYMBOL(iov_iter_advance);
3198
3199 /*
3200 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
3201 index 9c746cb..d95ce35 100644
3202 --- a/mm/hugetlb.c
3203 +++ b/mm/hugetlb.c
3204 @@ -119,6 +119,7 @@ static void free_huge_page(struct page *page)
3205 struct address_space *mapping;
3206
3207 mapping = (struct address_space *) page_private(page);
3208 + set_page_private(page, 0);
3209 BUG_ON(page_count(page));
3210 INIT_LIST_HEAD(&page->lru);
3211
3212 @@ -133,7 +134,6 @@ static void free_huge_page(struct page *page)
3213 spin_unlock(&hugetlb_lock);
3214 if (mapping)
3215 hugetlb_put_quota(mapping, 1);
3216 - set_page_private(page, 0);
3217 }
3218
3219 /*
3220 diff --git a/mm/slab.c b/mm/slab.c
3221 index ff31261..79c3be0 100644
3222 --- a/mm/slab.c
3223 +++ b/mm/slab.c
3224 @@ -2961,11 +2961,10 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
3225 struct array_cache *ac;
3226 int node;
3227
3228 - node = numa_node_id();
3229 -
3230 +retry:
3231 check_irq_off();
3232 + node = numa_node_id();
3233 ac = cpu_cache_get(cachep);
3234 -retry:
3235 batchcount = ac->batchcount;
3236 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
3237 /*
3238 diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
3239 index 2726adc..e13cf5e 100644
3240 --- a/net/bluetooth/hci_sysfs.c
3241 +++ b/net/bluetooth/hci_sysfs.c
3242 @@ -282,6 +282,7 @@ static void add_conn(struct work_struct *work)
3243 int i;
3244
3245 flush_workqueue(btdelconn);
3246 +
3247 if (device_add(&conn->dev) < 0) {
3248 BT_ERR("Failed to register connection device");
3249 return;
3250 @@ -317,7 +318,6 @@ void hci_conn_add_sysfs(struct hci_conn *conn)
3251 INIT_WORK(&conn->work, add_conn);
3252
3253 queue_work(btaddconn, &conn->work);
3254 - schedule_work(&conn->work);
3255 }
3256
3257 static int __match_tty(struct device *dev, void *data)
3258 @@ -354,7 +354,6 @@ void hci_conn_del_sysfs(struct hci_conn *conn)
3259 INIT_WORK(&conn->work, del_conn);
3260
3261 queue_work(btdelconn, &conn->work);
3262 - schedule_work(&conn->work);
3263 }
3264
3265 int hci_register_sysfs(struct hci_dev *hdev)
3266 @@ -408,6 +407,7 @@ int __init bt_sysfs_init(void)
3267 err = -ENOMEM;
3268 goto out;
3269 }
3270 +
3271 btdelconn = create_singlethread_workqueue("btdelconn");
3272 if (!btdelconn) {
3273 err = -ENOMEM;
3274 @@ -447,8 +447,12 @@ out:
3275 void bt_sysfs_cleanup(void)
3276 {
3277 destroy_workqueue(btaddconn);
3278 +
3279 destroy_workqueue(btdelconn);
3280 +
3281 class_destroy(bt_class);
3282 +
3283 bus_unregister(&bt_bus);
3284 +
3285 platform_device_unregister(bt_platform);
3286 }
3287 diff --git a/net/bridge/netfilter/ebt_dnat.c b/net/bridge/netfilter/ebt_dnat.c
3288 index 74262e9..1024511 100644
3289 --- a/net/bridge/netfilter/ebt_dnat.c
3290 +++ b/net/bridge/netfilter/ebt_dnat.c
3291 @@ -20,8 +20,8 @@ static int ebt_target_dnat(struct sk_buff *skb, unsigned int hooknr,
3292 {
3293 struct ebt_nat_info *info = (struct ebt_nat_info *)data;
3294
3295 - if (skb_make_writable(skb, 0))
3296 - return NF_DROP;
3297 + if (!skb_make_writable(skb, 0))
3298 + return EBT_DROP;
3299
3300 memcpy(eth_hdr(skb)->h_dest, info->mac, ETH_ALEN);
3301 return info->target;
3302 diff --git a/net/bridge/netfilter/ebt_redirect.c b/net/bridge/netfilter/ebt_redirect.c
3303 index 422cb83..88afc34 100644
3304 --- a/net/bridge/netfilter/ebt_redirect.c
3305 +++ b/net/bridge/netfilter/ebt_redirect.c
3306 @@ -21,8 +21,8 @@ static int ebt_target_redirect(struct sk_buff *skb, unsigned int hooknr,
3307 {
3308 struct ebt_redirect_info *info = (struct ebt_redirect_info *)data;
3309
3310 - if (skb_make_writable(skb, 0))
3311 - return NF_DROP;
3312 + if (!skb_make_writable(skb, 0))
3313 + return EBT_DROP;
3314
3315 if (hooknr != NF_BR_BROUTING)
3316 memcpy(eth_hdr(skb)->h_dest,
3317 diff --git a/net/bridge/netfilter/ebt_snat.c b/net/bridge/netfilter/ebt_snat.c
3318 index 425ac92..4c5a5a9 100644
3319 --- a/net/bridge/netfilter/ebt_snat.c
3320 +++ b/net/bridge/netfilter/ebt_snat.c
3321 @@ -22,8 +22,8 @@ static int ebt_target_snat(struct sk_buff *skb, unsigned int hooknr,
3322 {
3323 struct ebt_nat_info *info = (struct ebt_nat_info *) data;
3324
3325 - if (skb_make_writable(skb, 0))
3326 - return NF_DROP;
3327 + if (!skb_make_writable(skb, 0))
3328 + return EBT_DROP;
3329
3330 memcpy(eth_hdr(skb)->h_source, info->mac, ETH_ALEN);
3331 if (!(info->target & NAT_ARP_BIT) &&
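All three ebtables hunks above (ebt_dnat, ebt_redirect, ebt_snat), like the arpt_mangle hunk further down, fix the same inverted test: skb_make_writable() returns nonzero on success, so the old code dropped exactly the packets it was able to mangle, and an ebtables target must also answer with an EBT_* verdict rather than NF_DROP. A stand-alone sketch of the corrected idiom, with a stubbed writability check and placeholder verdict values (both are illustrative, not the kernel definitions):

    #include <stdbool.h>
    #include <stdio.h>

    /* Placeholder verdicts; the real EBT_DROP/EBT_CONTINUE values live in
     * include/linux/netfilter_bridge/ebtables.h. */
    enum verdict { VERDICT_DROP, VERDICT_CONTINUE };

    /* Stand-in for skb_make_writable(): nonzero means the data is writable. */
    static bool make_writable(void *pkt, unsigned int len)
    {
        return pkt != NULL;
    }

    static enum verdict dnat_target(void *pkt)
    {
        if (!make_writable(pkt, 0))        /* failure -> drop the frame */
            return VERDICT_DROP;
        /* ...safe to rewrite the Ethernet destination address here... */
        return VERDICT_CONTINUE;
    }

    int main(void)
    {
        char frame[64];
        printf("verdict: %d\n", dnat_target(frame));
        return 0;
    }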
3332 diff --git a/net/core/dev.c b/net/core/dev.c
3333 index 0879f52..4d44372 100644
3334 --- a/net/core/dev.c
3335 +++ b/net/core/dev.c
3336 @@ -1068,8 +1068,6 @@ int dev_close(struct net_device *dev)
3337 */
3338 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
3339
3340 - dev_deactivate(dev);
3341 -
3342 clear_bit(__LINK_STATE_START, &dev->state);
3343
3344 /* Synchronize to scheduled poll. We cannot touch poll list,
3345 @@ -1080,6 +1078,8 @@ int dev_close(struct net_device *dev)
3346 */
3347 smp_mb__after_clear_bit(); /* Commit netif_running(). */
3348
3349 + dev_deactivate(dev);
3350 +
3351 /*
3352 * Call the device specific close. This cannot fail.
3353 * Only if device is UP
3354 @@ -2906,7 +2906,7 @@ int __dev_addr_add(struct dev_addr_list **list, int *count,
3355 }
3356 }
3357
3358 - da = kmalloc(sizeof(*da), GFP_ATOMIC);
3359 + da = kzalloc(sizeof(*da), GFP_ATOMIC);
3360 if (da == NULL)
3361 return -ENOMEM;
3362 memcpy(da->da_addr, addr, alen);
3363 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
3364 index 82817e5..7794e17 100644
3365 --- a/net/ipv4/ip_sockglue.c
3366 +++ b/net/ipv4/ip_sockglue.c
3367 @@ -514,11 +514,6 @@ static int do_ip_setsockopt(struct sock *sk, int level,
3368 val &= ~3;
3369 val |= inet->tos & 3;
3370 }
3371 - if (IPTOS_PREC(val) >= IPTOS_PREC_CRITIC_ECP &&
3372 - !capable(CAP_NET_ADMIN)) {
3373 - err = -EPERM;
3374 - break;
3375 - }
3376 if (inet->tos != val) {
3377 inet->tos = val;
3378 sk->sk_priority = rt_tos2priority(val);
3379 diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
3380 index 80cab8c..b284b4e 100644
3381 --- a/net/ipv4/ipcomp.c
3382 +++ b/net/ipv4/ipcomp.c
3383 @@ -108,8 +108,11 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
3384 const int cpu = get_cpu();
3385 u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
3386 struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
3387 - int err = crypto_comp_compress(tfm, start, plen, scratch, &dlen);
3388 + int err;
3389
3390 + local_bh_disable();
3391 + err = crypto_comp_compress(tfm, start, plen, scratch, &dlen);
3392 + local_bh_enable();
3393 if (err)
3394 goto out;
3395
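The ipcomp hunk above (and the matching ipcomp6 hunk below) brackets crypto_comp_compress() with local_bh_disable()/local_bh_enable(). The per-CPU scratch buffer and compression tfm are shared with the softirq paths, and get_cpu() only disables preemption, so without the bottom-half guard a softirq arriving on the same CPU could reuse the scratch area while a process-context sender is still compressing into it.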
3396 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
3397 index b8f7763..15dc11e 100644
3398 --- a/net/ipv4/ipconfig.c
3399 +++ b/net/ipv4/ipconfig.c
3400 @@ -739,9 +739,9 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d
3401 printk("Unknown ARP type 0x%04x for device %s\n", dev->type, dev->name);
3402 b->htype = dev->type; /* can cause undefined behavior */
3403 }
3404 +
3405 + /* server_ip and your_ip address are both already zero per RFC2131 */
3406 b->hlen = dev->addr_len;
3407 - b->your_ip = NONE;
3408 - b->server_ip = NONE;
3409 memcpy(b->hw_addr, dev->dev_addr, dev->addr_len);
3410 b->secs = htons(jiffies_diff / HZ);
3411 b->xid = d->xid;
3412 diff --git a/net/ipv4/netfilter/arpt_mangle.c b/net/ipv4/netfilter/arpt_mangle.c
3413 index 45fa4e2..3f4222b 100644
3414 --- a/net/ipv4/netfilter/arpt_mangle.c
3415 +++ b/net/ipv4/netfilter/arpt_mangle.c
3416 @@ -19,7 +19,7 @@ target(struct sk_buff *skb,
3417 unsigned char *arpptr;
3418 int pln, hln;
3419
3420 - if (skb_make_writable(skb, skb->len))
3421 + if (!skb_make_writable(skb, skb->len))
3422 return NF_DROP;
3423
3424 arp = arp_hdr(skb);
3425 diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
3426 index 14d64a3..16d0fb3 100644
3427 --- a/net/ipv4/netfilter/ip_queue.c
3428 +++ b/net/ipv4/netfilter/ip_queue.c
3429 @@ -336,8 +336,8 @@ static int
3430 ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct ipq_queue_entry *e)
3431 {
3432 int diff;
3433 - int err;
3434 struct iphdr *user_iph = (struct iphdr *)v->payload;
3435 + struct sk_buff *nskb;
3436
3437 if (v->data_len < sizeof(*user_iph))
3438 return 0;
3439 @@ -349,14 +349,16 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct ipq_queue_entry *e)
3440 if (v->data_len > 0xFFFF)
3441 return -EINVAL;
3442 if (diff > skb_tailroom(e->skb)) {
3443 - err = pskb_expand_head(e->skb, 0,
3444 + nskb = skb_copy_expand(e->skb, 0,
3445 diff - skb_tailroom(e->skb),
3446 GFP_ATOMIC);
3447 - if (err) {
3448 + if (!nskb) {
3449 printk(KERN_WARNING "ip_queue: error "
3450 - "in mangle, dropping packet: %d\n", -err);
3451 - return err;
3452 + "in mangle, dropping packet\n");
3453 + return -ENOMEM;
3454 }
3455 + kfree_skb(e->skb);
3456 + e->skb = nskb;
3457 }
3458 skb_put(e->skb, diff);
3459 }
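This hunk (repeated for ip6_queue and nfnetlink_queue below) replaces pskb_expand_head(), which grows the existing skb in place and returns zero or a negative errno, with skb_copy_expand(), which allocates a fresh copy with the extra tailroom and returns NULL on failure. That is why the error path now tests for a NULL skb and returns -ENOMEM, and why the old buffer has to be released with kfree_skb() before the queue entry's pointer is switched over to the copy.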
3460 diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
3461 index 2f59baa..5b4095b 100644
3462 --- a/net/ipv6/ip6_output.c
3463 +++ b/net/ipv6/ip6_output.c
3464 @@ -593,7 +593,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
3465 * or if the skb it not generated by a local socket. (This last
3466 * check should be redundant, but it's free.)
3467 */
3468 - if (!np || np->pmtudisc >= IPV6_PMTUDISC_DO) {
3469 + if (!skb->local_df) {
3470 skb->dev = skb->dst->dev;
3471 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
3472 IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
3473 @@ -1389,6 +1389,10 @@ int ip6_push_pending_frames(struct sock *sk)
3474 tmp_skb->sk = NULL;
3475 }
3476
3477 + /* Allow local fragmentation. */
3478 + if (np->pmtudisc < IPV6_PMTUDISC_DO)
3479 + skb->local_df = 1;
3480 +
3481 ipv6_addr_copy(final_dst, &fl->fl6_dst);
3482 __skb_pull(skb, skb_network_header_len(skb));
3483 if (opt && opt->opt_flen)
3484 diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
3485 index 5383b33..81941a1 100644
3486 --- a/net/ipv6/ip6_tunnel.c
3487 +++ b/net/ipv6/ip6_tunnel.c
3488 @@ -550,6 +550,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
3489 ip_rt_put(rt);
3490 goto out;
3491 }
3492 + skb2->dst = (struct dst_entry *)rt;
3493 } else {
3494 ip_rt_put(rt);
3495 if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
3496 diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
3497 index 1c5b09f..f46c38f 100644
3498 --- a/net/ipv6/ipcomp6.c
3499 +++ b/net/ipv6/ipcomp6.c
3500 @@ -146,7 +146,9 @@ static int ipcomp6_output(struct xfrm_state *x, struct sk_buff *skb)
3501 scratch = *per_cpu_ptr(ipcomp6_scratches, cpu);
3502 tfm = *per_cpu_ptr(ipcd->tfms, cpu);
3503
3504 + local_bh_disable();
3505 err = crypto_comp_compress(tfm, start, plen, scratch, &dlen);
3506 + local_bh_enable();
3507 if (err || (dlen + sizeof(*ipch)) >= plen) {
3508 put_cpu();
3509 goto out_ok;
3510 diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
3511 index e273605..710a04f 100644
3512 --- a/net/ipv6/netfilter/ip6_queue.c
3513 +++ b/net/ipv6/netfilter/ip6_queue.c
3514 @@ -333,8 +333,8 @@ static int
3515 ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct ipq_queue_entry *e)
3516 {
3517 int diff;
3518 - int err;
3519 struct ipv6hdr *user_iph = (struct ipv6hdr *)v->payload;
3520 + struct sk_buff *nskb;
3521
3522 if (v->data_len < sizeof(*user_iph))
3523 return 0;
3524 @@ -346,14 +346,16 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct ipq_queue_entry *e)
3525 if (v->data_len > 0xFFFF)
3526 return -EINVAL;
3527 if (diff > skb_tailroom(e->skb)) {
3528 - err = pskb_expand_head(e->skb, 0,
3529 + nskb = skb_copy_expand(e->skb, 0,
3530 diff - skb_tailroom(e->skb),
3531 GFP_ATOMIC);
3532 - if (err) {
3533 + if (!nskb) {
3534 printk(KERN_WARNING "ip6_queue: OOM "
3535 "in mangle, dropping packet\n");
3536 - return err;
3537 + return -ENOMEM;
3538 }
3539 + kfree_skb(e->skb);
3540 + e->skb = nskb;
3541 }
3542 skb_put(e->skb, diff);
3543 }
3544 diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
3545 index 6569767..dc22909 100644
3546 --- a/net/ipv6/xfrm6_output.c
3547 +++ b/net/ipv6/xfrm6_output.c
3548 @@ -34,7 +34,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
3549 if (mtu < IPV6_MIN_MTU)
3550 mtu = IPV6_MIN_MTU;
3551
3552 - if (skb->len > mtu) {
3553 + if (!skb->local_df && skb->len > mtu) {
3554 skb->dev = dst->dev;
3555 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
3556 ret = -EMSGSIZE;
3557 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
3558 index 2c7bd2e..dc43df1 100644
3559 --- a/net/netfilter/nfnetlink_log.c
3560 +++ b/net/netfilter/nfnetlink_log.c
3561 @@ -594,7 +594,7 @@ nfulnl_log_packet(unsigned int pf,
3562 /* FIXME: do we want to make the size calculation conditional based on
3563 * what is actually present? way more branches and checks, but more
3564 * memory efficient... */
3565 - size = NLMSG_ALIGN(sizeof(struct nfgenmsg))
3566 + size = NLMSG_SPACE(sizeof(struct nfgenmsg))
3567 + nla_total_size(sizeof(struct nfulnl_msg_packet_hdr))
3568 + nla_total_size(sizeof(u_int32_t)) /* ifindex */
3569 + nla_total_size(sizeof(u_int32_t)) /* ifindex */
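The size calculation above (and the identical one in nfnetlink_queue below) switches from NLMSG_ALIGN() to NLMSG_SPACE(): NLMSG_ALIGN(len) only rounds len up to the netlink alignment, while NLMSG_SPACE(len) also reserves room for the struct nlmsghdr that is prepended when the message is built, so the old expression under-sized the allocation by the aligned header length. A small user-space check of the difference, using the macros exported by <linux/netlink.h> (the nfgenmsg payload is just an example):

    #include <stdio.h>
    #include <linux/netlink.h>
    #include <linux/netfilter/nfnetlink.h>

    int main(void)
    {
        size_t payload = sizeof(struct nfgenmsg);

        /* NLMSG_SPACE() = NLMSG_ALIGN() plus the aligned nlmsghdr. */
        printf("NLMSG_ALIGN(%zu) = %zu\n", payload, (size_t)NLMSG_ALIGN(payload));
        printf("NLMSG_SPACE(%zu) = %zu\n", payload, (size_t)NLMSG_SPACE(payload));
        return 0;
    }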
3570 diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
3571 index 3ceeffc..7c3646c 100644
3572 --- a/net/netfilter/nfnetlink_queue.c
3573 +++ b/net/netfilter/nfnetlink_queue.c
3574 @@ -353,7 +353,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
3575
3576 QDEBUG("entered\n");
3577
3578 - size = NLMSG_ALIGN(sizeof(struct nfgenmsg))
3579 + size = NLMSG_SPACE(sizeof(struct nfgenmsg))
3580 + nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
3581 + nla_total_size(sizeof(u_int32_t)) /* ifindex */
3582 + nla_total_size(sizeof(u_int32_t)) /* ifindex */
3583 @@ -616,8 +616,8 @@ err_out_put:
3584 static int
3585 nfqnl_mangle(void *data, int data_len, struct nfqnl_queue_entry *e)
3586 {
3587 + struct sk_buff *nskb;
3588 int diff;
3589 - int err;
3590
3591 diff = data_len - e->skb->len;
3592 if (diff < 0) {
3593 @@ -627,14 +627,16 @@ nfqnl_mangle(void *data, int data_len, struct nfqnl_queue_entry *e)
3594 if (data_len > 0xFFFF)
3595 return -EINVAL;
3596 if (diff > skb_tailroom(e->skb)) {
3597 - err = pskb_expand_head(e->skb, 0,
3598 + nskb = skb_copy_expand(e->skb, 0,
3599 diff - skb_tailroom(e->skb),
3600 GFP_ATOMIC);
3601 - if (err) {
3602 + if (!nskb) {
3603 printk(KERN_WARNING "nf_queue: OOM "
3604 "in mangle, dropping packet\n");
3605 - return err;
3606 + return -ENOMEM;
3607 }
3608 + kfree_skb(e->skb);
3609 + e->skb = nskb;
3610 }
3611 skb_put(e->skb, diff);
3612 }
3613 diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c
3614 index f9c55dc..5222a97 100644
3615 --- a/net/netfilter/xt_time.c
3616 +++ b/net/netfilter/xt_time.c
3617 @@ -95,8 +95,11 @@ static inline void localtime_2(struct xtm *r, time_t time)
3618 */
3619 r->dse = time / 86400;
3620
3621 - /* 1970-01-01 (w=0) was a Thursday (4). */
3622 - r->weekday = (4 + r->dse) % 7;
3623 + /*
3624 + * 1970-01-01 (w=0) was a Thursday (4).
3625 + * -1 and +1 map Sunday properly onto 7.
3626 + */
3627 + r->weekday = (4 + r->dse - 1) % 7 + 1;
3628 }
3629
3630 static void localtime_3(struct xtm *r, time_t time)
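The new weekday expression keeps the 1..7 numbering the rest of xt_time expects: subtracting one before the modulo and adding it back afterwards maps what used to be 0 (Sunday) onto 7 while leaving Monday through Saturday at 1..6. A quick user-space check of both formulas around the epoch (1970-01-01, day 0, was a Thursday):

    #include <stdio.h>

    int main(void)
    {
        static const char *name[] = { "Thu", "Fri", "Sat", "Sun", "Mon", "Tue", "Wed" };
        unsigned int dse;

        for (dse = 0; dse < 7; dse++) {
            unsigned int old = (4 + dse) % 7;          /* Sunday comes out as 0 */
            unsigned int new = (4 + dse - 1) % 7 + 1;  /* Sunday comes out as 7 */
            printf("%s  old=%u  new=%u\n", name[dse], old, new);
        }
        return 0;
    }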
3631 diff --git a/security/commoncap.c b/security/commoncap.c
3632 index ea61bc7..e87422e 100644
3633 --- a/security/commoncap.c
3634 +++ b/security/commoncap.c
3635 @@ -539,7 +539,7 @@ int cap_task_kill(struct task_struct *p, struct siginfo *info,
3636 * allowed.
3637 * We must preserve legacy signal behavior in this case.
3638 */
3639 - if (p->euid == 0 && p->uid == current->uid)
3640 + if (p->uid == current->uid)
3641 return 0;
3642
3643 /* sigcont is permitted within same session */