Magellan Linux

Contents of /trunk/kernel26-magellan/patches-2.6.33-r3/0101-2.6.33.2-all-fixes.patch

Parent Directory Parent Directory | Revision Log Revision Log


Revision 1055 - (show annotations) (download)
Wed Jul 7 10:02:52 2010 UTC (13 years, 9 months ago) by niro
File size: 204495 byte(s)
2.6.33-magellan-r3; updated to linux-2.6.33.6

1 diff --git a/Documentation/filesystems/tmpfs.txt b/Documentation/filesystems/tmpfs.txt
2 index 3015da0..fe09a2c 100644
3 --- a/Documentation/filesystems/tmpfs.txt
4 +++ b/Documentation/filesystems/tmpfs.txt
5 @@ -82,11 +82,13 @@ tmpfs has a mount option to set the NUMA memory allocation policy for
6 all files in that instance (if CONFIG_NUMA is enabled) - which can be
7 adjusted on the fly via 'mount -o remount ...'
8
9 -mpol=default prefers to allocate memory from the local node
10 +mpol=default use the process allocation policy
11 + (see set_mempolicy(2))
12 mpol=prefer:Node prefers to allocate memory from the given Node
13 mpol=bind:NodeList allocates memory only from nodes in NodeList
14 mpol=interleave prefers to allocate from each node in turn
15 mpol=interleave:NodeList allocates from each node of NodeList in turn
16 +mpol=local prefers to allocate memory from the local node
17
18 NodeList format is a comma-separated list of decimal numbers and ranges,
19 a range being two hyphen-separated decimal numbers, the smallest and
20 @@ -134,3 +136,5 @@ Author:
21 Christoph Rohland <cr@sap.com>, 1.12.01
22 Updated:
23 Hugh Dickins, 4 June 2007
24 +Updated:
25 + KOSAKI Motohiro, 16 Mar 2010
26 diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
27 index 4fddc50..6b84a04 100644
28 --- a/arch/arm/boot/compressed/head.S
29 +++ b/arch/arm/boot/compressed/head.S
30 @@ -170,8 +170,8 @@ not_angel:
31
32 .text
33 adr r0, LC0
34 - ARM( ldmia r0, {r1, r2, r3, r4, r5, r6, ip, sp} )
35 - THUMB( ldmia r0, {r1, r2, r3, r4, r5, r6, ip} )
36 + ARM( ldmia r0, {r1, r2, r3, r4, r5, r6, r11, ip, sp})
37 + THUMB( ldmia r0, {r1, r2, r3, r4, r5, r6, r11, ip} )
38 THUMB( ldr sp, [r0, #28] )
39 subs r0, r0, r1 @ calculate the delta offset
40
41 @@ -182,12 +182,13 @@ not_angel:
42 /*
43 * We're running at a different address. We need to fix
44 * up various pointers:
45 - * r5 - zImage base address
46 - * r6 - GOT start
47 + * r5 - zImage base address (_start)
48 + * r6 - size of decompressed image
49 + * r11 - GOT start
50 * ip - GOT end
51 */
52 add r5, r5, r0
53 - add r6, r6, r0
54 + add r11, r11, r0
55 add ip, ip, r0
56
57 #ifndef CONFIG_ZBOOT_ROM
58 @@ -205,10 +206,10 @@ not_angel:
59 /*
60 * Relocate all entries in the GOT table.
61 */
62 -1: ldr r1, [r6, #0] @ relocate entries in the GOT
63 +1: ldr r1, [r11, #0] @ relocate entries in the GOT
64 add r1, r1, r0 @ table. This fixes up the
65 - str r1, [r6], #4 @ C references.
66 - cmp r6, ip
67 + str r1, [r11], #4 @ C references.
68 + cmp r11, ip
69 blo 1b
70 #else
71
72 @@ -216,12 +217,12 @@ not_angel:
73 * Relocate entries in the GOT table. We only relocate
74 * the entries that are outside the (relocated) BSS region.
75 */
76 -1: ldr r1, [r6, #0] @ relocate entries in the GOT
77 +1: ldr r1, [r11, #0] @ relocate entries in the GOT
78 cmp r1, r2 @ entry < bss_start ||
79 cmphs r3, r1 @ _end < entry
80 addlo r1, r1, r0 @ table. This fixes up the
81 - str r1, [r6], #4 @ C references.
82 - cmp r6, ip
83 + str r1, [r11], #4 @ C references.
84 + cmp r11, ip
85 blo 1b
86 #endif
87
88 @@ -247,6 +248,7 @@ not_relocated: mov r0, #0
89 * Check to see if we will overwrite ourselves.
90 * r4 = final kernel address
91 * r5 = start of this image
92 + * r6 = size of decompressed image
93 * r2 = end of malloc space (and therefore this image)
94 * We basically want:
95 * r4 >= r2 -> OK
96 @@ -254,8 +256,7 @@ not_relocated: mov r0, #0
97 */
98 cmp r4, r2
99 bhs wont_overwrite
100 - sub r3, sp, r5 @ > compressed kernel size
101 - add r0, r4, r3, lsl #2 @ allow for 4x expansion
102 + add r0, r4, r6
103 cmp r0, r5
104 bls wont_overwrite
105
106 @@ -271,7 +272,6 @@ not_relocated: mov r0, #0
107 * r1-r3 = unused
108 * r4 = kernel execution address
109 * r5 = decompressed kernel start
110 - * r6 = processor ID
111 * r7 = architecture ID
112 * r8 = atags pointer
113 * r9-r12,r14 = corrupted
114 @@ -312,7 +312,8 @@ LC0: .word LC0 @ r1
115 .word _end @ r3
116 .word zreladdr @ r4
117 .word _start @ r5
118 - .word _got_start @ r6
119 + .word _image_size @ r6
120 + .word _got_start @ r11
121 .word _got_end @ ip
122 .word user_stack+4096 @ sp
123 LC1: .word reloc_end - reloc_start
124 @@ -336,7 +337,6 @@ params: ldr r0, =params_phys
125 *
126 * On entry,
127 * r4 = kernel execution address
128 - * r6 = processor ID
129 * r7 = architecture number
130 * r8 = atags pointer
131 * r9 = run-time address of "start" (???)
132 @@ -542,7 +542,6 @@ __common_mmu_cache_on:
133 * r1-r3 = unused
134 * r4 = kernel execution address
135 * r5 = decompressed kernel start
136 - * r6 = processor ID
137 * r7 = architecture ID
138 * r8 = atags pointer
139 * r9-r12,r14 = corrupted
140 @@ -581,19 +580,19 @@ call_kernel: bl cache_clean_flush
141 * r1 = corrupted
142 * r2 = corrupted
143 * r3 = block offset
144 - * r6 = corrupted
145 + * r9 = corrupted
146 * r12 = corrupted
147 */
148
149 call_cache_fn: adr r12, proc_types
150 #ifdef CONFIG_CPU_CP15
151 - mrc p15, 0, r6, c0, c0 @ get processor ID
152 + mrc p15, 0, r9, c0, c0 @ get processor ID
153 #else
154 - ldr r6, =CONFIG_PROCESSOR_ID
155 + ldr r9, =CONFIG_PROCESSOR_ID
156 #endif
157 1: ldr r1, [r12, #0] @ get value
158 ldr r2, [r12, #4] @ get mask
159 - eor r1, r1, r6 @ (real ^ match)
160 + eor r1, r1, r9 @ (real ^ match)
161 tst r1, r2 @ & mask
162 ARM( addeq pc, r12, r3 ) @ call cache function
163 THUMB( addeq r12, r3 )
164 @@ -778,8 +777,7 @@ proc_types:
165 * Turn off the Cache and MMU. ARMv3 does not support
166 * reading the control register, but ARMv4 does.
167 *
168 - * On entry, r6 = processor ID
169 - * On exit, r0, r1, r2, r3, r12 corrupted
170 + * On exit, r0, r1, r2, r3, r9, r12 corrupted
171 * This routine must preserve: r4, r6, r7
172 */
173 .align 5
174 @@ -852,10 +850,8 @@ __armv3_mmu_cache_off:
175 /*
176 * Clean and flush the cache to maintain consistency.
177 *
178 - * On entry,
179 - * r6 = processor ID
180 * On exit,
181 - * r1, r2, r3, r11, r12 corrupted
182 + * r1, r2, r3, r9, r11, r12 corrupted
183 * This routine must preserve:
184 * r0, r4, r5, r6, r7
185 */
186 @@ -967,7 +963,7 @@ __armv4_mmu_cache_flush:
187 mov r2, #64*1024 @ default: 32K dcache size (*2)
188 mov r11, #32 @ default: 32 byte line size
189 mrc p15, 0, r3, c0, c0, 1 @ read cache type
190 - teq r3, r6 @ cache ID register present?
191 + teq r3, r9 @ cache ID register present?
192 beq no_cache_id
193 mov r1, r3, lsr #18
194 and r1, r1, #7
195 diff --git a/arch/arm/boot/compressed/vmlinux.lds.in b/arch/arm/boot/compressed/vmlinux.lds.in
196 index a5924b9..cbed030 100644
197 --- a/arch/arm/boot/compressed/vmlinux.lds.in
198 +++ b/arch/arm/boot/compressed/vmlinux.lds.in
199 @@ -36,6 +36,9 @@ SECTIONS
200
201 _etext = .;
202
203 + /* Assume size of decompressed image is 4x the compressed image */
204 + _image_size = (_etext - _text) * 4;
205 +
206 _got_start = .;
207 .got : { *(.got) }
208 _got_end = .;
209 diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
210 index 1eb85fb..a3c0a32 100644
211 --- a/arch/powerpc/kernel/perf_event.c
212 +++ b/arch/powerpc/kernel/perf_event.c
213 @@ -1164,10 +1164,10 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
214 * Finally record data if requested.
215 */
216 if (record) {
217 - struct perf_sample_data data = {
218 - .addr = ~0ULL,
219 - .period = event->hw.last_period,
220 - };
221 + struct perf_sample_data data;
222 +
223 + perf_sample_data_init(&data, ~0ULL);
224 + data.period = event->hw.last_period;
225
226 if (event->attr.sample_type & PERF_SAMPLE_ADDR)
227 perf_get_data_addr(regs, &data.addr);
228 diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c
229 index b51b1fc..d3cc94f 100644
230 --- a/arch/sh/boot/compressed/misc.c
231 +++ b/arch/sh/boot/compressed/misc.c
232 @@ -132,7 +132,7 @@ void decompress_kernel(void)
233 output_addr = (CONFIG_MEMORY_START + 0x2000);
234 #else
235 output_addr = __pa((unsigned long)&_text+PAGE_SIZE);
236 -#ifdef CONFIG_29BIT
237 +#if defined(CONFIG_29BIT) || defined(CONFIG_PMB_LEGACY)
238 output_addr |= P2SEG;
239 #endif
240 #endif
241 diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
242 index e856456..8c70d3e 100644
243 --- a/arch/sparc/kernel/perf_event.c
244 +++ b/arch/sparc/kernel/perf_event.c
245 @@ -1189,7 +1189,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
246
247 regs = args->regs;
248
249 - data.addr = 0;
250 + perf_sample_data_init(&data, 0);
251
252 cpuc = &__get_cpu_var(cpu_hw_events);
253
254 @@ -1337,7 +1337,7 @@ static void perf_callchain_user_32(struct pt_regs *regs,
255 callchain_store(entry, PERF_CONTEXT_USER);
256 callchain_store(entry, regs->tpc);
257
258 - ufp = regs->u_regs[UREG_I6];
259 + ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
260 do {
261 struct sparc_stackf32 *usf, sf;
262 unsigned long pc;
263 diff --git a/arch/sparc/prom/p1275.c b/arch/sparc/prom/p1275.c
264 index 4b7c937..2d8b70d 100644
265 --- a/arch/sparc/prom/p1275.c
266 +++ b/arch/sparc/prom/p1275.c
267 @@ -32,10 +32,9 @@ extern void prom_cif_interface(void);
268 extern void prom_cif_callback(void);
269
270 /*
271 - * This provides SMP safety on the p1275buf. prom_callback() drops this lock
272 - * to allow recursuve acquisition.
273 + * This provides SMP safety on the p1275buf.
274 */
275 -DEFINE_SPINLOCK(prom_entry_lock);
276 +DEFINE_RAW_SPINLOCK(prom_entry_lock);
277
278 long p1275_cmd(const char *service, long fmt, ...)
279 {
280 @@ -47,7 +46,9 @@ long p1275_cmd(const char *service, long fmt, ...)
281
282 p = p1275buf.prom_buffer;
283
284 - spin_lock_irqsave(&prom_entry_lock, flags);
285 + raw_local_save_flags(flags);
286 + raw_local_irq_restore(PIL_NMI);
287 + raw_spin_lock(&prom_entry_lock);
288
289 p1275buf.prom_args[0] = (unsigned long)p; /* service */
290 strcpy (p, service);
291 @@ -139,7 +140,8 @@ long p1275_cmd(const char *service, long fmt, ...)
292 va_end(list);
293 x = p1275buf.prom_args [nargs + 3];
294
295 - spin_unlock_irqrestore(&prom_entry_lock, flags);
296 + raw_spin_unlock(&prom_entry_lock);
297 + raw_local_irq_restore(flags);
298
299 return x;
300 }
301 diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
302 index 14f9890..c22a164 100644
303 --- a/arch/x86/include/asm/fixmap.h
304 +++ b/arch/x86/include/asm/fixmap.h
305 @@ -82,6 +82,9 @@ enum fixed_addresses {
306 #endif
307 FIX_DBGP_BASE,
308 FIX_EARLYCON_MEM_BASE,
309 +#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
310 + FIX_OHCI1394_BASE,
311 +#endif
312 #ifdef CONFIG_X86_LOCAL_APIC
313 FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
314 #endif
315 @@ -126,9 +129,6 @@ enum fixed_addresses {
316 FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 -
317 (__end_of_permanent_fixed_addresses & 255),
318 FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
319 -#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
320 - FIX_OHCI1394_BASE,
321 -#endif
322 #ifdef CONFIG_X86_32
323 FIX_WP_TEST,
324 #endif
325 diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
326 index 1cd58cd..4604e6a 100644
327 --- a/arch/x86/include/asm/msr-index.h
328 +++ b/arch/x86/include/asm/msr-index.h
329 @@ -105,6 +105,8 @@
330 #define MSR_AMD64_PATCH_LEVEL 0x0000008b
331 #define MSR_AMD64_NB_CFG 0xc001001f
332 #define MSR_AMD64_PATCH_LOADER 0xc0010020
333 +#define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
334 +#define MSR_AMD64_OSVW_STATUS 0xc0010141
335 #define MSR_AMD64_IBSFETCHCTL 0xc0011030
336 #define MSR_AMD64_IBSFETCHLINAD 0xc0011031
337 #define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032
338 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
339 index 879666f..7e1cca1 100644
340 --- a/arch/x86/kernel/cpu/intel.c
341 +++ b/arch/x86/kernel/cpu/intel.c
342 @@ -70,7 +70,8 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
343 if (c->x86_power & (1 << 8)) {
344 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
345 set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
346 - sched_clock_stable = 1;
347 + if (!check_tsc_unstable())
348 + sched_clock_stable = 1;
349 }
350
351 /*
352 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
353 index 8c1c070..98819b3 100644
354 --- a/arch/x86/kernel/cpu/perf_event.c
355 +++ b/arch/x86/kernel/cpu/perf_event.c
356 @@ -1636,10 +1636,9 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
357
358 ds->bts_index = ds->bts_buffer_base;
359
360 + perf_sample_data_init(&data, 0);
361
362 data.period = event->hw.last_period;
363 - data.addr = 0;
364 - data.raw = NULL;
365 regs.ip = 0;
366
367 /*
368 @@ -1756,8 +1755,7 @@ static int p6_pmu_handle_irq(struct pt_regs *regs)
369 int idx, handled = 0;
370 u64 val;
371
372 - data.addr = 0;
373 - data.raw = NULL;
374 + perf_sample_data_init(&data, 0);
375
376 cpuc = &__get_cpu_var(cpu_hw_events);
377
378 @@ -1802,8 +1800,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
379 int bit, loops;
380 u64 ack, status;
381
382 - data.addr = 0;
383 - data.raw = NULL;
384 + perf_sample_data_init(&data, 0);
385
386 cpuc = &__get_cpu_var(cpu_hw_events);
387
388 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
389 index 0ad9597..a6c906c 100644
390 --- a/arch/x86/kernel/dumpstack_64.c
391 +++ b/arch/x86/kernel/dumpstack_64.c
392 @@ -125,9 +125,15 @@ fixup_bp_irq_link(unsigned long bp, unsigned long *stack,
393 {
394 #ifdef CONFIG_FRAME_POINTER
395 struct stack_frame *frame = (struct stack_frame *)bp;
396 + unsigned long next;
397
398 - if (!in_irq_stack(stack, irq_stack, irq_stack_end))
399 - return (unsigned long)frame->next_frame;
400 + if (!in_irq_stack(stack, irq_stack, irq_stack_end)) {
401 + if (!probe_kernel_address(&frame->next_frame, next))
402 + return next;
403 + else
404 + WARN_ONCE(1, "Perf: bad frame pointer = %p in "
405 + "callchain\n", &frame->next_frame);
406 + }
407 #endif
408 return bp;
409 }
410 diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
411 index bb6006e..1e8cead 100644
412 --- a/arch/x86/kernel/hw_breakpoint.c
413 +++ b/arch/x86/kernel/hw_breakpoint.c
414 @@ -531,8 +531,3 @@ void hw_breakpoint_pmu_read(struct perf_event *bp)
415 {
416 /* TODO */
417 }
418 -
419 -void hw_breakpoint_pmu_unthrottle(struct perf_event *bp)
420 -{
421 - /* TODO */
422 -}
423 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
424 index c9b3522..999c8a6 100644
425 --- a/arch/x86/kernel/process.c
426 +++ b/arch/x86/kernel/process.c
427 @@ -519,21 +519,37 @@ static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
428 }
429
430 /*
431 - * Check for AMD CPUs, which have potentially C1E support
432 + * Check for AMD CPUs, where APIC timer interrupt does not wake up CPU from C1e.
433 + * For more information see
434 + * - Erratum #400 for NPT family 0xf and family 0x10 CPUs
435 + * - Erratum #365 for family 0x11 (not affected because C1e not in use)
436 */
437 static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
438 {
439 + u64 val;
440 if (c->x86_vendor != X86_VENDOR_AMD)
441 - return 0;
442 -
443 - if (c->x86 < 0x0F)
444 - return 0;
445 + goto no_c1e_idle;
446
447 /* Family 0x0f models < rev F do not have C1E */
448 - if (c->x86 == 0x0f && c->x86_model < 0x40)
449 - return 0;
450 + if (c->x86 == 0x0F && c->x86_model >= 0x40)
451 + return 1;
452
453 - return 1;
454 + if (c->x86 == 0x10) {
455 + /*
456 + * check OSVW bit for CPUs that are not affected
457 + * by erratum #400
458 + */
459 + rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, val);
460 + if (val >= 2) {
461 + rdmsrl(MSR_AMD64_OSVW_STATUS, val);
462 + if (!(val & BIT(1)))
463 + goto no_c1e_idle;
464 + }
465 + return 1;
466 + }
467 +
468 +no_c1e_idle:
469 + return 0;
470 }
471
472 static cpumask_var_t c1e_mask;
473 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
474 index a1e1bc9..e900908 100644
475 --- a/arch/x86/kvm/x86.c
476 +++ b/arch/x86/kvm/x86.c
477 @@ -1351,6 +1351,7 @@ int kvm_dev_ioctl_check_extension(long ext)
478 case KVM_CAP_XEN_HVM:
479 case KVM_CAP_ADJUST_CLOCK:
480 case KVM_CAP_VCPU_EVENTS:
481 + case KVM_CAP_X86_ROBUST_SINGLESTEP:
482 r = 1;
483 break;
484 case KVM_CAP_COALESCED_MMIO:
485 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
486 index 1d4eb93..cf07c26 100644
487 --- a/arch/x86/mm/pageattr.c
488 +++ b/arch/x86/mm/pageattr.c
489 @@ -291,8 +291,29 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
490 */
491 if (kernel_set_to_readonly &&
492 within(address, (unsigned long)_text,
493 - (unsigned long)__end_rodata_hpage_align))
494 - pgprot_val(forbidden) |= _PAGE_RW;
495 + (unsigned long)__end_rodata_hpage_align)) {
496 + unsigned int level;
497 +
498 + /*
499 + * Don't enforce the !RW mapping for the kernel text mapping,
500 + * if the current mapping is already using small page mapping.
501 + * No need to work hard to preserve large page mappings in this
502 + * case.
503 + *
504 + * This also fixes the Linux Xen paravirt guest boot failure
505 + * (because of unexpected read-only mappings for kernel identity
506 + * mappings). In this paravirt guest case, the kernel text
507 + * mapping and the kernel identity mapping share the same
508 + * page-table pages. Thus we can't really use different
509 + * protections for the kernel text and identity mappings. Also,
510 + * these shared mappings are made of small page mappings.
511 + * Thus this don't enforce !RW mapping for small page kernel
512 + * text mapping logic will help Linux Xen parvirt guest boot
513 + * aswell.
514 + */
515 + if (lookup_address(address, &level) && (level != PG_LEVEL_4K))
516 + pgprot_val(forbidden) |= _PAGE_RW;
517 + }
518 #endif
519
520 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
521 diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
522 index a6a736a..9e2feb6 100644
523 --- a/drivers/ata/ahci.c
524 +++ b/drivers/ata/ahci.c
525 @@ -2831,6 +2831,14 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
526 * On HP dv[4-6] and HDX18 with earlier BIOSen, link
527 * to the harddisk doesn't become online after
528 * resuming from STR. Warn and fail suspend.
529 + *
530 + * http://bugzilla.kernel.org/show_bug.cgi?id=12276
531 + *
532 + * Use dates instead of versions to match as HP is
533 + * apparently recycling both product and version
534 + * strings.
535 + *
536 + * http://bugzilla.kernel.org/show_bug.cgi?id=15462
537 */
538 {
539 .ident = "dv4",
540 @@ -2839,7 +2847,7 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
541 DMI_MATCH(DMI_PRODUCT_NAME,
542 "HP Pavilion dv4 Notebook PC"),
543 },
544 - .driver_data = "F.30", /* cutoff BIOS version */
545 + .driver_data = "20090105", /* F.30 */
546 },
547 {
548 .ident = "dv5",
549 @@ -2848,7 +2856,7 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
550 DMI_MATCH(DMI_PRODUCT_NAME,
551 "HP Pavilion dv5 Notebook PC"),
552 },
553 - .driver_data = "F.16", /* cutoff BIOS version */
554 + .driver_data = "20090506", /* F.16 */
555 },
556 {
557 .ident = "dv6",
558 @@ -2857,7 +2865,7 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
559 DMI_MATCH(DMI_PRODUCT_NAME,
560 "HP Pavilion dv6 Notebook PC"),
561 },
562 - .driver_data = "F.21", /* cutoff BIOS version */
563 + .driver_data = "20090423", /* F.21 */
564 },
565 {
566 .ident = "HDX18",
567 @@ -2866,7 +2874,7 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
568 DMI_MATCH(DMI_PRODUCT_NAME,
569 "HP HDX18 Notebook PC"),
570 },
571 - .driver_data = "F.23", /* cutoff BIOS version */
572 + .driver_data = "20090430", /* F.23 */
573 },
574 /*
575 * Acer eMachines G725 has the same problem. BIOS
576 @@ -2874,6 +2882,8 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
577 * work. Inbetween, there are V1.06, V2.06 and V3.03
578 * that we don't have much idea about. For now,
579 * blacklist anything older than V3.04.
580 + *
581 + * http://bugzilla.kernel.org/show_bug.cgi?id=15104
582 */
583 {
584 .ident = "G725",
585 @@ -2881,19 +2891,21 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
586 DMI_MATCH(DMI_SYS_VENDOR, "eMachines"),
587 DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"),
588 },
589 - .driver_data = "V3.04", /* cutoff BIOS version */
590 + .driver_data = "20091216", /* V3.04 */
591 },
592 { } /* terminate list */
593 };
594 const struct dmi_system_id *dmi = dmi_first_match(sysids);
595 - const char *ver;
596 + int year, month, date;
597 + char buf[9];
598
599 if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2))
600 return false;
601
602 - ver = dmi_get_system_info(DMI_BIOS_VERSION);
603 + dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
604 + snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
605
606 - return !ver || strcmp(ver, dmi->driver_data) < 0;
607 + return strcmp(buf, dmi->driver_data) < 0;
608 }
609
610 static bool ahci_broken_online(struct pci_dev *pdev)
611 diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
612 index 0d97890..be7c395 100644
613 --- a/drivers/ata/pata_via.c
614 +++ b/drivers/ata/pata_via.c
615 @@ -588,6 +588,10 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
616 u8 rev = isa->revision;
617 pci_dev_put(isa);
618
619 + if ((id->device == 0x0415 || id->device == 0x3164) &&
620 + (config->id != id->device))
621 + continue;
622 +
623 if (rev >= config->rev_min && rev <= config->rev_max)
624 break;
625 }
626 diff --git a/drivers/char/tty_buffer.c b/drivers/char/tty_buffer.c
627 index 66fa4e1..f27c4d6 100644
628 --- a/drivers/char/tty_buffer.c
629 +++ b/drivers/char/tty_buffer.c
630 @@ -247,7 +247,8 @@ int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars,
631 {
632 int copied = 0;
633 do {
634 - int space = tty_buffer_request_room(tty, size - copied);
635 + int goal = min(size - copied, TTY_BUFFER_PAGE);
636 + int space = tty_buffer_request_room(tty, goal);
637 struct tty_buffer *tb = tty->buf.tail;
638 /* If there is no space then tb may be NULL */
639 if (unlikely(space == 0))
640 @@ -283,7 +284,8 @@ int tty_insert_flip_string_flags(struct tty_struct *tty,
641 {
642 int copied = 0;
643 do {
644 - int space = tty_buffer_request_room(tty, size - copied);
645 + int goal = min(size - copied, TTY_BUFFER_PAGE);
646 + int space = tty_buffer_request_room(tty, goal);
647 struct tty_buffer *tb = tty->buf.tail;
648 /* If there is no space then tb may be NULL */
649 if (unlikely(space == 0))
650 diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c
651 index 8fc91a0..f5b6d9f 100644
652 --- a/drivers/edac/edac_mce_amd.c
653 +++ b/drivers/edac/edac_mce_amd.c
654 @@ -316,7 +316,12 @@ void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors)
655 if (regs->nbsh & K8_NBSH_ERR_CPU_VAL)
656 pr_cont(", core: %u\n", (u8)(regs->nbsh & 0xf));
657 } else {
658 - pr_cont(", core: %d\n", fls((regs->nbsh & 0xf) - 1));
659 + u8 assoc_cpus = regs->nbsh & 0xf;
660 +
661 + if (assoc_cpus > 0)
662 + pr_cont(", core: %d", fls(assoc_cpus) - 1);
663 +
664 + pr_cont("\n");
665 }
666
667 pr_emerg("%s.\n", EXT_ERR_MSG(xec));
668 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
669 index ec8a0d7..fd099a1 100644
670 --- a/drivers/gpu/drm/i915/i915_gem.c
671 +++ b/drivers/gpu/drm/i915/i915_gem.c
672 @@ -1470,9 +1470,6 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
673 obj_priv->dirty = 0;
674
675 for (i = 0; i < page_count; i++) {
676 - if (obj_priv->pages[i] == NULL)
677 - break;
678 -
679 if (obj_priv->dirty)
680 set_page_dirty(obj_priv->pages[i]);
681
682 @@ -2228,7 +2225,6 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
683 struct address_space *mapping;
684 struct inode *inode;
685 struct page *page;
686 - int ret;
687
688 if (obj_priv->pages_refcount++ != 0)
689 return 0;
690 @@ -2251,11 +2247,9 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
691 mapping_gfp_mask (mapping) |
692 __GFP_COLD |
693 gfpmask);
694 - if (IS_ERR(page)) {
695 - ret = PTR_ERR(page);
696 - i915_gem_object_put_pages(obj);
697 - return ret;
698 - }
699 + if (IS_ERR(page))
700 + goto err_pages;
701 +
702 obj_priv->pages[i] = page;
703 }
704
705 @@ -2263,6 +2257,15 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
706 i915_gem_object_do_bit_17_swizzle(obj);
707
708 return 0;
709 +
710 +err_pages:
711 + while (i--)
712 + page_cache_release(obj_priv->pages[i]);
713 +
714 + drm_free_large(obj_priv->pages);
715 + obj_priv->pages = NULL;
716 + obj_priv->pages_refcount--;
717 + return PTR_ERR(page);
718 }
719
720 static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
721 diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
722 index 2639591..63f569b 100644
723 --- a/drivers/gpu/drm/i915/intel_overlay.c
724 +++ b/drivers/gpu/drm/i915/intel_overlay.c
725 @@ -1083,14 +1083,18 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
726
727 drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id,
728 DRM_MODE_OBJECT_CRTC);
729 - if (!drmmode_obj)
730 - return -ENOENT;
731 + if (!drmmode_obj) {
732 + ret = -ENOENT;
733 + goto out_free;
734 + }
735 crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
736
737 new_bo = drm_gem_object_lookup(dev, file_priv,
738 put_image_rec->bo_handle);
739 - if (!new_bo)
740 - return -ENOENT;
741 + if (!new_bo) {
742 + ret = -ENOENT;
743 + goto out_free;
744 + }
745
746 mutex_lock(&dev->mode_config.mutex);
747 mutex_lock(&dev->struct_mutex);
748 @@ -1180,6 +1184,7 @@ out_unlock:
749 mutex_unlock(&dev->struct_mutex);
750 mutex_unlock(&dev->mode_config.mutex);
751 drm_gem_object_unreference(new_bo);
752 +out_free:
753 kfree(params);
754
755 return ret;
756 diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
757 index d2f6335..a378bc3 100644
758 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c
759 +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
760 @@ -239,12 +239,14 @@ nouveau_connector_detect(struct drm_connector *connector)
761 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
762 nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
763 if (nv_encoder && nv_connector->native_mode) {
764 + unsigned status = connector_status_connected;
765 +
766 #ifdef CONFIG_ACPI
767 if (!nouveau_ignorelid && !acpi_lid_open())
768 - return connector_status_disconnected;
769 + status = connector_status_unknown;
770 #endif
771 nouveau_connector_set_encoder(connector, nv_encoder);
772 - return connector_status_connected;
773 + return status;
774 }
775
776 /* Cleanup the previous EDID block. */
777 diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
778 index 2d7bcee..cb4290a 100644
779 --- a/drivers/hwmon/coretemp.c
780 +++ b/drivers/hwmon/coretemp.c
781 @@ -228,7 +228,7 @@ static int __devinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *
782 if (err) {
783 dev_warn(dev,
784 "Unable to access MSR 0xEE, for Tjmax, left"
785 - " at default");
786 + " at default\n");
787 } else if (eax & 0x40000000) {
788 tjmax = tjmax_ee;
789 }
790 diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
791 index df6ab55..5574be2 100644
792 --- a/drivers/i2c/busses/i2c-i801.c
793 +++ b/drivers/i2c/busses/i2c-i801.c
794 @@ -415,9 +415,11 @@ static int i801_block_transaction(union i2c_smbus_data *data, char read_write,
795 data->block[0] = 32; /* max for SMBus block reads */
796 }
797
798 + /* Experience has shown that the block buffer can only be used for
799 + SMBus (not I2C) block transactions, even though the datasheet
800 + doesn't mention this limitation. */
801 if ((i801_features & FEATURE_BLOCK_BUFFER)
802 - && !(command == I2C_SMBUS_I2C_BLOCK_DATA
803 - && read_write == I2C_SMBUS_READ)
804 + && command != I2C_SMBUS_I2C_BLOCK_DATA
805 && i801_set_block_buffer_mode() == 0)
806 result = i801_block_transaction_by_block(data, read_write,
807 hwpec);
808 diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
809 index 1c440a7..b289ec9 100644
810 --- a/drivers/i2c/busses/i2c-powermac.c
811 +++ b/drivers/i2c/busses/i2c-powermac.c
812 @@ -122,9 +122,14 @@ static s32 i2c_powermac_smbus_xfer( struct i2c_adapter* adap,
813
814 rc = pmac_i2c_xfer(bus, addrdir, subsize, subaddr, buf, len);
815 if (rc) {
816 - dev_err(&adap->dev,
817 - "I2C transfer at 0x%02x failed, size %d, err %d\n",
818 - addrdir >> 1, size, rc);
819 + if (rc == -ENXIO)
820 + dev_dbg(&adap->dev,
821 + "I2C transfer at 0x%02x failed, size %d, "
822 + "err %d\n", addrdir >> 1, size, rc);
823 + else
824 + dev_err(&adap->dev,
825 + "I2C transfer at 0x%02x failed, size %d, "
826 + "err %d\n", addrdir >> 1, size, rc);
827 goto bail;
828 }
829
830 @@ -175,10 +180,16 @@ static int i2c_powermac_master_xfer( struct i2c_adapter *adap,
831 goto bail;
832 }
833 rc = pmac_i2c_xfer(bus, addrdir, 0, 0, msgs->buf, msgs->len);
834 - if (rc < 0)
835 - dev_err(&adap->dev, "I2C %s 0x%02x failed, err %d\n",
836 - addrdir & 1 ? "read from" : "write to", addrdir >> 1,
837 - rc);
838 + if (rc < 0) {
839 + if (rc == -ENXIO)
840 + dev_dbg(&adap->dev, "I2C %s 0x%02x failed, err %d\n",
841 + addrdir & 1 ? "read from" : "write to",
842 + addrdir >> 1, rc);
843 + else
844 + dev_err(&adap->dev, "I2C %s 0x%02x failed, err %d\n",
845 + addrdir & 1 ? "read from" : "write to",
846 + addrdir >> 1, rc);
847 + }
848 bail:
849 pmac_i2c_close(bus);
850 return rc < 0 ? rc : 1;
851 diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c
852 index 0f67f1a..d7e6f09 100644
853 --- a/drivers/ide/icside.c
854 +++ b/drivers/ide/icside.c
855 @@ -65,6 +65,8 @@ static struct cardinfo icside_cardinfo_v6_2 = {
856 };
857
858 struct icside_state {
859 + unsigned int channel;
860 + unsigned int enabled;
861 void __iomem *irq_port;
862 void __iomem *ioc_base;
863 unsigned int sel;
864 @@ -114,11 +116,18 @@ static void icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
865 struct icside_state *state = ec->irq_data;
866 void __iomem *base = state->irq_port;
867
868 - writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1);
869 - readb(base + ICS_ARCIN_V6_INTROFFSET_2);
870 + state->enabled = 1;
871
872 - writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2);
873 - readb(base + ICS_ARCIN_V6_INTROFFSET_1);
874 + switch (state->channel) {
875 + case 0:
876 + writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1);
877 + readb(base + ICS_ARCIN_V6_INTROFFSET_2);
878 + break;
879 + case 1:
880 + writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2);
881 + readb(base + ICS_ARCIN_V6_INTROFFSET_1);
882 + break;
883 + }
884 }
885
886 /* Prototype: icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
887 @@ -128,6 +137,8 @@ static void icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
888 {
889 struct icside_state *state = ec->irq_data;
890
891 + state->enabled = 0;
892 +
893 readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
894 readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
895 }
896 @@ -149,6 +160,44 @@ static const expansioncard_ops_t icside_ops_arcin_v6 = {
897 .irqpending = icside_irqpending_arcin_v6,
898 };
899
900 +/*
901 + * Handle routing of interrupts. This is called before
902 + * we write the command to the drive.
903 + */
904 +static void icside_maskproc(ide_drive_t *drive, int mask)
905 +{
906 + ide_hwif_t *hwif = drive->hwif;
907 + struct expansion_card *ec = ECARD_DEV(hwif->dev);
908 + struct icside_state *state = ecard_get_drvdata(ec);
909 + unsigned long flags;
910 +
911 + local_irq_save(flags);
912 +
913 + state->channel = hwif->channel;
914 +
915 + if (state->enabled && !mask) {
916 + switch (hwif->channel) {
917 + case 0:
918 + writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
919 + readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
920 + break;
921 + case 1:
922 + writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
923 + readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
924 + break;
925 + }
926 + } else {
927 + readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
928 + readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
929 + }
930 +
931 + local_irq_restore(flags);
932 +}
933 +
934 +static const struct ide_port_ops icside_v6_no_dma_port_ops = {
935 + .maskproc = icside_maskproc,
936 +};
937 +
938 #ifdef CONFIG_BLK_DEV_IDEDMA_ICS
939 /*
940 * SG-DMA support.
941 @@ -228,6 +277,7 @@ static void icside_set_dma_mode(ide_drive_t *drive, const u8 xfer_mode)
942
943 static const struct ide_port_ops icside_v6_port_ops = {
944 .set_dma_mode = icside_set_dma_mode,
945 + .maskproc = icside_maskproc,
946 };
947
948 static void icside_dma_host_set(ide_drive_t *drive, int on)
949 @@ -272,6 +322,11 @@ static int icside_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
950 BUG_ON(dma_channel_active(ec->dma));
951
952 /*
953 + * Ensure that we have the right interrupt routed.
954 + */
955 + icside_maskproc(drive, 0);
956 +
957 + /*
958 * Route the DMA signals to the correct interface.
959 */
960 writeb(state->sel | hwif->channel, state->ioc_base);
961 @@ -399,6 +454,7 @@ err_free:
962
963 static const struct ide_port_info icside_v6_port_info __initdata = {
964 .init_dma = icside_dma_off_init,
965 + .port_ops = &icside_v6_no_dma_port_ops,
966 .dma_ops = &icside_v6_dma_ops,
967 .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO,
968 .mwdma_mask = ATA_MWDMA2,
969 diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
970 index 4d76ba4..0c11237 100644
971 --- a/drivers/ide/ide-probe.c
972 +++ b/drivers/ide/ide-probe.c
973 @@ -695,14 +695,8 @@ static int ide_probe_port(ide_hwif_t *hwif)
974 if (irqd)
975 disable_irq(hwif->irq);
976
977 - rc = ide_port_wait_ready(hwif);
978 - if (rc == -ENODEV) {
979 - printk(KERN_INFO "%s: no devices on the port\n", hwif->name);
980 - goto out;
981 - } else if (rc == -EBUSY)
982 - printk(KERN_ERR "%s: not ready before the probe\n", hwif->name);
983 - else
984 - rc = -ENODEV;
985 + if (ide_port_wait_ready(hwif) == -EBUSY)
986 + printk(KERN_DEBUG "%s: Wait for ready failed before probe !\n", hwif->name);
987
988 /*
989 * Second drive should only exist if first drive was found,
990 @@ -713,7 +707,7 @@ static int ide_probe_port(ide_hwif_t *hwif)
991 if (drive->dev_flags & IDE_DFLAG_PRESENT)
992 rc = 0;
993 }
994 -out:
995 +
996 /*
997 * Use cached IRQ number. It might be (and is...) changed by probe
998 * code above
999 diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
1000 index 35161dd..e3bca38 100644
1001 --- a/drivers/ide/pdc202xx_old.c
1002 +++ b/drivers/ide/pdc202xx_old.c
1003 @@ -100,13 +100,13 @@ static int pdc202xx_test_irq(ide_hwif_t *hwif)
1004 * bit 7: error, bit 6: interrupting,
1005 * bit 5: FIFO full, bit 4: FIFO empty
1006 */
1007 - return ((sc1d & 0x50) == 0x40) ? 1 : 0;
1008 + return ((sc1d & 0x50) == 0x50) ? 1 : 0;
1009 } else {
1010 /*
1011 * bit 3: error, bit 2: interrupting,
1012 * bit 1: FIFO full, bit 0: FIFO empty
1013 */
1014 - return ((sc1d & 0x05) == 0x04) ? 1 : 0;
1015 + return ((sc1d & 0x05) == 0x05) ? 1 : 0;
1016 }
1017 }
1018
1019 diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
1020 index f93c2c0..f6dad83 100644
1021 --- a/drivers/input/mouse/alps.c
1022 +++ b/drivers/input/mouse/alps.c
1023 @@ -63,6 +63,8 @@ static const struct alps_model_info alps_model_data[] = {
1024 { { 0x62, 0x02, 0x14 }, 0xcf, 0xcf,
1025 ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
1026 { { 0x73, 0x02, 0x50 }, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */
1027 + { { 0x52, 0x01, 0x14 }, 0xff, 0xff,
1028 + ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */
1029 };
1030
1031 /*
1032 diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
1033 index 2a5982e..525b9b9 100644
1034 --- a/drivers/input/serio/i8042-x86ia64io.h
1035 +++ b/drivers/input/serio/i8042-x86ia64io.h
1036 @@ -442,6 +442,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
1037 },
1038 },
1039 {
1040 + /* Medion Akoya E1222 */
1041 + .matches = {
1042 + DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
1043 + DMI_MATCH(DMI_PRODUCT_NAME, "E122X"),
1044 + },
1045 + },
1046 + {
1047 /* Mivvy M310 */
1048 .matches = {
1049 DMI_MATCH(DMI_SYS_VENDOR, "VIOOO"),
1050 diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
1051 index 3f5cd06..6b6c25d 100644
1052 --- a/drivers/isdn/gigaset/capi.c
1053 +++ b/drivers/isdn/gigaset/capi.c
1054 @@ -1313,7 +1313,7 @@ static void do_connect_req(struct gigaset_capi_ctr *iif,
1055 }
1056
1057 /* check parameter: CIP Value */
1058 - if (cmsg->CIPValue > ARRAY_SIZE(cip2bchlc) ||
1059 + if (cmsg->CIPValue >= ARRAY_SIZE(cip2bchlc) ||
1060 (cmsg->CIPValue > 0 && cip2bchlc[cmsg->CIPValue].bc == NULL)) {
1061 dev_notice(cs->dev, "%s: unknown CIP value %d\n",
1062 "CONNECT_REQ", cmsg->CIPValue);
1063 @@ -2215,36 +2215,24 @@ static int gigaset_ctr_read_proc(char *page, char **start, off_t off,
1064 }
1065
1066
1067 -static struct capi_driver capi_driver_gigaset = {
1068 - .name = "gigaset",
1069 - .revision = "1.0",
1070 -};
1071 -
1072 /**
1073 - * gigaset_isdn_register() - register to LL
1074 + * gigaset_isdn_regdev() - register device to LL
1075 * @cs: device descriptor structure.
1076 * @isdnid: device name.
1077 *
1078 - * Called by main module to register the device with the LL.
1079 - *
1080 * Return value: 1 for success, 0 for failure
1081 */
1082 -int gigaset_isdn_register(struct cardstate *cs, const char *isdnid)
1083 +int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
1084 {
1085 struct gigaset_capi_ctr *iif;
1086 int rc;
1087
1088 - pr_info("Kernel CAPI interface\n");
1089 -
1090 iif = kmalloc(sizeof(*iif), GFP_KERNEL);
1091 if (!iif) {
1092 pr_err("%s: out of memory\n", __func__);
1093 return 0;
1094 }
1095
1096 - /* register driver with CAPI (ToDo: what for?) */
1097 - register_capi_driver(&capi_driver_gigaset);
1098 -
1099 /* prepare controller structure */
1100 iif->ctr.owner = THIS_MODULE;
1101 iif->ctr.driverdata = cs;
1102 @@ -2265,7 +2253,6 @@ int gigaset_isdn_register(struct cardstate *cs, const char *isdnid)
1103 rc = attach_capi_ctr(&iif->ctr);
1104 if (rc) {
1105 pr_err("attach_capi_ctr failed (%d)\n", rc);
1106 - unregister_capi_driver(&capi_driver_gigaset);
1107 kfree(iif);
1108 return 0;
1109 }
1110 @@ -2276,17 +2263,36 @@ int gigaset_isdn_register(struct cardstate *cs, const char *isdnid)
1111 }
1112
1113 /**
1114 - * gigaset_isdn_unregister() - unregister from LL
1115 + * gigaset_isdn_unregdev() - unregister device from LL
1116 * @cs: device descriptor structure.
1117 - *
1118 - * Called by main module to unregister the device from the LL.
1119 */
1120 -void gigaset_isdn_unregister(struct cardstate *cs)
1121 +void gigaset_isdn_unregdev(struct cardstate *cs)
1122 {
1123 struct gigaset_capi_ctr *iif = cs->iif;
1124
1125 detach_capi_ctr(&iif->ctr);
1126 kfree(iif);
1127 cs->iif = NULL;
1128 +}
1129 +
1130 +static struct capi_driver capi_driver_gigaset = {
1131 + .name = "gigaset",
1132 + .revision = "1.0",
1133 +};
1134 +
1135 +/**
1136 + * gigaset_isdn_regdrv() - register driver to LL
1137 + */
1138 +void gigaset_isdn_regdrv(void)
1139 +{
1140 + pr_info("Kernel CAPI interface\n");
1141 + register_capi_driver(&capi_driver_gigaset);
1142 +}
1143 +
1144 +/**
1145 + * gigaset_isdn_unregdrv() - unregister driver from LL
1146 + */
1147 +void gigaset_isdn_unregdrv(void)
1148 +{
1149 unregister_capi_driver(&capi_driver_gigaset);
1150 }
1151 diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
1152 index 664b0c5..0427fac 100644
1153 --- a/drivers/isdn/gigaset/common.c
1154 +++ b/drivers/isdn/gigaset/common.c
1155 @@ -505,7 +505,7 @@ void gigaset_freecs(struct cardstate *cs)
1156 case 2: /* error in initcshw */
1157 /* Deregister from LL */
1158 make_invalid(cs, VALID_ID);
1159 - gigaset_isdn_unregister(cs);
1160 + gigaset_isdn_unregdev(cs);
1161
1162 /* fall through */
1163 case 1: /* error when registering to LL */
1164 @@ -767,7 +767,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
1165 cs->cmdbytes = 0;
1166
1167 gig_dbg(DEBUG_INIT, "setting up iif");
1168 - if (!gigaset_isdn_register(cs, modulename)) {
1169 + if (!gigaset_isdn_regdev(cs, modulename)) {
1170 pr_err("error registering ISDN device\n");
1171 goto error;
1172 }
1173 @@ -1214,11 +1214,13 @@ static int __init gigaset_init_module(void)
1174 gigaset_debuglevel = DEBUG_DEFAULT;
1175
1176 pr_info(DRIVER_DESC DRIVER_DESC_DEBUG "\n");
1177 + gigaset_isdn_regdrv();
1178 return 0;
1179 }
1180
1181 static void __exit gigaset_exit_module(void)
1182 {
1183 + gigaset_isdn_unregdrv();
1184 }
1185
1186 module_init(gigaset_init_module);
1187 diff --git a/drivers/isdn/gigaset/dummyll.c b/drivers/isdn/gigaset/dummyll.c
1188 index 5b27c99..bd0b1ea 100644
1189 --- a/drivers/isdn/gigaset/dummyll.c
1190 +++ b/drivers/isdn/gigaset/dummyll.c
1191 @@ -57,12 +57,20 @@ void gigaset_isdn_stop(struct cardstate *cs)
1192 {
1193 }
1194
1195 -int gigaset_isdn_register(struct cardstate *cs, const char *isdnid)
1196 +int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
1197 {
1198 - pr_info("no ISDN subsystem interface\n");
1199 return 1;
1200 }
1201
1202 -void gigaset_isdn_unregister(struct cardstate *cs)
1203 +void gigaset_isdn_unregdev(struct cardstate *cs)
1204 +{
1205 +}
1206 +
1207 +void gigaset_isdn_regdrv(void)
1208 +{
1209 + pr_info("no ISDN subsystem interface\n");
1210 +}
1211 +
1212 +void gigaset_isdn_unregdrv(void)
1213 {
1214 }
1215 diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c
1216 index ddeb045..0304d02 100644
1217 --- a/drivers/isdn/gigaset/ev-layer.c
1218 +++ b/drivers/isdn/gigaset/ev-layer.c
1219 @@ -1259,14 +1259,10 @@ static void do_action(int action, struct cardstate *cs,
1220 * note that bcs may be NULL if no B channel is free
1221 */
1222 at_state2->ConState = 700;
1223 - kfree(at_state2->str_var[STR_NMBR]);
1224 - at_state2->str_var[STR_NMBR] = NULL;
1225 - kfree(at_state2->str_var[STR_ZCPN]);
1226 - at_state2->str_var[STR_ZCPN] = NULL;
1227 - kfree(at_state2->str_var[STR_ZBC]);
1228 - at_state2->str_var[STR_ZBC] = NULL;
1229 - kfree(at_state2->str_var[STR_ZHLC]);
1230 - at_state2->str_var[STR_ZHLC] = NULL;
1231 + for (i = 0; i < STR_NUM; ++i) {
1232 + kfree(at_state2->str_var[i]);
1233 + at_state2->str_var[i] = NULL;
1234 + }
1235 at_state2->int_var[VAR_ZCTP] = -1;
1236
1237 spin_lock_irqsave(&cs->lock, flags);
1238 diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
1239 index e963a6c..62909b2 100644
1240 --- a/drivers/isdn/gigaset/gigaset.h
1241 +++ b/drivers/isdn/gigaset/gigaset.h
1242 @@ -674,8 +674,10 @@ int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size);
1243 */
1244
1245 /* Called from common.c for setting up/shutting down with the ISDN subsystem */
1246 -int gigaset_isdn_register(struct cardstate *cs, const char *isdnid);
1247 -void gigaset_isdn_unregister(struct cardstate *cs);
1248 +void gigaset_isdn_regdrv(void);
1249 +void gigaset_isdn_unregdrv(void);
1250 +int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid);
1251 +void gigaset_isdn_unregdev(struct cardstate *cs);
1252
1253 /* Called from hardware module to indicate completion of an skb */
1254 void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb);
1255 diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c
1256 index c129ee4..6429a6b 100644
1257 --- a/drivers/isdn/gigaset/i4l.c
1258 +++ b/drivers/isdn/gigaset/i4l.c
1259 @@ -632,15 +632,13 @@ void gigaset_isdn_stop(struct cardstate *cs)
1260 }
1261
1262 /**
1263 - * gigaset_isdn_register() - register to LL
1264 + * gigaset_isdn_regdev() - register to LL
1265 * @cs: device descriptor structure.
1266 * @isdnid: device name.
1267 *
1268 - * Called by main module to register the device with the LL.
1269 - *
1270 * Return value: 1 for success, 0 for failure
1271 */
1272 -int gigaset_isdn_register(struct cardstate *cs, const char *isdnid)
1273 +int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
1274 {
1275 isdn_if *iif;
1276
1277 @@ -690,15 +688,29 @@ int gigaset_isdn_register(struct cardstate *cs, const char *isdnid)
1278 }
1279
1280 /**
1281 - * gigaset_isdn_unregister() - unregister from LL
1282 + * gigaset_isdn_unregdev() - unregister device from LL
1283 * @cs: device descriptor structure.
1284 - *
1285 - * Called by main module to unregister the device from the LL.
1286 */
1287 -void gigaset_isdn_unregister(struct cardstate *cs)
1288 +void gigaset_isdn_unregdev(struct cardstate *cs)
1289 {
1290 gig_dbg(DEBUG_CMD, "sending UNLOAD");
1291 gigaset_i4l_cmd(cs, ISDN_STAT_UNLOAD);
1292 kfree(cs->iif);
1293 cs->iif = NULL;
1294 }
1295 +
1296 +/**
1297 + * gigaset_isdn_regdrv() - register driver to LL
1298 + */
1299 +void gigaset_isdn_regdrv(void)
1300 +{
1301 + /* nothing to do */
1302 +}
1303 +
1304 +/**
1305 + * gigaset_isdn_unregdrv() - unregister driver from LL
1306 + */
1307 +void gigaset_isdn_unregdrv(void)
1308 +{
1309 + /* nothing to do */
1310 +}
1311 diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
1312 index d2260b0..07bb299 100644
1313 --- a/drivers/isdn/gigaset/interface.c
1314 +++ b/drivers/isdn/gigaset/interface.c
1315 @@ -632,7 +632,6 @@ void gigaset_if_receive(struct cardstate *cs,
1316 if (tty == NULL)
1317 gig_dbg(DEBUG_ANY, "receive on closed device");
1318 else {
1319 - tty_buffer_request_room(tty, len);
1320 tty_insert_flip_string(tty, buffer, len);
1321 tty_flip_buffer_push(tty);
1322 }
1323 diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
1324 index e5225d2..0823e26 100644
1325 --- a/drivers/leds/leds-gpio.c
1326 +++ b/drivers/leds/leds-gpio.c
1327 @@ -211,7 +211,6 @@ static int __devinit of_gpio_leds_probe(struct of_device *ofdev,
1328 const struct of_device_id *match)
1329 {
1330 struct device_node *np = ofdev->node, *child;
1331 - struct gpio_led led;
1332 struct gpio_led_of_platform_data *pdata;
1333 int count = 0, ret;
1334
1335 @@ -226,8 +225,8 @@ static int __devinit of_gpio_leds_probe(struct of_device *ofdev,
1336 if (!pdata)
1337 return -ENOMEM;
1338
1339 - memset(&led, 0, sizeof(led));
1340 for_each_child_of_node(np, child) {
1341 + struct gpio_led led = {};
1342 enum of_gpio_flags flags;
1343 const char *state;
1344
1345 diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c
1346 index cc0505e..6b0a495 100644
1347 --- a/drivers/media/video/em28xx/em28xx-dvb.c
1348 +++ b/drivers/media/video/em28xx/em28xx-dvb.c
1349 @@ -606,6 +606,7 @@ static int dvb_fini(struct em28xx *dev)
1350
1351 if (dev->dvb) {
1352 unregister_dvb(dev->dvb);
1353 + kfree(dev->dvb);
1354 dev->dvb = NULL;
1355 }
1356
1357 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
1358 index efa0e41..1f800ae 100644
1359 --- a/drivers/net/bonding/bond_main.c
1360 +++ b/drivers/net/bonding/bond_main.c
1361 @@ -4935,6 +4935,8 @@ int bond_create(struct net *net, const char *name)
1362 }
1363
1364 res = register_netdevice(bond_dev);
1365 + if (res < 0)
1366 + goto out_netdev;
1367
1368 out:
1369 rtnl_unlock();
1370 diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
1371 index 0ec1524..fe5e320 100644
1372 --- a/drivers/net/can/bfin_can.c
1373 +++ b/drivers/net/can/bfin_can.c
1374 @@ -26,6 +26,7 @@
1375
1376 #define DRV_NAME "bfin_can"
1377 #define BFIN_CAN_TIMEOUT 100
1378 +#define TX_ECHO_SKB_MAX 1
1379
1380 /*
1381 * transmit and receive channels
1382 @@ -590,7 +591,7 @@ struct net_device *alloc_bfin_candev(void)
1383 struct net_device *dev;
1384 struct bfin_can_priv *priv;
1385
1386 - dev = alloc_candev(sizeof(*priv));
1387 + dev = alloc_candev(sizeof(*priv), TX_ECHO_SKB_MAX);
1388 if (!dev)
1389 return NULL;
1390
1391 diff --git a/drivers/net/e100.c b/drivers/net/e100.c
1392 index 839fb2b..a565ea1 100644
1393 --- a/drivers/net/e100.c
1394 +++ b/drivers/net/e100.c
1395 @@ -2854,7 +2854,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
1396 }
1397 nic->cbs_pool = pci_pool_create(netdev->name,
1398 nic->pdev,
1399 - nic->params.cbs.count * sizeof(struct cb),
1400 + nic->params.cbs.max * sizeof(struct cb),
1401 sizeof(u32),
1402 0);
1403 DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %pM\n",
1404 diff --git a/drivers/net/jme.c b/drivers/net/jme.c
1405 index 792b88f..981c9fb 100644
1406 --- a/drivers/net/jme.c
1407 +++ b/drivers/net/jme.c
1408 @@ -946,6 +946,8 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
1409 jme->jme_vlan_rx(skb, jme->vlgrp,
1410 le16_to_cpu(rxdesc->descwb.vlan));
1411 NET_STAT(jme).rx_bytes += 4;
1412 + } else {
1413 + dev_kfree_skb(skb);
1414 }
1415 } else {
1416 jme->jme_rx(skb);
1417 @@ -2085,12 +2087,45 @@ jme_tx_timeout(struct net_device *netdev)
1418 jme_reset_link(jme);
1419 }
1420
1421 +static inline void jme_pause_rx(struct jme_adapter *jme)
1422 +{
1423 + atomic_dec(&jme->link_changing);
1424 +
1425 + jme_set_rx_pcc(jme, PCC_OFF);
1426 + if (test_bit(JME_FLAG_POLL, &jme->flags)) {
1427 + JME_NAPI_DISABLE(jme);
1428 + } else {
1429 + tasklet_disable(&jme->rxclean_task);
1430 + tasklet_disable(&jme->rxempty_task);
1431 + }
1432 +}
1433 +
1434 +static inline void jme_resume_rx(struct jme_adapter *jme)
1435 +{
1436 + struct dynpcc_info *dpi = &(jme->dpi);
1437 +
1438 + if (test_bit(JME_FLAG_POLL, &jme->flags)) {
1439 + JME_NAPI_ENABLE(jme);
1440 + } else {
1441 + tasklet_hi_enable(&jme->rxclean_task);
1442 + tasklet_hi_enable(&jme->rxempty_task);
1443 + }
1444 + dpi->cur = PCC_P1;
1445 + dpi->attempt = PCC_P1;
1446 + dpi->cnt = 0;
1447 + jme_set_rx_pcc(jme, PCC_P1);
1448 +
1449 + atomic_inc(&jme->link_changing);
1450 +}
1451 +
1452 static void
1453 jme_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
1454 {
1455 struct jme_adapter *jme = netdev_priv(netdev);
1456
1457 + jme_pause_rx(jme);
1458 jme->vlgrp = grp;
1459 + jme_resume_rx(jme);
1460 }
1461
1462 static void
1463 diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
1464 index 9fbb2eb..449a982 100644
1465 --- a/drivers/net/pppol2tp.c
1466 +++ b/drivers/net/pppol2tp.c
1467 @@ -756,6 +756,7 @@ static int pppol2tp_recv_core(struct sock *sock, struct sk_buff *skb)
1468
1469 /* Try to dequeue as many skbs from reorder_q as we can. */
1470 pppol2tp_recv_dequeue(session);
1471 + sock_put(sock);
1472
1473 return 0;
1474
1475 @@ -772,6 +773,7 @@ discard_bad_csum:
1476 UDP_INC_STATS_USER(&init_net, UDP_MIB_INERRORS, 0);
1477 tunnel->stats.rx_errors++;
1478 kfree_skb(skb);
1479 + sock_put(sock);
1480
1481 return 0;
1482
1483 @@ -1180,7 +1182,8 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
1484 /* Calculate UDP checksum if configured to do so */
1485 if (sk_tun->sk_no_check == UDP_CSUM_NOXMIT)
1486 skb->ip_summed = CHECKSUM_NONE;
1487 - else if (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) {
1488 + else if ((skb_dst(skb) && skb_dst(skb)->dev) &&
1489 + (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM))) {
1490 skb->ip_summed = CHECKSUM_COMPLETE;
1491 csum = skb_checksum(skb, 0, udp_len, 0);
1492 uh->check = csum_tcpudp_magic(inet->inet_saddr,
1493 @@ -1661,6 +1664,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
1494 if (tunnel_sock == NULL)
1495 goto end;
1496
1497 + sock_hold(tunnel_sock);
1498 tunnel = tunnel_sock->sk_user_data;
1499 } else {
1500 tunnel = pppol2tp_tunnel_find(sock_net(sk), sp->pppol2tp.s_tunnel);
1501 diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
1502 index 60f96c4..67d414b 100644
1503 --- a/drivers/net/r8169.c
1504 +++ b/drivers/net/r8169.c
1505 @@ -186,7 +186,12 @@ static struct pci_device_id rtl8169_pci_tbl[] = {
1506
1507 MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
1508
1509 -static int rx_copybreak = 200;
1510 +/*
1511 + * we set our copybreak very high so that we don't have
1512 + * to allocate 16k frames all the time (see note in
1513 + * rtl8169_open())
1514 + */
1515 +static int rx_copybreak = 16383;
1516 static int use_dac;
1517 static struct {
1518 u32 msg_enable;
1519 @@ -3245,9 +3250,13 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
1520 }
1521
1522 static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
1523 - struct net_device *dev)
1524 + unsigned int mtu)
1525 {
1526 - unsigned int max_frame = dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
1527 + unsigned int max_frame = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
1528 +
1529 + if (max_frame != 16383)
1530 + printk(KERN_WARNING "WARNING! Changing of MTU on this NIC"
1531 + "May lead to frame reception errors!\n");
1532
1533 tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE;
1534 }
1535 @@ -3259,7 +3268,17 @@ static int rtl8169_open(struct net_device *dev)
1536 int retval = -ENOMEM;
1537
1538
1539 - rtl8169_set_rxbufsize(tp, dev);
1540 + /*
1541 + * Note that we use a magic value here; it's weird, I know.
1542 + * It's done because some subset of rtl8169 hardware suffers from
1543 + * a problem in which frames received that are longer than
1544 + * the size set in RxMaxSize register return garbage sizes
1545 + * when received. To avoid this we need to turn off filtering,
1546 + * which is done by setting a value of 16383 in the RxMaxSize register
1547 + * and allocating 16k frames to handle the largest possible rx value
1548 + * that's what the magic math below does.
1549 + */
1550 + rtl8169_set_rxbufsize(tp, 16383 - VLAN_ETH_HLEN - ETH_FCS_LEN);
1551
1552 /*
1553 * Rx and Tx desscriptors needs 256 bytes alignment.
1554 @@ -3912,7 +3931,7 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
1555
1556 rtl8169_down(dev);
1557
1558 - rtl8169_set_rxbufsize(tp, dev);
1559 + rtl8169_set_rxbufsize(tp, dev->mtu);
1560
1561 ret = rtl8169_init_ring(dev);
1562 if (ret < 0)
1563 diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
1564 index 7f82b02..17d1493 100644
1565 --- a/drivers/net/tg3.c
1566 +++ b/drivers/net/tg3.c
1567 @@ -5223,7 +5223,7 @@ static void tg3_poll_controller(struct net_device *dev)
1568 struct tg3 *tp = netdev_priv(dev);
1569
1570 for (i = 0; i < tp->irq_cnt; i++)
1571 - tg3_interrupt(tp->napi[i].irq_vec, dev);
1572 + tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
1573 }
1574 #endif
1575
1576 diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
1577 index bbd2f31..8b43089 100644
1578 --- a/drivers/net/wireless/ath/ath5k/ath5k.h
1579 +++ b/drivers/net/wireless/ath/ath5k/ath5k.h
1580 @@ -535,7 +535,7 @@ struct ath5k_txq_info {
1581 u32 tqi_cbr_period; /* Constant bit rate period */
1582 u32 tqi_cbr_overflow_limit;
1583 u32 tqi_burst_time;
1584 - u32 tqi_ready_time; /* Not used */
1585 + u32 tqi_ready_time; /* Time queue waits after an event */
1586 };
1587
1588 /*
1589 diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
1590 index d6ee8ac..ced648b 100644
1591 --- a/drivers/net/wireless/ath/ath5k/base.c
1592 +++ b/drivers/net/wireless/ath/ath5k/base.c
1593 @@ -1537,7 +1537,8 @@ ath5k_beaconq_config(struct ath5k_softc *sc)
1594
1595 ret = ath5k_hw_get_tx_queueprops(ah, sc->bhalq, &qi);
1596 if (ret)
1597 - return ret;
1598 + goto err;
1599 +
1600 if (sc->opmode == NL80211_IFTYPE_AP ||
1601 sc->opmode == NL80211_IFTYPE_MESH_POINT) {
1602 /*
1603 @@ -1564,10 +1565,25 @@ ath5k_beaconq_config(struct ath5k_softc *sc)
1604 if (ret) {
1605 ATH5K_ERR(sc, "%s: unable to update parameters for beacon "
1606 "hardware queue!\n", __func__);
1607 - return ret;
1608 + goto err;
1609 }
1610 + ret = ath5k_hw_reset_tx_queue(ah, sc->bhalq); /* push to h/w */
1611 + if (ret)
1612 + goto err;
1613
1614 - return ath5k_hw_reset_tx_queue(ah, sc->bhalq); /* push to h/w */;
1615 + /* reconfigure cabq with ready time to 80% of beacon_interval */
1616 + ret = ath5k_hw_get_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
1617 + if (ret)
1618 + goto err;
1619 +
1620 + qi.tqi_ready_time = (sc->bintval * 80) / 100;
1621 + ret = ath5k_hw_set_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
1622 + if (ret)
1623 + goto err;
1624 +
1625 + ret = ath5k_hw_reset_tx_queue(ah, AR5K_TX_QUEUE_ID_CAB);
1626 +err:
1627 + return ret;
1628 }
1629
1630 static void
1631 diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
1632 index 72474c0..97df0d9 100644
1633 --- a/drivers/net/wireless/ath/ath5k/phy.c
1634 +++ b/drivers/net/wireless/ath/ath5k/phy.c
1635 @@ -1386,38 +1386,39 @@ static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah,
1636 goto done;
1637
1638 /* Calibration has finished, get the results and re-run */
1639 +
1640 + /* work around empty results which can apparently happen on 5212 */
1641 for (i = 0; i <= 10; i++) {
1642 iq_corr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_CORR);
1643 i_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_I);
1644 q_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_Q);
1645 + ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
1646 + "iq_corr:%x i_pwr:%x q_pwr:%x", iq_corr, i_pwr, q_pwr);
1647 + if (i_pwr && q_pwr)
1648 + break;
1649 }
1650
1651 i_coffd = ((i_pwr >> 1) + (q_pwr >> 1)) >> 7;
1652 q_coffd = q_pwr >> 7;
1653
1654 - /* No correction */
1655 - if (i_coffd == 0 || q_coffd == 0)
1656 + /* protect against divide by 0 and loss of sign bits */
1657 + if (i_coffd == 0 || q_coffd < 2)
1658 goto done;
1659
1660 - i_coff = ((-iq_corr) / i_coffd);
1661 + i_coff = (-iq_corr) / i_coffd;
1662 + i_coff = clamp(i_coff, -32, 31); /* signed 6 bit */
1663
1664 - /* Boundary check */
1665 - if (i_coff > 31)
1666 - i_coff = 31;
1667 - if (i_coff < -32)
1668 - i_coff = -32;
1669 + q_coff = (i_pwr / q_coffd) - 128;
1670 + q_coff = clamp(q_coff, -16, 15); /* signed 5 bit */
1671
1672 - q_coff = (((s32)i_pwr / q_coffd) - 128);
1673 + ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
1674 + "new I:%d Q:%d (i_coffd:%x q_coffd:%x)",
1675 + i_coff, q_coff, i_coffd, q_coffd);
1676
1677 - /* Boundary check */
1678 - if (q_coff > 15)
1679 - q_coff = 15;
1680 - if (q_coff < -16)
1681 - q_coff = -16;
1682 -
1683 - /* Commit new I/Q value */
1684 - AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_CORR_ENABLE |
1685 - ((u32)q_coff) | ((u32)i_coff << AR5K_PHY_IQ_CORR_Q_I_COFF_S));
1686 + /* Commit new I/Q values (set enable bit last to match HAL sources) */
1687 + AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_CORR_Q_I_COFF, i_coff);
1688 + AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_CORR_Q_Q_COFF, q_coff);
1689 + AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_CORR_ENABLE);
1690
1691 /* Re-enable calibration -if we don't we'll commit
1692 * the same values again and again */
1693 diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
1694 index eeebb9a..b7c5725 100644
1695 --- a/drivers/net/wireless/ath/ath5k/qcu.c
1696 +++ b/drivers/net/wireless/ath/ath5k/qcu.c
1697 @@ -408,12 +408,13 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
1698 break;
1699
1700 case AR5K_TX_QUEUE_CAB:
1701 + /* XXX: use BCN_SENT_GT, if we can figure out how */
1702 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
1703 - AR5K_QCU_MISC_FRSHED_BCN_SENT_GT |
1704 + AR5K_QCU_MISC_FRSHED_DBA_GT |
1705 AR5K_QCU_MISC_CBREXP_DIS |
1706 AR5K_QCU_MISC_CBREXP_BCN_DIS);
1707
1708 - ath5k_hw_reg_write(ah, ((AR5K_TUNE_BEACON_INTERVAL -
1709 + ath5k_hw_reg_write(ah, ((tq->tqi_ready_time -
1710 (AR5K_TUNE_SW_BEACON_RESP -
1711 AR5K_TUNE_DMA_BEACON_RESP) -
1712 AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
1713 diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
1714 index 4cb9c5d..1464f89 100644
1715 --- a/drivers/net/wireless/ath/ath5k/reg.h
1716 +++ b/drivers/net/wireless/ath/ath5k/reg.h
1717 @@ -2187,6 +2187,7 @@
1718 */
1719 #define AR5K_PHY_IQ 0x9920 /* Register Address */
1720 #define AR5K_PHY_IQ_CORR_Q_Q_COFF 0x0000001f /* Mask for q correction info */
1721 +#define AR5K_PHY_IQ_CORR_Q_Q_COFF_S 0
1722 #define AR5K_PHY_IQ_CORR_Q_I_COFF 0x000007e0 /* Mask for i correction info */
1723 #define AR5K_PHY_IQ_CORR_Q_I_COFF_S 5
1724 #define AR5K_PHY_IQ_CORR_ENABLE 0x00000800 /* Enable i/q correction */
1725 diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
1726 index 62954fc..dbc52ee 100644
1727 --- a/drivers/net/wireless/ath/ath5k/reset.c
1728 +++ b/drivers/net/wireless/ath/ath5k/reset.c
1729 @@ -1371,8 +1371,9 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1730 * Set clocks to 32KHz operation and use an
1731 * external 32KHz crystal when sleeping if one
1732 * exists */
1733 - if (ah->ah_version == AR5K_AR5212)
1734 - ath5k_hw_set_sleep_clock(ah, true);
1735 + if (ah->ah_version == AR5K_AR5212 &&
1736 + ah->ah_op_mode != NL80211_IFTYPE_AP)
1737 + ath5k_hw_set_sleep_clock(ah, true);
1738
1739 /*
1740 * Disable beacons and reset the register
1741 diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
1742 index 1597a42..2bad712 100644
1743 --- a/drivers/net/wireless/ath/ath9k/ath9k.h
1744 +++ b/drivers/net/wireless/ath/ath9k/ath9k.h
1745 @@ -267,6 +267,7 @@ void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
1746 u16 tid, u16 *ssn);
1747 void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
1748 void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
1749 +void ath9k_enable_ps(struct ath_softc *sc);
1750
1751 /********/
1752 /* VIFs */
1753 diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
1754 index 06eaaa9..20b1fd3 100644
1755 --- a/drivers/net/wireless/ath/ath9k/beacon.c
1756 +++ b/drivers/net/wireless/ath/ath9k/beacon.c
1757 @@ -573,6 +573,13 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
1758 u64 tsf;
1759 int num_beacons, offset, dtim_dec_count, cfp_dec_count;
1760
1761 + /* No need to configure beacon if we are not associated */
1762 + if (!common->curaid) {
1763 + ath_print(common, ATH_DBG_BEACON,
1764 + "STA is not yet associated..skipping beacon config\n");
1765 + return;
1766 + }
1767 +
1768 memset(&bs, 0, sizeof(bs));
1769 intval = conf->beacon_interval & ATH9K_BEACON_PERIOD;
1770
1771 diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
1772 index 7c64aa5..6661178 100644
1773 --- a/drivers/net/wireless/ath/ath9k/hw.c
1774 +++ b/drivers/net/wireless/ath/ath9k/hw.c
1775 @@ -380,7 +380,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
1776 ah->config.pcie_clock_req = 0;
1777 ah->config.pcie_waen = 0;
1778 ah->config.analog_shiftreg = 1;
1779 - ah->config.ht_enable = 1;
1780 ah->config.ofdm_trig_low = 200;
1781 ah->config.ofdm_trig_high = 500;
1782 ah->config.cck_trig_high = 200;
1783 @@ -392,6 +391,11 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
1784 ah->config.spurchans[i][1] = AR_NO_SPUR;
1785 }
1786
1787 + if (ah->hw_version.devid != AR2427_DEVID_PCIE)
1788 + ah->config.ht_enable = 1;
1789 + else
1790 + ah->config.ht_enable = 0;
1791 +
1792 ah->config.intr_mitigation = true;
1793
1794 /*
1795 @@ -590,6 +594,7 @@ static bool ath9k_hw_devid_supported(u16 devid)
1796 case AR5416_DEVID_AR9287_PCI:
1797 case AR5416_DEVID_AR9287_PCIE:
1798 case AR9271_USB:
1799 + case AR2427_DEVID_PCIE:
1800 return true;
1801 default:
1802 break;
1803 diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
1804 index e2b0c73..33a28ec 100644
1805 --- a/drivers/net/wireless/ath/ath9k/hw.h
1806 +++ b/drivers/net/wireless/ath/ath9k/hw.h
1807 @@ -40,6 +40,7 @@
1808 #define AR9280_DEVID_PCI 0x0029
1809 #define AR9280_DEVID_PCIE 0x002a
1810 #define AR9285_DEVID_PCIE 0x002b
1811 +#define AR2427_DEVID_PCIE 0x002c
1812
1813 #define AR5416_AR9100_DEVID 0x000b
1814
1815 diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
1816 index 4faafbd..33a1071 100644
1817 --- a/drivers/net/wireless/ath/ath9k/main.c
1818 +++ b/drivers/net/wireless/ath/ath9k/main.c
1819 @@ -1854,11 +1854,14 @@ void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
1820 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
1821 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1822 IEEE80211_HW_SIGNAL_DBM |
1823 - IEEE80211_HW_AMPDU_AGGREGATION |
1824 IEEE80211_HW_SUPPORTS_PS |
1825 IEEE80211_HW_PS_NULLFUNC_STACK |
1826 + IEEE80211_HW_REPORTS_TX_ACK_STATUS |
1827 IEEE80211_HW_SPECTRUM_MGMT;
1828
1829 + if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
1830 + hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
1831 +
1832 if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
1833 hw->flags |= IEEE80211_HW_MFP_CAPABLE;
1834
1835 @@ -2679,6 +2682,19 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
1836 mutex_unlock(&sc->mutex);
1837 }
1838
1839 +void ath9k_enable_ps(struct ath_softc *sc)
1840 +{
1841 + sc->ps_enabled = true;
1842 + if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
1843 + if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
1844 + sc->imask |= ATH9K_INT_TIM_TIMER;
1845 + ath9k_hw_set_interrupts(sc->sc_ah,
1846 + sc->imask);
1847 + }
1848 + }
1849 + ath9k_hw_setrxabort(sc->sc_ah, 1);
1850 +}
1851 +
1852 static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1853 {
1854 struct ath_wiphy *aphy = hw->priv;
1855 @@ -2732,22 +2748,13 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1856 if (changed & IEEE80211_CONF_CHANGE_PS) {
1857 if (conf->flags & IEEE80211_CONF_PS) {
1858 sc->sc_flags |= SC_OP_PS_ENABLED;
1859 - if (!(ah->caps.hw_caps &
1860 - ATH9K_HW_CAP_AUTOSLEEP)) {
1861 - if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
1862 - sc->imask |= ATH9K_INT_TIM_TIMER;
1863 - ath9k_hw_set_interrupts(sc->sc_ah,
1864 - sc->imask);
1865 - }
1866 - }
1867 /*
1868 * At this point we know hardware has received an ACK
1869 * of a previously sent null data frame.
1870 */
1871 if ((sc->sc_flags & SC_OP_NULLFUNC_COMPLETED)) {
1872 sc->sc_flags &= ~SC_OP_NULLFUNC_COMPLETED;
1873 - sc->ps_enabled = true;
1874 - ath9k_hw_setrxabort(sc->sc_ah, 1);
1875 + ath9k_enable_ps(sc);
1876 }
1877 } else {
1878 sc->ps_enabled = false;
1879 diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
1880 index f7af5ea..199c54a 100644
1881 --- a/drivers/net/wireless/ath/ath9k/pci.c
1882 +++ b/drivers/net/wireless/ath/ath9k/pci.c
1883 @@ -25,6 +25,7 @@ static struct pci_device_id ath_pci_id_table[] __devinitdata = {
1884 { PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */
1885 { PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */
1886 { PCI_VDEVICE(ATHEROS, 0x002B) }, /* PCI-E */
1887 + { PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */
1888 { PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */
1889 { PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */
1890 { 0 }
1891 diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
1892 index 1d6cf7d..171ce2b 100644
1893 --- a/drivers/net/wireless/ath/ath9k/rc.c
1894 +++ b/drivers/net/wireless/ath/ath9k/rc.c
1895 @@ -1323,7 +1323,7 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
1896
1897 static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
1898 struct ieee80211_sta *sta, void *priv_sta,
1899 - u32 changed)
1900 + u32 changed, enum nl80211_channel_type oper_chan_type)
1901 {
1902 struct ath_softc *sc = priv;
1903 struct ath_rate_priv *ath_rc_priv = priv_sta;
1904 @@ -1340,8 +1340,8 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
1905 if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
1906 return;
1907
1908 - if (sc->hw->conf.channel_type == NL80211_CHAN_HT40MINUS ||
1909 - sc->hw->conf.channel_type == NL80211_CHAN_HT40PLUS)
1910 + if (oper_chan_type == NL80211_CHAN_HT40MINUS ||
1911 + oper_chan_type == NL80211_CHAN_HT40PLUS)
1912 oper_cw40 = true;
1913
1914 oper_sgi40 = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
1915 diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
1916 index 29bf336..c3ce920 100644
1917 --- a/drivers/net/wireless/ath/ath9k/xmit.c
1918 +++ b/drivers/net/wireless/ath/ath9k/xmit.c
1919 @@ -1353,25 +1353,6 @@ static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
1920 return htype;
1921 }
1922
1923 -static bool is_pae(struct sk_buff *skb)
1924 -{
1925 - struct ieee80211_hdr *hdr;
1926 - __le16 fc;
1927 -
1928 - hdr = (struct ieee80211_hdr *)skb->data;
1929 - fc = hdr->frame_control;
1930 -
1931 - if (ieee80211_is_data(fc)) {
1932 - if (ieee80211_is_nullfunc(fc) ||
1933 - /* Port Access Entity (IEEE 802.1X) */
1934 - (skb->protocol == cpu_to_be16(ETH_P_PAE))) {
1935 - return true;
1936 - }
1937 - }
1938 -
1939 - return false;
1940 -}
1941 -
1942 static int get_hw_crypto_keytype(struct sk_buff *skb)
1943 {
1944 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1945 @@ -1701,7 +1682,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1946 goto tx_done;
1947 }
1948
1949 - if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && !is_pae(skb)) {
1950 + if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
1951 /*
1952 * Try aggregation if it's a unicast data frame
1953 * and the destination is HT capable.
1954 @@ -2053,10 +2034,9 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1955 */
1956 if (bf->bf_isnullfunc &&
1957 (ds->ds_txstat.ts_status & ATH9K_TX_ACKED)) {
1958 - if ((sc->sc_flags & SC_OP_PS_ENABLED)) {
1959 - sc->ps_enabled = true;
1960 - ath9k_hw_setrxabort(sc->sc_ah, 1);
1961 - } else
1962 + if ((sc->sc_flags & SC_OP_PS_ENABLED))
1963 + ath9k_enable_ps(sc);
1964 + else
1965 sc->sc_flags |= SC_OP_NULLFUNC_COMPLETED;
1966 }
1967
1968 @@ -2264,7 +2244,7 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
1969 if (ATH_TXQ_SETUP(sc, i)) {
1970 txq = &sc->tx.txq[i];
1971
1972 - spin_lock(&txq->axq_lock);
1973 + spin_lock_bh(&txq->axq_lock);
1974
1975 list_for_each_entry_safe(ac,
1976 ac_tmp, &txq->axq_acq, list) {
1977 @@ -2285,7 +2265,7 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
1978 }
1979 }
1980
1981 - spin_unlock(&txq->axq_lock);
1982 + spin_unlock_bh(&txq->axq_lock);
1983 }
1984 }
1985 }
1986 diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
1987 index b59166c..629c166 100644
1988 --- a/drivers/net/wireless/b43/main.c
1989 +++ b/drivers/net/wireless/b43/main.c
1990 @@ -852,19 +852,16 @@ static void b43_op_update_tkip_key(struct ieee80211_hw *hw,
1991 if (B43_WARN_ON(!modparam_hwtkip))
1992 return;
1993
1994 - mutex_lock(&wl->mutex);
1995 -
1996 + /* This is only called from the RX path through mac80211, where
1997 + * our mutex is already locked. */
1998 + B43_WARN_ON(!mutex_is_locked(&wl->mutex));
1999 dev = wl->current_dev;
2000 - if (!dev || b43_status(dev) < B43_STAT_INITIALIZED)
2001 - goto out_unlock;
2002 + B43_WARN_ON(!dev || b43_status(dev) < B43_STAT_INITIALIZED);
2003
2004 keymac_write(dev, index, NULL); /* First zero out mac to avoid race */
2005
2006 rx_tkip_phase1_write(dev, index, iv32, phase1key);
2007 keymac_write(dev, index, addr);
2008 -
2009 -out_unlock:
2010 - mutex_unlock(&wl->mutex);
2011 }
2012
2013 static void do_key_write(struct b43_wldev *dev,
2014 diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
2015 index 234891d..e955515 100644
2016 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c
2017 +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
2018 @@ -2474,11 +2474,9 @@ int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
2019 memset((void *)&priv->hw_params, 0,
2020 sizeof(struct iwl_hw_params));
2021
2022 - priv->shared_virt =
2023 - pci_alloc_consistent(priv->pci_dev,
2024 - sizeof(struct iwl3945_shared),
2025 - &priv->shared_phys);
2026 -
2027 + priv->shared_virt = dma_alloc_coherent(&priv->pci_dev->dev,
2028 + sizeof(struct iwl3945_shared),
2029 + &priv->shared_phys, GFP_KERNEL);
2030 if (!priv->shared_virt) {
2031 IWL_ERR(priv, "failed to allocate pci memory\n");
2032 mutex_unlock(&priv->mutex);
2033 diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
2034 index f36f804..6e9e156 100644
2035 --- a/drivers/net/wireless/iwlwifi/iwl-core.c
2036 +++ b/drivers/net/wireless/iwlwifi/iwl-core.c
2037 @@ -1658,9 +1658,9 @@ EXPORT_SYMBOL(iwl_set_tx_power);
2038 void iwl_free_isr_ict(struct iwl_priv *priv)
2039 {
2040 if (priv->ict_tbl_vir) {
2041 - pci_free_consistent(priv->pci_dev, (sizeof(u32) * ICT_COUNT) +
2042 - PAGE_SIZE, priv->ict_tbl_vir,
2043 - priv->ict_tbl_dma);
2044 + dma_free_coherent(&priv->pci_dev->dev,
2045 + (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
2046 + priv->ict_tbl_vir, priv->ict_tbl_dma);
2047 priv->ict_tbl_vir = NULL;
2048 }
2049 }
2050 @@ -1676,9 +1676,9 @@ int iwl_alloc_isr_ict(struct iwl_priv *priv)
2051 if (priv->cfg->use_isr_legacy)
2052 return 0;
2053 /* allocate shrared data table */
2054 - priv->ict_tbl_vir = pci_alloc_consistent(priv->pci_dev, (sizeof(u32) *
2055 - ICT_COUNT) + PAGE_SIZE,
2056 - &priv->ict_tbl_dma);
2057 + priv->ict_tbl_vir = dma_alloc_coherent(&priv->pci_dev->dev,
2058 + (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
2059 + &priv->ict_tbl_dma, GFP_KERNEL);
2060 if (!priv->ict_tbl_vir)
2061 return -ENOMEM;
2062
2063 diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
2064 index bd0b12e..f8481e8 100644
2065 --- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
2066 +++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
2067 @@ -80,8 +80,8 @@ static inline void iwl_free_fw_desc(struct pci_dev *pci_dev,
2068 struct fw_desc *desc)
2069 {
2070 if (desc->v_addr)
2071 - pci_free_consistent(pci_dev, desc->len,
2072 - desc->v_addr, desc->p_addr);
2073 + dma_free_coherent(&pci_dev->dev, desc->len,
2074 + desc->v_addr, desc->p_addr);
2075 desc->v_addr = NULL;
2076 desc->len = 0;
2077 }
2078 @@ -89,7 +89,8 @@ static inline void iwl_free_fw_desc(struct pci_dev *pci_dev,
2079 static inline int iwl_alloc_fw_desc(struct pci_dev *pci_dev,
2080 struct fw_desc *desc)
2081 {
2082 - desc->v_addr = pci_alloc_consistent(pci_dev, desc->len, &desc->p_addr);
2083 + desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
2084 + &desc->p_addr, GFP_KERNEL);
2085 return (desc->v_addr != NULL) ? 0 : -ENOMEM;
2086 }
2087
2088 diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
2089 index 2dbce85..4ac16d9 100644
2090 --- a/drivers/net/wireless/iwlwifi/iwl-rx.c
2091 +++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
2092 @@ -350,10 +350,10 @@ void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
2093 }
2094 }
2095
2096 - pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
2097 - rxq->dma_addr);
2098 - pci_free_consistent(priv->pci_dev, sizeof(struct iwl_rb_status),
2099 - rxq->rb_stts, rxq->rb_stts_dma);
2100 + dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
2101 + rxq->dma_addr);
2102 + dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
2103 + rxq->rb_stts, rxq->rb_stts_dma);
2104 rxq->bd = NULL;
2105 rxq->rb_stts = NULL;
2106 }
2107 @@ -362,7 +362,7 @@ EXPORT_SYMBOL(iwl_rx_queue_free);
2108 int iwl_rx_queue_alloc(struct iwl_priv *priv)
2109 {
2110 struct iwl_rx_queue *rxq = &priv->rxq;
2111 - struct pci_dev *dev = priv->pci_dev;
2112 + struct device *dev = &priv->pci_dev->dev;
2113 int i;
2114
2115 spin_lock_init(&rxq->lock);
2116 @@ -370,12 +370,13 @@ int iwl_rx_queue_alloc(struct iwl_priv *priv)
2117 INIT_LIST_HEAD(&rxq->rx_used);
2118
2119 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
2120 - rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
2121 + rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr,
2122 + GFP_KERNEL);
2123 if (!rxq->bd)
2124 goto err_bd;
2125
2126 - rxq->rb_stts = pci_alloc_consistent(dev, sizeof(struct iwl_rb_status),
2127 - &rxq->rb_stts_dma);
2128 + rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
2129 + &rxq->rb_stts_dma, GFP_KERNEL);
2130 if (!rxq->rb_stts)
2131 goto err_rb;
2132
2133 @@ -392,8 +393,8 @@ int iwl_rx_queue_alloc(struct iwl_priv *priv)
2134 return 0;
2135
2136 err_rb:
2137 - pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
2138 - rxq->dma_addr);
2139 + dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
2140 + rxq->dma_addr);
2141 err_bd:
2142 return -ENOMEM;
2143 }
2144 diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
2145 index 8f40715..88470fb 100644
2146 --- a/drivers/net/wireless/iwlwifi/iwl-tx.c
2147 +++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
2148 @@ -60,7 +60,8 @@ static const u16 default_tid_to_tx_fifo[] = {
2149 static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv,
2150 struct iwl_dma_ptr *ptr, size_t size)
2151 {
2152 - ptr->addr = pci_alloc_consistent(priv->pci_dev, size, &ptr->dma);
2153 + ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
2154 + GFP_KERNEL);
2155 if (!ptr->addr)
2156 return -ENOMEM;
2157 ptr->size = size;
2158 @@ -73,7 +74,7 @@ static inline void iwl_free_dma_ptr(struct iwl_priv *priv,
2159 if (unlikely(!ptr->addr))
2160 return;
2161
2162 - pci_free_consistent(priv->pci_dev, ptr->size, ptr->addr, ptr->dma);
2163 + dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
2164 memset(ptr, 0, sizeof(*ptr));
2165 }
2166
2167 @@ -126,7 +127,7 @@ void iwl_free_tfds_in_queue(struct iwl_priv *priv,
2168 if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
2169 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
2170 else {
2171 - IWL_ERR(priv, "free more than tfds_in_queue (%u:%d)\n",
2172 + IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
2173 priv->stations[sta_id].tid[tid].tfds_in_queue,
2174 freed);
2175 priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
2176 @@ -146,7 +147,7 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
2177 {
2178 struct iwl_tx_queue *txq = &priv->txq[txq_id];
2179 struct iwl_queue *q = &txq->q;
2180 - struct pci_dev *dev = priv->pci_dev;
2181 + struct device *dev = &priv->pci_dev->dev;
2182 int i;
2183
2184 if (q->n_bd == 0)
2185 @@ -163,8 +164,8 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
2186
2187 /* De-alloc circular buffer of TFDs */
2188 if (txq->q.n_bd)
2189 - pci_free_consistent(dev, priv->hw_params.tfd_size *
2190 - txq->q.n_bd, txq->tfds, txq->q.dma_addr);
2191 + dma_free_coherent(dev, priv->hw_params.tfd_size *
2192 + txq->q.n_bd, txq->tfds, txq->q.dma_addr);
2193
2194 /* De-alloc array of per-TFD driver data */
2195 kfree(txq->txb);
2196 @@ -193,7 +194,7 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
2197 {
2198 struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
2199 struct iwl_queue *q = &txq->q;
2200 - struct pci_dev *dev = priv->pci_dev;
2201 + struct device *dev = &priv->pci_dev->dev;
2202 int i;
2203
2204 if (q->n_bd == 0)
2205 @@ -205,8 +206,8 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
2206
2207 /* De-alloc circular buffer of TFDs */
2208 if (txq->q.n_bd)
2209 - pci_free_consistent(dev, priv->hw_params.tfd_size *
2210 - txq->q.n_bd, txq->tfds, txq->q.dma_addr);
2211 + dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
2212 + txq->tfds, txq->q.dma_addr);
2213
2214 /* deallocate arrays */
2215 kfree(txq->cmd);
2216 @@ -297,7 +298,7 @@ static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
2217 static int iwl_tx_queue_alloc(struct iwl_priv *priv,
2218 struct iwl_tx_queue *txq, u32 id)
2219 {
2220 - struct pci_dev *dev = priv->pci_dev;
2221 + struct device *dev = &priv->pci_dev->dev;
2222 size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
2223
2224 /* Driver private data, only for Tx (not command) queues,
2225 @@ -316,8 +317,8 @@ static int iwl_tx_queue_alloc(struct iwl_priv *priv,
2226
2227 /* Circular buffer of transmit frame descriptors (TFDs),
2228 * shared with device */
2229 - txq->tfds = pci_alloc_consistent(dev, tfd_sz, &txq->q.dma_addr);
2230 -
2231 + txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
2232 + GFP_KERNEL);
2233 if (!txq->tfds) {
2234 IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
2235 goto error;
2236 diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
2237 index f8e4e4b..f297865 100644
2238 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
2239 +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
2240 @@ -352,10 +352,10 @@ static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
2241 static void iwl3945_unset_hw_params(struct iwl_priv *priv)
2242 {
2243 if (priv->shared_virt)
2244 - pci_free_consistent(priv->pci_dev,
2245 - sizeof(struct iwl3945_shared),
2246 - priv->shared_virt,
2247 - priv->shared_phys);
2248 + dma_free_coherent(&priv->pci_dev->dev,
2249 + sizeof(struct iwl3945_shared),
2250 + priv->shared_virt,
2251 + priv->shared_phys);
2252 }
2253
2254 static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
2255 @@ -1253,10 +1253,10 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rx
2256 }
2257 }
2258
2259 - pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
2260 - rxq->dma_addr);
2261 - pci_free_consistent(priv->pci_dev, sizeof(struct iwl_rb_status),
2262 - rxq->rb_stts, rxq->rb_stts_dma);
2263 + dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
2264 + rxq->dma_addr);
2265 + dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
2266 + rxq->rb_stts, rxq->rb_stts_dma);
2267 rxq->bd = NULL;
2268 rxq->rb_stts = NULL;
2269 }
2270 diff --git a/drivers/net/wireless/wl12xx/wl1251_debugfs.c b/drivers/net/wireless/wl12xx/wl1251_debugfs.c
2271 index a007230..1685c09 100644
2272 --- a/drivers/net/wireless/wl12xx/wl1251_debugfs.c
2273 +++ b/drivers/net/wireless/wl12xx/wl1251_debugfs.c
2274 @@ -443,7 +443,8 @@ out:
2275
2276 void wl1251_debugfs_reset(struct wl1251 *wl)
2277 {
2278 - memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats));
2279 + if (wl->stats.fw_stats != NULL)
2280 + memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats));
2281 wl->stats.retry_count = 0;
2282 wl->stats.excessive_retries = 0;
2283 }
2284 diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
2285 index 315fea4..3245d33 100644
2286 --- a/drivers/pci/pci.c
2287 +++ b/drivers/pci/pci.c
2288 @@ -2421,18 +2421,17 @@ EXPORT_SYMBOL_GPL(pci_reset_function);
2289 */
2290 int pcix_get_max_mmrbc(struct pci_dev *dev)
2291 {
2292 - int err, cap;
2293 + int cap;
2294 u32 stat;
2295
2296 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
2297 if (!cap)
2298 return -EINVAL;
2299
2300 - err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat);
2301 - if (err)
2302 + if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
2303 return -EINVAL;
2304
2305 - return (stat & PCI_X_STATUS_MAX_READ) >> 12;
2306 + return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
2307 }
2308 EXPORT_SYMBOL(pcix_get_max_mmrbc);
2309
2310 @@ -2445,18 +2444,17 @@ EXPORT_SYMBOL(pcix_get_max_mmrbc);
2311 */
2312 int pcix_get_mmrbc(struct pci_dev *dev)
2313 {
2314 - int ret, cap;
2315 - u32 cmd;
2316 + int cap;
2317 + u16 cmd;
2318
2319 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
2320 if (!cap)
2321 return -EINVAL;
2322
2323 - ret = pci_read_config_dword(dev, cap + PCI_X_CMD, &cmd);
2324 - if (!ret)
2325 - ret = 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
2326 + if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
2327 + return -EINVAL;
2328
2329 - return ret;
2330 + return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
2331 }
2332 EXPORT_SYMBOL(pcix_get_mmrbc);
2333
2334 @@ -2471,28 +2469,27 @@ EXPORT_SYMBOL(pcix_get_mmrbc);
2335 */
2336 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
2337 {
2338 - int cap, err = -EINVAL;
2339 - u32 stat, cmd, v, o;
2340 + int cap;
2341 + u32 stat, v, o;
2342 + u16 cmd;
2343
2344 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
2345 - goto out;
2346 + return -EINVAL;
2347
2348 v = ffs(mmrbc) - 10;
2349
2350 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
2351 if (!cap)
2352 - goto out;
2353 + return -EINVAL;
2354
2355 - err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat);
2356 - if (err)
2357 - goto out;
2358 + if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
2359 + return -EINVAL;
2360
2361 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
2362 return -E2BIG;
2363
2364 - err = pci_read_config_dword(dev, cap + PCI_X_CMD, &cmd);
2365 - if (err)
2366 - goto out;
2367 + if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
2368 + return -EINVAL;
2369
2370 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
2371 if (o != v) {
2372 @@ -2502,10 +2499,10 @@ int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
2373
2374 cmd &= ~PCI_X_CMD_MAX_READ;
2375 cmd |= v << 2;
2376 - err = pci_write_config_dword(dev, cap + PCI_X_CMD, cmd);
2377 + if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
2378 + return -EIO;
2379 }
2380 -out:
2381 - return err;
2382 + return 0;
2383 }
2384 EXPORT_SYMBOL(pcix_set_mmrbc);
2385
2386 diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
2387 index d58b940..456c265 100644
2388 --- a/drivers/pci/quirks.c
2389 +++ b/drivers/pci/quirks.c
2390 @@ -2534,6 +2534,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e7, quirk_i82576_sriov);
2391 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e8, quirk_i82576_sriov);
2392 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150a, quirk_i82576_sriov);
2393 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150d, quirk_i82576_sriov);
2394 +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1518, quirk_i82576_sriov);
2395
2396 #endif /* CONFIG_PCI_IOV */
2397
2398 diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
2399 index ed90082..8cb20e4 100644
2400 --- a/drivers/platform/x86/classmate-laptop.c
2401 +++ b/drivers/platform/x86/classmate-laptop.c
2402 @@ -34,6 +34,11 @@ struct cmpc_accel {
2403 #define CMPC_ACCEL_SENSITIVITY_DEFAULT 5
2404
2405
2406 +#define CMPC_ACCEL_HID "ACCE0000"
2407 +#define CMPC_TABLET_HID "TBLT0000"
2408 +#define CMPC_BL_HID "IPML200"
2409 +#define CMPC_KEYS_HID "FnBT0000"
2410 +
2411 /*
2412 * Generic input device code.
2413 */
2414 @@ -282,10 +287,9 @@ static int cmpc_accel_remove(struct acpi_device *acpi, int type)
2415 }
2416
2417 static const struct acpi_device_id cmpc_accel_device_ids[] = {
2418 - {"ACCE0000", 0},
2419 + {CMPC_ACCEL_HID, 0},
2420 {"", 0}
2421 };
2422 -MODULE_DEVICE_TABLE(acpi, cmpc_accel_device_ids);
2423
2424 static struct acpi_driver cmpc_accel_acpi_driver = {
2425 .owner = THIS_MODULE,
2426 @@ -366,10 +370,9 @@ static int cmpc_tablet_resume(struct acpi_device *acpi)
2427 }
2428
2429 static const struct acpi_device_id cmpc_tablet_device_ids[] = {
2430 - {"TBLT0000", 0},
2431 + {CMPC_TABLET_HID, 0},
2432 {"", 0}
2433 };
2434 -MODULE_DEVICE_TABLE(acpi, cmpc_tablet_device_ids);
2435
2436 static struct acpi_driver cmpc_tablet_acpi_driver = {
2437 .owner = THIS_MODULE,
2438 @@ -477,17 +480,16 @@ static int cmpc_bl_remove(struct acpi_device *acpi, int type)
2439 return 0;
2440 }
2441
2442 -static const struct acpi_device_id cmpc_device_ids[] = {
2443 - {"IPML200", 0},
2444 +static const struct acpi_device_id cmpc_bl_device_ids[] = {
2445 + {CMPC_BL_HID, 0},
2446 {"", 0}
2447 };
2448 -MODULE_DEVICE_TABLE(acpi, cmpc_device_ids);
2449
2450 static struct acpi_driver cmpc_bl_acpi_driver = {
2451 .owner = THIS_MODULE,
2452 .name = "cmpc",
2453 .class = "cmpc",
2454 - .ids = cmpc_device_ids,
2455 + .ids = cmpc_bl_device_ids,
2456 .ops = {
2457 .add = cmpc_bl_add,
2458 .remove = cmpc_bl_remove
2459 @@ -540,10 +542,9 @@ static int cmpc_keys_remove(struct acpi_device *acpi, int type)
2460 }
2461
2462 static const struct acpi_device_id cmpc_keys_device_ids[] = {
2463 - {"FnBT0000", 0},
2464 + {CMPC_KEYS_HID, 0},
2465 {"", 0}
2466 };
2467 -MODULE_DEVICE_TABLE(acpi, cmpc_keys_device_ids);
2468
2469 static struct acpi_driver cmpc_keys_acpi_driver = {
2470 .owner = THIS_MODULE,
2471 @@ -607,3 +608,13 @@ static void cmpc_exit(void)
2472
2473 module_init(cmpc_init);
2474 module_exit(cmpc_exit);
2475 +
2476 +static const struct acpi_device_id cmpc_device_ids[] = {
2477 + {CMPC_ACCEL_HID, 0},
2478 + {CMPC_TABLET_HID, 0},
2479 + {CMPC_BL_HID, 0},
2480 + {CMPC_KEYS_HID, 0},
2481 + {"", 0}
2482 +};
2483 +
2484 +MODULE_DEVICE_TABLE(acpi, cmpc_device_ids);
2485 diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
2486 index fa34b92..1b82170 100644
2487 --- a/drivers/scsi/qlogicpti.c
2488 +++ b/drivers/scsi/qlogicpti.c
2489 @@ -738,7 +738,7 @@ static int __devinit qpti_register_irq(struct qlogicpti *qpti)
2490 * sanely maintain.
2491 */
2492 if (request_irq(qpti->irq, qpti_intr,
2493 - IRQF_SHARED, "Qlogic/PTI", qpti))
2494 + IRQF_SHARED, "QlogicPTI", qpti))
2495 goto fail;
2496
2497 printk("qlogicpti%d: IRQ %d ", qpti->qpti_id, qpti->irq);
2498 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
2499 index 653f22a..bb8fd5b 100644
2500 --- a/drivers/scsi/scsi_transport_fc.c
2501 +++ b/drivers/scsi/scsi_transport_fc.c
2502 @@ -1216,6 +1216,15 @@ store_fc_vport_delete(struct device *dev, struct device_attribute *attr,
2503 {
2504 struct fc_vport *vport = transport_class_to_vport(dev);
2505 struct Scsi_Host *shost = vport_to_shost(vport);
2506 + unsigned long flags;
2507 +
2508 + spin_lock_irqsave(shost->host_lock, flags);
2509 + if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) {
2510 + spin_unlock_irqrestore(shost->host_lock, flags);
2511 + return -EBUSY;
2512 + }
2513 + vport->flags |= FC_VPORT_DELETING;
2514 + spin_unlock_irqrestore(shost->host_lock, flags);
2515
2516 fc_queue_work(shost, &vport->vport_delete_work);
2517 return count;
2518 @@ -1805,6 +1814,9 @@ store_fc_host_vport_delete(struct device *dev, struct device_attribute *attr,
2519 list_for_each_entry(vport, &fc_host->vports, peers) {
2520 if ((vport->channel == 0) &&
2521 (vport->port_name == wwpn) && (vport->node_name == wwnn)) {
2522 + if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))
2523 + break;
2524 + vport->flags |= FC_VPORT_DELETING;
2525 match = 1;
2526 break;
2527 }
2528 @@ -3354,18 +3366,6 @@ fc_vport_terminate(struct fc_vport *vport)
2529 unsigned long flags;
2530 int stat;
2531
2532 - spin_lock_irqsave(shost->host_lock, flags);
2533 - if (vport->flags & FC_VPORT_CREATING) {
2534 - spin_unlock_irqrestore(shost->host_lock, flags);
2535 - return -EBUSY;
2536 - }
2537 - if (vport->flags & (FC_VPORT_DEL)) {
2538 - spin_unlock_irqrestore(shost->host_lock, flags);
2539 - return -EALREADY;
2540 - }
2541 - vport->flags |= FC_VPORT_DELETING;
2542 - spin_unlock_irqrestore(shost->host_lock, flags);
2543 -
2544 if (i->f->vport_delete)
2545 stat = i->f->vport_delete(vport);
2546 else
2547 diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
2548 index 55b034b..3c8a024 100644
2549 --- a/drivers/scsi/ses.c
2550 +++ b/drivers/scsi/ses.c
2551 @@ -591,8 +591,6 @@ static int ses_intf_add(struct device *cdev,
2552 ses_dev->page10_len = len;
2553 buf = NULL;
2554 }
2555 - kfree(hdr_buf);
2556 -
2557 scomp = kzalloc(sizeof(struct ses_component) * components, GFP_KERNEL);
2558 if (!scomp)
2559 goto err_free;
2560 @@ -604,6 +602,8 @@ static int ses_intf_add(struct device *cdev,
2561 goto err_free;
2562 }
2563
2564 + kfree(hdr_buf);
2565 +
2566 edev->scratch = ses_dev;
2567 for (i = 0; i < components; i++)
2568 edev->component[i].scratch = scomp + i;
2569 diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
2570 index a678186..4fd67d6 100644
2571 --- a/drivers/usb/core/devio.c
2572 +++ b/drivers/usb/core/devio.c
2573 @@ -1176,6 +1176,13 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
2574 free_async(as);
2575 return -ENOMEM;
2576 }
2577 + /* Isochronous input data may end up being discontiguous
2578 + * if some of the packets are short. Clear the buffer so
2579 + * that the gaps don't leak kernel data to userspace.
2580 + */
2581 + if (is_in && uurb->type == USBDEVFS_URB_TYPE_ISO)
2582 + memset(as->urb->transfer_buffer, 0,
2583 + uurb->buffer_length);
2584 }
2585 as->urb->dev = ps->dev;
2586 as->urb->pipe = (uurb->type << 30) |
2587 @@ -1312,10 +1319,14 @@ static int processcompl(struct async *as, void __user * __user *arg)
2588 void __user *addr = as->userurb;
2589 unsigned int i;
2590
2591 - if (as->userbuffer && urb->actual_length)
2592 - if (copy_to_user(as->userbuffer, urb->transfer_buffer,
2593 - urb->actual_length))
2594 + if (as->userbuffer && urb->actual_length) {
2595 + if (urb->number_of_packets > 0) /* Isochronous */
2596 + i = urb->transfer_buffer_length;
2597 + else /* Non-Isoc */
2598 + i = urb->actual_length;
2599 + if (copy_to_user(as->userbuffer, urb->transfer_buffer, i))
2600 goto err_out;
2601 + }
2602 if (put_user(as->status, &userurb->status))
2603 goto err_out;
2604 if (put_user(urb->actual_length, &userurb->actual_length))
2605 diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
2606 index 1ec3857..9c90b67 100644
2607 --- a/drivers/usb/host/ehci-hcd.c
2608 +++ b/drivers/usb/host/ehci-hcd.c
2609 @@ -995,7 +995,7 @@ rescan:
2610 /* endpoints can be iso streams. for now, we don't
2611 * accelerate iso completions ... so spin a while.
2612 */
2613 - if (qh->hw->hw_info1 == 0) {
2614 + if (qh->hw == NULL) {
2615 ehci_vdbg (ehci, "iso delay\n");
2616 goto idle_timeout;
2617 }
2618 diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
2619 index 1e391e6..df533ce 100644
2620 --- a/drivers/usb/host/ehci-sched.c
2621 +++ b/drivers/usb/host/ehci-sched.c
2622 @@ -1121,8 +1121,8 @@ iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
2623 urb->interval);
2624 }
2625
2626 - /* if dev->ep [epnum] is a QH, info1.maxpacket is nonzero */
2627 - } else if (unlikely (stream->hw_info1 != 0)) {
2628 + /* if dev->ep [epnum] is a QH, hw is set */
2629 + } else if (unlikely (stream->hw != NULL)) {
2630 ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n",
2631 urb->dev->devpath, epnum,
2632 usb_pipein(urb->pipe) ? "in" : "out");
2633 @@ -1563,13 +1563,27 @@ itd_patch(
2634 static inline void
2635 itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
2636 {
2637 - /* always prepend ITD/SITD ... only QH tree is order-sensitive */
2638 - itd->itd_next = ehci->pshadow [frame];
2639 - itd->hw_next = ehci->periodic [frame];
2640 - ehci->pshadow [frame].itd = itd;
2641 + union ehci_shadow *prev = &ehci->pshadow[frame];
2642 + __hc32 *hw_p = &ehci->periodic[frame];
2643 + union ehci_shadow here = *prev;
2644 + __hc32 type = 0;
2645 +
2646 + /* skip any iso nodes which might belong to previous microframes */
2647 + while (here.ptr) {
2648 + type = Q_NEXT_TYPE(ehci, *hw_p);
2649 + if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
2650 + break;
2651 + prev = periodic_next_shadow(ehci, prev, type);
2652 + hw_p = shadow_next_periodic(ehci, &here, type);
2653 + here = *prev;
2654 + }
2655 +
2656 + itd->itd_next = here;
2657 + itd->hw_next = *hw_p;
2658 + prev->itd = itd;
2659 itd->frame = frame;
2660 wmb ();
2661 - ehci->periodic[frame] = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
2662 + *hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
2663 }
2664
2665 /* fit urb's itds into the selected schedule slot; activate as needed */
2666 diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
2667 index 2d85e21..b1dce96 100644
2668 --- a/drivers/usb/host/ehci.h
2669 +++ b/drivers/usb/host/ehci.h
2670 @@ -394,9 +394,8 @@ struct ehci_iso_sched {
2671 * acts like a qh would, if EHCI had them for ISO.
2672 */
2673 struct ehci_iso_stream {
2674 - /* first two fields match QH, but info1 == 0 */
2675 - __hc32 hw_next;
2676 - __hc32 hw_info1;
2677 + /* first field matches ehci_hq, but is NULL */
2678 + struct ehci_qh_hw *hw;
2679
2680 u32 refcount;
2681 u8 bEndpointAddress;
2682 diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
2683 index bee558a..f71a73a 100644
2684 --- a/drivers/usb/host/r8a66597-hcd.c
2685 +++ b/drivers/usb/host/r8a66597-hcd.c
2686 @@ -418,7 +418,7 @@ static u8 alloc_usb_address(struct r8a66597 *r8a66597, struct urb *urb)
2687
2688 /* this function must be called with interrupt disabled */
2689 static void free_usb_address(struct r8a66597 *r8a66597,
2690 - struct r8a66597_device *dev)
2691 + struct r8a66597_device *dev, int reset)
2692 {
2693 int port;
2694
2695 @@ -430,7 +430,13 @@ static void free_usb_address(struct r8a66597 *r8a66597,
2696 dev->state = USB_STATE_DEFAULT;
2697 r8a66597->address_map &= ~(1 << dev->address);
2698 dev->address = 0;
2699 - dev_set_drvdata(&dev->udev->dev, NULL);
2700 + /*
2701 + * Only when resetting USB, it is necessary to erase drvdata. When
2714 2702 + * a usb device with usb hub is disconnected, "dev->udev" is already
2715 2703 + * freed on usb_disconnect(). So we cannot access the data.
2704 + */
2705 + if (reset)
2706 + dev_set_drvdata(&dev->udev->dev, NULL);
2707 list_del(&dev->device_list);
2708 kfree(dev);
2709
2710 @@ -1069,7 +1075,7 @@ static void r8a66597_usb_disconnect(struct r8a66597 *r8a66597, int port)
2711 struct r8a66597_device *dev = r8a66597->root_hub[port].dev;
2712
2713 disable_r8a66597_pipe_all(r8a66597, dev);
2714 - free_usb_address(r8a66597, dev);
2715 + free_usb_address(r8a66597, dev, 0);
2716
2717 start_root_hub_sampling(r8a66597, port, 0);
2718 }
2719 @@ -2085,7 +2091,7 @@ static void update_usb_address_map(struct r8a66597 *r8a66597,
2720 spin_lock_irqsave(&r8a66597->lock, flags);
2721 dev = get_r8a66597_device(r8a66597, addr);
2722 disable_r8a66597_pipe_all(r8a66597, dev);
2723 - free_usb_address(r8a66597, dev);
2724 + free_usb_address(r8a66597, dev, 0);
2725 put_child_connect_map(r8a66597, addr);
2726 spin_unlock_irqrestore(&r8a66597->lock, flags);
2727 }
2728 @@ -2228,7 +2234,7 @@ static int r8a66597_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
2729 rh->port |= (1 << USB_PORT_FEAT_RESET);
2730
2731 disable_r8a66597_pipe_all(r8a66597, dev);
2732 - free_usb_address(r8a66597, dev);
2733 + free_usb_address(r8a66597, dev, 1);
2734
2735 r8a66597_mdfy(r8a66597, USBRST, USBRST | UACT,
2736 get_dvstctr_reg(port));
2737 diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
2738 index 5e92c72..fa920c7 100644
2739 --- a/drivers/usb/host/xhci-hcd.c
2740 +++ b/drivers/usb/host/xhci-hcd.c
2741 @@ -1173,6 +1173,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2742 cmd_completion = &virt_dev->cmd_completion;
2743 cmd_status = &virt_dev->cmd_status;
2744 }
2745 + init_completion(cmd_completion);
2746
2747 if (!ctx_change)
2748 ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
2749 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
2750 index 34acf6c..ca9e3ba 100644
2751 --- a/drivers/usb/serial/ftdi_sio.c
2752 +++ b/drivers/usb/serial/ftdi_sio.c
2753 @@ -658,6 +658,7 @@ static struct usb_device_id id_table_combined [] = {
2754 { USB_DEVICE(EVOLUTION_VID, EVOLUTION_ER1_PID) },
2755 { USB_DEVICE(EVOLUTION_VID, EVO_HYBRID_PID) },
2756 { USB_DEVICE(EVOLUTION_VID, EVO_RCM4_PID) },
2757 + { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },
2758 { USB_DEVICE(FTDI_VID, FTDI_ARTEMIS_PID) },
2759 { USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16_PID) },
2760 { USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16C_PID) },
2761 diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
2762 index d10b5a8..8f9e805 100644
2763 --- a/drivers/usb/serial/ftdi_sio_ids.h
2764 +++ b/drivers/usb/serial/ftdi_sio_ids.h
2765 @@ -501,6 +501,13 @@
2766 #define CONTEC_COM1USBH_PID 0x8311 /* COM-1(USB)H */
2767
2768 /*
2769 + * Contec products (http://www.contec.com)
2770 + * Submitted by Daniel Sangorrin
2771 + */
2772 +#define CONTEC_VID 0x06CE /* Vendor ID */
2773 +#define CONTEC_COM1USBH_PID 0x8311 /* COM-1(USB)H */
2774 +
2775 +/*
2776 * Definitions for B&B Electronics products.
2777 */
2778 #define BANDB_VID 0x0856 /* B&B Electronics Vendor ID */
2779 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
2780 index 6e94a67..d93283d 100644
2781 --- a/drivers/usb/serial/option.c
2782 +++ b/drivers/usb/serial/option.c
2783 @@ -288,7 +288,9 @@ static int option_resume(struct usb_serial *serial);
2784
2785 #define QUALCOMM_VENDOR_ID 0x05C6
2786
2787 -#define MAXON_VENDOR_ID 0x16d8
2788 +#define CMOTECH_VENDOR_ID 0x16d8
2789 +#define CMOTECH_PRODUCT_6008 0x6008
2790 +#define CMOTECH_PRODUCT_6280 0x6280
2791
2792 #define TELIT_VENDOR_ID 0x1bc7
2793 #define TELIT_PRODUCT_UC864E 0x1003
2794 @@ -520,7 +522,8 @@ static struct usb_device_id option_ids[] = {
2795 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
2796 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
2797 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
2798 - { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */
2799 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
2800 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
2801 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
2802 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) },
2803 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
2804 diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
2805 index 7528b8d..8ab4ab2 100644
2806 --- a/drivers/usb/serial/qcserial.c
2807 +++ b/drivers/usb/serial/qcserial.c
2808 @@ -47,6 +47,35 @@ static struct usb_device_id id_table[] = {
2809 {USB_DEVICE(0x05c6, 0x9221)}, /* Generic Gobi QDL device */
2810 {USB_DEVICE(0x05c6, 0x9231)}, /* Generic Gobi QDL device */
2811 {USB_DEVICE(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */
2812 + {USB_DEVICE(0x413c, 0x8185)}, /* Dell Gobi 2000 QDL device (N0218, VU936) */
2813 + {USB_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */
2814 + {USB_DEVICE(0x05c6, 0x9224)}, /* Sony Gobi 2000 QDL device (N0279, VU730) */
2815 + {USB_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */
2816 + {USB_DEVICE(0x05c6, 0x9244)}, /* Samsung Gobi 2000 QDL device (VL176) */
2817 + {USB_DEVICE(0x05c6, 0x9245)}, /* Samsung Gobi 2000 Modem device (VL176) */
2818 + {USB_DEVICE(0x03f0, 0x241d)}, /* HP Gobi 2000 QDL device (VP412) */
2819 + {USB_DEVICE(0x03f0, 0x251d)}, /* HP Gobi 2000 Modem device (VP412) */
2820 + {USB_DEVICE(0x05c6, 0x9214)}, /* Acer Gobi 2000 QDL device (VP413) */
2821 + {USB_DEVICE(0x05c6, 0x9215)}, /* Acer Gobi 2000 Modem device (VP413) */
2822 + {USB_DEVICE(0x05c6, 0x9264)}, /* Asus Gobi 2000 QDL device (VR305) */
2823 + {USB_DEVICE(0x05c6, 0x9265)}, /* Asus Gobi 2000 Modem device (VR305) */
2824 + {USB_DEVICE(0x05c6, 0x9234)}, /* Top Global Gobi 2000 QDL device (VR306) */
2825 + {USB_DEVICE(0x05c6, 0x9235)}, /* Top Global Gobi 2000 Modem device (VR306) */
2826 + {USB_DEVICE(0x05c6, 0x9274)}, /* iRex Technologies Gobi 2000 QDL device (VR307) */
2827 + {USB_DEVICE(0x05c6, 0x9275)}, /* iRex Technologies Gobi 2000 Modem device (VR307) */
2828 + {USB_DEVICE(0x1199, 0x9000)}, /* Sierra Wireless Gobi 2000 QDL device (VT773) */
2829 + {USB_DEVICE(0x1199, 0x9001)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2830 + {USB_DEVICE(0x1199, 0x9002)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2831 + {USB_DEVICE(0x1199, 0x9003)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2832 + {USB_DEVICE(0x1199, 0x9004)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2833 + {USB_DEVICE(0x1199, 0x9005)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2834 + {USB_DEVICE(0x1199, 0x9006)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2835 + {USB_DEVICE(0x1199, 0x9007)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2836 + {USB_DEVICE(0x1199, 0x9008)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2837 + {USB_DEVICE(0x1199, 0x9009)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2838 + {USB_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
2839 + {USB_DEVICE(0x16d8, 0x8001)}, /* CMDTech Gobi 2000 QDL device (VU922) */
2840 + {USB_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */
2841 { } /* Terminating entry */
2842 };
2843 MODULE_DEVICE_TABLE(usb, id_table);
2844 diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
2845 index 5a5c303..f15fb02 100644
2846 --- a/drivers/video/Kconfig
2847 +++ b/drivers/video/Kconfig
2848 @@ -909,6 +909,18 @@ config FB_XVR2500
2849 mostly initialized the card already. It is treated as a
2850 completely dumb framebuffer device.
2851
2852 +config FB_XVR1000
2853 + bool "Sun XVR-1000 support"
2854 + depends on (FB = y) && SPARC64
2855 + select FB_CFB_FILLRECT
2856 + select FB_CFB_COPYAREA
2857 + select FB_CFB_IMAGEBLIT
2858 + help
2859 + This is the framebuffer device for the Sun XVR-1000 and similar
2860 + graphics cards. The driver only works on sparc64 systems where
2861 + the system firmware has mostly initialized the card already. It
2862 + is treated as a completely dumb framebuffer device.
2863 +
2864 config FB_PVR2
2865 tristate "NEC PowerVR 2 display support"
2866 depends on FB && SH_DREAMCAST
2867 diff --git a/drivers/video/Makefile b/drivers/video/Makefile
2868 index 4ecb30c..8c9a357 100644
2869 --- a/drivers/video/Makefile
2870 +++ b/drivers/video/Makefile
2871 @@ -79,6 +79,7 @@ obj-$(CONFIG_FB_N411) += n411.o
2872 obj-$(CONFIG_FB_HGA) += hgafb.o
2873 obj-$(CONFIG_FB_XVR500) += sunxvr500.o
2874 obj-$(CONFIG_FB_XVR2500) += sunxvr2500.o
2875 +obj-$(CONFIG_FB_XVR1000) += sunxvr1000.o
2876 obj-$(CONFIG_FB_IGA) += igafb.o
2877 obj-$(CONFIG_FB_APOLLO) += dnfb.o
2878 obj-$(CONFIG_FB_Q40) += q40fb.o
2879 diff --git a/drivers/video/sunxvr1000.c b/drivers/video/sunxvr1000.c
2880 new file mode 100644
2881 index 0000000..a8248c0
2882 --- /dev/null
2883 +++ b/drivers/video/sunxvr1000.c
2884 @@ -0,0 +1,228 @@
2885 +/* sunxvr1000.c: Sun XVR-1000 driver for sparc64 systems
2886 + *
2887 + * Copyright (C) 2010 David S. Miller (davem@davemloft.net)
2888 + */
2889 +
2890 +#include <linux/module.h>
2891 +#include <linux/kernel.h>
2892 +#include <linux/slab.h>
2893 +#include <linux/fb.h>
2894 +#include <linux/init.h>
2895 +#include <linux/of_device.h>
2896 +
2897 +struct gfb_info {
2898 + struct fb_info *info;
2899 +
2900 + char __iomem *fb_base;
2901 + unsigned long fb_base_phys;
2902 +
2903 + struct device_node *of_node;
2904 +
2905 + unsigned int width;
2906 + unsigned int height;
2907 + unsigned int depth;
2908 + unsigned int fb_size;
2909 +
2910 + u32 pseudo_palette[16];
2911 +};
2912 +
2913 +static int __devinit gfb_get_props(struct gfb_info *gp)
2914 +{
2915 + gp->width = of_getintprop_default(gp->of_node, "width", 0);
2916 + gp->height = of_getintprop_default(gp->of_node, "height", 0);
2917 + gp->depth = of_getintprop_default(gp->of_node, "depth", 32);
2918 +
2919 + if (!gp->width || !gp->height) {
2920 + printk(KERN_ERR "gfb: Critical properties missing for %s\n",
2921 + gp->of_node->full_name);
2922 + return -EINVAL;
2923 + }
2924 +
2925 + return 0;
2926 +}
2927 +
2928 +static int gfb_setcolreg(unsigned regno,
2929 + unsigned red, unsigned green, unsigned blue,
2930 + unsigned transp, struct fb_info *info)
2931 +{
2932 + u32 value;
2933 +
2934 + if (regno < 16) {
2935 + red >>= 8;
2936 + green >>= 8;
2937 + blue >>= 8;
2938 +
2939 + value = (blue << 16) | (green << 8) | red;
2940 + ((u32 *)info->pseudo_palette)[regno] = value;
2941 + }
2942 +
2943 + return 0;
2944 +}
2945 +
2946 +static struct fb_ops gfb_ops = {
2947 + .owner = THIS_MODULE,
2948 + .fb_setcolreg = gfb_setcolreg,
2949 + .fb_fillrect = cfb_fillrect,
2950 + .fb_copyarea = cfb_copyarea,
2951 + .fb_imageblit = cfb_imageblit,
2952 +};
2953 +
2954 +static int __devinit gfb_set_fbinfo(struct gfb_info *gp)
2955 +{
2956 + struct fb_info *info = gp->info;
2957 + struct fb_var_screeninfo *var = &info->var;
2958 +
2959 + info->flags = FBINFO_DEFAULT;
2960 + info->fbops = &gfb_ops;
2961 + info->screen_base = gp->fb_base;
2962 + info->screen_size = gp->fb_size;
2963 +
2964 + info->pseudo_palette = gp->pseudo_palette;
2965 +
2966 + /* Fill fix common fields */
2967 + strlcpy(info->fix.id, "gfb", sizeof(info->fix.id));
2968 + info->fix.smem_start = gp->fb_base_phys;
2969 + info->fix.smem_len = gp->fb_size;
2970 + info->fix.type = FB_TYPE_PACKED_PIXELS;
2971 + if (gp->depth == 32 || gp->depth == 24)
2972 + info->fix.visual = FB_VISUAL_TRUECOLOR;
2973 + else
2974 + info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
2975 +
2976 + var->xres = gp->width;
2977 + var->yres = gp->height;
2978 + var->xres_virtual = var->xres;
2979 + var->yres_virtual = var->yres;
2980 + var->bits_per_pixel = gp->depth;
2981 +
2982 + var->red.offset = 0;
2983 + var->red.length = 8;
2984 + var->green.offset = 8;
2985 + var->green.length = 8;
2986 + var->blue.offset = 16;
2987 + var->blue.length = 8;
2988 + var->transp.offset = 0;
2989 + var->transp.length = 0;
2990 +
2991 + if (fb_alloc_cmap(&info->cmap, 256, 0)) {
2992 + printk(KERN_ERR "gfb: Cannot allocate color map.\n");
2993 + return -ENOMEM;
2994 + }
2995 +
2996 + return 0;
2997 +}
2998 +
2999 +static int __devinit gfb_probe(struct of_device *op,
3000 + const struct of_device_id *match)
3001 +{
3002 + struct device_node *dp = op->node;
3003 + struct fb_info *info;
3004 + struct gfb_info *gp;
3005 + int err;
3006 +
3007 + info = framebuffer_alloc(sizeof(struct gfb_info), &op->dev);
3008 + if (!info) {
3009 + printk(KERN_ERR "gfb: Cannot allocate fb_info\n");
3010 + err = -ENOMEM;
3011 + goto err_out;
3012 + }
3013 +
3014 + gp = info->par;
3015 + gp->info = info;
3016 + gp->of_node = dp;
3017 +
3018 + gp->fb_base_phys = op->resource[6].start;
3019 +
3020 + err = gfb_get_props(gp);
3021 + if (err)
3022 + goto err_release_fb;
3023 +
3024 + /* Framebuffer length is the same regardless of resolution. */
3025 + info->fix.line_length = 16384;
3026 + gp->fb_size = info->fix.line_length * gp->height;
3027 +
3028 + gp->fb_base = of_ioremap(&op->resource[6], 0,
3029 + gp->fb_size, "gfb fb");
3030 + if (!gp->fb_base)
3031 + goto err_release_fb;
3032 +
3033 + err = gfb_set_fbinfo(gp);
3034 + if (err)
3035 + goto err_unmap_fb;
3036 +
3037 + printk("gfb: Found device at %s\n", dp->full_name);
3038 +
3039 + err = register_framebuffer(info);
3040 + if (err < 0) {
3041 + printk(KERN_ERR "gfb: Could not register framebuffer %s\n",
3042 + dp->full_name);
3043 + goto err_unmap_fb;
3044 + }
3045 +
3046 + dev_set_drvdata(&op->dev, info);
3047 +
3048 + return 0;
3049 +
3050 +err_unmap_fb:
3051 + of_iounmap(&op->resource[6], gp->fb_base, gp->fb_size);
3052 +
3053 +err_release_fb:
3054 + framebuffer_release(info);
3055 +
3056 +err_out:
3057 + return err;
3058 +}
3059 +
3060 +static int __devexit gfb_remove(struct of_device *op)
3061 +{
3062 + struct fb_info *info = dev_get_drvdata(&op->dev);
3063 + struct gfb_info *gp = info->par;
3064 +
3065 + unregister_framebuffer(info);
3066 +
3067 + iounmap(gp->fb_base);
3068 +
3069 + of_iounmap(&op->resource[6], gp->fb_base, gp->fb_size);
3070 +
3071 + framebuffer_release(info);
3072 +
3073 + dev_set_drvdata(&op->dev, NULL);
3074 +
3075 + return 0;
3076 +}
3077 +
3078 +static const struct of_device_id gfb_match[] = {
3079 + {
3080 + .name = "SUNW,gfb",
3081 + },
3082 + {},
3083 +};
3084 +MODULE_DEVICE_TABLE(of, gfb_match);
3085 +
3086 +static struct of_platform_driver gfb_driver = {
3087 + .name = "gfb",
3088 + .match_table = gfb_match,
3089 + .probe = gfb_probe,
3090 + .remove = __devexit_p(gfb_remove),
3091 +};
3092 +
3093 +static int __init gfb_init(void)
3094 +{
3095 + if (fb_get_options("gfb", NULL))
3096 + return -ENODEV;
3097 +
3098 + return of_register_driver(&gfb_driver, &of_bus_type);
3099 +}
3100 +
3101 +static void __exit gfb_exit(void)
3102 +{
3103 + of_unregister_driver(&gfb_driver);
3104 +}
3105 +
3106 +module_init(gfb_init);
3107 +module_exit(gfb_exit);
3108 +
3109 +MODULE_DESCRIPTION("framebuffer driver for Sun XVR-1000 graphics");
3110 +MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
3111 +MODULE_VERSION("1.0");
3112 +MODULE_LICENSE("GPL");
3113 diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
3114 index 28d9cf7..7127bfe 100644
3115 --- a/drivers/virtio/virtio_pci.c
3116 +++ b/drivers/virtio/virtio_pci.c
3117 @@ -473,7 +473,8 @@ static void vp_del_vqs(struct virtio_device *vdev)
3118
3119 list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
3120 info = vq->priv;
3121 - if (vp_dev->per_vq_vectors)
3122 + if (vp_dev->per_vq_vectors &&
3123 + info->msix_vector != VIRTIO_MSI_NO_VECTOR)
3124 free_irq(vp_dev->msix_entries[info->msix_vector].vector,
3125 vq);
3126 vp_del_vq(vq);
3127 diff --git a/fs/exec.c b/fs/exec.c
3128 index cce6bbd..9071360 100644
3129 --- a/fs/exec.c
3130 +++ b/fs/exec.c
3131 @@ -1923,8 +1923,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
3132 /*
3133 * Dont allow local users get cute and trick others to coredump
3134 * into their pre-created files:
3135 + * Note, this is not relevant for pipes
3136 */
3137 - if (inode->i_uid != current_fsuid())
3138 + if (!ispipe && (inode->i_uid != current_fsuid()))
3139 goto close_fail;
3140 if (!cprm.file->f_op)
3141 goto close_fail;
3142 diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
3143 index a6abbae..e6dd2ae 100644
3144 --- a/fs/gfs2/file.c
3145 +++ b/fs/gfs2/file.c
3146 @@ -640,7 +640,7 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
3147
3148 if (!(fl->fl_flags & FL_POSIX))
3149 return -ENOLCK;
3150 - if (__mandatory_lock(&ip->i_inode))
3151 + if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
3152 return -ENOLCK;
3153
3154 if (cmd == F_CANCELLK) {
3155 diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
3156 index 944b627..69e7b81 100644
3157 --- a/fs/nfs/delegation.h
3158 +++ b/fs/nfs/delegation.h
3159 @@ -71,4 +71,10 @@ static inline int nfs_inode_return_delegation(struct inode *inode)
3160 }
3161 #endif
3162
3163 +static inline int nfs_have_delegated_attributes(struct inode *inode)
3164 +{
3165 + return nfs_have_delegation(inode, FMODE_READ) &&
3166 + !(NFS_I(inode)->cache_validity & NFS_INO_REVAL_FORCED);
3167 +}
3168 +
3169 #endif
3170 diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
3171 index 3c7f03b..8b5382e 100644
3172 --- a/fs/nfs/dir.c
3173 +++ b/fs/nfs/dir.c
3174 @@ -1789,7 +1789,7 @@ static int nfs_access_get_cached(struct inode *inode, struct rpc_cred *cred, str
3175 cache = nfs_access_search_rbtree(inode, cred);
3176 if (cache == NULL)
3177 goto out;
3178 - if (!nfs_have_delegation(inode, FMODE_READ) &&
3179 + if (!nfs_have_delegated_attributes(inode) &&
3180 !time_in_range_open(jiffies, cache->jiffies, cache->jiffies + nfsi->attrtimeo))
3181 goto out_stale;
3182 res->jiffies = cache->jiffies;
3183 diff --git a/fs/nfs/file.c b/fs/nfs/file.c
3184 index 63f2071..bdd2142 100644
3185 --- a/fs/nfs/file.c
3186 +++ b/fs/nfs/file.c
3187 @@ -486,7 +486,8 @@ static int nfs_release_page(struct page *page, gfp_t gfp)
3188 {
3189 dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
3190
3191 - if (gfp & __GFP_WAIT)
3192 + /* Only do I/O if gfp is a superset of GFP_KERNEL */
3193 + if ((gfp & GFP_KERNEL) == GFP_KERNEL)
3194 nfs_wb_page(page->mapping->host, page);
3195 /* If PagePrivate() is set, then the page is not freeable */
3196 if (PagePrivate(page))
3197 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
3198 index f141bde..5f59a2d 100644
3199 --- a/fs/nfs/inode.c
3200 +++ b/fs/nfs/inode.c
3201 @@ -759,7 +759,7 @@ int nfs_attribute_timeout(struct inode *inode)
3202 {
3203 struct nfs_inode *nfsi = NFS_I(inode);
3204
3205 - if (nfs_have_delegation(inode, FMODE_READ))
3206 + if (nfs_have_delegated_attributes(inode))
3207 return 0;
3208 return !time_in_range_open(jiffies, nfsi->read_cache_jiffies, nfsi->read_cache_jiffies + nfsi->attrtimeo);
3209 }
3210 diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
3211 index a12c45b..29d9d36 100644
3212 --- a/fs/nfs/pagelist.c
3213 +++ b/fs/nfs/pagelist.c
3214 @@ -112,12 +112,10 @@ void nfs_unlock_request(struct nfs_page *req)
3215 */
3216 int nfs_set_page_tag_locked(struct nfs_page *req)
3217 {
3218 - struct nfs_inode *nfsi = NFS_I(req->wb_context->path.dentry->d_inode);
3219 -
3220 if (!nfs_lock_request_dontget(req))
3221 return 0;
3222 if (req->wb_page != NULL)
3223 - radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
3224 + radix_tree_tag_set(&NFS_I(req->wb_context->path.dentry->d_inode)->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
3225 return 1;
3226 }
3227
3228 @@ -126,10 +124,10 @@ int nfs_set_page_tag_locked(struct nfs_page *req)
3229 */
3230 void nfs_clear_page_tag_locked(struct nfs_page *req)
3231 {
3232 - struct inode *inode = req->wb_context->path.dentry->d_inode;
3233 - struct nfs_inode *nfsi = NFS_I(inode);
3234 -
3235 if (req->wb_page != NULL) {
3236 + struct inode *inode = req->wb_context->path.dentry->d_inode;
3237 + struct nfs_inode *nfsi = NFS_I(inode);
3238 +
3239 spin_lock(&inode->i_lock);
3240 radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
3241 nfs_unlock_request(req);
3242 @@ -142,16 +140,22 @@ void nfs_clear_page_tag_locked(struct nfs_page *req)
3243 * nfs_clear_request - Free up all resources allocated to the request
3244 * @req:
3245 *
3246 - * Release page resources associated with a write request after it
3247 - * has completed.
3248 + * Release page and open context resources associated with a read/write
3249 + * request after it has completed.
3250 */
3251 void nfs_clear_request(struct nfs_page *req)
3252 {
3253 struct page *page = req->wb_page;
3254 + struct nfs_open_context *ctx = req->wb_context;
3255 +
3256 if (page != NULL) {
3257 page_cache_release(page);
3258 req->wb_page = NULL;
3259 }
3260 + if (ctx != NULL) {
3261 + put_nfs_open_context(ctx);
3262 + req->wb_context = NULL;
3263 + }
3264 }
3265
3266
3267 @@ -165,9 +169,8 @@ static void nfs_free_request(struct kref *kref)
3268 {
3269 struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
3270
3271 - /* Release struct file or cached credential */
3272 + /* Release struct file and open context */
3273 nfs_clear_request(req);
3274 - put_nfs_open_context(req->wb_context);
3275 nfs_page_free(req);
3276 }
3277
3278 diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
3279 index 105b508..ddce17b 100644
3280 --- a/fs/nilfs2/segment.c
3281 +++ b/fs/nilfs2/segment.c
3282 @@ -1902,8 +1902,7 @@ static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
3283
3284 list_splice_tail_init(&sci->sc_write_logs, &logs);
3285 ret = nilfs_wait_on_logs(&logs);
3286 - if (ret)
3287 - nilfs_abort_logs(&logs, NULL, sci->sc_super_root, ret);
3288 + nilfs_abort_logs(&logs, NULL, sci->sc_super_root, ret ? : err);
3289
3290 list_splice_tail_init(&sci->sc_segbufs, &logs);
3291 nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
3292 diff --git a/fs/partitions/msdos.c b/fs/partitions/msdos.c
3293 index 0028d2e..90be97f 100644
3294 --- a/fs/partitions/msdos.c
3295 +++ b/fs/partitions/msdos.c
3296 @@ -31,14 +31,17 @@
3297 */
3298 #include <asm/unaligned.h>
3299
3300 -#define SYS_IND(p) (get_unaligned(&p->sys_ind))
3301 -#define NR_SECTS(p) ({ __le32 __a = get_unaligned(&p->nr_sects); \
3302 - le32_to_cpu(__a); \
3303 - })
3304 +#define SYS_IND(p) get_unaligned(&p->sys_ind)
3305
3306 -#define START_SECT(p) ({ __le32 __a = get_unaligned(&p->start_sect); \
3307 - le32_to_cpu(__a); \
3308 - })
3309 +static inline sector_t nr_sects(struct partition *p)
3310 +{
3311 + return (sector_t)get_unaligned_le32(&p->nr_sects);
3312 +}
3313 +
3314 +static inline sector_t start_sect(struct partition *p)
3315 +{
3316 + return (sector_t)get_unaligned_le32(&p->start_sect);
3317 +}
3318
3319 static inline int is_extended_partition(struct partition *p)
3320 {
3321 @@ -104,13 +107,13 @@ static int aix_magic_present(unsigned char *p, struct block_device *bdev)
3322
3323 static void
3324 parse_extended(struct parsed_partitions *state, struct block_device *bdev,
3325 - u32 first_sector, u32 first_size)
3326 + sector_t first_sector, sector_t first_size)
3327 {
3328 struct partition *p;
3329 Sector sect;
3330 unsigned char *data;
3331 - u32 this_sector, this_size;
3332 - int sector_size = bdev_logical_block_size(bdev) / 512;
3333 + sector_t this_sector, this_size;
3334 + sector_t sector_size = bdev_logical_block_size(bdev) / 512;
3335 int loopct = 0; /* number of links followed
3336 without finding a data partition */
3337 int i;
3338 @@ -145,14 +148,14 @@ parse_extended(struct parsed_partitions *state, struct block_device *bdev,
3339 * First process the data partition(s)
3340 */
3341 for (i=0; i<4; i++, p++) {
3342 - u32 offs, size, next;
3343 - if (!NR_SECTS(p) || is_extended_partition(p))
3344 + sector_t offs, size, next;
3345 + if (!nr_sects(p) || is_extended_partition(p))
3346 continue;
3347
3348 /* Check the 3rd and 4th entries -
3349 these sometimes contain random garbage */
3350 - offs = START_SECT(p)*sector_size;
3351 - size = NR_SECTS(p)*sector_size;
3352 + offs = start_sect(p)*sector_size;
3353 + size = nr_sects(p)*sector_size;
3354 next = this_sector + offs;
3355 if (i >= 2) {
3356 if (offs + size > this_size)
3357 @@ -179,13 +182,13 @@ parse_extended(struct parsed_partitions *state, struct block_device *bdev,
3358 */
3359 p -= 4;
3360 for (i=0; i<4; i++, p++)
3361 - if (NR_SECTS(p) && is_extended_partition(p))
3362 + if (nr_sects(p) && is_extended_partition(p))
3363 break;
3364 if (i == 4)
3365 goto done; /* nothing left to do */
3366
3367 - this_sector = first_sector + START_SECT(p) * sector_size;
3368 - this_size = NR_SECTS(p) * sector_size;
3369 + this_sector = first_sector + start_sect(p) * sector_size;
3370 + this_size = nr_sects(p) * sector_size;
3371 put_dev_sector(sect);
3372 }
3373 done:
3374 @@ -197,7 +200,7 @@ done:
3375
3376 static void
3377 parse_solaris_x86(struct parsed_partitions *state, struct block_device *bdev,
3378 - u32 offset, u32 size, int origin)
3379 + sector_t offset, sector_t size, int origin)
3380 {
3381 #ifdef CONFIG_SOLARIS_X86_PARTITION
3382 Sector sect;
3383 @@ -244,7 +247,7 @@ parse_solaris_x86(struct parsed_partitions *state, struct block_device *bdev,
3384 */
3385 static void
3386 parse_bsd(struct parsed_partitions *state, struct block_device *bdev,
3387 - u32 offset, u32 size, int origin, char *flavour,
3388 + sector_t offset, sector_t size, int origin, char *flavour,
3389 int max_partitions)
3390 {
3391 Sector sect;
3392 @@ -263,7 +266,7 @@ parse_bsd(struct parsed_partitions *state, struct block_device *bdev,
3393 if (le16_to_cpu(l->d_npartitions) < max_partitions)
3394 max_partitions = le16_to_cpu(l->d_npartitions);
3395 for (p = l->d_partitions; p - l->d_partitions < max_partitions; p++) {
3396 - u32 bsd_start, bsd_size;
3397 + sector_t bsd_start, bsd_size;
3398
3399 if (state->next == state->limit)
3400 break;
3401 @@ -290,7 +293,7 @@ parse_bsd(struct parsed_partitions *state, struct block_device *bdev,
3402
3403 static void
3404 parse_freebsd(struct parsed_partitions *state, struct block_device *bdev,
3405 - u32 offset, u32 size, int origin)
3406 + sector_t offset, sector_t size, int origin)
3407 {
3408 #ifdef CONFIG_BSD_DISKLABEL
3409 parse_bsd(state, bdev, offset, size, origin,
3410 @@ -300,7 +303,7 @@ parse_freebsd(struct parsed_partitions *state, struct block_device *bdev,
3411
3412 static void
3413 parse_netbsd(struct parsed_partitions *state, struct block_device *bdev,
3414 - u32 offset, u32 size, int origin)
3415 + sector_t offset, sector_t size, int origin)
3416 {
3417 #ifdef CONFIG_BSD_DISKLABEL
3418 parse_bsd(state, bdev, offset, size, origin,
3419 @@ -310,7 +313,7 @@ parse_netbsd(struct parsed_partitions *state, struct block_device *bdev,
3420
3421 static void
3422 parse_openbsd(struct parsed_partitions *state, struct block_device *bdev,
3423 - u32 offset, u32 size, int origin)
3424 + sector_t offset, sector_t size, int origin)
3425 {
3426 #ifdef CONFIG_BSD_DISKLABEL
3427 parse_bsd(state, bdev, offset, size, origin,
3428 @@ -324,7 +327,7 @@ parse_openbsd(struct parsed_partitions *state, struct block_device *bdev,
3429 */
3430 static void
3431 parse_unixware(struct parsed_partitions *state, struct block_device *bdev,
3432 - u32 offset, u32 size, int origin)
3433 + sector_t offset, sector_t size, int origin)
3434 {
3435 #ifdef CONFIG_UNIXWARE_DISKLABEL
3436 Sector sect;
3437 @@ -348,7 +351,8 @@ parse_unixware(struct parsed_partitions *state, struct block_device *bdev,
3438
3439 if (p->s_label != UNIXWARE_FS_UNUSED)
3440 put_partition(state, state->next++,
3441 - START_SECT(p), NR_SECTS(p));
3442 + le32_to_cpu(p->start_sect),
3443 + le32_to_cpu(p->nr_sects));
3444 p++;
3445 }
3446 put_dev_sector(sect);
3447 @@ -363,7 +367,7 @@ parse_unixware(struct parsed_partitions *state, struct block_device *bdev,
3448 */
3449 static void
3450 parse_minix(struct parsed_partitions *state, struct block_device *bdev,
3451 - u32 offset, u32 size, int origin)
3452 + sector_t offset, sector_t size, int origin)
3453 {
3454 #ifdef CONFIG_MINIX_SUBPARTITION
3455 Sector sect;
3456 @@ -390,7 +394,7 @@ parse_minix(struct parsed_partitions *state, struct block_device *bdev,
3457 /* add each partition in use */
3458 if (SYS_IND(p) == MINIX_PARTITION)
3459 put_partition(state, state->next++,
3460 - START_SECT(p), NR_SECTS(p));
3461 + start_sect(p), nr_sects(p));
3462 }
3463 printk(" >\n");
3464 }
3465 @@ -401,7 +405,7 @@ parse_minix(struct parsed_partitions *state, struct block_device *bdev,
3466 static struct {
3467 unsigned char id;
3468 void (*parse)(struct parsed_partitions *, struct block_device *,
3469 - u32, u32, int);
3470 + sector_t, sector_t, int);
3471 } subtypes[] = {
3472 {FREEBSD_PARTITION, parse_freebsd},
3473 {NETBSD_PARTITION, parse_netbsd},
3474 @@ -415,7 +419,7 @@ static struct {
3475
3476 int msdos_partition(struct parsed_partitions *state, struct block_device *bdev)
3477 {
3478 - int sector_size = bdev_logical_block_size(bdev) / 512;
3479 + sector_t sector_size = bdev_logical_block_size(bdev) / 512;
3480 Sector sect;
3481 unsigned char *data;
3482 struct partition *p;
3483 @@ -483,14 +487,21 @@ int msdos_partition(struct parsed_partitions *state, struct block_device *bdev)
3484
3485 state->next = 5;
3486 for (slot = 1 ; slot <= 4 ; slot++, p++) {
3487 - u32 start = START_SECT(p)*sector_size;
3488 - u32 size = NR_SECTS(p)*sector_size;
3489 + sector_t start = start_sect(p)*sector_size;
3490 + sector_t size = nr_sects(p)*sector_size;
3491 if (!size)
3492 continue;
3493 if (is_extended_partition(p)) {
3494 - /* prevent someone doing mkfs or mkswap on an
3495 - extended partition, but leave room for LILO */
3496 - put_partition(state, slot, start, size == 1 ? 1 : 2);
3497 + /*
3498 + * prevent someone doing mkfs or mkswap on an
3499 + * extended partition, but leave room for LILO
3500 + * FIXME: this uses one logical sector for > 512b
3501 + * sector, although it may not be enough/proper.
3502 + */
3503 + sector_t n = 2;
3504 + n = min(size, max(sector_size, n));
3505 + put_partition(state, slot, start, n);
3506 +
3507 printk(" <");
3508 parse_extended(state, bdev, start, size);
3509 printk(" >");
3510 @@ -513,7 +524,7 @@ int msdos_partition(struct parsed_partitions *state, struct block_device *bdev)
3511 unsigned char id = SYS_IND(p);
3512 int n;
3513
3514 - if (!NR_SECTS(p))
3515 + if (!nr_sects(p))
3516 continue;
3517
3518 for (n = 0; subtypes[n].parse && id != subtypes[n].id; n++)
3519 @@ -521,8 +532,8 @@ int msdos_partition(struct parsed_partitions *state, struct block_device *bdev)
3520
3521 if (!subtypes[n].parse)
3522 continue;
3523 - subtypes[n].parse(state, bdev, START_SECT(p)*sector_size,
3524 - NR_SECTS(p)*sector_size, slot);
3525 + subtypes[n].parse(state, bdev, start_sect(p)*sector_size,
3526 + nr_sects(p)*sector_size, slot);
3527 }
3528 put_dev_sector(sect);
3529 return 1;
3530 diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
3531 index 3fc62b0..6e722c1 100644
3532 --- a/fs/quota/dquot.c
3533 +++ b/fs/quota/dquot.c
3534 @@ -225,6 +225,8 @@ static struct hlist_head *dquot_hash;
3535 struct dqstats dqstats;
3536 EXPORT_SYMBOL(dqstats);
3537
3538 +static qsize_t inode_get_rsv_space(struct inode *inode);
3539 +
3540 static inline unsigned int
3541 hashfn(const struct super_block *sb, unsigned int id, int type)
3542 {
3543 @@ -840,11 +842,14 @@ static int dqinit_needed(struct inode *inode, int type)
3544 static void add_dquot_ref(struct super_block *sb, int type)
3545 {
3546 struct inode *inode, *old_inode = NULL;
3547 + int reserved = 0;
3548
3549 spin_lock(&inode_lock);
3550 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
3551 if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
3552 continue;
3553 + if (unlikely(inode_get_rsv_space(inode) > 0))
3554 + reserved = 1;
3555 if (!atomic_read(&inode->i_writecount))
3556 continue;
3557 if (!dqinit_needed(inode, type))
3558 @@ -865,6 +870,12 @@ static void add_dquot_ref(struct super_block *sb, int type)
3559 }
3560 spin_unlock(&inode_lock);
3561 iput(old_inode);
3562 +
3563 + if (reserved) {
3564 + printk(KERN_WARNING "VFS (%s): Writes happened before quota"
3565 + " was turned on thus quota information is probably "
3566 + "inconsistent. Please run quotacheck(8).\n", sb->s_id);
3567 + }
3568 }
3569
3570 /*
3571 @@ -978,10 +989,12 @@ static inline void dquot_resv_space(struct dquot *dquot, qsize_t number)
3572 /*
3573 * Claim reserved quota space
3574 */
3575 -static void dquot_claim_reserved_space(struct dquot *dquot,
3576 - qsize_t number)
3577 +static void dquot_claim_reserved_space(struct dquot *dquot, qsize_t number)
3578 {
3579 - WARN_ON(dquot->dq_dqb.dqb_rsvspace < number);
3580 + if (dquot->dq_dqb.dqb_rsvspace < number) {
3581 + WARN_ON_ONCE(1);
3582 + number = dquot->dq_dqb.dqb_rsvspace;
3583 + }
3584 dquot->dq_dqb.dqb_curspace += number;
3585 dquot->dq_dqb.dqb_rsvspace -= number;
3586 }
3587 @@ -989,7 +1002,12 @@ static void dquot_claim_reserved_space(struct dquot *dquot,
3588 static inline
3589 void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
3590 {
3591 - dquot->dq_dqb.dqb_rsvspace -= number;
3592 + if (dquot->dq_dqb.dqb_rsvspace >= number)
3593 + dquot->dq_dqb.dqb_rsvspace -= number;
3594 + else {
3595 + WARN_ON_ONCE(1);
3596 + dquot->dq_dqb.dqb_rsvspace = 0;
3597 + }
3598 }
3599
3600 static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
3601 @@ -1242,6 +1260,7 @@ static int info_bdq_free(struct dquot *dquot, qsize_t space)
3602 return QUOTA_NL_BHARDBELOW;
3603 return QUOTA_NL_NOWARN;
3604 }
3605 +
3606 /*
3607 * Initialize quota pointers in inode
3608 * We do things in a bit complicated way but by that we avoid calling
3609 @@ -1253,6 +1272,7 @@ int dquot_initialize(struct inode *inode, int type)
3610 int cnt, ret = 0;
3611 struct dquot *got[MAXQUOTAS] = { NULL, NULL };
3612 struct super_block *sb = inode->i_sb;
3613 + qsize_t rsv;
3614
3615 /* First test before acquiring mutex - solves deadlocks when we
3616 * re-enter the quota code and are already holding the mutex */
3617 @@ -1287,6 +1307,13 @@ int dquot_initialize(struct inode *inode, int type)
3618 if (!inode->i_dquot[cnt]) {
3619 inode->i_dquot[cnt] = got[cnt];
3620 got[cnt] = NULL;
3621 + /*
3622 + * Make quota reservation system happy if someone
3623 + * did a write before quota was turned on
3624 + */
3625 + rsv = inode_get_rsv_space(inode);
3626 + if (unlikely(rsv))
3627 + dquot_resv_space(inode->i_dquot[cnt], rsv);
3628 }
3629 }
3630 out_err:
3631 @@ -1351,28 +1378,30 @@ static qsize_t *inode_reserved_space(struct inode * inode)
3632 return inode->i_sb->dq_op->get_reserved_space(inode);
3633 }
3634
3635 -static void inode_add_rsv_space(struct inode *inode, qsize_t number)
3636 +void inode_add_rsv_space(struct inode *inode, qsize_t number)
3637 {
3638 spin_lock(&inode->i_lock);
3639 *inode_reserved_space(inode) += number;
3640 spin_unlock(&inode->i_lock);
3641 }
3642 +EXPORT_SYMBOL(inode_add_rsv_space);
3643
3644 -
3645 -static void inode_claim_rsv_space(struct inode *inode, qsize_t number)
3646 +void inode_claim_rsv_space(struct inode *inode, qsize_t number)
3647 {
3648 spin_lock(&inode->i_lock);
3649 *inode_reserved_space(inode) -= number;
3650 __inode_add_bytes(inode, number);
3651 spin_unlock(&inode->i_lock);
3652 }
3653 +EXPORT_SYMBOL(inode_claim_rsv_space);
3654
3655 -static void inode_sub_rsv_space(struct inode *inode, qsize_t number)
3656 +void inode_sub_rsv_space(struct inode *inode, qsize_t number)
3657 {
3658 spin_lock(&inode->i_lock);
3659 *inode_reserved_space(inode) -= number;
3660 spin_unlock(&inode->i_lock);
3661 }
3662 +EXPORT_SYMBOL(inode_sub_rsv_space);
3663
3664 static qsize_t inode_get_rsv_space(struct inode *inode)
3665 {
3666 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
3667 index 5032b9a..ad5ec1d 100644
3668 --- a/include/linux/decompress/mm.h
3669 +++ b/include/linux/decompress/mm.h
3670 @@ -14,11 +14,21 @@
3671
3672 /* Code active when included from pre-boot environment: */
3673
3674 +/*
3675 + * Some architectures want to ensure there is no local data in their
3676 + * pre-boot environment, so that data can arbitarily relocated (via
3677 + * GOT references). This is achieved by defining STATIC_RW_DATA to
3678 + * be null.
3679 + */
3680 +#ifndef STATIC_RW_DATA
3681 +#define STATIC_RW_DATA static
3682 +#endif
3683 +
3684 /* A trivial malloc implementation, adapted from
3685 * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
3686 */
3687 -static unsigned long malloc_ptr;
3688 -static int malloc_count;
3689 +STATIC_RW_DATA unsigned long malloc_ptr;
3690 +STATIC_RW_DATA int malloc_count;
3691
3692 static void *malloc(int size)
3693 {
3694 diff --git a/include/linux/if_tunnel.h b/include/linux/if_tunnel.h
3695 index 1822d63..16b92d0 100644
3696 --- a/include/linux/if_tunnel.h
3697 +++ b/include/linux/if_tunnel.h
3698 @@ -2,6 +2,7 @@
3699 #define _IF_TUNNEL_H_
3700
3701 #include <linux/types.h>
3702 +#include <asm/byteorder.h>
3703
3704 #ifdef __KERNEL__
3705 #include <linux/ip.h>
3706 diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
3707 index bc0fc79..ece0b1c 100644
3708 --- a/include/linux/kfifo.h
3709 +++ b/include/linux/kfifo.h
3710 @@ -102,8 +102,6 @@ union { \
3711 unsigned char name##kfifo_buffer[size]; \
3712 struct kfifo name = __kfifo_initializer(size, name##kfifo_buffer)
3713
3714 -#undef __kfifo_initializer
3715 -
3716 extern void kfifo_init(struct kfifo *fifo, void *buffer,
3717 unsigned int size);
3718 extern __must_check int kfifo_alloc(struct kfifo *fifo, unsigned int size,
3719 diff --git a/include/linux/kvm.h b/include/linux/kvm.h
3720 index a24de0b..553a388 100644
3721 --- a/include/linux/kvm.h
3722 +++ b/include/linux/kvm.h
3723 @@ -497,6 +497,7 @@ struct kvm_ioeventfd {
3724 #endif
3725 #define KVM_CAP_S390_PSW 42
3726 #define KVM_CAP_PPC_SEGSTATE 43
3727 +#define KVM_CAP_X86_ROBUST_SINGLESTEP 51
3728
3729 #ifdef KVM_CAP_IRQ_ROUTING
3730
3731 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
3732 index 99914e6..03e8d81 100644
3733 --- a/include/linux/netdevice.h
3734 +++ b/include/linux/netdevice.h
3735 @@ -2023,12 +2023,12 @@ static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
3736 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
3737 * ARP on active-backup slaves with arp_validate enabled.
3738 */
3739 -static inline int skb_bond_should_drop(struct sk_buff *skb)
3740 +static inline int skb_bond_should_drop(struct sk_buff *skb,
3741 + struct net_device *master)
3742 {
3743 - struct net_device *dev = skb->dev;
3744 - struct net_device *master = dev->master;
3745 -
3746 if (master) {
3747 + struct net_device *dev = skb->dev;
3748 +
3749 if (master->priv_flags & IFF_MASTER_ARPMON)
3750 dev->last_rx = jiffies;
3751
3752 diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
3753 index 49d321f..264d83d 100644
3754 --- a/include/linux/netfilter/nfnetlink.h
3755 +++ b/include/linux/netfilter/nfnetlink.h
3756 @@ -76,7 +76,7 @@ extern int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n);
3757 extern int nfnetlink_has_listeners(unsigned int group);
3758 extern int nfnetlink_send(struct sk_buff *skb, u32 pid, unsigned group,
3759 int echo, gfp_t flags);
3760 -extern void nfnetlink_set_err(u32 pid, u32 group, int error);
3761 +extern int nfnetlink_set_err(u32 pid, u32 group, int error);
3762 extern int nfnetlink_unicast(struct sk_buff *skb, u_int32_t pid, int flags);
3763
3764 extern void nfnl_lock(void);
3765 diff --git a/include/linux/netlink.h b/include/linux/netlink.h
3766 index fde27c0..6eaca5e 100644
3767 --- a/include/linux/netlink.h
3768 +++ b/include/linux/netlink.h
3769 @@ -188,7 +188,7 @@ extern int netlink_has_listeners(struct sock *sk, unsigned int group);
3770 extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 pid, int nonblock);
3771 extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 pid,
3772 __u32 group, gfp_t allocation);
3773 -extern void netlink_set_err(struct sock *ssk, __u32 pid, __u32 group, int code);
3774 +extern int netlink_set_err(struct sock *ssk, __u32 pid, __u32 group, int code);
3775 extern int netlink_register_notifier(struct notifier_block *nb);
3776 extern int netlink_unregister_notifier(struct notifier_block *nb);
3777
3778 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
3779 index c8ea0c7..41f977b 100644
3780 --- a/include/linux/perf_event.h
3781 +++ b/include/linux/perf_event.h
3782 @@ -793,6 +793,13 @@ struct perf_sample_data {
3783 struct perf_raw_record *raw;
3784 };
3785
3786 +static inline
3787 +void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
3788 +{
3789 + data->addr = addr;
3790 + data->raw = NULL;
3791 +}
3792 +
3793 extern void perf_output_sample(struct perf_output_handle *handle,
3794 struct perf_event_header *header,
3795 struct perf_sample_data *data,
3796 diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
3797 index 3ebb231..a529d86 100644
3798 --- a/include/linux/quotaops.h
3799 +++ b/include/linux/quotaops.h
3800 @@ -26,6 +26,10 @@ static inline void writeout_quota_sb(struct super_block *sb, int type)
3801 sb->s_qcop->quota_sync(sb, type);
3802 }
3803
3804 +void inode_add_rsv_space(struct inode *inode, qsize_t number);
3805 +void inode_claim_rsv_space(struct inode *inode, qsize_t number);
3806 +void inode_sub_rsv_space(struct inode *inode, qsize_t number);
3807 +
3808 int dquot_initialize(struct inode *inode, int type);
3809 int dquot_drop(struct inode *inode);
3810 struct dquot *dqget(struct super_block *sb, unsigned int id, int type);
3811 @@ -42,7 +46,6 @@ int dquot_alloc_inode(const struct inode *inode, qsize_t number);
3812 int dquot_reserve_space(struct inode *inode, qsize_t number, int prealloc);
3813 int dquot_claim_space(struct inode *inode, qsize_t number);
3814 void dquot_release_reserved_space(struct inode *inode, qsize_t number);
3815 -qsize_t dquot_get_reserved_space(struct inode *inode);
3816
3817 int dquot_free_space(struct inode *inode, qsize_t number);
3818 int dquot_free_inode(const struct inode *inode, qsize_t number);
3819 @@ -199,6 +202,8 @@ static inline int vfs_dq_reserve_space(struct inode *inode, qsize_t nr)
3820 if (inode->i_sb->dq_op->reserve_space(inode, nr, 0) == NO_QUOTA)
3821 return 1;
3822 }
3823 + else
3824 + inode_add_rsv_space(inode, nr);
3825 return 0;
3826 }
3827
3828 @@ -221,7 +226,7 @@ static inline int vfs_dq_claim_space(struct inode *inode, qsize_t nr)
3829 if (inode->i_sb->dq_op->claim_space(inode, nr) == NO_QUOTA)
3830 return 1;
3831 } else
3832 - inode_add_bytes(inode, nr);
3833 + inode_claim_rsv_space(inode, nr);
3834
3835 mark_inode_dirty(inode);
3836 return 0;
3837 @@ -235,6 +240,8 @@ void vfs_dq_release_reservation_space(struct inode *inode, qsize_t nr)
3838 {
3839 if (sb_any_quota_active(inode->i_sb))
3840 inode->i_sb->dq_op->release_rsv(inode, nr);
3841 + else
3842 + inode_sub_rsv_space(inode, nr);
3843 }
3844
3845 static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr)
3846 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
3847 index ec226a2..28a9617 100644
3848 --- a/include/linux/skbuff.h
3849 +++ b/include/linux/skbuff.h
3850 @@ -190,9 +190,6 @@ struct skb_shared_info {
3851 atomic_t dataref;
3852 unsigned short nr_frags;
3853 unsigned short gso_size;
3854 -#ifdef CONFIG_HAS_DMA
3855 - dma_addr_t dma_head;
3856 -#endif
3857 /* Warning: this field is not always filled in (UFO)! */
3858 unsigned short gso_segs;
3859 unsigned short gso_type;
3860 @@ -201,9 +198,6 @@ struct skb_shared_info {
3861 struct sk_buff *frag_list;
3862 struct skb_shared_hwtstamps hwtstamps;
3863 skb_frag_t frags[MAX_SKB_FRAGS];
3864 -#ifdef CONFIG_HAS_DMA
3865 - dma_addr_t dma_maps[MAX_SKB_FRAGS];
3866 -#endif
3867 /* Intermediate layers must ensure that destructor_arg
3868 * remains valid until skb destructor */
3869 void * destructor_arg;
3870 diff --git a/include/linux/tty.h b/include/linux/tty.h
3871 index 6abfcf5..42f2076 100644
3872 --- a/include/linux/tty.h
3873 +++ b/include/linux/tty.h
3874 @@ -68,6 +68,17 @@ struct tty_buffer {
3875 unsigned long data[0];
3876 };
3877
3878 +/*
3879 + * We default to dicing tty buffer allocations to this many characters
3880 + * in order to avoid multiple page allocations. We know the size of
3881 + * tty_buffer itself but it must also be taken into account that the
3882 + * the buffer is 256 byte aligned. See tty_buffer_find for the allocation
3883 + * logic this must match
3884 + */
3885 +
3886 +#define TTY_BUFFER_PAGE (((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF)
3887 +
3888 +
3889 struct tty_bufhead {
3890 struct delayed_work work;
3891 spinlock_t lock;
3892 diff --git a/include/net/mac80211.h b/include/net/mac80211.h
3893 index 0bf3697..f39b303 100644
3894 --- a/include/net/mac80211.h
3895 +++ b/include/net/mac80211.h
3896 @@ -926,6 +926,9 @@ enum ieee80211_tkip_key_type {
3897 * @IEEE80211_HW_BEACON_FILTER:
3898 * Hardware supports dropping of irrelevant beacon frames to
3899 * avoid waking up cpu.
3900 + * @IEEE80211_HW_REPORTS_TX_ACK_STATUS:
3901 + * Hardware can provide ack status reports of Tx frames to
3902 + * the stack.
3903 */
3904 enum ieee80211_hw_flags {
3905 IEEE80211_HW_HAS_RATE_CONTROL = 1<<0,
3906 @@ -943,6 +946,7 @@ enum ieee80211_hw_flags {
3907 IEEE80211_HW_SUPPORTS_DYNAMIC_PS = 1<<12,
3908 IEEE80211_HW_MFP_CAPABLE = 1<<13,
3909 IEEE80211_HW_BEACON_FILTER = 1<<14,
3910 + IEEE80211_HW_REPORTS_TX_ACK_STATUS = 1<<15,
3911 };
3912
3913 /**
3914 @@ -2258,7 +2262,8 @@ struct rate_control_ops {
3915 struct ieee80211_sta *sta, void *priv_sta);
3916 void (*rate_update)(void *priv, struct ieee80211_supported_band *sband,
3917 struct ieee80211_sta *sta,
3918 - void *priv_sta, u32 changed);
3919 + void *priv_sta, u32 changed,
3920 + enum nl80211_channel_type oper_chan_type);
3921 void (*free_sta)(void *priv, struct ieee80211_sta *sta,
3922 void *priv_sta);
3923
3924 diff --git a/include/net/netlink.h b/include/net/netlink.h
3925 index a63b219..668ad04 100644
3926 --- a/include/net/netlink.h
3927 +++ b/include/net/netlink.h
3928 @@ -945,7 +945,11 @@ static inline u64 nla_get_u64(const struct nlattr *nla)
3929 */
3930 static inline __be64 nla_get_be64(const struct nlattr *nla)
3931 {
3932 - return *(__be64 *) nla_data(nla);
3933 + __be64 tmp;
3934 +
3935 + nla_memcpy(&tmp, nla, sizeof(tmp));
3936 +
3937 + return tmp;
3938 }
3939
3940 /**
3941 diff --git a/include/net/sock.h b/include/net/sock.h
3942 index 3f1a480..86f2da1 100644
3943 --- a/include/net/sock.h
3944 +++ b/include/net/sock.h
3945 @@ -253,6 +253,8 @@ struct sock {
3946 struct {
3947 struct sk_buff *head;
3948 struct sk_buff *tail;
3949 + int len;
3950 + int limit;
3951 } sk_backlog;
3952 wait_queue_head_t *sk_sleep;
3953 struct dst_entry *sk_dst_cache;
3954 @@ -574,8 +576,8 @@ static inline int sk_stream_memory_free(struct sock *sk)
3955 return sk->sk_wmem_queued < sk->sk_sndbuf;
3956 }
3957
3958 -/* The per-socket spinlock must be held here. */
3959 -static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
3960 +/* OOB backlog add */
3961 +static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
3962 {
3963 if (!sk->sk_backlog.tail) {
3964 sk->sk_backlog.head = sk->sk_backlog.tail = skb;
3965 @@ -586,6 +588,17 @@ static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
3966 skb->next = NULL;
3967 }
3968
3969 +/* The per-socket spinlock must be held here. */
3970 +static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
3971 +{
3972 + if (sk->sk_backlog.len >= max(sk->sk_backlog.limit, sk->sk_rcvbuf << 1))
3973 + return -ENOBUFS;
3974 +
3975 + __sk_add_backlog(sk, skb);
3976 + sk->sk_backlog.len += skb->truesize;
3977 + return 0;
3978 +}
3979 +
3980 static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
3981 {
3982 return sk->sk_backlog_rcv(sk, skb);
3983 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
3984 index 60c2770..1e355d8 100644
3985 --- a/include/net/xfrm.h
3986 +++ b/include/net/xfrm.h
3987 @@ -274,7 +274,8 @@ struct xfrm_policy_afinfo {
3988 struct dst_entry *dst,
3989 int nfheader_len);
3990 int (*fill_dst)(struct xfrm_dst *xdst,
3991 - struct net_device *dev);
3992 + struct net_device *dev,
3993 + struct flowi *fl);
3994 };
3995
3996 extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
3997 diff --git a/init/main.c b/init/main.c
3998 index 4cb47a1..512ba15 100644
3999 --- a/init/main.c
4000 +++ b/init/main.c
4001 @@ -846,7 +846,7 @@ static int __init kernel_init(void * unused)
4002 /*
4003 * init can allocate pages on any node
4004 */
4005 - set_mems_allowed(node_possible_map);
4006 + set_mems_allowed(node_states[N_HIGH_MEMORY]);
4007 /*
4008 * init can run on any cpu.
4009 */
4010 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
4011 index c79bd57..04985a7 100644
4012 --- a/ipc/mqueue.c
4013 +++ b/ipc/mqueue.c
4014 @@ -705,7 +705,7 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, mode_t, mode,
4015 dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name));
4016 if (IS_ERR(dentry)) {
4017 error = PTR_ERR(dentry);
4018 - goto out_err;
4019 + goto out_putfd;
4020 }
4021 mntget(ipc_ns->mq_mnt);
4022
4023 @@ -742,7 +742,6 @@ out:
4024 mntput(ipc_ns->mq_mnt);
4025 out_putfd:
4026 put_unused_fd(fd);
4027 -out_err:
4028 fd = error;
4029 out_upsem:
4030 mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
4031 diff --git a/kernel/cpuset.c b/kernel/cpuset.c
4032 index ba401fa..5d38bd7 100644
4033 --- a/kernel/cpuset.c
4034 +++ b/kernel/cpuset.c
4035 @@ -920,9 +920,6 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
4036 * call to guarantee_online_mems(), as we know no one is changing
4037 * our task's cpuset.
4038 *
4039 - * Hold callback_mutex around the two modifications of our tasks
4040 - * mems_allowed to synchronize with cpuset_mems_allowed().
4041 - *
4042 * While the mm_struct we are migrating is typically from some
4043 * other task, the task_struct mems_allowed that we are hacking
4044 * is for our current task, which must allocate new pages for that
4045 @@ -1391,11 +1388,10 @@ static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cont,
4046
4047 if (cs == &top_cpuset) {
4048 cpumask_copy(cpus_attach, cpu_possible_mask);
4049 - to = node_possible_map;
4050 } else {
4051 guarantee_online_cpus(cs, cpus_attach);
4052 - guarantee_online_mems(cs, &to);
4053 }
4054 + guarantee_online_mems(cs, &to);
4055
4056 /* do per-task migration stuff possibly for each in the threadgroup */
4057 cpuset_attach_task(tsk, &to, cs);
4058 @@ -2090,15 +2086,23 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
4059 static int cpuset_track_online_nodes(struct notifier_block *self,
4060 unsigned long action, void *arg)
4061 {
4062 + nodemask_t oldmems;
4063 +
4064 cgroup_lock();
4065 switch (action) {
4066 case MEM_ONLINE:
4067 - case MEM_OFFLINE:
4068 + oldmems = top_cpuset.mems_allowed;
4069 mutex_lock(&callback_mutex);
4070 top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
4071 mutex_unlock(&callback_mutex);
4072 - if (action == MEM_OFFLINE)
4073 - scan_for_empty_cpusets(&top_cpuset);
4074 + update_tasks_nodemask(&top_cpuset, &oldmems, NULL);
4075 + break;
4076 + case MEM_OFFLINE:
4077 + /*
4078 + * needn't update top_cpuset.mems_allowed explicitly because
4079 + * scan_for_empty_cpusets() will update it.
4080 + */
4081 + scan_for_empty_cpusets(&top_cpuset);
4082 break;
4083 default:
4084 break;
4085 diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
4086 index 967e661..4d99512 100644
4087 --- a/kernel/hw_breakpoint.c
4088 +++ b/kernel/hw_breakpoint.c
4089 @@ -489,5 +489,4 @@ struct pmu perf_ops_bp = {
4090 .enable = arch_install_hw_breakpoint,
4091 .disable = arch_uninstall_hw_breakpoint,
4092 .read = hw_breakpoint_pmu_read,
4093 - .unthrottle = hw_breakpoint_pmu_unthrottle
4094 };
4095 diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
4096 index d70394f..71eba24 100644
4097 --- a/kernel/irq/chip.c
4098 +++ b/kernel/irq/chip.c
4099 @@ -359,6 +359,23 @@ static inline void mask_ack_irq(struct irq_desc *desc, int irq)
4100 if (desc->chip->ack)
4101 desc->chip->ack(irq);
4102 }
4103 + desc->status |= IRQ_MASKED;
4104 +}
4105 +
4106 +static inline void mask_irq(struct irq_desc *desc, int irq)
4107 +{
4108 + if (desc->chip->mask) {
4109 + desc->chip->mask(irq);
4110 + desc->status |= IRQ_MASKED;
4111 + }
4112 +}
4113 +
4114 +static inline void unmask_irq(struct irq_desc *desc, int irq)
4115 +{
4116 + if (desc->chip->unmask) {
4117 + desc->chip->unmask(irq);
4118 + desc->status &= ~IRQ_MASKED;
4119 + }
4120 }
4121
4122 /*
4123 @@ -484,10 +501,8 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
4124 raw_spin_lock(&desc->lock);
4125 desc->status &= ~IRQ_INPROGRESS;
4126
4127 - if (unlikely(desc->status & IRQ_ONESHOT))
4128 - desc->status |= IRQ_MASKED;
4129 - else if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
4130 - desc->chip->unmask(irq);
4131 + if (!(desc->status & (IRQ_DISABLED | IRQ_ONESHOT)))
4132 + unmask_irq(desc, irq);
4133 out_unlock:
4134 raw_spin_unlock(&desc->lock);
4135 }
4136 @@ -524,8 +539,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
4137 action = desc->action;
4138 if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
4139 desc->status |= IRQ_PENDING;
4140 - if (desc->chip->mask)
4141 - desc->chip->mask(irq);
4142 + mask_irq(desc, irq);
4143 goto out;
4144 }
4145
4146 @@ -593,7 +607,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
4147 irqreturn_t action_ret;
4148
4149 if (unlikely(!action)) {
4150 - desc->chip->mask(irq);
4151 + mask_irq(desc, irq);
4152 goto out_unlock;
4153 }
4154
4155 @@ -605,8 +619,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
4156 if (unlikely((desc->status &
4157 (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
4158 (IRQ_PENDING | IRQ_MASKED))) {
4159 - desc->chip->unmask(irq);
4160 - desc->status &= ~IRQ_MASKED;
4161 + unmask_irq(desc, irq);
4162 }
4163
4164 desc->status &= ~IRQ_PENDING;
4165 diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
4166 index eb6078c..69a3d7b 100644
4167 --- a/kernel/irq/manage.c
4168 +++ b/kernel/irq/manage.c
4169 @@ -483,8 +483,26 @@ static int irq_wait_for_interrupt(struct irqaction *action)
4170 */
4171 static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
4172 {
4173 +again:
4174 chip_bus_lock(irq, desc);
4175 raw_spin_lock_irq(&desc->lock);
4176 +
4177 + /*
4178 + * Implausible though it may be we need to protect us against
4179 + * the following scenario:
4180 + *
4181 + * The thread is faster done than the hard interrupt handler
4182 + * on the other CPU. If we unmask the irq line then the
4183 + * interrupt can come in again and masks the line, leaves due
4184 + * to IRQ_INPROGRESS and the irq line is masked forever.
4185 + */
4186 + if (unlikely(desc->status & IRQ_INPROGRESS)) {
4187 + raw_spin_unlock_irq(&desc->lock);
4188 + chip_bus_sync_unlock(irq, desc);
4189 + cpu_relax();
4190 + goto again;
4191 + }
4192 +
4193 if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
4194 desc->status &= ~IRQ_MASKED;
4195 desc->chip->unmask(irq);
4196 diff --git a/kernel/kthread.c b/kernel/kthread.c
4197 index fbb6222..84c7f99 100644
4198 --- a/kernel/kthread.c
4199 +++ b/kernel/kthread.c
4200 @@ -219,7 +219,7 @@ int kthreadd(void *unused)
4201 set_task_comm(tsk, "kthreadd");
4202 ignore_signals(tsk);
4203 set_cpus_allowed_ptr(tsk, cpu_all_mask);
4204 - set_mems_allowed(node_possible_map);
4205 + set_mems_allowed(node_states[N_HIGH_MEMORY]);
4206
4207 current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;
4208
4209 diff --git a/kernel/perf_event.c b/kernel/perf_event.c
4210 index b707465..32d0ae2 100644
4211 --- a/kernel/perf_event.c
4212 +++ b/kernel/perf_event.c
4213 @@ -4027,8 +4027,7 @@ void __perf_sw_event(u32 event_id, u64 nr, int nmi,
4214 if (rctx < 0)
4215 return;
4216
4217 - data.addr = addr;
4218 - data.raw = NULL;
4219 + perf_sample_data_init(&data, addr);
4220
4221 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);
4222
4223 @@ -4073,11 +4072,10 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
4224 struct perf_event *event;
4225 u64 period;
4226
4227 - event = container_of(hrtimer, struct perf_event, hw.hrtimer);
4228 + event = container_of(hrtimer, struct perf_event, hw.hrtimer);
4229 event->pmu->read(event);
4230
4231 - data.addr = 0;
4232 - data.raw = NULL;
4233 + perf_sample_data_init(&data, 0);
4234 data.period = event->hw.last_period;
4235 regs = get_irq_regs();
4236 /*
4237 @@ -4241,17 +4239,15 @@ static const struct pmu perf_ops_task_clock = {
4238 void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
4239 int entry_size)
4240 {
4241 + struct pt_regs *regs = get_irq_regs();
4242 + struct perf_sample_data data;
4243 struct perf_raw_record raw = {
4244 .size = entry_size,
4245 .data = record,
4246 };
4247
4248 - struct perf_sample_data data = {
4249 - .addr = addr,
4250 - .raw = &raw,
4251 - };
4252 -
4253 - struct pt_regs *regs = get_irq_regs();
4254 + perf_sample_data_init(&data, addr);
4255 + data.raw = &raw;
4256
4257 if (!regs)
4258 regs = task_pt_regs(current);
4259 @@ -4367,8 +4363,7 @@ void perf_bp_event(struct perf_event *bp, void *data)
4260 struct perf_sample_data sample;
4261 struct pt_regs *regs = data;
4262
4263 - sample.raw = NULL;
4264 - sample.addr = bp->attr.bp_addr;
4265 + perf_sample_data_init(&sample, bp->attr.bp_addr);
4266
4267 if (!perf_exclude_event(bp, regs))
4268 perf_swevent_add(bp, 1, 1, &sample, regs);
4269 @@ -5251,12 +5246,22 @@ int perf_event_init_task(struct task_struct *child)
4270 return ret;
4271 }
4272
4273 +static void __init perf_event_init_all_cpus(void)
4274 +{
4275 + int cpu;
4276 + struct perf_cpu_context *cpuctx;
4277 +
4278 + for_each_possible_cpu(cpu) {
4279 + cpuctx = &per_cpu(perf_cpu_context, cpu);
4280 + __perf_event_init_context(&cpuctx->ctx, NULL);
4281 + }
4282 +}
4283 +
4284 static void __cpuinit perf_event_init_cpu(int cpu)
4285 {
4286 struct perf_cpu_context *cpuctx;
4287
4288 cpuctx = &per_cpu(perf_cpu_context, cpu);
4289 - __perf_event_init_context(&cpuctx->ctx, NULL);
4290
4291 spin_lock(&perf_resource_lock);
4292 cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
4293 @@ -5327,6 +5332,7 @@ static struct notifier_block __cpuinitdata perf_cpu_nb = {
4294
4295 void __init perf_event_init(void)
4296 {
4297 + perf_event_init_all_cpus();
4298 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
4299 (void *)(long)smp_processor_id());
4300 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
4301 diff --git a/kernel/sched.c b/kernel/sched.c
4302 index 00a59b0..7ca9345 100644
4303 --- a/kernel/sched.c
4304 +++ b/kernel/sched.c
4305 @@ -3423,6 +3423,7 @@ struct sd_lb_stats {
4306 unsigned long max_load;
4307 unsigned long busiest_load_per_task;
4308 unsigned long busiest_nr_running;
4309 + unsigned long busiest_group_capacity;
4310
4311 int group_imb; /* Is there imbalance in this sd */
4312 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
4313 @@ -3742,8 +3743,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
4314 unsigned long load, max_cpu_load, min_cpu_load;
4315 int i;
4316 unsigned int balance_cpu = -1, first_idle_cpu = 0;
4317 - unsigned long sum_avg_load_per_task;
4318 - unsigned long avg_load_per_task;
4319 + unsigned long avg_load_per_task = 0;
4320
4321 if (local_group) {
4322 balance_cpu = group_first_cpu(group);
4323 @@ -3752,7 +3752,6 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
4324 }
4325
4326 /* Tally up the load of all CPUs in the group */
4327 - sum_avg_load_per_task = avg_load_per_task = 0;
4328 max_cpu_load = 0;
4329 min_cpu_load = ~0UL;
4330
4331 @@ -3782,7 +3781,6 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
4332 sgs->sum_nr_running += rq->nr_running;
4333 sgs->sum_weighted_load += weighted_cpuload(i);
4334
4335 - sum_avg_load_per_task += cpu_avg_load_per_task(i);
4336 }
4337
4338 /*
4339 @@ -3800,7 +3798,6 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
4340 /* Adjust by relative CPU power of the group */
4341 sgs->avg_load = (sgs->group_load * SCHED_LOAD_SCALE) / group->cpu_power;
4342
4343 -
4344 /*
4345 * Consider the group unbalanced when the imbalance is larger
4346 * than the average weight of two tasks.
4347 @@ -3810,8 +3807,8 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
4348 * normalized nr_running number somewhere that negates
4349 * the hierarchy?
4350 */
4351 - avg_load_per_task = (sum_avg_load_per_task * SCHED_LOAD_SCALE) /
4352 - group->cpu_power;
4353 + if (sgs->sum_nr_running)
4354 + avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
4355
4356 if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
4357 sgs->group_imb = 1;
4358 @@ -3880,6 +3877,7 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
4359 sds->max_load = sgs.avg_load;
4360 sds->busiest = group;
4361 sds->busiest_nr_running = sgs.sum_nr_running;
4362 + sds->busiest_group_capacity = sgs.group_capacity;
4363 sds->busiest_load_per_task = sgs.sum_weighted_load;
4364 sds->group_imb = sgs.group_imb;
4365 }
4366 @@ -3902,6 +3900,7 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
4367 {
4368 unsigned long tmp, pwr_now = 0, pwr_move = 0;
4369 unsigned int imbn = 2;
4370 + unsigned long scaled_busy_load_per_task;
4371
4372 if (sds->this_nr_running) {
4373 sds->this_load_per_task /= sds->this_nr_running;
4374 @@ -3912,8 +3911,12 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
4375 sds->this_load_per_task =
4376 cpu_avg_load_per_task(this_cpu);
4377
4378 - if (sds->max_load - sds->this_load + sds->busiest_load_per_task >=
4379 - sds->busiest_load_per_task * imbn) {
4380 + scaled_busy_load_per_task = sds->busiest_load_per_task
4381 + * SCHED_LOAD_SCALE;
4382 + scaled_busy_load_per_task /= sds->busiest->cpu_power;
4383 +
4384 + if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
4385 + (scaled_busy_load_per_task * imbn)) {
4386 *imbalance = sds->busiest_load_per_task;
4387 return;
4388 }
4389 @@ -3964,7 +3967,14 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
4390 static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
4391 unsigned long *imbalance)
4392 {
4393 - unsigned long max_pull;
4394 + unsigned long max_pull, load_above_capacity = ~0UL;
4395 +
4396 + sds->busiest_load_per_task /= sds->busiest_nr_running;
4397 + if (sds->group_imb) {
4398 + sds->busiest_load_per_task =
4399 + min(sds->busiest_load_per_task, sds->avg_load);
4400 + }
4401 +
4402 /*
4403 * In the presence of smp nice balancing, certain scenarios can have
4404 * max load less than avg load(as we skip the groups at or below
4405 @@ -3975,9 +3985,29 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
4406 return fix_small_imbalance(sds, this_cpu, imbalance);
4407 }
4408
4409 - /* Don't want to pull so many tasks that a group would go idle */
4410 - max_pull = min(sds->max_load - sds->avg_load,
4411 - sds->max_load - sds->busiest_load_per_task);
4412 + if (!sds->group_imb) {
4413 + /*
4414 + * Don't want to pull so many tasks that a group would go idle.
4415 + */
4416 + load_above_capacity = (sds->busiest_nr_running -
4417 + sds->busiest_group_capacity);
4418 +
4419 + load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_LOAD_SCALE);
4420 +
4421 + load_above_capacity /= sds->busiest->cpu_power;
4422 + }
4423 +
4424 + /*
4425 + * We're trying to get all the cpus to the average_load, so we don't
4426 + * want to push ourselves above the average load, nor do we wish to
4427 + * reduce the max loaded cpu below the average load. At the same time,
4428 + * we also don't want to reduce the group load below the group capacity
4429 + * (so that we can implement power-savings policies etc). Thus we look
4430 + * for the minimum possible imbalance.
4431 + * Be careful of negative numbers as they'll appear as very large values
4432 + * with unsigned longs.
4433 + */
4434 + max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
4435
4436 /* How much load to actually move to equalise the imbalance */
4437 *imbalance = min(max_pull * sds->busiest->cpu_power,
4438 @@ -4045,7 +4075,6 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
4439 * 4) This group is more busy than the avg busieness at this
4440 * sched_domain.
4441 * 5) The imbalance is within the specified limit.
4442 - * 6) Any rebalance would lead to ping-pong
4443 */
4444 if (balance && !(*balance))
4445 goto ret;
4446 @@ -4064,25 +4093,6 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
4447 if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
4448 goto out_balanced;
4449
4450 - sds.busiest_load_per_task /= sds.busiest_nr_running;
4451 - if (sds.group_imb)
4452 - sds.busiest_load_per_task =
4453 - min(sds.busiest_load_per_task, sds.avg_load);
4454 -
4455 - /*
4456 - * We're trying to get all the cpus to the average_load, so we don't
4457 - * want to push ourselves above the average load, nor do we wish to
4458 - * reduce the max loaded cpu below the average load, as either of these
4459 - * actions would just result in more rebalancing later, and ping-pong
4460 - * tasks around. Thus we look for the minimum possible imbalance.
4461 - * Negative imbalances (*we* are more loaded than anyone else) will
4462 - * be counted as no imbalance for these purposes -- we can't fix that
4463 - * by pulling tasks to us. Be careful of negative numbers as they'll
4464 - * appear as very large values with unsigned longs.
4465 - */
4466 - if (sds.max_load <= sds.busiest_load_per_task)
4467 - goto out_balanced;
4468 -
4469 /* Looks like there is an imbalance. Compute it */
4470 calculate_imbalance(&sds, this_cpu, imbalance);
4471 return sds.busiest;
4472 diff --git a/kernel/softlockup.c b/kernel/softlockup.c
4473 index 0d4c789..4b493f6 100644
4474 --- a/kernel/softlockup.c
4475 +++ b/kernel/softlockup.c
4476 @@ -155,11 +155,11 @@ void softlockup_tick(void)
4477 * Wake up the high-prio watchdog task twice per
4478 * threshold timespan.
4479 */
4480 - if (now > touch_ts + softlockup_thresh/2)
4481 + if (time_after(now - softlockup_thresh/2, touch_ts))
4482 wake_up_process(per_cpu(softlockup_watchdog, this_cpu));
4483
4484 /* Warn about unreasonable delays: */
4485 - if (now <= (touch_ts + softlockup_thresh))
4486 + if (time_before_eq(now - softlockup_thresh, touch_ts))
4487 return;
4488
4489 per_cpu(softlockup_print_ts, this_cpu) = touch_ts;
4490 diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
4491 index 1370083..0e98497 100644
4492 --- a/kernel/time/clocksource.c
4493 +++ b/kernel/time/clocksource.c
4494 @@ -580,6 +580,10 @@ static inline void clocksource_select(void) { }
4495 */
4496 static int __init clocksource_done_booting(void)
4497 {
4498 + mutex_lock(&clocksource_mutex);
4499 + curr_clocksource = clocksource_default_clock();
4500 + mutex_unlock(&clocksource_mutex);
4501 +
4502 finished_booting = 1;
4503
4504 /*
4505 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
4506 index 1e6640f..404c9ba 100644
4507 --- a/kernel/trace/ftrace.c
4508 +++ b/kernel/trace/ftrace.c
4509 @@ -3364,6 +3364,7 @@ void ftrace_graph_init_task(struct task_struct *t)
4510 {
4511 /* Make sure we do not use the parent ret_stack */
4512 t->ret_stack = NULL;
4513 + t->curr_ret_stack = -1;
4514
4515 if (ftrace_graph_active) {
4516 struct ftrace_ret_stack *ret_stack;
4517 @@ -3373,7 +3374,6 @@ void ftrace_graph_init_task(struct task_struct *t)
4518 GFP_KERNEL);
4519 if (!ret_stack)
4520 return;
4521 - t->curr_ret_stack = -1;
4522 atomic_set(&t->tracing_graph_pause, 0);
4523 atomic_set(&t->trace_overrun, 0);
4524 t->ftrace_timestamp = 0;
4525 diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
4526 index 8c1b2d2..54191d6 100644
4527 --- a/kernel/trace/ring_buffer.c
4528 +++ b/kernel/trace/ring_buffer.c
4529 @@ -2232,12 +2232,12 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
4530 if (ring_buffer_flags != RB_BUFFERS_ON)
4531 return NULL;
4532
4533 - if (atomic_read(&buffer->record_disabled))
4534 - return NULL;
4535 -
4536 /* If we are tracing schedule, we don't want to recurse */
4537 resched = ftrace_preempt_disable();
4538
4539 + if (atomic_read(&buffer->record_disabled))
4540 + goto out_nocheck;
4541 +
4542 if (trace_recursive_lock())
4543 goto out_nocheck;
4544
4545 @@ -2469,11 +2469,11 @@ int ring_buffer_write(struct ring_buffer *buffer,
4546 if (ring_buffer_flags != RB_BUFFERS_ON)
4547 return -EBUSY;
4548
4549 - if (atomic_read(&buffer->record_disabled))
4550 - return -EBUSY;
4551 -
4552 resched = ftrace_preempt_disable();
4553
4554 + if (atomic_read(&buffer->record_disabled))
4555 + goto out;
4556 +
4557 cpu = raw_smp_processor_id();
4558
4559 if (!cpumask_test_cpu(cpu, buffer->cpumask))
4560 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
4561 index eac6875..45cfb6d 100644
4562 --- a/kernel/trace/trace.c
4563 +++ b/kernel/trace/trace.c
4564 @@ -747,10 +747,10 @@ out:
4565 mutex_unlock(&trace_types_lock);
4566 }
4567
4568 -static void __tracing_reset(struct trace_array *tr, int cpu)
4569 +static void __tracing_reset(struct ring_buffer *buffer, int cpu)
4570 {
4571 ftrace_disable_cpu();
4572 - ring_buffer_reset_cpu(tr->buffer, cpu);
4573 + ring_buffer_reset_cpu(buffer, cpu);
4574 ftrace_enable_cpu();
4575 }
4576
4577 @@ -762,7 +762,7 @@ void tracing_reset(struct trace_array *tr, int cpu)
4578
4579 /* Make sure all commits have finished */
4580 synchronize_sched();
4581 - __tracing_reset(tr, cpu);
4582 + __tracing_reset(buffer, cpu);
4583
4584 ring_buffer_record_enable(buffer);
4585 }
4586 @@ -780,7 +780,7 @@ void tracing_reset_online_cpus(struct trace_array *tr)
4587 tr->time_start = ftrace_now(tr->cpu);
4588
4589 for_each_online_cpu(cpu)
4590 - __tracing_reset(tr, cpu);
4591 + __tracing_reset(buffer, cpu);
4592
4593 ring_buffer_record_enable(buffer);
4594 }
4595 @@ -857,6 +857,8 @@ void tracing_start(void)
4596 goto out;
4597 }
4598
4599 + /* Prevent the buffers from switching */
4600 + arch_spin_lock(&ftrace_max_lock);
4601
4602 buffer = global_trace.buffer;
4603 if (buffer)
4604 @@ -866,6 +868,8 @@ void tracing_start(void)
4605 if (buffer)
4606 ring_buffer_record_enable(buffer);
4607
4608 + arch_spin_unlock(&ftrace_max_lock);
4609 +
4610 ftrace_start();
4611 out:
4612 spin_unlock_irqrestore(&tracing_start_lock, flags);
4613 @@ -887,6 +891,9 @@ void tracing_stop(void)
4614 if (trace_stop_count++)
4615 goto out;
4616
4617 + /* Prevent the buffers from switching */
4618 + arch_spin_lock(&ftrace_max_lock);
4619 +
4620 buffer = global_trace.buffer;
4621 if (buffer)
4622 ring_buffer_record_disable(buffer);
4623 @@ -895,6 +902,8 @@ void tracing_stop(void)
4624 if (buffer)
4625 ring_buffer_record_disable(buffer);
4626
4627 + arch_spin_unlock(&ftrace_max_lock);
4628 +
4629 out:
4630 spin_unlock_irqrestore(&tracing_start_lock, flags);
4631 }
4632 @@ -1182,6 +1191,13 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
4633 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
4634 return;
4635
4636 + /*
4637 + * NMIs can not handle page faults, even with fix ups.
4638 + * The save user stack can (and often does) fault.
4639 + */
4640 + if (unlikely(in_nmi()))
4641 + return;
4642 +
4643 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
4644 sizeof(*entry), flags, pc);
4645 if (!event)
4646 @@ -1628,6 +1644,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
4647
4648 ftrace_enable_cpu();
4649
4650 + iter->leftover = 0;
4651 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
4652 ;
4653
4654 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
4655 index 290fb5b..0beac93 100644
4656 --- a/mm/mempolicy.c
4657 +++ b/mm/mempolicy.c
4658 @@ -2167,8 +2167,8 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
4659 char *rest = nodelist;
4660 while (isdigit(*rest))
4661 rest++;
4662 - if (!*rest)
4663 - err = 0;
4664 + if (*rest)
4665 + goto out;
4666 }
4667 break;
4668 case MPOL_INTERLEAVE:
4669 @@ -2177,7 +2177,6 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
4670 */
4671 if (!nodelist)
4672 nodes = node_states[N_HIGH_MEMORY];
4673 - err = 0;
4674 break;
4675 case MPOL_LOCAL:
4676 /*
4677 @@ -2187,11 +2186,19 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
4678 goto out;
4679 mode = MPOL_PREFERRED;
4680 break;
4681 -
4682 - /*
4683 - * case MPOL_BIND: mpol_new() enforces non-empty nodemask.
4684 - * case MPOL_DEFAULT: mpol_new() enforces empty nodemask, ignores flags.
4685 - */
4686 + case MPOL_DEFAULT:
4687 + /*
4688 + * Insist on a empty nodelist
4689 + */
4690 + if (!nodelist)
4691 + err = 0;
4692 + goto out;
4693 + case MPOL_BIND:
4694 + /*
4695 + * Insist on a nodelist
4696 + */
4697 + if (!nodelist)
4698 + goto out;
4699 }
4700
4701 mode_flags = 0;
4702 @@ -2205,13 +2212,14 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
4703 else if (!strcmp(flags, "relative"))
4704 mode_flags |= MPOL_F_RELATIVE_NODES;
4705 else
4706 - err = 1;
4707 + goto out;
4708 }
4709
4710 new = mpol_new(mode, mode_flags, &nodes);
4711 if (IS_ERR(new))
4712 - err = 1;
4713 - else {
4714 + goto out;
4715 +
4716 + {
4717 int ret;
4718 NODEMASK_SCRATCH(scratch);
4719 if (scratch) {
4720 @@ -2222,13 +2230,15 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
4721 ret = -ENOMEM;
4722 NODEMASK_SCRATCH_FREE(scratch);
4723 if (ret) {
4724 - err = 1;
4725 mpol_put(new);
4726 - } else if (no_context) {
4727 - /* save for contextualization */
4728 - new->w.user_nodemask = nodes;
4729 + goto out;
4730 }
4731 }
4732 + err = 0;
4733 + if (no_context) {
4734 + /* save for contextualization */
4735 + new->w.user_nodemask = nodes;
4736 + }
4737
4738 out:
4739 /* Restore string for error message */
4740 diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
4741 index e75a2f3..152760a 100644
4742 --- a/net/8021q/vlan_core.c
4743 +++ b/net/8021q/vlan_core.c
4744 @@ -11,7 +11,7 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
4745 if (netpoll_rx(skb))
4746 return NET_RX_DROP;
4747
4748 - if (skb_bond_should_drop(skb))
4749 + if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
4750 goto drop;
4751
4752 __vlan_hwaccel_put_tag(skb, vlan_tci);
4753 @@ -82,7 +82,7 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
4754 {
4755 struct sk_buff *p;
4756
4757 - if (skb_bond_should_drop(skb))
4758 + if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
4759 goto drop;
4760
4761 __vlan_hwaccel_put_tag(skb, vlan_tci);
4762 diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
4763 index 400efa2..615fecc 100644
4764 --- a/net/bluetooth/l2cap.c
4765 +++ b/net/bluetooth/l2cap.c
4766 @@ -2830,6 +2830,11 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
4767 int len = cmd->len - sizeof(*rsp);
4768 char req[64];
4769
4770 + if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4771 + l2cap_send_disconn_req(conn, sk);
4772 + goto done;
4773 + }
4774 +
4775 /* throw out any old stored conf requests */
4776 result = L2CAP_CONF_SUCCESS;
4777 len = l2cap_parse_conf_rsp(sk, rsp->data,
4778 @@ -3942,16 +3947,24 @@ static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
4779 struct sock *sk;
4780 struct hlist_node *node;
4781 char *str = buf;
4782 + int size = PAGE_SIZE;
4783
4784 read_lock_bh(&l2cap_sk_list.lock);
4785
4786 sk_for_each(sk, node, &l2cap_sk_list.head) {
4787 struct l2cap_pinfo *pi = l2cap_pi(sk);
4788 + int len;
4789
4790 - str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4791 + len = snprintf(str, size, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4792 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
4793 sk->sk_state, __le16_to_cpu(pi->psm), pi->scid,
4794 pi->dcid, pi->imtu, pi->omtu, pi->sec_level);
4795 +
4796 + size -= len;
4797 + if (size <= 0)
4798 + break;
4799 +
4800 + str += len;
4801 }
4802
4803 read_unlock_bh(&l2cap_sk_list.lock);
4804 diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
4805 index 89f4a59..3fe9c7c 100644
4806 --- a/net/bluetooth/rfcomm/core.c
4807 +++ b/net/bluetooth/rfcomm/core.c
4808 @@ -2103,6 +2103,7 @@ static ssize_t rfcomm_dlc_sysfs_show(struct class *dev, char *buf)
4809 struct rfcomm_session *s;
4810 struct list_head *pp, *p;
4811 char *str = buf;
4812 + int size = PAGE_SIZE;
4813
4814 rfcomm_lock();
4815
4816 @@ -2111,11 +2112,21 @@ static ssize_t rfcomm_dlc_sysfs_show(struct class *dev, char *buf)
4817 list_for_each(pp, &s->dlcs) {
4818 struct sock *sk = s->sock->sk;
4819 struct rfcomm_dlc *d = list_entry(pp, struct rfcomm_dlc, list);
4820 + int len;
4821
4822 - str += sprintf(str, "%s %s %ld %d %d %d %d\n",
4823 + len = snprintf(str, size, "%s %s %ld %d %d %d %d\n",
4824 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
4825 d->state, d->dlci, d->mtu, d->rx_credits, d->tx_credits);
4826 +
4827 + size -= len;
4828 + if (size <= 0)
4829 + break;
4830 +
4831 + str += len;
4832 }
4833 +
4834 + if (size <= 0)
4835 + break;
4836 }
4837
4838 rfcomm_unlock();
4839 diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
4840 index 4b5968d..bc03b50 100644
4841 --- a/net/bluetooth/rfcomm/sock.c
4842 +++ b/net/bluetooth/rfcomm/sock.c
4843 @@ -1066,13 +1066,22 @@ static ssize_t rfcomm_sock_sysfs_show(struct class *dev, char *buf)
4844 struct sock *sk;
4845 struct hlist_node *node;
4846 char *str = buf;
4847 + int size = PAGE_SIZE;
4848
4849 read_lock_bh(&rfcomm_sk_list.lock);
4850
4851 sk_for_each(sk, node, &rfcomm_sk_list.head) {
4852 - str += sprintf(str, "%s %s %d %d\n",
4853 + int len;
4854 +
4855 + len = snprintf(str, size, "%s %s %d %d\n",
4856 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
4857 sk->sk_state, rfcomm_pi(sk)->channel);
4858 +
4859 + size -= len;
4860 + if (size <= 0)
4861 + break;
4862 +
4863 + str += len;
4864 }
4865
4866 read_unlock_bh(&rfcomm_sk_list.lock);
4867 diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
4868 index dd8f6ec..66cab63 100644
4869 --- a/net/bluetooth/sco.c
4870 +++ b/net/bluetooth/sco.c
4871 @@ -958,13 +958,22 @@ static ssize_t sco_sysfs_show(struct class *dev, char *buf)
4872 struct sock *sk;
4873 struct hlist_node *node;
4874 char *str = buf;
4875 + int size = PAGE_SIZE;
4876
4877 read_lock_bh(&sco_sk_list.lock);
4878
4879 sk_for_each(sk, node, &sco_sk_list.head) {
4880 - str += sprintf(str, "%s %s %d\n",
4881 + int len;
4882 +
4883 + len = snprintf(str, size, "%s %s %d\n",
4884 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
4885 sk->sk_state);
4886 +
4887 + size -= len;
4888 + if (size <= 0)
4889 + break;
4890 +
4891 + str += len;
4892 }
4893
4894 read_unlock_bh(&sco_sk_list.lock);
4895 diff --git a/net/core/dev.c b/net/core/dev.c
4896 index ec87421..f51f940 100644
4897 --- a/net/core/dev.c
4898 +++ b/net/core/dev.c
4899 @@ -2421,6 +2421,7 @@ int netif_receive_skb(struct sk_buff *skb)
4900 {
4901 struct packet_type *ptype, *pt_prev;
4902 struct net_device *orig_dev;
4903 + struct net_device *master;
4904 struct net_device *null_or_orig;
4905 int ret = NET_RX_DROP;
4906 __be16 type;
4907 @@ -2440,11 +2441,12 @@ int netif_receive_skb(struct sk_buff *skb)
4908
4909 null_or_orig = NULL;
4910 orig_dev = skb->dev;
4911 - if (orig_dev->master) {
4912 - if (skb_bond_should_drop(skb))
4913 + master = ACCESS_ONCE(orig_dev->master);
4914 + if (master) {
4915 + if (skb_bond_should_drop(skb, master))
4916 null_or_orig = orig_dev; /* deliver only exact match */
4917 else
4918 - skb->dev = orig_dev->master;
4919 + skb->dev = master;
4920 }
4921
4922 __get_cpu_var(netdev_rx_stat).total++;
4923 diff --git a/net/core/sock.c b/net/core/sock.c
4924 index e1f6f22..5779f31 100644
4925 --- a/net/core/sock.c
4926 +++ b/net/core/sock.c
4927 @@ -340,8 +340,12 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
4928 rc = sk_backlog_rcv(sk, skb);
4929
4930 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
4931 - } else
4932 - sk_add_backlog(sk, skb);
4933 + } else if (sk_add_backlog(sk, skb)) {
4934 + bh_unlock_sock(sk);
4935 + atomic_inc(&sk->sk_drops);
4936 + goto discard_and_relse;
4937 + }
4938 +
4939 bh_unlock_sock(sk);
4940 out:
4941 sock_put(sk);
4942 @@ -1138,6 +1142,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
4943 sock_lock_init(newsk);
4944 bh_lock_sock(newsk);
4945 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
4946 + newsk->sk_backlog.len = 0;
4947
4948 atomic_set(&newsk->sk_rmem_alloc, 0);
4949 /*
4950 @@ -1541,6 +1546,12 @@ static void __release_sock(struct sock *sk)
4951
4952 bh_lock_sock(sk);
4953 } while ((skb = sk->sk_backlog.head) != NULL);
4954 +
4955 + /*
4956 + * Doing the zeroing here guarantee we can not loop forever
4957 + * while a wild producer attempts to flood us.
4958 + */
4959 + sk->sk_backlog.len = 0;
4960 }
4961
4962 /**
4963 @@ -1873,6 +1884,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
4964 sk->sk_allocation = GFP_KERNEL;
4965 sk->sk_rcvbuf = sysctl_rmem_default;
4966 sk->sk_sndbuf = sysctl_wmem_default;
4967 + sk->sk_backlog.limit = sk->sk_rcvbuf << 1;
4968 sk->sk_state = TCP_CLOSE;
4969 sk_set_socket(sk, sock);
4970
4971 diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
4972 index af226a0..0d508c3 100644
4973 --- a/net/dccp/minisocks.c
4974 +++ b/net/dccp/minisocks.c
4975 @@ -254,7 +254,7 @@ int dccp_child_process(struct sock *parent, struct sock *child,
4976 * in main socket hash table and lock on listening
4977 * socket does not protect us more.
4978 */
4979 - sk_add_backlog(child, skb);
4980 + __sk_add_backlog(child, skb);
4981 }
4982
4983 bh_unlock_sock(child);
4984 diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
4985 index f36ce15..68c1454 100644
4986 --- a/net/ipv4/ip_gre.c
4987 +++ b/net/ipv4/ip_gre.c
4988 @@ -810,11 +810,13 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
4989 tunnel->err_count = 0;
4990 }
4991
4992 - max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen;
4993 + max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->u.dst.header_len;
4994
4995 if (skb_headroom(skb) < max_headroom || skb_shared(skb)||
4996 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
4997 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
4998 + if (max_headroom > dev->needed_headroom)
4999 + dev->needed_headroom = max_headroom;
5000 if (!new_skb) {
5001 ip_rt_put(rt);
5002 txq->tx_dropped++;
5003 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
5004 index d62b05d..af86e41 100644
5005 --- a/net/ipv4/route.c
5006 +++ b/net/ipv4/route.c
5007 @@ -922,10 +922,8 @@ static void rt_secret_rebuild_oneshot(struct net *net)
5008 {
5009 del_timer_sync(&net->ipv4.rt_secret_timer);
5010 rt_cache_invalidate(net);
5011 - if (ip_rt_secret_interval) {
5012 - net->ipv4.rt_secret_timer.expires += ip_rt_secret_interval;
5013 - add_timer(&net->ipv4.rt_secret_timer);
5014 - }
5015 + if (ip_rt_secret_interval)
5016 + mod_timer(&net->ipv4.rt_secret_timer, jiffies + ip_rt_secret_interval);
5017 }
5018
5019 static void rt_emergency_hash_rebuild(struct net *net)
5020 @@ -1417,7 +1415,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
5021 dev_hold(rt->u.dst.dev);
5022 if (rt->idev)
5023 in_dev_hold(rt->idev);
5024 - rt->u.dst.obsolete = 0;
5025 + rt->u.dst.obsolete = -1;
5026 rt->u.dst.lastuse = jiffies;
5027 rt->u.dst.path = &rt->u.dst;
5028 rt->u.dst.neighbour = NULL;
5029 @@ -1482,11 +1480,12 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
5030 struct dst_entry *ret = dst;
5031
5032 if (rt) {
5033 - if (dst->obsolete) {
5034 + if (dst->obsolete > 0) {
5035 ip_rt_put(rt);
5036 ret = NULL;
5037 } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
5038 - rt->u.dst.expires) {
5039 + (rt->u.dst.expires &&
5040 + time_after_eq(jiffies, rt->u.dst.expires))) {
5041 unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
5042 rt->fl.oif,
5043 rt_genid(dev_net(dst->dev)));
5044 @@ -1702,7 +1701,9 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
5045
5046 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
5047 {
5048 - return NULL;
5049 + if (rt_is_expired((struct rtable *)dst))
5050 + return NULL;
5051 + return dst;
5052 }
5053
5054 static void ipv4_dst_destroy(struct dst_entry *dst)
5055 @@ -1864,7 +1865,8 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
5056 if (!rth)
5057 goto e_nobufs;
5058
5059 - rth->u.dst.output= ip_rt_bug;
5060 + rth->u.dst.output = ip_rt_bug;
5061 + rth->u.dst.obsolete = -1;
5062
5063 atomic_set(&rth->u.dst.__refcnt, 1);
5064 rth->u.dst.flags= DST_HOST;
5065 @@ -2025,6 +2027,7 @@ static int __mkroute_input(struct sk_buff *skb,
5066 rth->fl.oif = 0;
5067 rth->rt_spec_dst= spec_dst;
5068
5069 + rth->u.dst.obsolete = -1;
5070 rth->u.dst.input = ip_forward;
5071 rth->u.dst.output = ip_output;
5072 rth->rt_genid = rt_genid(dev_net(rth->u.dst.dev));
5073 @@ -2189,6 +2192,7 @@ local_input:
5074 goto e_nobufs;
5075
5076 rth->u.dst.output= ip_rt_bug;
5077 + rth->u.dst.obsolete = -1;
5078 rth->rt_genid = rt_genid(net);
5079
5080 atomic_set(&rth->u.dst.__refcnt, 1);
5081 @@ -2415,6 +2419,7 @@ static int __mkroute_output(struct rtable **result,
5082 rth->rt_spec_dst= fl->fl4_src;
5083
5084 rth->u.dst.output=ip_output;
5085 + rth->u.dst.obsolete = -1;
5086 rth->rt_genid = rt_genid(dev_net(dev_out));
5087
5088 RT_CACHE_STAT_INC(out_slow_tot);
5089 @@ -3072,22 +3077,20 @@ static void rt_secret_reschedule(int old)
5090 rtnl_lock();
5091 for_each_net(net) {
5092 int deleted = del_timer_sync(&net->ipv4.rt_secret_timer);
5093 + long time;
5094
5095 if (!new)
5096 continue;
5097
5098 if (deleted) {
5099 - long time = net->ipv4.rt_secret_timer.expires - jiffies;
5100 + time = net->ipv4.rt_secret_timer.expires - jiffies;
5101
5102 if (time <= 0 || (time += diff) <= 0)
5103 time = 0;
5104 -
5105 - net->ipv4.rt_secret_timer.expires = time;
5106 } else
5107 - net->ipv4.rt_secret_timer.expires = new;
5108 + time = new;
5109
5110 - net->ipv4.rt_secret_timer.expires += jiffies;
5111 - add_timer(&net->ipv4.rt_secret_timer);
5112 + mod_timer(&net->ipv4.rt_secret_timer, jiffies + time);
5113 }
5114 rtnl_unlock();
5115 }
5116 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
5117 index b0a26bb..564a0f8 100644
5118 --- a/net/ipv4/tcp.c
5119 +++ b/net/ipv4/tcp.c
5120 @@ -429,7 +429,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
5121 if (tp->urg_seq == tp->copied_seq &&
5122 !sock_flag(sk, SOCK_URGINLINE) &&
5123 tp->urg_data)
5124 - target--;
5125 + target++;
5126
5127 /* Potential race condition. If read of tp below will
5128 * escape above sk->sk_state, we can be illegally awaken
5129 @@ -1254,6 +1254,39 @@ static void tcp_prequeue_process(struct sock *sk)
5130 tp->ucopy.memory = 0;
5131 }
5132
5133 +#ifdef CONFIG_NET_DMA
5134 +static void tcp_service_net_dma(struct sock *sk, bool wait)
5135 +{
5136 + dma_cookie_t done, used;
5137 + dma_cookie_t last_issued;
5138 + struct tcp_sock *tp = tcp_sk(sk);
5139 +
5140 + if (!tp->ucopy.dma_chan)
5141 + return;
5142 +
5143 + last_issued = tp->ucopy.dma_cookie;
5144 + dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
5145 +
5146 + do {
5147 + if (dma_async_memcpy_complete(tp->ucopy.dma_chan,
5148 + last_issued, &done,
5149 + &used) == DMA_SUCCESS) {
5150 + /* Safe to free early-copied skbs now */
5151 + __skb_queue_purge(&sk->sk_async_wait_queue);
5152 + break;
5153 + } else {
5154 + struct sk_buff *skb;
5155 + while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
5156 + (dma_async_is_complete(skb->dma_cookie, done,
5157 + used) == DMA_SUCCESS)) {
5158 + __skb_dequeue(&sk->sk_async_wait_queue);
5159 + kfree_skb(skb);
5160 + }
5161 + }
5162 + } while (wait);
5163 +}
5164 +#endif
5165 +
5166 static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
5167 {
5168 struct sk_buff *skb;
5169 @@ -1546,6 +1579,10 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
5170 /* __ Set realtime policy in scheduler __ */
5171 }
5172
5173 +#ifdef CONFIG_NET_DMA
5174 + if (tp->ucopy.dma_chan)
5175 + dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
5176 +#endif
5177 if (copied >= target) {
5178 /* Do not sleep, just process backlog. */
5179 release_sock(sk);
5180 @@ -1554,6 +1591,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
5181 sk_wait_data(sk, &timeo);
5182
5183 #ifdef CONFIG_NET_DMA
5184 + tcp_service_net_dma(sk, false); /* Don't block */
5185 tp->ucopy.wakeup = 0;
5186 #endif
5187
5188 @@ -1633,6 +1671,9 @@ do_prequeue:
5189 copied = -EFAULT;
5190 break;
5191 }
5192 +
5193 + dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
5194 +
5195 if ((offset + used) == skb->len)
5196 copied_early = 1;
5197
5198 @@ -1702,27 +1743,9 @@ skip_copy:
5199 }
5200
5201 #ifdef CONFIG_NET_DMA
5202 - if (tp->ucopy.dma_chan) {
5203 - dma_cookie_t done, used;
5204 -
5205 - dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
5206 -
5207 - while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
5208 - tp->ucopy.dma_cookie, &done,
5209 - &used) == DMA_IN_PROGRESS) {
5210 - /* do partial cleanup of sk_async_wait_queue */
5211 - while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
5212 - (dma_async_is_complete(skb->dma_cookie, done,
5213 - used) == DMA_SUCCESS)) {
5214 - __skb_dequeue(&sk->sk_async_wait_queue);
5215 - kfree_skb(skb);
5216 - }
5217 - }
5218 + tcp_service_net_dma(sk, true); /* Wait for queue to drain */
5219 + tp->ucopy.dma_chan = NULL;
5220
5221 - /* Safe to free early-copied skbs now */
5222 - __skb_queue_purge(&sk->sk_async_wait_queue);
5223 - tp->ucopy.dma_chan = NULL;
5224 - }
5225 if (tp->ucopy.pinned_list) {
5226 dma_unpin_iovec_pages(tp->ucopy.pinned_list);
5227 tp->ucopy.pinned_list = NULL;
5228 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
5229 index 3fddc69..b347d3c 100644
5230 --- a/net/ipv4/tcp_input.c
5231 +++ b/net/ipv4/tcp_input.c
5232 @@ -2499,6 +2499,9 @@ static void tcp_mark_head_lost(struct sock *sk, int packets)
5233 int err;
5234 unsigned int mss;
5235
5236 + if (packets == 0)
5237 + return;
5238 +
5239 WARN_ON(packets > tp->packets_out);
5240 if (tp->lost_skb_hint) {
5241 skb = tp->lost_skb_hint;
5242 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
5243 index 65b8ebf..de935e3 100644
5244 --- a/net/ipv4/tcp_ipv4.c
5245 +++ b/net/ipv4/tcp_ipv4.c
5246 @@ -1677,8 +1677,10 @@ process:
5247 if (!tcp_prequeue(sk, skb))
5248 ret = tcp_v4_do_rcv(sk, skb);
5249 }
5250 - } else
5251 - sk_add_backlog(sk, skb);
5252 + } else if (sk_add_backlog(sk, skb)) {
5253 + bh_unlock_sock(sk);
5254 + goto discard_and_relse;
5255 + }
5256 bh_unlock_sock(sk);
5257
5258 sock_put(sk);
5259 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
5260 index f206ee5..4199bc6 100644
5261 --- a/net/ipv4/tcp_minisocks.c
5262 +++ b/net/ipv4/tcp_minisocks.c
5263 @@ -728,7 +728,7 @@ int tcp_child_process(struct sock *parent, struct sock *child,
5264 * in main socket hash table and lock on listening
5265 * socket does not protect us more.
5266 */
5267 - sk_add_backlog(child, skb);
5268 + __sk_add_backlog(child, skb);
5269 }
5270
5271 bh_unlock_sock(child);
5272 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
5273 index 383ce23..dc26654 100644
5274 --- a/net/ipv4/tcp_output.c
5275 +++ b/net/ipv4/tcp_output.c
5276 @@ -2393,13 +2393,17 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
5277 struct tcp_extend_values *xvp = tcp_xv(rvp);
5278 struct inet_request_sock *ireq = inet_rsk(req);
5279 struct tcp_sock *tp = tcp_sk(sk);
5280 + const struct tcp_cookie_values *cvp = tp->cookie_values;
5281 struct tcphdr *th;
5282 struct sk_buff *skb;
5283 struct tcp_md5sig_key *md5;
5284 int tcp_header_size;
5285 int mss;
5286 + int s_data_desired = 0;
5287
5288 - skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
5289 + if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
5290 + s_data_desired = cvp->s_data_desired;
5291 + skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
5292 if (skb == NULL)
5293 return NULL;
5294
5295 @@ -2454,16 +2458,12 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
5296 TCPCB_FLAG_SYN | TCPCB_FLAG_ACK);
5297
5298 if (OPTION_COOKIE_EXTENSION & opts.options) {
5299 - const struct tcp_cookie_values *cvp = tp->cookie_values;
5300 -
5301 - if (cvp != NULL &&
5302 - cvp->s_data_constant &&
5303 - cvp->s_data_desired > 0) {
5304 - u8 *buf = skb_put(skb, cvp->s_data_desired);
5305 + if (s_data_desired) {
5306 + u8 *buf = skb_put(skb, s_data_desired);
5307
5308 /* copy data directly from the listening socket. */
5309 - memcpy(buf, cvp->s_data_payload, cvp->s_data_desired);
5310 - TCP_SKB_CB(skb)->end_seq += cvp->s_data_desired;
5311 + memcpy(buf, cvp->s_data_payload, s_data_desired);
5312 + TCP_SKB_CB(skb)->end_seq += s_data_desired;
5313 }
5314
5315 if (opts.hash_size > 0) {
5316 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
5317 index f0126fd..112c611 100644
5318 --- a/net/ipv4/udp.c
5319 +++ b/net/ipv4/udp.c
5320 @@ -1372,8 +1372,10 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
5321 bh_lock_sock(sk);
5322 if (!sock_owned_by_user(sk))
5323 rc = __udp_queue_rcv_skb(sk, skb);
5324 - else
5325 - sk_add_backlog(sk, skb);
5326 + else if (sk_add_backlog(sk, skb)) {
5327 + bh_unlock_sock(sk);
5328 + goto drop;
5329 + }
5330 bh_unlock_sock(sk);
5331
5332 return rc;
5333 diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
5334 index 67107d6..e4a1483 100644
5335 --- a/net/ipv4/xfrm4_policy.c
5336 +++ b/net/ipv4/xfrm4_policy.c
5337 @@ -91,11 +91,12 @@ static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
5338 return 0;
5339 }
5340
5341 -static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
5342 +static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
5343 + struct flowi *fl)
5344 {
5345 struct rtable *rt = (struct rtable *)xdst->route;
5346
5347 - xdst->u.rt.fl = rt->fl;
5348 + xdst->u.rt.fl = *fl;
5349
5350 xdst->u.dst.dev = dev;
5351 dev_hold(dev);
5352 diff --git a/net/ipv6/route.c b/net/ipv6/route.c
5353 index c2bd74c..6232284 100644
5354 --- a/net/ipv6/route.c
5355 +++ b/net/ipv6/route.c
5356 @@ -897,12 +897,17 @@ static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
5357 struct rt6_info *rt = (struct rt6_info *) dst;
5358
5359 if (rt) {
5360 - if (rt->rt6i_flags & RTF_CACHE)
5361 - ip6_del_rt(rt);
5362 - else
5363 + if (rt->rt6i_flags & RTF_CACHE) {
5364 + if (rt6_check_expired(rt)) {
5365 + ip6_del_rt(rt);
5366 + dst = NULL;
5367 + }
5368 + } else {
5369 dst_release(dst);
5370 + dst = NULL;
5371 + }
5372 }
5373 - return NULL;
5374 + return dst;
5375 }
5376
5377 static void ip6_link_failure(struct sk_buff *skb)
5378 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
5379 index febfd59..548a06e 100644
5380 --- a/net/ipv6/tcp_ipv6.c
5381 +++ b/net/ipv6/tcp_ipv6.c
5382 @@ -1732,8 +1732,10 @@ process:
5383 if (!tcp_prequeue(sk, skb))
5384 ret = tcp_v6_do_rcv(sk, skb);
5385 }
5386 - } else
5387 - sk_add_backlog(sk, skb);
5388 + } else if (sk_add_backlog(sk, skb)) {
5389 + bh_unlock_sock(sk);
5390 + goto discard_and_relse;
5391 + }
5392 bh_unlock_sock(sk);
5393
5394 sock_put(sk);
5395 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
5396 index 69ebdbe..d9714d2 100644
5397 --- a/net/ipv6/udp.c
5398 +++ b/net/ipv6/udp.c
5399 @@ -584,16 +584,20 @@ static void flush_stack(struct sock **stack, unsigned int count,
5400 bh_lock_sock(sk);
5401 if (!sock_owned_by_user(sk))
5402 udpv6_queue_rcv_skb(sk, skb1);
5403 - else
5404 - sk_add_backlog(sk, skb1);
5405 + else if (sk_add_backlog(sk, skb1)) {
5406 + kfree_skb(skb1);
5407 + bh_unlock_sock(sk);
5408 + goto drop;
5409 + }
5410 bh_unlock_sock(sk);
5411 - } else {
5412 - atomic_inc(&sk->sk_drops);
5413 - UDP6_INC_STATS_BH(sock_net(sk),
5414 - UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
5415 - UDP6_INC_STATS_BH(sock_net(sk),
5416 - UDP_MIB_INERRORS, IS_UDPLITE(sk));
5417 + continue;
5418 }
5419 +drop:
5420 + atomic_inc(&sk->sk_drops);
5421 + UDP6_INC_STATS_BH(sock_net(sk),
5422 + UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
5423 + UDP6_INC_STATS_BH(sock_net(sk),
5424 + UDP_MIB_INERRORS, IS_UDPLITE(sk));
5425 }
5426 }
5427 /*
5428 @@ -756,8 +760,12 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
5429 bh_lock_sock(sk);
5430 if (!sock_owned_by_user(sk))
5431 udpv6_queue_rcv_skb(sk, skb);
5432 - else
5433 - sk_add_backlog(sk, skb);
5434 + else if (sk_add_backlog(sk, skb)) {
5435 + atomic_inc(&sk->sk_drops);
5436 + bh_unlock_sock(sk);
5437 + sock_put(sk);
5438 + goto discard;
5439 + }
5440 bh_unlock_sock(sk);
5441 sock_put(sk);
5442 return 0;
5443 diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
5444 index dbdc696..ae18165 100644
5445 --- a/net/ipv6/xfrm6_policy.c
5446 +++ b/net/ipv6/xfrm6_policy.c
5447 @@ -116,7 +116,8 @@ static int xfrm6_init_path(struct xfrm_dst *path, struct dst_entry *dst,
5448 return 0;
5449 }
5450
5451 -static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
5452 +static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
5453 + struct flowi *fl)
5454 {
5455 struct rt6_info *rt = (struct rt6_info*)xdst->route;
5456
5457 diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
5458 index 019c780..86d6985 100644
5459 --- a/net/llc/llc_c_ac.c
5460 +++ b/net/llc/llc_c_ac.c
5461 @@ -1437,7 +1437,7 @@ static void llc_process_tmr_ev(struct sock *sk, struct sk_buff *skb)
5462 llc_conn_state_process(sk, skb);
5463 else {
5464 llc_set_backlog_type(skb, LLC_EVENT);
5465 - sk_add_backlog(sk, skb);
5466 + __sk_add_backlog(sk, skb);
5467 }
5468 }
5469 }
5470 diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
5471 index c6bab39..c61ca88 100644
5472 --- a/net/llc/llc_conn.c
5473 +++ b/net/llc/llc_conn.c
5474 @@ -756,7 +756,8 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
5475 else {
5476 dprintk("%s: adding to backlog...\n", __func__);
5477 llc_set_backlog_type(skb, LLC_PACKET);
5478 - sk_add_backlog(sk, skb);
5479 + if (sk_add_backlog(sk, skb))
5480 + goto drop_unlock;
5481 }
5482 out:
5483 bh_unlock_sock(sk);
5484 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
5485 index 91dc863..3521c17 100644
5486 --- a/net/mac80211/ieee80211_i.h
5487 +++ b/net/mac80211/ieee80211_i.h
5488 @@ -264,6 +264,7 @@ enum ieee80211_sta_flags {
5489 IEEE80211_STA_DISABLE_11N = BIT(4),
5490 IEEE80211_STA_CSA_RECEIVED = BIT(5),
5491 IEEE80211_STA_MFP_ENABLED = BIT(6),
5492 + IEEE80211_STA_NULLFUNC_ACKED = BIT(7),
5493 };
5494
5495 /* flags for MLME request */
5496 diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
5497 index 05a18f4..1a209ac 100644
5498 --- a/net/mac80211/mlme.c
5499 +++ b/net/mac80211/mlme.c
5500 @@ -205,7 +205,8 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
5501 sta = sta_info_get(local, bssid);
5502 if (sta)
5503 rate_control_rate_update(local, sband, sta,
5504 - IEEE80211_RC_HT_CHANGED);
5505 + IEEE80211_RC_HT_CHANGED,
5506 + local->oper_channel_type);
5507 rcu_read_unlock();
5508 }
5509
5510 @@ -661,8 +662,11 @@ static void ieee80211_enable_ps(struct ieee80211_local *local,
5511 } else {
5512 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
5513 ieee80211_send_nullfunc(local, sdata, 1);
5514 - conf->flags |= IEEE80211_CONF_PS;
5515 - ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
5516 +
5517 + if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) {
5518 + conf->flags |= IEEE80211_CONF_PS;
5519 + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
5520 + }
5521 }
5522 }
5523
5524 @@ -753,6 +757,7 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
5525 container_of(work, struct ieee80211_local,
5526 dynamic_ps_enable_work);
5527 struct ieee80211_sub_if_data *sdata = local->ps_sdata;
5528 + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
5529
5530 /* can only happen when PS was just disabled anyway */
5531 if (!sdata)
5532 @@ -761,11 +766,16 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
5533 if (local->hw.conf.flags & IEEE80211_CONF_PS)
5534 return;
5535
5536 - if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
5537 + if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
5538 + (!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)))
5539 ieee80211_send_nullfunc(local, sdata, 1);
5540
5541 - local->hw.conf.flags |= IEEE80211_CONF_PS;
5542 - ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
5543 + if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) ||
5544 + (ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) {
5545 + ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
5546 + local->hw.conf.flags |= IEEE80211_CONF_PS;
5547 + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
5548 + }
5549 }
5550
5551 void ieee80211_dynamic_ps_timer(unsigned long data)
5552 @@ -2467,6 +2477,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
5553 list_add(&wk->list, &ifmgd->work_list);
5554
5555 ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N;
5556 + ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
5557
5558 for (i = 0; i < req->crypto.n_ciphers_pairwise; i++)
5559 if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 ||
5560 diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
5561 index cb9bd1f..3e02ea4 100644
5562 --- a/net/mac80211/rate.h
5563 +++ b/net/mac80211/rate.h
5564 @@ -69,7 +69,8 @@ static inline void rate_control_rate_init(struct sta_info *sta)
5565
5566 static inline void rate_control_rate_update(struct ieee80211_local *local,
5567 struct ieee80211_supported_band *sband,
5568 - struct sta_info *sta, u32 changed)
5569 + struct sta_info *sta, u32 changed,
5570 + enum nl80211_channel_type oper_chan_type)
5571 {
5572 struct rate_control_ref *ref = local->rate_ctrl;
5573 struct ieee80211_sta *ista = &sta->sta;
5574 @@ -77,7 +78,7 @@ static inline void rate_control_rate_update(struct ieee80211_local *local,
5575
5576 if (ref && ref->ops->rate_update)
5577 ref->ops->rate_update(ref->priv, sband, ista,
5578 - priv_sta, changed);
5579 + priv_sta, changed, oper_chan_type);
5580 }
5581
5582 static inline void *rate_control_alloc_sta(struct rate_control_ref *ref,
5583 diff --git a/net/mac80211/status.c b/net/mac80211/status.c
5584 index d78f36c..f5abeec 100644
5585 --- a/net/mac80211/status.c
5586 +++ b/net/mac80211/status.c
5587 @@ -165,6 +165,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
5588 rcu_read_lock();
5589
5590 sband = local->hw.wiphy->bands[info->band];
5591 + fc = hdr->frame_control;
5592
5593 sta = sta_info_get(local, hdr->addr1);
5594
5595 @@ -180,8 +181,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
5596 return;
5597 }
5598
5599 - fc = hdr->frame_control;
5600 -
5601 if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) &&
5602 (ieee80211_is_data_qos(fc))) {
5603 u16 tid, ssn;
5604 @@ -246,6 +245,20 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
5605 local->dot11FailedCount++;
5606 }
5607
5608 + if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc) &&
5609 + (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) &&
5610 + !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
5611 + local->ps_sdata && !(local->scanning)) {
5612 + if (info->flags & IEEE80211_TX_STAT_ACK) {
5613 + local->ps_sdata->u.mgd.flags |=
5614 + IEEE80211_STA_NULLFUNC_ACKED;
5615 + ieee80211_queue_work(&local->hw,
5616 + &local->dynamic_ps_enable_work);
5617 + } else
5618 + mod_timer(&local->dynamic_ps_timer, jiffies +
5619 + msecs_to_jiffies(10));
5620 + }
5621 +
5622 /* this was a transmitted frame, but now we want to reuse it */
5623 skb_orphan(skb);
5624
5625 diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
5626 index 0ffe689..eeac97f 100644
5627 --- a/net/netfilter/nf_conntrack_netlink.c
5628 +++ b/net/netfilter/nf_conntrack_netlink.c
5629 @@ -571,7 +571,8 @@ nla_put_failure:
5630 nlmsg_failure:
5631 kfree_skb(skb);
5632 errout:
5633 - nfnetlink_set_err(0, group, -ENOBUFS);
5634 + if (nfnetlink_set_err(0, group, -ENOBUFS) > 0)
5635 + return -ENOBUFS;
5636 return 0;
5637 }
5638 #endif /* CONFIG_NF_CONNTRACK_EVENTS */
5639 diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
5640 index eedc0c1..35fe185 100644
5641 --- a/net/netfilter/nfnetlink.c
5642 +++ b/net/netfilter/nfnetlink.c
5643 @@ -114,9 +114,9 @@ int nfnetlink_send(struct sk_buff *skb, u32 pid,
5644 }
5645 EXPORT_SYMBOL_GPL(nfnetlink_send);
5646
5647 -void nfnetlink_set_err(u32 pid, u32 group, int error)
5648 +int nfnetlink_set_err(u32 pid, u32 group, int error)
5649 {
5650 - netlink_set_err(nfnl, pid, group, error);
5651 + return netlink_set_err(nfnl, pid, group, error);
5652 }
5653 EXPORT_SYMBOL_GPL(nfnetlink_set_err);
5654
5655 diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
5656 index 43e83a4..e460bf9 100644
5657 --- a/net/netfilter/xt_recent.c
5658 +++ b/net/netfilter/xt_recent.c
5659 @@ -260,7 +260,7 @@ recent_mt(const struct sk_buff *skb, const struct xt_match_param *par)
5660 for (i = 0; i < e->nstamps; i++) {
5661 if (info->seconds && time_after(time, e->stamps[i]))
5662 continue;
5663 - if (info->hit_count && ++hits >= info->hit_count) {
5664 + if (!info->hit_count || ++hits >= info->hit_count) {
5665 ret = !ret;
5666 break;
5667 }
5668 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
5669 index 4c5972b..0052d3c 100644
5670 --- a/net/netlink/af_netlink.c
5671 +++ b/net/netlink/af_netlink.c
5672 @@ -1093,6 +1093,7 @@ static inline int do_one_set_err(struct sock *sk,
5673 struct netlink_set_err_data *p)
5674 {
5675 struct netlink_sock *nlk = nlk_sk(sk);
5676 + int ret = 0;
5677
5678 if (sk == p->exclude_sk)
5679 goto out;
5680 @@ -1104,10 +1105,15 @@ static inline int do_one_set_err(struct sock *sk,
5681 !test_bit(p->group - 1, nlk->groups))
5682 goto out;
5683
5684 + if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) {
5685 + ret = 1;
5686 + goto out;
5687 + }
5688 +
5689 sk->sk_err = p->code;
5690 sk->sk_error_report(sk);
5691 out:
5692 - return 0;
5693 + return ret;
5694 }
5695
5696 /**
5697 @@ -1116,12 +1122,16 @@ out:
5698 * @pid: the PID of a process that we want to skip (if any)
5699 * @groups: the broadcast group that will notice the error
5700 * @code: error code, must be negative (as usual in kernelspace)
5701 + *
5702 + * This function returns the number of broadcast listeners that have set the
5703 + * NETLINK_RECV_NO_ENOBUFS socket option.
5704 */
5705 -void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
5706 +int netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
5707 {
5708 struct netlink_set_err_data info;
5709 struct hlist_node *node;
5710 struct sock *sk;
5711 + int ret = 0;
5712
5713 info.exclude_sk = ssk;
5714 info.pid = pid;
5715 @@ -1132,9 +1142,10 @@ void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
5716 read_lock(&nl_table_lock);
5717
5718 sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
5719 - do_one_set_err(sk, &info);
5720 + ret += do_one_set_err(sk, &info);
5721
5722 read_unlock(&nl_table_lock);
5723 + return ret;
5724 }
5725 EXPORT_SYMBOL(netlink_set_err);
5726
5727 diff --git a/net/sctp/input.c b/net/sctp/input.c
5728 index c0c973e..3d74b26 100644
5729 --- a/net/sctp/input.c
5730 +++ b/net/sctp/input.c
5731 @@ -75,7 +75,7 @@ static struct sctp_association *__sctp_lookup_association(
5732 const union sctp_addr *peer,
5733 struct sctp_transport **pt);
5734
5735 -static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
5736 +static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
5737
5738
5739 /* Calculate the SCTP checksum of an SCTP packet. */
5740 @@ -265,8 +265,13 @@ int sctp_rcv(struct sk_buff *skb)
5741 }
5742
5743 if (sock_owned_by_user(sk)) {
5744 + if (sctp_add_backlog(sk, skb)) {
5745 + sctp_bh_unlock_sock(sk);
5746 + sctp_chunk_free(chunk);
5747 + skb = NULL; /* sctp_chunk_free already freed the skb */
5748 + goto discard_release;
5749 + }
5750 SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG);
5751 - sctp_add_backlog(sk, skb);
5752 } else {
5753 SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_SOFTIRQ);
5754 sctp_inq_push(&chunk->rcvr->inqueue, chunk);
5755 @@ -336,8 +341,10 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
5756 sctp_bh_lock_sock(sk);
5757
5758 if (sock_owned_by_user(sk)) {
5759 - sk_add_backlog(sk, skb);
5760 - backloged = 1;
5761 + if (sk_add_backlog(sk, skb))
5762 + sctp_chunk_free(chunk);
5763 + else
5764 + backloged = 1;
5765 } else
5766 sctp_inq_push(inqueue, chunk);
5767
5768 @@ -362,22 +369,27 @@ done:
5769 return 0;
5770 }
5771
5772 -static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
5773 +static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
5774 {
5775 struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
5776 struct sctp_ep_common *rcvr = chunk->rcvr;
5777 + int ret;
5778
5779 - /* Hold the assoc/ep while hanging on the backlog queue.
5780 - * This way, we know structures we need will not disappear from us
5781 - */
5782 - if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
5783 - sctp_association_hold(sctp_assoc(rcvr));
5784 - else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
5785 - sctp_endpoint_hold(sctp_ep(rcvr));
5786 - else
5787 - BUG();
5788 + ret = sk_add_backlog(sk, skb);
5789 + if (!ret) {
5790 + /* Hold the assoc/ep while hanging on the backlog queue.
5791 + * This way, we know structures we need will not disappear
5792 + * from us
5793 + */
5794 + if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
5795 + sctp_association_hold(sctp_assoc(rcvr));
5796 + else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
5797 + sctp_endpoint_hold(sctp_ep(rcvr));
5798 + else
5799 + BUG();
5800 + }
5801 + return ret;
5802
5803 - sk_add_backlog(sk, skb);
5804 }
5805
5806 /* Handle icmp frag needed error. */
5807 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
5808 index 67fdac9..9bd9d82 100644
5809 --- a/net/sctp/socket.c
5810 +++ b/net/sctp/socket.c
5811 @@ -3720,6 +3720,9 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
5812 SCTP_DBG_OBJCNT_INC(sock);
5813 percpu_counter_inc(&sctp_sockets_allocated);
5814
5815 + /* Set socket backlog limit. */
5816 + sk->sk_backlog.limit = sysctl_sctp_rmem[1];
5817 +
5818 local_bh_disable();
5819 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
5820 local_bh_enable();
5821 diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
5822 index f7a7f83..50346a6 100644
5823 --- a/net/sunrpc/auth_gss/auth_gss.c
5824 +++ b/net/sunrpc/auth_gss/auth_gss.c
5825 @@ -1273,9 +1273,8 @@ alloc_enc_pages(struct rpc_rqst *rqstp)
5826 rqstp->rq_release_snd_buf = priv_release_snd_buf;
5827 return 0;
5828 out_free:
5829 - for (i--; i >= 0; i--) {
5830 - __free_page(rqstp->rq_enc_pages[i]);
5831 - }
5832 + rqstp->rq_enc_pages_num = i;
5833 + priv_release_snd_buf(rqstp);
5834 out:
5835 return -EAGAIN;
5836 }
5837 diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
5838 index 49278f8..27a2378 100644
5839 --- a/net/sunrpc/rpc_pipe.c
5840 +++ b/net/sunrpc/rpc_pipe.c
5841 @@ -587,6 +587,8 @@ static struct dentry *__rpc_lookup_create_exclusive(struct dentry *parent,
5842 struct dentry *dentry;
5843
5844 dentry = __rpc_lookup_create(parent, name);
5845 + if (IS_ERR(dentry))
5846 + return dentry;
5847 if (dentry->d_inode == NULL)
5848 return dentry;
5849 dput(dentry);
5850 diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
5851 index 4f30336..6bd41a9 100644
5852 --- a/net/sunrpc/svc_xprt.c
5853 +++ b/net/sunrpc/svc_xprt.c
5854 @@ -699,8 +699,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
5855 spin_unlock_bh(&pool->sp_lock);
5856
5857 len = 0;
5858 - if (test_bit(XPT_LISTENER, &xprt->xpt_flags) &&
5859 - !test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
5860 + if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
5861 + dprintk("svc_recv: found XPT_CLOSE\n");
5862 + svc_delete_xprt(xprt);
5863 + } else if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
5864 struct svc_xprt *newxpt;
5865 newxpt = xprt->xpt_ops->xpo_accept(xprt);
5866 if (newxpt) {
5867 @@ -726,7 +728,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
5868 svc_xprt_received(newxpt);
5869 }
5870 svc_xprt_received(xprt);
5871 - } else if (!test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
5872 + } else {
5873 dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
5874 rqstp, pool->sp_id, xprt,
5875 atomic_read(&xprt->xpt_ref.refcount));
5876 @@ -739,11 +741,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
5877 dprintk("svc: got len=%d\n", len);
5878 }
5879
5880 - if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
5881 - dprintk("svc_recv: found XPT_CLOSE\n");
5882 - svc_delete_xprt(xprt);
5883 - }
5884 -
5885 /* No data, incomplete (TCP) read, or accept() */
5886 if (len == 0 || len == -EAGAIN) {
5887 rqstp->rq_res.len = 0;
5888 diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
5889 index 870929e..528efef 100644
5890 --- a/net/sunrpc/svcsock.c
5891 +++ b/net/sunrpc/svcsock.c
5892 @@ -968,6 +968,7 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
5893 return len;
5894 err_delete:
5895 set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
5896 + svc_xprt_received(&svsk->sk_xprt);
5897 err_again:
5898 return -EAGAIN;
5899 }
5900 diff --git a/net/tipc/socket.c b/net/tipc/socket.c
5901 index 1ea64f0..4b235fc 100644
5902 --- a/net/tipc/socket.c
5903 +++ b/net/tipc/socket.c
5904 @@ -1322,8 +1322,10 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
5905 if (!sock_owned_by_user(sk)) {
5906 res = filter_rcv(sk, buf);
5907 } else {
5908 - sk_add_backlog(sk, buf);
5909 - res = TIPC_OK;
5910 + if (sk_add_backlog(sk, buf))
5911 + res = TIPC_ERR_OVERLOAD;
5912 + else
5913 + res = TIPC_OK;
5914 }
5915 bh_unlock_sock(sk);
5916
5917 diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
5918 index 3e1efe5..52e3042 100644
5919 --- a/net/x25/x25_dev.c
5920 +++ b/net/x25/x25_dev.c
5921 @@ -53,7 +53,7 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
5922 if (!sock_owned_by_user(sk)) {
5923 queued = x25_process_rx_frame(sk, skb);
5924 } else {
5925 - sk_add_backlog(sk, skb);
5926 + queued = !sk_add_backlog(sk, skb);
5927 }
5928 bh_unlock_sock(sk);
5929 sock_put(sk);
5930 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
5931 index 0ecb16a..f12dd3d 100644
5932 --- a/net/xfrm/xfrm_policy.c
5933 +++ b/net/xfrm/xfrm_policy.c
5934 @@ -1354,7 +1354,8 @@ static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
5935 return err;
5936 }
5937
5938 -static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
5939 +static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
5940 + struct flowi *fl)
5941 {
5942 struct xfrm_policy_afinfo *afinfo =
5943 xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
5944 @@ -1363,7 +1364,7 @@ static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
5945 if (!afinfo)
5946 return -EINVAL;
5947
5948 - err = afinfo->fill_dst(xdst, dev);
5949 + err = afinfo->fill_dst(xdst, dev, fl);
5950
5951 xfrm_policy_put_afinfo(afinfo);
5952
5953 @@ -1468,7 +1469,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
5954 for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
5955 struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;
5956
5957 - err = xfrm_fill_dst(xdst, dev);
5958 + err = xfrm_fill_dst(xdst, dev, fl);
5959 if (err)
5960 goto free_dst;
5961
5962 diff --git a/sound/pci/ac97/ac97_patch.c b/sound/pci/ac97/ac97_patch.c
5963 index d9266ba..4e5f2f7 100644
5964 --- a/sound/pci/ac97/ac97_patch.c
5965 +++ b/sound/pci/ac97/ac97_patch.c
5966 @@ -1867,12 +1867,14 @@ static unsigned int ad1981_jacks_blacklist[] = {
5967 0x10140523, /* Thinkpad R40 */
5968 0x10140534, /* Thinkpad X31 */
5969 0x10140537, /* Thinkpad T41p */
5970 + 0x1014053e, /* Thinkpad R40e */
5971 0x10140554, /* Thinkpad T42p/R50p */
5972 0x10140567, /* Thinkpad T43p 2668-G7U */
5973 0x10140581, /* Thinkpad X41-2527 */
5974 0x10280160, /* Dell Dimension 2400 */
5975 0x104380b0, /* Asus A7V8X-MX */
5976 0x11790241, /* Toshiba Satellite A-15 S127 */
5977 + 0x1179ff10, /* Toshiba P500 */
5978 0x144dc01a, /* Samsung NP-X20C004/SEG */
5979 0 /* end */
5980 };
5981 diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c
5982 index a312bae..bbaec22 100644
5983 --- a/sound/pci/cmipci.c
5984 +++ b/sound/pci/cmipci.c
5985 @@ -941,13 +941,21 @@ static snd_pcm_uframes_t snd_cmipci_pcm_pointer(struct cmipci *cm, struct cmipci
5986 struct snd_pcm_substream *substream)
5987 {
5988 size_t ptr;
5989 - unsigned int reg;
5990 + unsigned int reg, rem, tries;
5991 +
5992 if (!rec->running)
5993 return 0;
5994 #if 1 // this seems better..
5995 reg = rec->ch ? CM_REG_CH1_FRAME2 : CM_REG_CH0_FRAME2;
5996 - ptr = rec->dma_size - (snd_cmipci_read_w(cm, reg) + 1);
5997 - ptr >>= rec->shift;
5998 + for (tries = 0; tries < 3; tries++) {
5999 + rem = snd_cmipci_read_w(cm, reg);
6000 + if (rem < rec->dma_size)
6001 + goto ok;
6002 + }
6003 + printk(KERN_ERR "cmipci: invalid PCM pointer: %#x\n", rem);
6004 + return SNDRV_PCM_POS_XRUN;
6005 +ok:
6006 + ptr = (rec->dma_size - (rem + 1)) >> rec->shift;
6007 #else
6008 reg = rec->ch ? CM_REG_CH1_FRAME1 : CM_REG_CH0_FRAME1;
6009 ptr = snd_cmipci_read(cm, reg) - rec->offset;
6010 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
6011 index 6d6e307..9ace8eb 100644
6012 --- a/sound/pci/hda/hda_intel.c
6013 +++ b/sound/pci/hda/hda_intel.c
6014 @@ -2265,8 +2265,10 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
6015 SND_PCI_QUIRK(0x103c, 0x306d, "HP dv3", POS_FIX_LPIB),
6016 SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
6017 SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
6018 + SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
6019 SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
6020 SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB),
6021 + SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB),
6022 {}
6023 };
6024
6025 @@ -2354,6 +2356,7 @@ static void __devinit check_probe_mask(struct azx *chip, int dev)
6026 static struct snd_pci_quirk msi_black_list[] __devinitdata = {
6027 SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */
6028 SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */
6029 + SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */
6030 {}
6031 };
6032
6033 @@ -2372,6 +2375,13 @@ static void __devinit check_msi(struct azx *chip)
6034 "hda_intel: msi for device %04x:%04x set to %d\n",
6035 q->subvendor, q->subdevice, q->value);
6036 chip->msi = q->value;
6037 + return;
6038 + }
6039 +
6040 + /* NVidia chipsets seem to cause troubles with MSI */
6041 + if (chip->driver_type == AZX_DRIVER_NVIDIA) {
6042 + printk(KERN_INFO "hda_intel: Disable MSI for Nvidia chipset\n");
6043 + chip->msi = 0;
6044 }
6045 }
6046
6047 diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
6048 index c578c28..71b7a96 100644
6049 --- a/sound/pci/hda/patch_conexant.c
6050 +++ b/sound/pci/hda/patch_conexant.c
6051 @@ -1570,6 +1570,21 @@ static int patch_cxt5047(struct hda_codec *codec)
6052 #endif
6053 }
6054 spec->vmaster_nid = 0x13;
6055 +
6056 + switch (codec->subsystem_id >> 16) {
6057 + case 0x103c:
6058 + /* HP laptops have really bad sound over 0 dB on NID 0x10.
6059 + * Fix max PCM level to 0 dB (originally it has 0x1e steps
6060 + * with 0 dB offset 0x17)
6061 + */
6062 + snd_hda_override_amp_caps(codec, 0x10, HDA_INPUT,
6063 + (0x17 << AC_AMPCAP_OFFSET_SHIFT) |
6064 + (0x17 << AC_AMPCAP_NUM_STEPS_SHIFT) |
6065 + (0x05 << AC_AMPCAP_STEP_SIZE_SHIFT) |
6066 + (1 << AC_AMPCAP_MUTE_SHIFT));
6067 + break;
6068 + }
6069 +
6070 return 0;
6071 }
6072
6073 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
6074 index da34095..a79f841 100644
6075 --- a/sound/pci/hda/patch_realtek.c
6076 +++ b/sound/pci/hda/patch_realtek.c
6077 @@ -407,6 +407,8 @@ static int alc_mux_enum_info(struct snd_kcontrol *kcontrol,
6078 unsigned int mux_idx = snd_ctl_get_ioffidx(kcontrol, &uinfo->id);
6079 if (mux_idx >= spec->num_mux_defs)
6080 mux_idx = 0;
6081 + if (!spec->input_mux[mux_idx].num_items && mux_idx > 0)
6082 + mux_idx = 0;
6083 return snd_hda_input_mux_info(&spec->input_mux[mux_idx], uinfo);
6084 }
6085
6086 @@ -435,6 +437,8 @@ static int alc_mux_enum_put(struct snd_kcontrol *kcontrol,
6087
6088 mux_idx = adc_idx >= spec->num_mux_defs ? 0 : adc_idx;
6089 imux = &spec->input_mux[mux_idx];
6090 + if (!imux->num_items && mux_idx > 0)
6091 + imux = &spec->input_mux[0];
6092
6093 type = get_wcaps_type(get_wcaps(codec, nid));
6094 if (type == AC_WID_AUD_MIX) {
6095 @@ -6380,7 +6384,7 @@ static struct alc_config_preset alc260_presets[] = {
6096 .num_dacs = ARRAY_SIZE(alc260_dac_nids),
6097 .dac_nids = alc260_dac_nids,
6098 .num_adc_nids = ARRAY_SIZE(alc260_dual_adc_nids),
6099 - .adc_nids = alc260_adc_nids,
6100 + .adc_nids = alc260_dual_adc_nids,
6101 .num_channel_mode = ARRAY_SIZE(alc260_modes),
6102 .channel_mode = alc260_modes,
6103 .input_mux = &alc260_capture_source,
6104 @@ -9097,7 +9101,7 @@ static struct snd_pci_quirk alc882_cfg_tbl[] = {
6105 SND_PCI_QUIRK(0x8086, 0x0022, "DX58SO", ALC889_INTEL),
6106 SND_PCI_QUIRK(0x8086, 0x0021, "Intel IbexPeak", ALC889A_INTEL),
6107 SND_PCI_QUIRK(0x8086, 0x3b56, "Intel IbexPeak", ALC889A_INTEL),
6108 - SND_PCI_QUIRK(0x8086, 0xd601, "D102GGC", ALC883_3ST_6ch),
6109 + SND_PCI_QUIRK(0x8086, 0xd601, "D102GGC", ALC882_6ST_DIG),
6110
6111 {}
6112 };
6113 @@ -9941,6 +9945,8 @@ static void alc882_auto_init_input_src(struct hda_codec *codec)
6114 continue;
6115 mux_idx = c >= spec->num_mux_defs ? 0 : c;
6116 imux = &spec->input_mux[mux_idx];
6117 + if (!imux->num_items && mux_idx > 0)
6118 + imux = &spec->input_mux[0];
6119 for (idx = 0; idx < conns; idx++) {
6120 /* if the current connection is the selected one,
6121 * unmute it as default - otherwise mute it
6122 diff --git a/tools/perf/Documentation/Makefile b/tools/perf/Documentation/Makefile
6123 index bdd3b7e..bd498d4 100644
6124 --- a/tools/perf/Documentation/Makefile
6125 +++ b/tools/perf/Documentation/Makefile
6126 @@ -24,7 +24,10 @@ DOC_MAN1=$(patsubst %.txt,%.1,$(MAN1_TXT))
6127 DOC_MAN5=$(patsubst %.txt,%.5,$(MAN5_TXT))
6128 DOC_MAN7=$(patsubst %.txt,%.7,$(MAN7_TXT))
6129
6130 +# Make the path relative to DESTDIR, not prefix
6131 +ifndef DESTDIR
6132 prefix?=$(HOME)
6133 +endif
6134 bindir?=$(prefix)/bin
6135 htmldir?=$(prefix)/share/doc/perf-doc
6136 pdfdir?=$(prefix)/share/doc/perf-doc
6137 @@ -32,7 +35,6 @@ mandir?=$(prefix)/share/man
6138 man1dir=$(mandir)/man1
6139 man5dir=$(mandir)/man5
6140 man7dir=$(mandir)/man7
6141 -# DESTDIR=
6142
6143 ASCIIDOC=asciidoc
6144 ASCIIDOC_EXTRA = --unsafe
6145 diff --git a/tools/perf/Makefile b/tools/perf/Makefile
6146 index 2e7fa3a..03eb7c9 100644
6147 --- a/tools/perf/Makefile
6148 +++ b/tools/perf/Makefile
6149 @@ -216,7 +216,10 @@ STRIP ?= strip
6150 # runtime figures out where they are based on the path to the executable.
6151 # This can help installing the suite in a relocatable way.
6152
6153 +# Make the path relative to DESTDIR, not to prefix
6154 +ifndef DESTDIR
6155 prefix = $(HOME)
6156 +endif
6157 bindir_relative = bin
6158 bindir = $(prefix)/$(bindir_relative)
6159 mandir = share/man
6160 @@ -233,7 +236,6 @@ sysconfdir = $(prefix)/etc
6161 ETC_PERFCONFIG = etc/perfconfig
6162 endif
6163 lib = lib
6164 -# DESTDIR=
6165
6166 export prefix bindir sharedir sysconfdir
6167
6168 diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
6169 index 593ff25..0b1ba36 100644
6170 --- a/tools/perf/builtin-annotate.c
6171 +++ b/tools/perf/builtin-annotate.c
6172 @@ -53,32 +53,20 @@ struct sym_priv {
6173
6174 static const char *sym_hist_filter;
6175
6176 -static int symbol_filter(struct map *map __used, struct symbol *sym)
6177 +static int sym__alloc_hist(struct symbol *self)
6178 {
6179 - if (sym_hist_filter == NULL ||
6180 - strcmp(sym->name, sym_hist_filter) == 0) {
6181 - struct sym_priv *priv = symbol__priv(sym);
6182 - const int size = (sizeof(*priv->hist) +
6183 - (sym->end - sym->start) * sizeof(u64));
6184 + struct sym_priv *priv = symbol__priv(self);
6185 + const int size = (sizeof(*priv->hist) +
6186 + (self->end - self->start) * sizeof(u64));
6187
6188 - priv->hist = malloc(size);
6189 - if (priv->hist)
6190 - memset(priv->hist, 0, size);
6191 - return 0;
6192 - }
6193 - /*
6194 - * FIXME: We should really filter it out, as we don't want to go thru symbols
6195 - * we're not interested, and if a DSO ends up with no symbols, delete it too,
6196 - * but right now the kernel loading routines in symbol.c bail out if no symbols
6197 - * are found, fix it later.
6198 - */
6199 - return 0;
6200 + priv->hist = zalloc(size);
6201 + return priv->hist == NULL ? -1 : 0;
6202 }
6203
6204 /*
6205 * collect histogram counts
6206 */
6207 -static void hist_hit(struct hist_entry *he, u64 ip)
6208 +static int annotate__hist_hit(struct hist_entry *he, u64 ip)
6209 {
6210 unsigned int sym_size, offset;
6211 struct symbol *sym = he->sym;
6212 @@ -88,11 +76,11 @@ static void hist_hit(struct hist_entry *he, u64 ip)
6213 he->count++;
6214
6215 if (!sym || !he->map)
6216 - return;
6217 + return 0;
6218
6219 priv = symbol__priv(sym);
6220 - if (!priv->hist)
6221 - return;
6222 + if (priv->hist == NULL && sym__alloc_hist(sym) < 0)
6223 + return -ENOMEM;
6224
6225 sym_size = sym->end - sym->start;
6226 offset = ip - sym->start;
6227 @@ -102,7 +90,7 @@ static void hist_hit(struct hist_entry *he, u64 ip)
6228 he->map->unmap_ip(he->map, ip));
6229
6230 if (offset >= sym_size)
6231 - return;
6232 + return 0;
6233
6234 h = priv->hist;
6235 h->sum++;
6236 @@ -114,18 +102,31 @@ static void hist_hit(struct hist_entry *he, u64 ip)
6237 he->sym->name,
6238 (void *)(unsigned long)ip, ip - he->sym->start,
6239 h->ip[offset]);
6240 + return 0;
6241 }
6242
6243 static int perf_session__add_hist_entry(struct perf_session *self,
6244 struct addr_location *al, u64 count)
6245 {
6246 - bool hit;
6247 - struct hist_entry *he = __perf_session__add_hist_entry(self, al, NULL,
6248 - count, &hit);
6249 - if (he == NULL)
6250 - return -ENOMEM;
6251 - hist_hit(he, al->addr);
6252 - return 0;
6253 + bool hit;
6254 + struct hist_entry *he;
6255 +
6256 + if (sym_hist_filter != NULL &&
6257 + (al->sym == NULL || strcmp(sym_hist_filter, al->sym->name) != 0)) {
6258 + /* We're only interested in a symbol named sym_hist_filter */
6259 + if (al->sym != NULL) {
6260 + rb_erase(&al->sym->rb_node,
6261 + &al->map->dso->symbols[al->map->type]);
6262 + symbol__delete(al->sym);
6263 + }
6264 + return 0;
6265 + }
6266 +
6267 + he = __perf_session__add_hist_entry(self, al, NULL, count, &hit);
6268 + if (he == NULL)
6269 + return -ENOMEM;
6270 +
6271 + return annotate__hist_hit(he, al->addr);
6272 }
6273
6274 static int process_sample_event(event_t *event, struct perf_session *session)
6275 @@ -135,7 +136,7 @@ static int process_sample_event(event_t *event, struct perf_session *session)
6276 dump_printf("(IP, %d): %d: %p\n", event->header.misc,
6277 event->ip.pid, (void *)(long)event->ip.ip);
6278
6279 - if (event__preprocess_sample(event, session, &al, symbol_filter) < 0) {
6280 + if (event__preprocess_sample(event, session, &al, NULL) < 0) {
6281 fprintf(stderr, "problem processing %d event, skipping it.\n",
6282 event->header.type);
6283 return -1;
6284 diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
6285 index c1e6774..fa626eb 100644
6286 --- a/tools/perf/builtin-probe.c
6287 +++ b/tools/perf/builtin-probe.c
6288 @@ -48,7 +48,6 @@
6289 #include "util/probe-event.h"
6290
6291 #define MAX_PATH_LEN 256
6292 -#define MAX_PROBES 128
6293
6294 /* Session management structure */
6295 static struct {
6296 diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
6297 index 4b852c0..7f81ded 100644
6298 --- a/tools/perf/util/probe-finder.c
6299 +++ b/tools/perf/util/probe-finder.c
6300 @@ -544,6 +544,9 @@ static void show_probepoint(Dwarf_Die sp_die, Dwarf_Signed offs,
6301 }
6302 free_current_frame_base(pf);
6303
6304 + if (pp->found == MAX_PROBES)
6305 + die("Too many( > %d) probe point found.\n", MAX_PROBES);
6306 +
6307 pp->probes[pp->found] = strdup(tmp);
6308 pp->found++;
6309 }
6310 diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
6311 index 72547b9..fcb8919 100644
6312 --- a/tools/perf/util/symbol.c
6313 +++ b/tools/perf/util/symbol.c
6314 @@ -149,7 +149,7 @@ static struct symbol *symbol__new(u64 start, u64 len, const char *name)
6315 return self;
6316 }
6317
6318 -static void symbol__delete(struct symbol *self)
6319 +void symbol__delete(struct symbol *self)
6320 {
6321 free(((void *)self) - symbol_conf.priv_size);
6322 }
6323 diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
6324 index 8aded23..400227a 100644
6325 --- a/tools/perf/util/symbol.h
6326 +++ b/tools/perf/util/symbol.h
6327 @@ -49,6 +49,8 @@ struct symbol {
6328 char name[0];
6329 };
6330
6331 +void symbol__delete(struct symbol *self);
6332 +
6333 struct strlist;
6334
6335 struct symbol_conf {