Magellan Linux

Annotation of /trunk/kernel26-magellan/patches-2.6.29-r3/0101-2.6.29.2-all-fixes.patch



Revision 778
Sat May 2 11:55:17 2009 UTC by niro
File size: 143777 byte(s)
-2.6.29-magellan-r3:
-updated to linux-2.6.29.2

1 niro 778 diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
2     index 5ede747..0876275 100644
3     --- a/Documentation/networking/bonding.txt
4     +++ b/Documentation/networking/bonding.txt
5     @@ -1242,7 +1242,7 @@ monitoring is enabled, and vice-versa.
6     To add ARP targets:
7     # echo +192.168.0.100 > /sys/class/net/bond0/bonding/arp_ip_target
8     # echo +192.168.0.101 > /sys/class/net/bond0/bonding/arp_ip_target
9     - NOTE: up to 10 target addresses may be specified.
10     + NOTE: up to 16 target addresses may be specified.
11    
12     To remove an ARP target:
13     # echo -192.168.0.100 > /sys/class/net/bond0/bonding/arp_ip_target
14     diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
15     index f833a0b..0a2d6b8 100644
16     --- a/arch/ia64/kvm/Kconfig
17     +++ b/arch/ia64/kvm/Kconfig
18     @@ -4,6 +4,10 @@
19     config HAVE_KVM
20     bool
21    
22     +config HAVE_KVM_IRQCHIP
23     + bool
24     + default y
25     +
26     menuconfig VIRTUALIZATION
27     bool "Virtualization"
28     depends on HAVE_KVM || IA64
29     diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
30     index 1a86f84..5abcc7f 100644
31     --- a/arch/mips/kernel/linux32.c
32     +++ b/arch/mips/kernel/linux32.c
33     @@ -134,9 +134,9 @@ SYSCALL_DEFINE4(32_ftruncate64, unsigned long, fd, unsigned long, __dummy,
34     return sys_ftruncate(fd, merge_64(a2, a3));
35     }
36    
37     -SYSCALL_DEFINE5(32_llseek, unsigned long, fd, unsigned long, offset_high,
38     - unsigned long, offset_low, loff_t __user *, result,
39     - unsigned long, origin)
40     +SYSCALL_DEFINE5(32_llseek, unsigned int, fd, unsigned int, offset_high,
41     + unsigned int, offset_low, loff_t __user *, result,
42     + unsigned int, origin)
43     {
44     return sys_llseek(fd, offset_high, offset_low, result, origin);
45     }
46     diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
47     index 6d406c5..9696cc3 100644
48     --- a/arch/powerpc/include/asm/futex.h
49     +++ b/arch/powerpc/include/asm/futex.h
50     @@ -27,7 +27,7 @@
51     PPC_LONG "1b,4b,2b,4b\n" \
52     ".previous" \
53     : "=&r" (oldval), "=&r" (ret) \
54     - : "b" (uaddr), "i" (-EFAULT), "1" (oparg) \
55     + : "b" (uaddr), "i" (-EFAULT), "r" (oparg) \
56     : "cr0", "memory")
57    
58     static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
59     @@ -47,19 +47,19 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
60    
61     switch (op) {
62     case FUTEX_OP_SET:
63     - __futex_atomic_op("", ret, oldval, uaddr, oparg);
64     + __futex_atomic_op("mr %1,%4\n", ret, oldval, uaddr, oparg);
65     break;
66     case FUTEX_OP_ADD:
67     - __futex_atomic_op("add %1,%0,%1\n", ret, oldval, uaddr, oparg);
68     + __futex_atomic_op("add %1,%0,%4\n", ret, oldval, uaddr, oparg);
69     break;
70     case FUTEX_OP_OR:
71     - __futex_atomic_op("or %1,%0,%1\n", ret, oldval, uaddr, oparg);
72     + __futex_atomic_op("or %1,%0,%4\n", ret, oldval, uaddr, oparg);
73     break;
74     case FUTEX_OP_ANDN:
75     - __futex_atomic_op("andc %1,%0,%1\n", ret, oldval, uaddr, oparg);
76     + __futex_atomic_op("andc %1,%0,%4\n", ret, oldval, uaddr, oparg);
77     break;
78     case FUTEX_OP_XOR:
79     - __futex_atomic_op("xor %1,%0,%1\n", ret, oldval, uaddr, oparg);
80     + __futex_atomic_op("xor %1,%0,%4\n", ret, oldval, uaddr, oparg);
81     break;
82     default:
83     ret = -ENOSYS;
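
The renumbering in this hunk is the whole fix: once oparg becomes a plain register input ("r" instead of the "1" matching constraint), it is operand %4 of __futex_atomic_op, and the instruction bodies must say %4; keeping %1 would make them read an output operand instead of the argument. A minimal user-space sketch of positional operand numbering (illustrative x86, not the powerpc code above, and deliberately not atomic):

/* Outputs are numbered first (%0, %1), inputs after (%2, %3), so the
 * body must name the argument by its own position.  Hypothetical
 * helper for illustration only -- no locking, not atomic. */
static inline int fetch_and_add_sketch(int *p, int arg)
{
        int old, tmp;

        asm("movl (%3), %0\n\t"        /* old  = *p          */
            "movl %0, %1\n\t"          /* tmp  = old         */
            "addl %2, %1\n\t"          /* tmp += arg  (%2!)  */
            "movl %1, (%3)"            /* *p   = tmp         */
            : "=&r" (old), "=&r" (tmp) /* %0, %1: outputs    */
            : "r" (arg), "r" (p)       /* %2, %3: inputs     */
            : "memory");
        return old;
}
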
84     diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
85     index 6dbdc48..03becdf 100644
86     --- a/arch/powerpc/kvm/Kconfig
87     +++ b/arch/powerpc/kvm/Kconfig
88     @@ -2,6 +2,9 @@
89     # KVM configuration
90     #
91    
92     +config HAVE_KVM_IRQCHIP
93     + bool
94     +
95     menuconfig VIRTUALIZATION
96     bool "Virtualization"
97     ---help---
98     diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
99     index e051cad..3e260b7 100644
100     --- a/arch/s390/kvm/Kconfig
101     +++ b/arch/s390/kvm/Kconfig
102     @@ -4,6 +4,9 @@
103     config HAVE_KVM
104     bool
105    
106     +config HAVE_KVM_IRQCHIP
107     + bool
108     +
109     menuconfig VIRTUALIZATION
110     bool "Virtualization"
111     default y
112     diff --git a/arch/sparc/include/asm/tlb_64.h b/arch/sparc/include/asm/tlb_64.h
113     index 0aaa086..ee38e73 100644
114     --- a/arch/sparc/include/asm/tlb_64.h
115     +++ b/arch/sparc/include/asm/tlb_64.h
116     @@ -57,9 +57,9 @@ static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm, unsigned i
117    
118     static inline void tlb_flush_mmu(struct mmu_gather *mp)
119     {
120     + if (!mp->fullmm)
121     + flush_tlb_pending();
122     if (mp->need_flush) {
123     - if (!mp->fullmm)
124     - flush_tlb_pending();
125     free_pages_and_swap_cache(mp->pages, mp->pages_nr);
126     mp->pages_nr = 0;
127     mp->need_flush = 0;
128     diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
129     index c98d52e..6ed3aca 100644
130     --- a/arch/x86/Kconfig.cpu
131     +++ b/arch/x86/Kconfig.cpu
132     @@ -523,6 +523,7 @@ config X86_PTRACE_BTS
133     bool "Branch Trace Store"
134     default y
135     depends on X86_DEBUGCTLMSR
136     + depends on BROKEN
137     help
138     This adds a ptrace interface to the hardware's branch trace store.
139    
140     diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
141     index 8c3c25f..a99dbbe 100644
142     --- a/arch/x86/boot/memory.c
143     +++ b/arch/x86/boot/memory.c
144     @@ -27,13 +27,14 @@ static int detect_memory_e820(void)
145     do {
146     size = sizeof(struct e820entry);
147    
148     - /* Important: %edx is clobbered by some BIOSes,
149     - so it must be either used for the error output
150     + /* Important: %edx and %esi are clobbered by some BIOSes,
151     + so they must be either used for the error output
152     or explicitly marked clobbered. */
153     asm("int $0x15; setc %0"
154     : "=d" (err), "+b" (next), "=a" (id), "+c" (size),
155     "=m" (*desc)
156     - : "D" (desc), "d" (SMAP), "a" (0xe820));
157     + : "D" (desc), "d" (SMAP), "a" (0xe820)
158     + : "esi");
159    
160     /* BIOSes which terminate the chain with CF = 1 as opposed
161     to %ebx = 0 don't always report the SMAP signature on
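
The fix follows directly from GCC's inline-asm contract: every register the BIOS call may modify must either be an output or sit in the clobber list, otherwise the compiler is free to keep a live value there across the asm statement. A small, well-known example of the same rule (CPUID modifies four registers):

/* Minimal sketch: everything cpuid touches is either declared an
 * output or listed as clobbered. */
static inline unsigned int cpuid_eax(unsigned int leaf)
{
        unsigned int eax;

        asm("cpuid"
            : "=a" (eax)               /* output in %eax          */
            : "0" (leaf)               /* input in the same reg   */
            : "ebx", "ecx", "edx");    /* also modified by cpuid  */
        return eax;
}
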
162     diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
163     index 4b1c319..89c676d 100644
164     --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
165     +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
166     @@ -680,6 +680,18 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
167     perf->states[i].transition_latency * 1000;
168     }
169    
170     + /* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
171     + if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
172     + policy->cpuinfo.transition_latency > 20 * 1000) {
173     + static int print_once;
174     + policy->cpuinfo.transition_latency = 20 * 1000;
175     + if (!print_once) {
176     + print_once = 1;
177     + printk(KERN_INFO "Capping off P-state tranision latency"
178     + " at 20 uS\n");
179     + }
180     + }
181     +
182     data->max_freq = perf->states[0].core_frequency * 1000;
183     /* table init */
184     for (i=0; i<perf->state_count; i++) {
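
The static print_once flag above is the stock pattern for a diagnostic that should fire at most once over the machine's lifetime; a generic user-space sketch of the same shape:

#include <stdio.h>

/* One-shot warning: the static flag is zero-initialized and, once
 * set, suppresses every later emission. */
static void warn_slow_transition(unsigned int latency_us)
{
        static int warned;

        if (latency_us > 20 && !warned) {
                warned = 1;
                fprintf(stderr,
                        "capping P-state transition latency at 20 us\n");
        }
}
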
185     diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
186     index bc7ac4d..7086b24 100644
187     --- a/arch/x86/kernel/io_apic.c
188     +++ b/arch/x86/kernel/io_apic.c
189     @@ -2475,6 +2475,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
190     me = smp_processor_id();
191     for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
192     unsigned int irq;
193     + unsigned int irr;
194     struct irq_desc *desc;
195     struct irq_cfg *cfg;
196     irq = __get_cpu_var(vector_irq)[vector];
197     @@ -2494,6 +2495,18 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
198     if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
199     goto unlock;
200    
201     + irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
202     + /*
203     + * Check if the vector that needs to be cleanedup is
204     + * registered at the cpu's IRR. If so, then this is not
205     + * the best time to clean it up. Lets clean it up in the
206     + * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
207     + * to myself.
208     + */
209     + if (irr & (1 << (vector % 32))) {
210     + send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
211     + goto unlock;
212     + }
213     __get_cpu_var(vector_irq)[vector] = -1;
214     cfg->move_cleanup_count--;
215     unlock:
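
The index arithmetic in this hunk reads the local APIC's 256-bit IRR, which is exposed as eight 32-bit registers spaced 0x10 bytes apart starting at offset 0x200: vector v lives in register v/32 at bit v%32. A sketch of that lookup, assuming a plain pointer to the memory-mapped APIC page (the helper name is illustrative):

#define APIC_IRR 0x200  /* first of eight 32-bit IRR registers */

static inline int vector_pending_in_irr(volatile unsigned int *apic_base,
                                        unsigned int vector)
{
        unsigned int reg;

        /* registers are 0x10 bytes apart; pointer math is in ints */
        reg = apic_base[(APIC_IRR + (vector / 32) * 0x10) / 4];
        return (reg >> (vector % 32)) & 1;
}
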
216     diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
217     index b81125f..0a303c3 100644
218     --- a/arch/x86/kvm/Kconfig
219     +++ b/arch/x86/kvm/Kconfig
220     @@ -4,6 +4,10 @@
221     config HAVE_KVM
222     bool
223    
224     +config HAVE_KVM_IRQCHIP
225     + bool
226     + default y
227     +
228     menuconfig VIRTUALIZATION
229     bool "Virtualization"
230     depends on HAVE_KVM || X86
231     diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
232     index 72bd275..3dceaef 100644
233     --- a/arch/x86/kvm/i8254.c
234     +++ b/arch/x86/kvm/i8254.c
235     @@ -536,6 +536,16 @@ void kvm_pit_reset(struct kvm_pit *pit)
236     pit->pit_state.irq_ack = 1;
237     }
238    
239     +static void pit_mask_notifer(struct kvm_irq_mask_notifier *kimn, bool mask)
240     +{
241     + struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier);
242     +
243     + if (!mask) {
244     + atomic_set(&pit->pit_state.pit_timer.pending, 0);
245     + pit->pit_state.irq_ack = 1;
246     + }
247     +}
248     +
249     struct kvm_pit *kvm_create_pit(struct kvm *kvm)
250     {
251     struct kvm_pit *pit;
252     @@ -584,6 +594,9 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm)
253    
254     kvm_pit_reset(pit);
255    
256     + pit->mask_notifier.func = pit_mask_notifer;
257     + kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
258     +
259     return pit;
260     }
261    
262     @@ -592,6 +605,8 @@ void kvm_free_pit(struct kvm *kvm)
263     struct hrtimer *timer;
264    
265     if (kvm->arch.vpit) {
266     + kvm_unregister_irq_mask_notifier(kvm, 0,
267     + &kvm->arch.vpit->mask_notifier);
268     mutex_lock(&kvm->arch.vpit->pit_state.lock);
269     timer = &kvm->arch.vpit->pit_state.pit_timer.timer;
270     hrtimer_cancel(timer);
271     diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h
272     index 4178022..0dfb936 100644
273     --- a/arch/x86/kvm/i8254.h
274     +++ b/arch/x86/kvm/i8254.h
275     @@ -45,6 +45,7 @@ struct kvm_pit {
276     struct kvm *kvm;
277     struct kvm_kpit_state pit_state;
278     int irq_source_id;
279     + struct kvm_irq_mask_notifier mask_notifier;
280     };
281    
282     #define KVM_PIT_BASE_ADDRESS 0x40
283     diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
284     index 258e5d5..eaab214 100644
285     --- a/arch/x86/kvm/mmu.h
286     +++ b/arch/x86/kvm/mmu.h
287     @@ -54,7 +54,7 @@ static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
288     static inline int is_long_mode(struct kvm_vcpu *vcpu)
289     {
290     #ifdef CONFIG_X86_64
291     - return vcpu->arch.shadow_efer & EFER_LME;
292     + return vcpu->arch.shadow_efer & EFER_LMA;
293     #else
294     return 0;
295     #endif
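
This one-bit change is the entire fix: EFER.LME (bit 8) merely arms long mode, while the CPU sets EFER.LMA (bit 10) only once paging actually activates it, so LMA is the correct "is 64-bit mode live" test. A sketch of the distinction, with the architectural bit positions spelled out:

#define EFER_LME (1ULL << 8)    /* long mode enable: armed      */
#define EFER_LMA (1ULL << 10)   /* long mode active: in effect  */

static inline int long_mode_active(unsigned long long efer)
{
        /* LME can be set while the CPU still runs 32-bit code
         * (paging off); only LMA proves long mode is active. */
        return !!(efer & EFER_LMA);
}
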
296     diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
297     index c95a67d..89addbd 100644
298     --- a/arch/x86/kvm/paging_tmpl.h
299     +++ b/arch/x86/kvm/paging_tmpl.h
300     @@ -476,16 +476,20 @@ static int FNAME(shadow_invlpg_entry)(struct kvm_shadow_walk *_sw,
301     if (level == PT_PAGE_TABLE_LEVEL ||
302     ((level == PT_DIRECTORY_LEVEL) && is_large_pte(*sptep))) {
303     struct kvm_mmu_page *sp = page_header(__pa(sptep));
304     + int need_flush = 0;
305    
306     sw->pte_gpa = (sp->gfn << PAGE_SHIFT);
307     sw->pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
308    
309     if (is_shadow_present_pte(*sptep)) {
310     + need_flush = 1;
311     rmap_remove(vcpu->kvm, sptep);
312     if (is_large_pte(*sptep))
313     --vcpu->kvm->stat.lpages;
314     }
315     set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
316     + if (need_flush)
317     + kvm_flush_remote_tlbs(vcpu->kvm);
318     return 1;
319     }
320     if (!is_shadow_present_pte(*sptep))
321     diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
322     index 90de444..898910c 100644
323     --- a/arch/x86/kvm/vmx.c
324     +++ b/arch/x86/kvm/vmx.c
325     @@ -1433,6 +1433,29 @@ continue_rmode:
326     init_rmode(vcpu->kvm);
327     }
328    
329     +static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
330     +{
331     + struct vcpu_vmx *vmx = to_vmx(vcpu);
332     + struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
333     +
334     + vcpu->arch.shadow_efer = efer;
335     + if (!msr)
336     + return;
337     + if (efer & EFER_LMA) {
338     + vmcs_write32(VM_ENTRY_CONTROLS,
339     + vmcs_read32(VM_ENTRY_CONTROLS) |
340     + VM_ENTRY_IA32E_MODE);
341     + msr->data = efer;
342     + } else {
343     + vmcs_write32(VM_ENTRY_CONTROLS,
344     + vmcs_read32(VM_ENTRY_CONTROLS) &
345     + ~VM_ENTRY_IA32E_MODE);
346     +
347     + msr->data = efer & ~EFER_LME;
348     + }
349     + setup_msrs(vmx);
350     +}
351     +
352     #ifdef CONFIG_X86_64
353    
354     static void enter_lmode(struct kvm_vcpu *vcpu)
355     @@ -1447,13 +1470,8 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
356     (guest_tr_ar & ~AR_TYPE_MASK)
357     | AR_TYPE_BUSY_64_TSS);
358     }
359     -
360     vcpu->arch.shadow_efer |= EFER_LMA;
361     -
362     - find_msr_entry(to_vmx(vcpu), MSR_EFER)->data |= EFER_LMA | EFER_LME;
363     - vmcs_write32(VM_ENTRY_CONTROLS,
364     - vmcs_read32(VM_ENTRY_CONTROLS)
365     - | VM_ENTRY_IA32E_MODE);
366     + vmx_set_efer(vcpu, vcpu->arch.shadow_efer);
367     }
368    
369     static void exit_lmode(struct kvm_vcpu *vcpu)
370     @@ -1612,30 +1630,6 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
371     vmcs_writel(GUEST_CR4, hw_cr4);
372     }
373    
374     -static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
375     -{
376     - struct vcpu_vmx *vmx = to_vmx(vcpu);
377     - struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
378     -
379     - vcpu->arch.shadow_efer = efer;
380     - if (!msr)
381     - return;
382     - if (efer & EFER_LMA) {
383     - vmcs_write32(VM_ENTRY_CONTROLS,
384     - vmcs_read32(VM_ENTRY_CONTROLS) |
385     - VM_ENTRY_IA32E_MODE);
386     - msr->data = efer;
387     -
388     - } else {
389     - vmcs_write32(VM_ENTRY_CONTROLS,
390     - vmcs_read32(VM_ENTRY_CONTROLS) &
391     - ~VM_ENTRY_IA32E_MODE);
392     -
393     - msr->data = efer & ~EFER_LME;
394     - }
395     - setup_msrs(vmx);
396     -}
397     -
398     static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
399     {
400     struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
401     diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
402     index 21bc1f7..441489c 100644
403     --- a/arch/x86/mm/pat.c
404     +++ b/arch/x86/mm/pat.c
405     @@ -713,29 +713,28 @@ static void free_pfn_range(u64 paddr, unsigned long size)
406     *
407     * If the vma has a linear pfn mapping for the entire range, we get the prot
408     * from pte and reserve the entire vma range with single reserve_pfn_range call.
409     - * Otherwise, we reserve the entire vma range, my ging through the PTEs page
410     - * by page to get physical address and protection.
411     */
412     int track_pfn_vma_copy(struct vm_area_struct *vma)
413     {
414     - int retval = 0;
415     - unsigned long i, j;
416     resource_size_t paddr;
417     unsigned long prot;
418     - unsigned long vma_start = vma->vm_start;
419     - unsigned long vma_end = vma->vm_end;
420     - unsigned long vma_size = vma_end - vma_start;
421     + unsigned long vma_size = vma->vm_end - vma->vm_start;
422     pgprot_t pgprot;
423    
424     if (!pat_enabled)
425     return 0;
426    
427     + /*
428     + * For now, only handle remap_pfn_range() vmas where
429     + * is_linear_pfn_mapping() == TRUE. Handling of
430     + * vm_insert_pfn() is TBD.
431     + */
432     if (is_linear_pfn_mapping(vma)) {
433     /*
434     * reserve the whole chunk covered by vma. We need the
435     * starting address and protection from pte.
436     */
437     - if (follow_phys(vma, vma_start, 0, &prot, &paddr)) {
438     + if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
439     WARN_ON_ONCE(1);
440     return -EINVAL;
441     }
442     @@ -743,28 +742,7 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
443     return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
444     }
445    
446     - /* reserve entire vma page by page, using pfn and prot from pte */
447     - for (i = 0; i < vma_size; i += PAGE_SIZE) {
448     - if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
449     - continue;
450     -
451     - pgprot = __pgprot(prot);
452     - retval = reserve_pfn_range(paddr, PAGE_SIZE, &pgprot, 1);
453     - if (retval)
454     - goto cleanup_ret;
455     - }
456     return 0;
457     -
458     -cleanup_ret:
459     - /* Reserve error: Cleanup partial reservation and return error */
460     - for (j = 0; j < i; j += PAGE_SIZE) {
461     - if (follow_phys(vma, vma_start + j, 0, &prot, &paddr))
462     - continue;
463     -
464     - free_pfn_range(paddr, PAGE_SIZE);
465     - }
466     -
467     - return retval;
468     }
469    
470     /*
471     @@ -774,50 +752,28 @@ cleanup_ret:
472     * prot is passed in as a parameter for the new mapping. If the vma has a
473     * linear pfn mapping for the entire range reserve the entire vma range with
474     * single reserve_pfn_range call.
475     - * Otherwise, we look t the pfn and size and reserve only the specified range
476     - * page by page.
477     - *
478     - * Note that this function can be called with caller trying to map only a
479     - * subrange/page inside the vma.
480     */
481     int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
482     unsigned long pfn, unsigned long size)
483     {
484     - int retval = 0;
485     - unsigned long i, j;
486     - resource_size_t base_paddr;
487     resource_size_t paddr;
488     - unsigned long vma_start = vma->vm_start;
489     - unsigned long vma_end = vma->vm_end;
490     - unsigned long vma_size = vma_end - vma_start;
491     + unsigned long vma_size = vma->vm_end - vma->vm_start;
492    
493     if (!pat_enabled)
494     return 0;
495    
496     + /*
497     + * For now, only handle remap_pfn_range() vmas where
498     + * is_linear_pfn_mapping() == TRUE. Handling of
499     + * vm_insert_pfn() is TBD.
500     + */
501     if (is_linear_pfn_mapping(vma)) {
502     /* reserve the whole chunk starting from vm_pgoff */
503     paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
504     return reserve_pfn_range(paddr, vma_size, prot, 0);
505     }
506    
507     - /* reserve page by page using pfn and size */
508     - base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
509     - for (i = 0; i < size; i += PAGE_SIZE) {
510     - paddr = base_paddr + i;
511     - retval = reserve_pfn_range(paddr, PAGE_SIZE, prot, 0);
512     - if (retval)
513     - goto cleanup_ret;
514     - }
515     return 0;
516     -
517     -cleanup_ret:
518     - /* Reserve error: Cleanup partial reservation and return error */
519     - for (j = 0; j < i; j += PAGE_SIZE) {
520     - paddr = base_paddr + j;
521     - free_pfn_range(paddr, PAGE_SIZE);
522     - }
523     -
524     - return retval;
525     }
526    
527     /*
528     @@ -828,39 +784,23 @@ cleanup_ret:
529     void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
530     unsigned long size)
531     {
532     - unsigned long i;
533     resource_size_t paddr;
534     - unsigned long prot;
535     - unsigned long vma_start = vma->vm_start;
536     - unsigned long vma_end = vma->vm_end;
537     - unsigned long vma_size = vma_end - vma_start;
538     + unsigned long vma_size = vma->vm_end - vma->vm_start;
539    
540     if (!pat_enabled)
541     return;
542    
543     + /*
544     + * For now, only handle remap_pfn_range() vmas where
545     + * is_linear_pfn_mapping() == TRUE. Handling of
546     + * vm_insert_pfn() is TBD.
547     + */
548     if (is_linear_pfn_mapping(vma)) {
549     /* free the whole chunk starting from vm_pgoff */
550     paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
551     free_pfn_range(paddr, vma_size);
552     return;
553     }
554     -
555     - if (size != 0 && size != vma_size) {
556     - /* free page by page, using pfn and size */
557     - paddr = (resource_size_t)pfn << PAGE_SHIFT;
558     - for (i = 0; i < size; i += PAGE_SIZE) {
559     - paddr = paddr + i;
560     - free_pfn_range(paddr, PAGE_SIZE);
561     - }
562     - } else {
563     - /* free entire vma, page by page, using the pfn from pte */
564     - for (i = 0; i < vma_size; i += PAGE_SIZE) {
565     - if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
566     - continue;
567     -
568     - free_pfn_range(paddr, PAGE_SIZE);
569     - }
570     - }
571     }
572    
573     pgprot_t pgprot_writecombine(pgprot_t prot)
574     diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
575     index 7d388d5..096b0ed 100644
576     --- a/arch/x86/pci/fixup.c
577     +++ b/arch/x86/pci/fixup.c
578     @@ -495,26 +495,6 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SIEMENS, 0x0015,
579     pci_siemens_interrupt_controller);
580    
581     /*
582     - * Regular PCI devices have 256 bytes, but AMD Family 10h/11h CPUs have
583     - * 4096 bytes configuration space for each function of their processor
584     - * configuration space.
585     - */
586     -static void amd_cpu_pci_cfg_space_size(struct pci_dev *dev)
587     -{
588     - dev->cfg_size = pci_cfg_space_size_ext(dev);
589     -}
590     -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1200, amd_cpu_pci_cfg_space_size);
591     -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1201, amd_cpu_pci_cfg_space_size);
592     -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1202, amd_cpu_pci_cfg_space_size);
593     -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1203, amd_cpu_pci_cfg_space_size);
594     -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1204, amd_cpu_pci_cfg_space_size);
595     -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1300, amd_cpu_pci_cfg_space_size);
596     -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1301, amd_cpu_pci_cfg_space_size);
597     -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1302, amd_cpu_pci_cfg_space_size);
598     -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1303, amd_cpu_pci_cfg_space_size);
599     -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1304, amd_cpu_pci_cfg_space_size);
600     -
601     -/*
602     * SB600: Disable BAR1 on device 14.0 to avoid HPET resources from
603     * confusing the PCI engine:
604     */
605     diff --git a/crypto/shash.c b/crypto/shash.c
606     index d5a2b61..6792a67 100644
607     --- a/crypto/shash.c
608     +++ b/crypto/shash.c
609     @@ -82,6 +82,9 @@ static int shash_update_unaligned(struct shash_desc *desc, const u8 *data,
610     u8 buf[shash_align_buffer_size(unaligned_len, alignmask)]
611     __attribute__ ((aligned));
612    
613     + if (unaligned_len > len)
614     + unaligned_len = len;
615     +
616     memcpy(buf, data, unaligned_len);
617    
618     return shash->update(desc, buf, unaligned_len) ?:
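
The added comparison is a straight bounds clamp: the unaligned head copied into the stack bounce buffer must never exceed the caller's total length, or memcpy reads past the input. The same shape in isolation:

#include <string.h>

/* Copy at most len bytes of the unaligned head into buf; a sketch
 * of the clamp this hunk adds. */
static size_t copy_unaligned_head(void *buf, const void *data,
                                  size_t unaligned_len, size_t len)
{
        if (unaligned_len > len)
                unaligned_len = len;    /* never overread data */
        memcpy(buf, data, unaligned_len);
        return unaligned_len;
}
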
619     diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
620     index 35094f2..8f62fa0 100644
621     --- a/drivers/acpi/dock.c
622     +++ b/drivers/acpi/dock.c
623     @@ -1146,9 +1146,10 @@ static int __init dock_init(void)
624     static void __exit dock_exit(void)
625     {
626     struct dock_station *dock_station;
627     + struct dock_station *tmp;
628    
629     unregister_acpi_bus_notifier(&dock_acpi_notifier);
630     - list_for_each_entry(dock_station, &dock_stations, sibiling)
631     + list_for_each_entry_safe(dock_station, tmp, &dock_stations, sibiling)
632     dock_remove(dock_station);
633     }
634    
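
The switch to list_for_each_entry_safe matters because dock_remove() frees the node being visited; the plain iterator would then read the next pointer out of freed memory. The same use-after-free shape, sketched with a bare singly linked list:

#include <stdlib.h>

struct node {
        struct node *next;
};

/* Save the successor *before* freeing -- exactly what the _safe
 * iterator's extra cursor does. */
static void free_all(struct node *head)
{
        struct node *n = head;

        while (n) {
                struct node *next = n->next;   /* grab first */
                free(n);                       /* now safe   */
                n = next;
        }
}
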
635     diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
636     index 4216399..233a5fd 100644
637     --- a/drivers/ata/pata_hpt37x.c
638     +++ b/drivers/ata/pata_hpt37x.c
639     @@ -8,7 +8,7 @@
640     * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
641     * Portions Copyright (C) 2001 Sun Microsystems, Inc.
642     * Portions Copyright (C) 2003 Red Hat Inc
643     - * Portions Copyright (C) 2005-2007 MontaVista Software, Inc.
644     + * Portions Copyright (C) 2005-2009 MontaVista Software, Inc.
645     *
646     * TODO
647     * Look into engine reset on timeout errors. Should not be required.
648     @@ -24,7 +24,7 @@
649     #include <linux/libata.h>
650    
651     #define DRV_NAME "pata_hpt37x"
652     -#define DRV_VERSION "0.6.11"
653     +#define DRV_VERSION "0.6.12"
654    
655     struct hpt_clock {
656     u8 xfer_speed;
657     @@ -445,23 +445,6 @@ static void hpt370_set_dmamode(struct ata_port *ap, struct ata_device *adev)
658     }
659    
660     /**
661     - * hpt370_bmdma_start - DMA engine begin
662     - * @qc: ATA command
663     - *
664     - * The 370 and 370A want us to reset the DMA engine each time we
665     - * use it. The 372 and later are fine.
666     - */
667     -
668     -static void hpt370_bmdma_start(struct ata_queued_cmd *qc)
669     -{
670     - struct ata_port *ap = qc->ap;
671     - struct pci_dev *pdev = to_pci_dev(ap->host->dev);
672     - pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
673     - udelay(10);
674     - ata_bmdma_start(qc);
675     -}
676     -
677     -/**
678     * hpt370_bmdma_end - DMA engine stop
679     * @qc: ATA command
680     *
681     @@ -598,7 +581,6 @@ static struct scsi_host_template hpt37x_sht = {
682     static struct ata_port_operations hpt370_port_ops = {
683     .inherits = &ata_bmdma_port_ops,
684    
685     - .bmdma_start = hpt370_bmdma_start,
686     .bmdma_stop = hpt370_bmdma_stop,
687    
688     .mode_filter = hpt370_filter,
689     diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
690     index 10d6cbd..2224b76 100644
691     --- a/drivers/char/agp/generic.c
692     +++ b/drivers/char/agp/generic.c
693     @@ -1226,7 +1226,7 @@ int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *m
694     int i, ret = -ENOMEM;
695    
696     for (i = 0; i < num_pages; i++) {
697     - page = alloc_page(GFP_KERNEL | GFP_DMA32);
698     + page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
699     /* agp_free_memory() needs gart address */
700     if (page == NULL)
701     goto out;
702     @@ -1257,7 +1257,7 @@ void *agp_generic_alloc_page(struct agp_bridge_data *bridge)
703     {
704     struct page * page;
705    
706     - page = alloc_page(GFP_KERNEL | GFP_DMA32);
707     + page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
708     if (page == NULL)
709     return NULL;
710    
711     diff --git a/drivers/char/vt.c b/drivers/char/vt.c
712     index 7900bd6..60453ab 100644
713     --- a/drivers/char/vt.c
714     +++ b/drivers/char/vt.c
715     @@ -2271,7 +2271,7 @@ rescan_last_byte:
716     continue; /* nothing to display */
717     }
718     /* Glyph not found */
719     - if ((!(vc->vc_utf && !vc->vc_disp_ctrl) && c < 128) && !(c & ~charmask)) {
720     + if ((!(vc->vc_utf && !vc->vc_disp_ctrl) || c < 128) && !(c & ~charmask)) {
721     /* In legacy mode use the glyph we get by a 1:1 mapping.
722     This would make absolutely no sense with Unicode in mind,
723     but do this for ASCII characters since a font may lack
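
The operator flip (&& to ||) widens the legacy-glyph fallback: it now applies when the console is not in pure UTF-8 display mode or when the character is plain ASCII, provided it fits the font's character mask. The corrected predicate, restated as a standalone function:

/* Sketch of the fixed condition from the hunk above. */
static int use_direct_glyph(int utf, int disp_ctrl,
                            unsigned int c, unsigned int charmask)
{
        return (!(utf && !disp_ctrl) || c < 128) && !(c & ~charmask);
}
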
724     diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
725     index 88d3368..7ee1ce1 100644
726     --- a/drivers/gpu/drm/drm_gem.c
727     +++ b/drivers/gpu/drm/drm_gem.c
728     @@ -505,7 +505,6 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
729     struct drm_map *map = NULL;
730     struct drm_gem_object *obj;
731     struct drm_hash_item *hash;
732     - unsigned long prot;
733     int ret = 0;
734    
735     mutex_lock(&dev->struct_mutex);
736     @@ -538,11 +537,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
737     vma->vm_ops = obj->dev->driver->gem_vm_ops;
738     vma->vm_private_data = map->handle;
739     /* FIXME: use pgprot_writecombine when available */
740     - prot = pgprot_val(vma->vm_page_prot);
741     -#ifdef CONFIG_X86
742     - prot |= _PAGE_CACHE_WC;
743     -#endif
744     - vma->vm_page_prot = __pgprot(prot);
745     + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
746    
747     /* Take a ref for this mapping of the object, so that the fault
748     * handler can dereference the mmap offset's pointer to the object.
749     diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
750     index 6d21b9e..908d24e 100644
751     --- a/drivers/gpu/drm/i915/i915_dma.c
752     +++ b/drivers/gpu/drm/i915/i915_dma.c
753     @@ -41,7 +41,6 @@
754     int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
755     {
756     drm_i915_private_t *dev_priv = dev->dev_private;
757     - struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
758     drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
759     u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
760     u32 last_acthd = I915_READ(acthd_reg);
761     @@ -58,8 +57,12 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
762     if (ring->space >= n)
763     return 0;
764    
765     - if (master_priv->sarea_priv)
766     - master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
767     + if (dev->primary->master) {
768     + struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
769     + if (master_priv->sarea_priv)
770     + master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
771     + }
772     +
773    
774     if (ring->head != last_head)
775     i = 0;
776     diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
777     index 37427e4..fb6390a 100644
778     --- a/drivers/gpu/drm/i915/i915_gem.c
779     +++ b/drivers/gpu/drm/i915/i915_gem.c
780     @@ -603,6 +603,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
781     case -EAGAIN:
782     return VM_FAULT_OOM;
783     case -EFAULT:
784     + case -EINVAL:
785     return VM_FAULT_SIGBUS;
786     default:
787     return VM_FAULT_NOPAGE;
788     diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
789     index 7fb4191..4cce1ae 100644
790     --- a/drivers/gpu/drm/i915/i915_gem_tiling.c
791     +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
792     @@ -96,16 +96,16 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
793     */
794     swizzle_x = I915_BIT_6_SWIZZLE_NONE;
795     swizzle_y = I915_BIT_6_SWIZZLE_NONE;
796     - } else if ((!IS_I965G(dev) && !IS_G33(dev)) || IS_I965GM(dev) ||
797     - IS_GM45(dev)) {
798     + } else if (IS_MOBILE(dev)) {
799     uint32_t dcc;
800    
801     - /* On 915-945 and GM965, channel interleave by the CPU is
802     - * determined by DCC. The CPU will alternate based on bit 6
803     - * in interleaved mode, and the GPU will then also alternate
804     - * on bit 6, 9, and 10 for X, but the CPU may also optionally
805     - * alternate based on bit 17 (XOR not disabled and XOR
806     - * bit == 17).
807     + /* On mobile 9xx chipsets, channel interleave by the CPU is
808     + * determined by DCC. For single-channel, neither the CPU
809     + * nor the GPU do swizzling. For dual channel interleaved,
810     + * the GPU's interleave is bit 9 and 10 for X tiled, and bit
811     + * 9 for Y tiled. The CPU's interleave is independent, and
812     + * can be based on either bit 11 (haven't seen this yet) or
813     + * bit 17 (common).
814     */
815     dcc = I915_READ(DCC);
816     switch (dcc & DCC_ADDRESSING_MODE_MASK) {
817     @@ -115,19 +115,18 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
818     swizzle_y = I915_BIT_6_SWIZZLE_NONE;
819     break;
820     case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
821     - if (IS_I915G(dev) || IS_I915GM(dev) ||
822     - dcc & DCC_CHANNEL_XOR_DISABLE) {
823     + if (dcc & DCC_CHANNEL_XOR_DISABLE) {
824     + /* This is the base swizzling by the GPU for
825     + * tiled buffers.
826     + */
827     swizzle_x = I915_BIT_6_SWIZZLE_9_10;
828     swizzle_y = I915_BIT_6_SWIZZLE_9;
829     - } else if ((IS_I965GM(dev) || IS_GM45(dev)) &&
830     - (dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
831     - /* GM965/GM45 does either bit 11 or bit 17
832     - * swizzling.
833     - */
834     + } else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
835     + /* Bit 11 swizzling by the CPU in addition. */
836     swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
837     swizzle_y = I915_BIT_6_SWIZZLE_9_11;
838     } else {
839     - /* Bit 17 or perhaps other swizzling */
840     + /* Bit 17 swizzling by the CPU in addition. */
841     swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
842     swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
843     }
844     diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
845     index 90600d8..cc2938d 100644
846     --- a/drivers/gpu/drm/i915/i915_reg.h
847     +++ b/drivers/gpu/drm/i915/i915_reg.h
848     @@ -629,6 +629,22 @@
849     #define TV_HOTPLUG_INT_EN (1 << 18)
850     #define CRT_HOTPLUG_INT_EN (1 << 9)
851     #define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
852     +#define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8)
853     +/* must use period 64 on GM45 according to docs */
854     +#define CRT_HOTPLUG_ACTIVATION_PERIOD_64 (1 << 8)
855     +#define CRT_HOTPLUG_DAC_ON_TIME_2M (0 << 7)
856     +#define CRT_HOTPLUG_DAC_ON_TIME_4M (1 << 7)
857     +#define CRT_HOTPLUG_VOLTAGE_COMPARE_40 (0 << 5)
858     +#define CRT_HOTPLUG_VOLTAGE_COMPARE_50 (1 << 5)
859     +#define CRT_HOTPLUG_VOLTAGE_COMPARE_60 (2 << 5)
860     +#define CRT_HOTPLUG_VOLTAGE_COMPARE_70 (3 << 5)
861     +#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK (3 << 5)
862     +#define CRT_HOTPLUG_DETECT_DELAY_1G (0 << 4)
863     +#define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4)
864     +#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
865     +#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
866     +#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */
867     +
868    
869     #define PORT_HOTPLUG_STAT 0x61114
870     #define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
871     diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
872     index dcaed34..61c108e 100644
873     --- a/drivers/gpu/drm/i915/intel_crt.c
874     +++ b/drivers/gpu/drm/i915/intel_crt.c
875     @@ -133,20 +133,39 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
876     {
877     struct drm_device *dev = connector->dev;
878     struct drm_i915_private *dev_priv = dev->dev_private;
879     - u32 temp;
880     -
881     - unsigned long timeout = jiffies + msecs_to_jiffies(1000);
882     -
883     - temp = I915_READ(PORT_HOTPLUG_EN);
884     -
885     - I915_WRITE(PORT_HOTPLUG_EN,
886     - temp | CRT_HOTPLUG_FORCE_DETECT | (1 << 5));
887     + u32 hotplug_en;
888     + int i, tries = 0;
889     + /*
890     + * On 4 series desktop, CRT detect sequence need to be done twice
891     + * to get a reliable result.
892     + */
893    
894     - do {
895     - if (!(I915_READ(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT))
896     - break;
897     - msleep(1);
898     - } while (time_after(timeout, jiffies));
899     + if (IS_G4X(dev) && !IS_GM45(dev))
900     + tries = 2;
901     + else
902     + tries = 1;
903     + hotplug_en = I915_READ(PORT_HOTPLUG_EN);
904     + hotplug_en &= ~(CRT_HOTPLUG_MASK);
905     + hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
906     +
907     + if (IS_GM45(dev))
908     + hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
909     +
910     + hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
911     +
912     + for (i = 0; i < tries ; i++) {
913     + unsigned long timeout;
914     + /* turn on the FORCE_DETECT */
915     + I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
916     + timeout = jiffies + msecs_to_jiffies(1000);
917     + /* wait for FORCE_DETECT to go off */
918     + do {
919     + if (!(I915_READ(PORT_HOTPLUG_EN) &
920     + CRT_HOTPLUG_FORCE_DETECT))
921     + break;
922     + msleep(1);
923     + } while (time_after(timeout, jiffies));
924     + }
925    
926     if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) ==
927     CRT_HOTPLUG_MONITOR_COLOR)
928     diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
929     index a283427..601a76f 100644
930     --- a/drivers/gpu/drm/i915/intel_display.c
931     +++ b/drivers/gpu/drm/i915/intel_display.c
932     @@ -1474,13 +1474,21 @@ static void intel_setup_outputs(struct drm_device *dev)
933    
934     if (IS_I9XX(dev)) {
935     int found;
936     + u32 reg;
937    
938     if (I915_READ(SDVOB) & SDVO_DETECTED) {
939     found = intel_sdvo_init(dev, SDVOB);
940     if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
941     intel_hdmi_init(dev, SDVOB);
942     }
943     - if (!IS_G4X(dev) || (I915_READ(SDVOB) & SDVO_DETECTED)) {
944     +
945     + /* Before G4X SDVOC doesn't have its own detect register */
946     + if (IS_G4X(dev))
947     + reg = SDVOC;
948     + else
949     + reg = SDVOB;
950     +
951     + if (I915_READ(reg) & SDVO_DETECTED) {
952     found = intel_sdvo_init(dev, SDVOC);
953     if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
954     intel_hdmi_init(dev, SDVOC);
955     diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
956     index 56485d6..b05cb67 100644
957     --- a/drivers/gpu/drm/i915/intel_tv.c
958     +++ b/drivers/gpu/drm/i915/intel_tv.c
959     @@ -1558,33 +1558,49 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
960     struct drm_device *dev = connector->dev;
961     struct intel_output *intel_output = to_intel_output(connector);
962     struct intel_tv_priv *tv_priv = intel_output->dev_priv;
963     + struct drm_encoder *encoder = &intel_output->enc;
964     + struct drm_crtc *crtc = encoder->crtc;
965     int ret = 0;
966     + bool changed = false;
967    
968     ret = drm_connector_property_set_value(connector, property, val);
969     if (ret < 0)
970     goto out;
971    
972     - if (property == dev->mode_config.tv_left_margin_property)
973     + if (property == dev->mode_config.tv_left_margin_property &&
974     + tv_priv->margin[TV_MARGIN_LEFT] != val) {
975     tv_priv->margin[TV_MARGIN_LEFT] = val;
976     - else if (property == dev->mode_config.tv_right_margin_property)
977     + changed = true;
978     + } else if (property == dev->mode_config.tv_right_margin_property &&
979     + tv_priv->margin[TV_MARGIN_RIGHT] != val) {
980     tv_priv->margin[TV_MARGIN_RIGHT] = val;
981     - else if (property == dev->mode_config.tv_top_margin_property)
982     + changed = true;
983     + } else if (property == dev->mode_config.tv_top_margin_property &&
984     + tv_priv->margin[TV_MARGIN_TOP] != val) {
985     tv_priv->margin[TV_MARGIN_TOP] = val;
986     - else if (property == dev->mode_config.tv_bottom_margin_property)
987     + changed = true;
988     + } else if (property == dev->mode_config.tv_bottom_margin_property &&
989     + tv_priv->margin[TV_MARGIN_BOTTOM] != val) {
990     tv_priv->margin[TV_MARGIN_BOTTOM] = val;
991     - else if (property == dev->mode_config.tv_mode_property) {
992     + changed = true;
993     + } else if (property == dev->mode_config.tv_mode_property) {
994     if (val >= NUM_TV_MODES) {
995     ret = -EINVAL;
996     goto out;
997     }
998     + if (!strcmp(tv_priv->tv_format, tv_modes[val].name))
999     + goto out;
1000     +
1001     tv_priv->tv_format = tv_modes[val].name;
1002     - intel_tv_mode_set(&intel_output->enc, NULL, NULL);
1003     + changed = true;
1004     } else {
1005     ret = -EINVAL;
1006     goto out;
1007     }
1008    
1009     - intel_tv_mode_set(&intel_output->enc, NULL, NULL);
1010     + if (changed && crtc)
1011     + drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
1012     + crtc->y, crtc->fb);
1013     out:
1014     return ret;
1015     }
1016     diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
1017     index 3eb9b5c..5ff6962 100644
1018     --- a/drivers/ide/hpt366.c
1019     +++ b/drivers/ide/hpt366.c
1020     @@ -114,6 +114,8 @@
1021     * the register setting lists into the table indexed by the clock selected
1022     * - set the correct hwif->ultra_mask for each individual chip
1023     * - add Ultra and MW DMA mode filtering for the HPT37[24] based SATA cards
1024     + * - stop resetting HPT370's state machine before each DMA transfer as that has
1025     + * caused more harm than good
1026     * Sergei Shtylyov, <sshtylyov@ru.mvista.com> or <source@mvista.com>
1027     */
1028    
1029     @@ -133,7 +135,7 @@
1030     #define DRV_NAME "hpt366"
1031    
1032     /* various tuning parameters */
1033     -#define HPT_RESET_STATE_ENGINE
1034     +#undef HPT_RESET_STATE_ENGINE
1035     #undef HPT_DELAY_INTERRUPT
1036    
1037     static const char *quirk_drives[] = {
1038     diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
1039     index e9d042d..53a9e8d 100644
1040     --- a/drivers/ide/ide-atapi.c
1041     +++ b/drivers/ide/ide-atapi.c
1042     @@ -6,6 +6,8 @@
1043     #include <linux/cdrom.h>
1044     #include <linux/delay.h>
1045     #include <linux/ide.h>
1046     +#include <linux/scatterlist.h>
1047     +
1048     #include <scsi/scsi.h>
1049    
1050     #ifdef DEBUG
1051     @@ -566,6 +568,10 @@ static ide_startstop_t ide_transfer_pc(ide_drive_t *drive)
1052     : ide_pc_intr),
1053     timeout, expiry);
1054    
1055     + /* Send the actual packet */
1056     + if ((drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) == 0)
1057     + hwif->tp_ops->output_data(drive, NULL, rq->cmd, cmd_len);
1058     +
1059     /* Begin DMA, if necessary */
1060     if (dev_is_idecd(drive)) {
1061     if (drive->dma)
1062     @@ -577,10 +583,6 @@ static ide_startstop_t ide_transfer_pc(ide_drive_t *drive)
1063     }
1064     }
1065    
1066     - /* Send the actual packet */
1067     - if ((drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) == 0)
1068     - hwif->tp_ops->output_data(drive, NULL, rq->cmd, cmd_len);
1069     -
1070     return ide_started;
1071     }
1072    
1073     diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
1074     index a9a6c20..af70777 100644
1075     --- a/drivers/ide/ide-io.c
1076     +++ b/drivers/ide/ide-io.c
1077     @@ -736,11 +736,10 @@ repeat:
1078     prev_port = hwif->host->cur_port;
1079     hwif->rq = NULL;
1080    
1081     - if (drive->dev_flags & IDE_DFLAG_SLEEPING) {
1082     - if (time_before(drive->sleep, jiffies)) {
1083     - ide_unlock_port(hwif);
1084     - goto plug_device;
1085     - }
1086     + if (drive->dev_flags & IDE_DFLAG_SLEEPING &&
1087     + time_after(drive->sleep, jiffies)) {
1088     + ide_unlock_port(hwif);
1089     + goto plug_device;
1090     }
1091    
1092     if ((hwif->host->host_flags & IDE_HFLAG_SERIALIZE) &&
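
The rework leans on time_after(), which exists because jiffies wraps: a direct "<" on the raw counters misfires near the wrap point, whereas signed subtraction stays correct as long as the two timestamps are within half the counter range. A sketch of the wrap-safe comparison:

/* Wrap-safe "has now reached deadline" test, the same idea as the
 * kernel's time_after_eq(): cast the difference to signed. */
static inline int deadline_reached(unsigned long now, unsigned long deadline)
{
        return (long)(now - deadline) >= 0;
}
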
1093     diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
1094     index ebf4be5..2d175b5 100644
1095     --- a/drivers/input/gameport/gameport.c
1096     +++ b/drivers/input/gameport/gameport.c
1097     @@ -50,9 +50,8 @@ static LIST_HEAD(gameport_list);
1098    
1099     static struct bus_type gameport_bus;
1100    
1101     -static void gameport_add_driver(struct gameport_driver *drv);
1102     static void gameport_add_port(struct gameport *gameport);
1103     -static void gameport_destroy_port(struct gameport *gameport);
1104     +static void gameport_attach_driver(struct gameport_driver *drv);
1105     static void gameport_reconnect_port(struct gameport *gameport);
1106     static void gameport_disconnect_port(struct gameport *gameport);
1107    
1108     @@ -230,7 +229,6 @@ static void gameport_find_driver(struct gameport *gameport)
1109    
1110     enum gameport_event_type {
1111     GAMEPORT_REGISTER_PORT,
1112     - GAMEPORT_REGISTER_DRIVER,
1113     GAMEPORT_ATTACH_DRIVER,
1114     };
1115    
1116     @@ -374,8 +372,8 @@ static void gameport_handle_event(void)
1117     gameport_add_port(event->object);
1118     break;
1119    
1120     - case GAMEPORT_REGISTER_DRIVER:
1121     - gameport_add_driver(event->object);
1122     + case GAMEPORT_ATTACH_DRIVER:
1123     + gameport_attach_driver(event->object);
1124     break;
1125    
1126     default:
1127     @@ -706,14 +704,14 @@ static int gameport_driver_remove(struct device *dev)
1128     return 0;
1129     }
1130    
1131     -static void gameport_add_driver(struct gameport_driver *drv)
1132     +static void gameport_attach_driver(struct gameport_driver *drv)
1133     {
1134     int error;
1135    
1136     - error = driver_register(&drv->driver);
1137     + error = driver_attach(&drv->driver);
1138     if (error)
1139     printk(KERN_ERR
1140     - "gameport: driver_register() failed for %s, error: %d\n",
1141     + "gameport: driver_attach() failed for %s, error: %d\n",
1142     drv->driver.name, error);
1143     }
1144    
1145     diff --git a/drivers/md/dm-bio-record.h b/drivers/md/dm-bio-record.h
1146     index d3ec217..3a8cfa2 100644
1147     --- a/drivers/md/dm-bio-record.h
1148     +++ b/drivers/md/dm-bio-record.h
1149     @@ -16,30 +16,56 @@
1150     * functions in this file help the target record and restore the
1151     * original bio state.
1152     */
1153     +
1154     +struct dm_bio_vec_details {
1155     +#if PAGE_SIZE < 65536
1156     + __u16 bv_len;
1157     + __u16 bv_offset;
1158     +#else
1159     + unsigned bv_len;
1160     + unsigned bv_offset;
1161     +#endif
1162     +};
1163     +
1164     struct dm_bio_details {
1165     sector_t bi_sector;
1166     struct block_device *bi_bdev;
1167     unsigned int bi_size;
1168     unsigned short bi_idx;
1169     unsigned long bi_flags;
1170     + struct dm_bio_vec_details bi_io_vec[BIO_MAX_PAGES];
1171     };
1172    
1173     static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
1174     {
1175     + unsigned i;
1176     +
1177     bd->bi_sector = bio->bi_sector;
1178     bd->bi_bdev = bio->bi_bdev;
1179     bd->bi_size = bio->bi_size;
1180     bd->bi_idx = bio->bi_idx;
1181     bd->bi_flags = bio->bi_flags;
1182     +
1183     + for (i = 0; i < bio->bi_vcnt; i++) {
1184     + bd->bi_io_vec[i].bv_len = bio->bi_io_vec[i].bv_len;
1185     + bd->bi_io_vec[i].bv_offset = bio->bi_io_vec[i].bv_offset;
1186     + }
1187     }
1188    
1189     static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
1190     {
1191     + unsigned i;
1192     +
1193     bio->bi_sector = bd->bi_sector;
1194     bio->bi_bdev = bd->bi_bdev;
1195     bio->bi_size = bd->bi_size;
1196     bio->bi_idx = bd->bi_idx;
1197     bio->bi_flags = bd->bi_flags;
1198     +
1199     + for (i = 0; i < bio->bi_vcnt; i++) {
1200     + bio->bi_io_vec[i].bv_len = bd->bi_io_vec[i].bv_len;
1201     + bio->bi_io_vec[i].bv_offset = bd->bi_io_vec[i].bv_offset;
1202     + }
1203     }
1204    
1205     #endif
1206     diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
1207     index 36e2b5e..e73aabd 100644
1208     --- a/drivers/md/dm-io.c
1209     +++ b/drivers/md/dm-io.c
1210     @@ -370,16 +370,13 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
1211     while (1) {
1212     set_current_state(TASK_UNINTERRUPTIBLE);
1213    
1214     - if (!atomic_read(&io.count) || signal_pending(current))
1215     + if (!atomic_read(&io.count))
1216     break;
1217    
1218     io_schedule();
1219     }
1220     set_current_state(TASK_RUNNING);
1221    
1222     - if (atomic_read(&io.count))
1223     - return -EINTR;
1224     -
1225     if (error_bits)
1226     *error_bits = io.error_bits;
1227    
1228     diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
1229     index 0a225da..3e3fc06 100644
1230     --- a/drivers/md/dm-kcopyd.c
1231     +++ b/drivers/md/dm-kcopyd.c
1232     @@ -297,7 +297,8 @@ static int run_complete_job(struct kcopyd_job *job)
1233     dm_kcopyd_notify_fn fn = job->fn;
1234     struct dm_kcopyd_client *kc = job->kc;
1235    
1236     - kcopyd_put_pages(kc, job->pages);
1237     + if (job->pages)
1238     + kcopyd_put_pages(kc, job->pages);
1239     mempool_free(job, kc->job_pool);
1240     fn(read_err, write_err, context);
1241    
1242     @@ -461,6 +462,7 @@ static void segment_complete(int read_err, unsigned long write_err,
1243     sector_t progress = 0;
1244     sector_t count = 0;
1245     struct kcopyd_job *job = (struct kcopyd_job *) context;
1246     + struct dm_kcopyd_client *kc = job->kc;
1247    
1248     mutex_lock(&job->lock);
1249    
1250     @@ -490,7 +492,7 @@ static void segment_complete(int read_err, unsigned long write_err,
1251    
1252     if (count) {
1253     int i;
1254     - struct kcopyd_job *sub_job = mempool_alloc(job->kc->job_pool,
1255     + struct kcopyd_job *sub_job = mempool_alloc(kc->job_pool,
1256     GFP_NOIO);
1257    
1258     *sub_job = *job;
1259     @@ -509,13 +511,16 @@ static void segment_complete(int read_err, unsigned long write_err,
1260     } else if (atomic_dec_and_test(&job->sub_jobs)) {
1261    
1262     /*
1263     - * To avoid a race we must keep the job around
1264     - * until after the notify function has completed.
1265     - * Otherwise the client may try and stop the job
1266     - * after we've completed.
1267     + * Queue the completion callback to the kcopyd thread.
1268     + *
1269     + * Some callers assume that all the completions are called
1270     + * from a single thread and don't race with each other.
1271     + *
1272     + * We must not call the callback directly here because this
1273     + * code may not be executing in the thread.
1274     */
1275     - job->fn(read_err, write_err, job->context);
1276     - mempool_free(job, job->kc->job_pool);
1277     + push(&kc->complete_jobs, job);
1278     + wake(kc);
1279     }
1280     }
1281    
1282     @@ -528,6 +533,8 @@ static void split_job(struct kcopyd_job *job)
1283     {
1284     int i;
1285    
1286     + atomic_inc(&job->kc->nr_jobs);
1287     +
1288     atomic_set(&job->sub_jobs, SPLIT_COUNT);
1289     for (i = 0; i < SPLIT_COUNT; i++)
1290     segment_complete(0, 0u, job);
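
The kcopyd change routes every completion callback through one worker thread instead of firing it from whichever context finished last, so callbacks can no longer race each other. A user-space sketch of that handoff (names here are illustrative, not the dm API):

#include <pthread.h>
#include <stdlib.h>

struct job {
        struct job *next;
        void (*fn)(void *ctx);
        void *ctx;
};

static struct job *complete_jobs;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  kick = PTHREAD_COND_INITIALIZER;

static void push_completion(struct job *j)     /* any thread */
{
        pthread_mutex_lock(&lock);
        j->next = complete_jobs;
        complete_jobs = j;
        pthread_cond_signal(&kick);
        pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)                 /* one thread only */
{
        (void)arg;
        for (;;) {
                struct job *j;

                pthread_mutex_lock(&lock);
                while (!complete_jobs)
                        pthread_cond_wait(&kick, &lock);
                j = complete_jobs;
                complete_jobs = j->next;
                pthread_mutex_unlock(&lock);

                j->fn(j->ctx);   /* serialized, like the dm thread */
                free(j);
        }
        return NULL;
}
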
1291     diff --git a/drivers/md/dm-path-selector.c b/drivers/md/dm-path-selector.c
1292     index 96ea226..42c04f0 100644
1293     --- a/drivers/md/dm-path-selector.c
1294     +++ b/drivers/md/dm-path-selector.c
1295     @@ -17,9 +17,7 @@
1296    
1297     struct ps_internal {
1298     struct path_selector_type pst;
1299     -
1300     struct list_head list;
1301     - long use;
1302     };
1303    
1304     #define pst_to_psi(__pst) container_of((__pst), struct ps_internal, pst)
1305     @@ -45,12 +43,8 @@ static struct ps_internal *get_path_selector(const char *name)
1306    
1307     down_read(&_ps_lock);
1308     psi = __find_path_selector_type(name);
1309     - if (psi) {
1310     - if ((psi->use == 0) && !try_module_get(psi->pst.module))
1311     - psi = NULL;
1312     - else
1313     - psi->use++;
1314     - }
1315     + if (psi && !try_module_get(psi->pst.module))
1316     + psi = NULL;
1317     up_read(&_ps_lock);
1318    
1319     return psi;
1320     @@ -84,11 +78,7 @@ void dm_put_path_selector(struct path_selector_type *pst)
1321     if (!psi)
1322     goto out;
1323    
1324     - if (--psi->use == 0)
1325     - module_put(psi->pst.module);
1326     -
1327     - BUG_ON(psi->use < 0);
1328     -
1329     + module_put(psi->pst.module);
1330     out:
1331     up_read(&_ps_lock);
1332     }
1333     @@ -136,11 +126,6 @@ int dm_unregister_path_selector(struct path_selector_type *pst)
1334     return -EINVAL;
1335     }
1336    
1337     - if (psi->use) {
1338     - up_write(&_ps_lock);
1339     - return -ETXTBSY;
1340     - }
1341     -
1342     list_del(&psi->list);
1343    
1344     up_write(&_ps_lock);
1345     diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
1346     index 4d6bc10..62d5948 100644
1347     --- a/drivers/md/dm-raid1.c
1348     +++ b/drivers/md/dm-raid1.c
1349     @@ -145,6 +145,8 @@ struct dm_raid1_read_record {
1350     struct dm_bio_details details;
1351     };
1352    
1353     +static struct kmem_cache *_dm_raid1_read_record_cache;
1354     +
1355     /*
1356     * Every mirror should look like this one.
1357     */
1358     @@ -764,9 +766,9 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
1359     atomic_set(&ms->suspend, 0);
1360     atomic_set(&ms->default_mirror, DEFAULT_MIRROR);
1361    
1362     - len = sizeof(struct dm_raid1_read_record);
1363     - ms->read_record_pool = mempool_create_kmalloc_pool(MIN_READ_RECORDS,
1364     - len);
1365     + ms->read_record_pool = mempool_create_slab_pool(MIN_READ_RECORDS,
1366     + _dm_raid1_read_record_cache);
1367     +
1368     if (!ms->read_record_pool) {
1369     ti->error = "Error creating mirror read_record_pool";
1370     kfree(ms);
1371     @@ -1279,16 +1281,31 @@ static int __init dm_mirror_init(void)
1372     {
1373     int r;
1374    
1375     + _dm_raid1_read_record_cache = KMEM_CACHE(dm_raid1_read_record, 0);
1376     + if (!_dm_raid1_read_record_cache) {
1377     + DMERR("Can't allocate dm_raid1_read_record cache");
1378     + r = -ENOMEM;
1379     + goto bad_cache;
1380     + }
1381     +
1382     r = dm_register_target(&mirror_target);
1383     - if (r < 0)
1384     + if (r < 0) {
1385     DMERR("Failed to register mirror target");
1386     + goto bad_target;
1387     + }
1388     +
1389     + return 0;
1390    
1391     +bad_target:
1392     + kmem_cache_destroy(_dm_raid1_read_record_cache);
1393     +bad_cache:
1394     return r;
1395     }
1396    
1397     static void __exit dm_mirror_exit(void)
1398     {
1399     dm_unregister_target(&mirror_target);
1400     + kmem_cache_destroy(_dm_raid1_read_record_cache);
1401     }
1402    
1403     /* Module hooks */
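
The reworked dm_mirror_init() is the standard kernel goto-unwind shape: each failure label releases exactly the resources acquired before the failing step. In miniature, with stand-in steps:

#include <stdlib.h>
#include <errno.h>

static void *cache;

static int fake_register(void) { return 0; }   /* stand-in step */

static int init_sketch(void)
{
        int r;

        cache = malloc(64);                    /* step 1 */
        if (!cache)
                return -ENOMEM;

        r = fake_register();                   /* step 2 */
        if (r < 0)
                goto bad_register;

        return 0;

bad_register:
        free(cache);                           /* undo step 1 only */
        cache = NULL;
        return r;
}
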
1404     diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
1405     index 65ff82f..462750c 100644
1406     --- a/drivers/md/dm-snap.c
1407     +++ b/drivers/md/dm-snap.c
1408     @@ -972,6 +972,17 @@ static void start_copy(struct dm_snap_pending_exception *pe)
1409     &src, 1, &dest, 0, copy_callback, pe);
1410     }
1411    
1412     +static struct dm_snap_pending_exception *
1413     +__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
1414     +{
1415     + struct dm_snap_exception *e = lookup_exception(&s->pending, chunk);
1416     +
1417     + if (!e)
1418     + return NULL;
1419     +
1420     + return container_of(e, struct dm_snap_pending_exception, e);
1421     +}
1422     +
1423     /*
1424     * Looks to see if this snapshot already has a pending exception
1425     * for this chunk, otherwise it allocates a new one and inserts
1426     @@ -981,40 +992,15 @@ static void start_copy(struct dm_snap_pending_exception *pe)
1427     * this.
1428     */
1429     static struct dm_snap_pending_exception *
1430     -__find_pending_exception(struct dm_snapshot *s, struct bio *bio)
1431     +__find_pending_exception(struct dm_snapshot *s,
1432     + struct dm_snap_pending_exception *pe, chunk_t chunk)
1433     {
1434     - struct dm_snap_exception *e;
1435     - struct dm_snap_pending_exception *pe;
1436     - chunk_t chunk = sector_to_chunk(s, bio->bi_sector);
1437     -
1438     - /*
1439     - * Is there a pending exception for this already ?
1440     - */
1441     - e = lookup_exception(&s->pending, chunk);
1442     - if (e) {
1443     - /* cast the exception to a pending exception */
1444     - pe = container_of(e, struct dm_snap_pending_exception, e);
1445     - goto out;
1446     - }
1447     -
1448     - /*
1449     - * Create a new pending exception, we don't want
1450     - * to hold the lock while we do this.
1451     - */
1452     - up_write(&s->lock);
1453     - pe = alloc_pending_exception(s);
1454     - down_write(&s->lock);
1455     -
1456     - if (!s->valid) {
1457     - free_pending_exception(pe);
1458     - return NULL;
1459     - }
1460     + struct dm_snap_pending_exception *pe2;
1461    
1462     - e = lookup_exception(&s->pending, chunk);
1463     - if (e) {
1464     + pe2 = __lookup_pending_exception(s, chunk);
1465     + if (pe2) {
1466     free_pending_exception(pe);
1467     - pe = container_of(e, struct dm_snap_pending_exception, e);
1468     - goto out;
1469     + return pe2;
1470     }
1471    
1472     pe->e.old_chunk = chunk;
1473     @@ -1032,7 +1018,6 @@ __find_pending_exception(struct dm_snapshot *s, struct bio *bio)
1474     get_pending_exception(pe);
1475     insert_exception(&s->pending, &pe->e);
1476    
1477     - out:
1478     return pe;
1479     }
1480    
1481     @@ -1083,11 +1068,31 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
1482     * writeable.
1483     */
1484     if (bio_rw(bio) == WRITE) {
1485     - pe = __find_pending_exception(s, bio);
1486     + pe = __lookup_pending_exception(s, chunk);
1487     if (!pe) {
1488     - __invalidate_snapshot(s, -ENOMEM);
1489     - r = -EIO;
1490     - goto out_unlock;
1491     + up_write(&s->lock);
1492     + pe = alloc_pending_exception(s);
1493     + down_write(&s->lock);
1494     +
1495     + if (!s->valid) {
1496     + free_pending_exception(pe);
1497     + r = -EIO;
1498     + goto out_unlock;
1499     + }
1500     +
1501     + e = lookup_exception(&s->complete, chunk);
1502     + if (e) {
1503     + free_pending_exception(pe);
1504     + remap_exception(s, e, bio, chunk);
1505     + goto out_unlock;
1506     + }
1507     +
1508     + pe = __find_pending_exception(s, pe, chunk);
1509     + if (!pe) {
1510     + __invalidate_snapshot(s, -ENOMEM);
1511     + r = -EIO;
1512     + goto out_unlock;
1513     + }
1514     }
1515    
1516     remap_exception(s, &pe->e, bio, chunk);
1517     @@ -1217,10 +1222,28 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio)
1518     if (e)
1519     goto next_snapshot;
1520    
1521     - pe = __find_pending_exception(snap, bio);
1522     + pe = __lookup_pending_exception(snap, chunk);
1523     if (!pe) {
1524     - __invalidate_snapshot(snap, -ENOMEM);
1525     - goto next_snapshot;
1526     + up_write(&snap->lock);
1527     + pe = alloc_pending_exception(snap);
1528     + down_write(&snap->lock);
1529     +
1530     + if (!snap->valid) {
1531     + free_pending_exception(pe);
1532     + goto next_snapshot;
1533     + }
1534     +
1535     + e = lookup_exception(&snap->complete, chunk);
1536     + if (e) {
1537     + free_pending_exception(pe);
1538     + goto next_snapshot;
1539     + }
1540     +
1541     + pe = __find_pending_exception(snap, pe, chunk);
1542     + if (!pe) {
1543     + __invalidate_snapshot(snap, -ENOMEM);
1544     + goto next_snapshot;
1545     + }
1546     }
1547    
1548     if (!primary_pe) {
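
Both call sites above now follow the same drop-lock/allocate/recheck discipline: the pending exception is allocated with the snapshot lock released, and because another writer may have inserted one in the meantime, the lookup is repeated before the new object is used. Schematically (helper names invented):

    down_write(&s->lock);
    obj = __lookup(s, chunk);
    if (!obj) {
        up_write(&s->lock);            /* never allocate under the lock */
        new = alloc_obj();
        down_write(&s->lock);

        obj = __lookup(s, chunk);      /* someone may have raced us in */
        if (obj)
            free_obj(new);             /* lost the race; drop our copy */
        else
            obj = insert_obj(s, new, chunk);
    }
    /* ... use obj, still holding s->lock ... */
    up_write(&s->lock);

Splitting __lookup_pending_exception() out of __find_pending_exception() is what lets snapshot_map() and __origin_write() re-run the cheap lookup and the validity checks themselves after reacquiring the lock.
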
1549     diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
1550     index 2fd66c3..e8361b1 100644
1551     --- a/drivers/md/dm-table.c
1552     +++ b/drivers/md/dm-table.c
1553     @@ -399,28 +399,30 @@ static int check_device_area(struct dm_dev_internal *dd, sector_t start,
1554     }
1555    
1556     /*
1557     - * This upgrades the mode on an already open dm_dev. Being
1558     + * This upgrades the mode on an already open dm_dev, being
1559     * careful to leave things as they were if we fail to reopen the
1560     - * device.
1561     + * device and not to touch the existing bdev field in case
1562     + * it is accessed concurrently inside dm_table_any_congested().
1563     */
1564     static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
1565     struct mapped_device *md)
1566     {
1567     int r;
1568     - struct dm_dev_internal dd_copy;
1569     - dev_t dev = dd->dm_dev.bdev->bd_dev;
1570     + struct dm_dev_internal dd_new, dd_old;
1571    
1572     - dd_copy = *dd;
1573     + dd_new = dd_old = *dd;
1574     +
1575     + dd_new.dm_dev.mode |= new_mode;
1576     + dd_new.dm_dev.bdev = NULL;
1577     +
1578     + r = open_dev(&dd_new, dd->dm_dev.bdev->bd_dev, md);
1579     + if (r)
1580     + return r;
1581    
1582     dd->dm_dev.mode |= new_mode;
1583     - dd->dm_dev.bdev = NULL;
1584     - r = open_dev(dd, dev, md);
1585     - if (!r)
1586     - close_dev(&dd_copy, md);
1587     - else
1588     - *dd = dd_copy;
1589     + close_dev(&dd_old, md);
1590    
1591     - return r;
1592     + return 0;
1593     }
1594    
1595     /*
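
The upgrade_mode() rewrite above is a commit-or-rollback pattern: the reopened device goes into a scratch copy first, and the live descriptor is touched only once the open has succeeded, so a concurrent reader of dd->dm_dev.bdev (such as dm_table_any_congested()) never sees a NULL or half-written field. As a sketch with invented names:

    static int upgrade(struct dev_handle *dd, fmode_t new_mode)
    {
        struct dev_handle dd_new = *dd, dd_old = *dd;
        int r;

        dd_new.mode |= new_mode;
        dd_new.bdev = NULL;                 /* scratch copy only */
        r = open_handle(&dd_new, dd->bdev->bd_dev);
        if (r)
            return r;                       /* *dd left untouched */

        dd->mode |= new_mode;               /* commit */
        close_handle(&dd_old);              /* drop the old reference */
        return 0;
    }
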
1596     diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
1597     index 7decf10..db72c94 100644
1598     --- a/drivers/md/dm-target.c
1599     +++ b/drivers/md/dm-target.c
1600     @@ -18,7 +18,6 @@ struct tt_internal {
1601     struct target_type tt;
1602    
1603     struct list_head list;
1604     - long use;
1605     };
1606    
1607     static LIST_HEAD(_targets);
1608     @@ -44,12 +43,8 @@ static struct tt_internal *get_target_type(const char *name)
1609     down_read(&_lock);
1610    
1611     ti = __find_target_type(name);
1612     - if (ti) {
1613     - if ((ti->use == 0) && !try_module_get(ti->tt.module))
1614     - ti = NULL;
1615     - else
1616     - ti->use++;
1617     - }
1618     + if (ti && !try_module_get(ti->tt.module))
1619     + ti = NULL;
1620    
1621     up_read(&_lock);
1622     return ti;
1623     @@ -77,10 +72,7 @@ void dm_put_target_type(struct target_type *t)
1624     struct tt_internal *ti = (struct tt_internal *) t;
1625    
1626     down_read(&_lock);
1627     - if (--ti->use == 0)
1628     - module_put(ti->tt.module);
1629     -
1630     - BUG_ON(ti->use < 0);
1631     + module_put(ti->tt.module);
1632     up_read(&_lock);
1633    
1634     return;
1635     @@ -140,12 +132,6 @@ void dm_unregister_target(struct target_type *t)
1636     BUG();
1637     }
1638    
1639     - if (ti->use) {
1640     - DMCRIT("Attempt to unregister target still in use: %s",
1641     - t->name);
1642     - BUG();
1643     - }
1644     -
1645     list_del(&ti->list);
1646     kfree(ti);
1647    
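
The dm-target.c hunks delete a hand-rolled 'use' counter in favour of plain module reference counting. try_module_get() already fails once the owning module has started unloading, which is the only guarantee the counter provided:

    /* generic idiom; 'owner' is whatever struct module backs the object */
    if (!try_module_get(owner))
        return NULL;        /* module is going away; refuse new users */
    /* ... use the object ... */
    module_put(owner);

With the counter gone, dm_unregister_target() can also drop its still-in-use BUG() check: any live user holds a module reference, so the module cannot reach its exit path while targets are held.
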
1648     diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
1649     index e246642..4a25fa9 100644
1650     --- a/drivers/md/raid1.c
1651     +++ b/drivers/md/raid1.c
1652     @@ -120,6 +120,7 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
1653     goto out_free_pages;
1654    
1655     bio->bi_io_vec[i].bv_page = page;
1656     + bio->bi_vcnt = i+1;
1657     }
1658     }
1659     /* If not user-requests, copy the page pointers to all bios */
1660     @@ -135,9 +136,9 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
1661     return r1_bio;
1662    
1663     out_free_pages:
1664     - for (i=0; i < RESYNC_PAGES ; i++)
1665     - for (j=0 ; j < pi->raid_disks; j++)
1666     - safe_put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page);
1667     + for (j=0 ; j < pi->raid_disks; j++)
1668     + for (i=0; i < r1_bio->bios[j]->bi_vcnt ; i++)
1669     + put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page);
1670     j = -1;
1671     out_free_bio:
1672     while ( ++j < pi->raid_disks )
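
The raid1 fix is an allocate-and-count idiom: bi_vcnt is kept as a high-water mark of how many pages were actually attached, so the error path frees exactly those pages and never touches slots that were left unset. In miniature:

    for (i = 0; i < n; i++) {
        struct page *page = alloc_page(GFP_KERNEL);
        if (!page)
            goto out_free;
        bio->bi_io_vec[i].bv_page = page;
        bio->bi_vcnt = i + 1;              /* record progress for cleanup */
    }
    return 0;

    out_free:
    for (i = 0; i < bio->bi_vcnt; i++)
        put_page(bio->bi_io_vec[i].bv_page);
    return -ENOMEM;

The old cleanup walked the full RESYNC_PAGES range for every bio regardless of how far allocation had actually progressed.
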
1673     diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c
1674     index 8683d10..5b107fa 100644
1675     --- a/drivers/media/video/cx88/cx88-input.c
1676     +++ b/drivers/media/video/cx88/cx88-input.c
1677     @@ -48,8 +48,7 @@ struct cx88_IR {
1678    
1679     /* poll external decoder */
1680     int polling;
1681     - struct work_struct work;
1682     - struct timer_list timer;
1683     + struct delayed_work work;
1684     u32 gpio_addr;
1685     u32 last_gpio;
1686     u32 mask_keycode;
1687     @@ -143,27 +142,19 @@ static void cx88_ir_handle_key(struct cx88_IR *ir)
1688     }
1689     }
1690    
1691     -static void ir_timer(unsigned long data)
1692     -{
1693     - struct cx88_IR *ir = (struct cx88_IR *)data;
1694     -
1695     - schedule_work(&ir->work);
1696     -}
1697     -
1698     static void cx88_ir_work(struct work_struct *work)
1699     {
1700     - struct cx88_IR *ir = container_of(work, struct cx88_IR, work);
1701     + struct cx88_IR *ir = container_of(work, struct cx88_IR, work.work);
1702    
1703     cx88_ir_handle_key(ir);
1704     - mod_timer(&ir->timer, jiffies + msecs_to_jiffies(ir->polling));
1705     + schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling));
1706     }
1707    
1708     void cx88_ir_start(struct cx88_core *core, struct cx88_IR *ir)
1709     {
1710     if (ir->polling) {
1711     - setup_timer(&ir->timer, ir_timer, (unsigned long)ir);
1712     - INIT_WORK(&ir->work, cx88_ir_work);
1713     - schedule_work(&ir->work);
1714     + INIT_DELAYED_WORK(&ir->work, cx88_ir_work);
1715     + schedule_delayed_work(&ir->work, 0);
1716     }
1717     if (ir->sampling) {
1718     core->pci_irqmask |= PCI_INT_IR_SMPINT;
1719     @@ -179,10 +170,8 @@ void cx88_ir_stop(struct cx88_core *core, struct cx88_IR *ir)
1720     core->pci_irqmask &= ~PCI_INT_IR_SMPINT;
1721     }
1722    
1723     - if (ir->polling) {
1724     - del_timer_sync(&ir->timer);
1725     - flush_scheduled_work();
1726     - }
1727     + if (ir->polling)
1728     + cancel_delayed_work_sync(&ir->work);
1729     }
1730    
1731     /* ---------------------------------------------------------------------- */
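
The cx88-input conversion collapses a timer whose only job was to schedule a work item into a single delayed_work. Rescheduling from inside the handler keeps the periodic poll, and one cancel_delayed_work_sync() replaces the old del_timer_sync() plus flush_scheduled_work() pair. A minimal standalone version of the pattern (the 100 ms interval is invented):

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    static struct delayed_work poll_work;

    static void poll_fn(struct work_struct *work)
    {
        /* ... sample the hardware ... */
        schedule_delayed_work(&poll_work, msecs_to_jiffies(100));
    }

    static void poll_start(void)
    {
        INIT_DELAYED_WORK(&poll_work, poll_fn);
        schedule_delayed_work(&poll_work, 0);   /* run immediately */
    }

    static void poll_stop(void)
    {
        cancel_delayed_work_sync(&poll_work);   /* handles self-rearming */
    }
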
1732     diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
1733     index ea3aafb..6fc789e 100644
1734     --- a/drivers/message/fusion/mptbase.c
1735     +++ b/drivers/message/fusion/mptbase.c
1736     @@ -5934,7 +5934,7 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
1737    
1738     /* Initialize the timer
1739     */
1740     - init_timer(&pCfg->timer);
1741     + init_timer_on_stack(&pCfg->timer);
1742     pCfg->timer.data = (unsigned long) ioc;
1743     pCfg->timer.function = mpt_timer_expired;
1744     pCfg->wait_done = 0;
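
init_timer_on_stack() is required whenever a timer_list lives on the stack: with CONFIG_DEBUG_OBJECTS_TIMERS enabled, plain init_timer() on a stack object is reported as corruption. The on-stack variant registers the object with debugobjects and is paired with destroy_timer_on_stack() before the frame is left; a sketch with an invented callback:

    void wait_with_timeout(void)
    {
        struct timer_list t;

        init_timer_on_stack(&t);
        t.function = my_expiry;            /* invented handler */
        t.data = 0;
        t.expires = jiffies + HZ;
        add_timer(&t);
        /* ... wait for completion or expiry ... */
        del_timer_sync(&t);
        destroy_timer_on_stack(&t);        /* pairs with the init */
    }
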
1745     diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
1746     index 3d76686..87045f8 100644
1747     --- a/drivers/net/bonding/bond_main.c
1748     +++ b/drivers/net/bonding/bond_main.c
1749     @@ -2565,7 +2565,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
1750    
1751     for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
1752     if (!targets[i])
1753     - continue;
1754     + break;
1755     pr_debug("basa: target %x\n", targets[i]);
1756     if (list_empty(&bond->vlan_list)) {
1757     pr_debug("basa: empty vlan: arp_send\n");
1758     @@ -2672,7 +2672,6 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
1759     int i;
1760     __be32 *targets = bond->params.arp_targets;
1761    
1762     - targets = bond->params.arp_targets;
1763     for (i = 0; (i < BOND_MAX_ARP_TARGETS) && targets[i]; i++) {
1764     pr_debug("bva: sip %pI4 tip %pI4 t[%d] %pI4 bhti(tip) %d\n",
1765     &sip, &tip, i, &targets[i], bond_has_this_ip(bond, tip));
1766     @@ -3294,7 +3293,7 @@ static void bond_info_show_master(struct seq_file *seq)
1767    
1768     for(i = 0; (i < BOND_MAX_ARP_TARGETS) ;i++) {
1769     if (!bond->params.arp_targets[i])
1770     - continue;
1771     + break;
1772     if (printed)
1773     seq_printf(seq, ",");
1774     seq_printf(seq, " %pI4", &bond->params.arp_targets[i]);
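
Changing continue to break in these two loops is sound only because the arp_ip_target array is now kept densely packed (the bond_sysfs.c hunk below compacts it on removal), so the first zero entry marks the end of the list:

    for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++)
        process_target(targets[i]);        /* invented helper */

Before the compaction, removal could leave holes in the middle of the array, and break would have silently skipped every target after the hole.
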
1775     diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
1776     index 18cf478..d287315 100644
1777     --- a/drivers/net/bonding/bond_sysfs.c
1778     +++ b/drivers/net/bonding/bond_sysfs.c
1779     @@ -684,17 +684,15 @@ static ssize_t bonding_store_arp_targets(struct device *d,
1780     goto out;
1781     }
1782     /* look for an empty slot to put the target in, and check for dupes */
1783     - for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
1784     + for (i = 0; (i < BOND_MAX_ARP_TARGETS) && !done; i++) {
1785     if (targets[i] == newtarget) { /* duplicate */
1786     printk(KERN_ERR DRV_NAME
1787     ": %s: ARP target %pI4 is already present\n",
1788     bond->dev->name, &newtarget);
1789     - if (done)
1790     - targets[i] = 0;
1791     ret = -EINVAL;
1792     goto out;
1793     }
1794     - if (targets[i] == 0 && !done) {
1795     + if (targets[i] == 0) {
1796     printk(KERN_INFO DRV_NAME
1797     ": %s: adding ARP target %pI4.\n",
1798     bond->dev->name, &newtarget);
1799     @@ -720,12 +718,16 @@ static ssize_t bonding_store_arp_targets(struct device *d,
1800     goto out;
1801     }
1802    
1803     - for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
1804     + for (i = 0; (i < BOND_MAX_ARP_TARGETS) && !done; i++) {
1805     if (targets[i] == newtarget) {
1806     + int j;
1807     printk(KERN_INFO DRV_NAME
1808     ": %s: removing ARP target %pI4.\n",
1809     bond->dev->name, &newtarget);
1810     - targets[i] = 0;
1811     + for (j = i; (j < (BOND_MAX_ARP_TARGETS-1)) && targets[j+1]; j++)
1812     + targets[j] = targets[j+1];
1813     +
1814     + targets[j] = 0;
1815     done = 1;
1816     }
1817     }
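
The removal path now shifts the tail of the array left so it stays packed and zero-terminated, the invariant the bond_main.c loops above depend on. The compaction step in isolation:

    /* delete arr[i] from a zero-terminated array of at most N entries */
    for (j = i; j < N - 1 && arr[j + 1]; j++)
        arr[j] = arr[j + 1];
    arr[j] = 0;                            /* re-terminate */

The added !done test in both loop conditions also makes the one-slot-per-write intent explicit.
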
1818     diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
1819     index 67f87a7..090ada6 100644
1820     --- a/drivers/net/ixgbe/ixgbe_ethtool.c
1821     +++ b/drivers/net/ixgbe/ixgbe_ethtool.c
1822     @@ -691,9 +691,10 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
1823     struct ethtool_ringparam *ring)
1824     {
1825     struct ixgbe_adapter *adapter = netdev_priv(netdev);
1826     - struct ixgbe_ring *temp_ring;
1827     + struct ixgbe_ring *temp_tx_ring, *temp_rx_ring;
1828     int i, err;
1829     u32 new_rx_count, new_tx_count;
1830     + bool need_update = false;
1831    
1832     if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
1833     return -EINVAL;
1834     @@ -712,80 +713,94 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
1835     return 0;
1836     }
1837    
1838     - temp_ring = kcalloc(adapter->num_tx_queues,
1839     - sizeof(struct ixgbe_ring), GFP_KERNEL);
1840     - if (!temp_ring)
1841     - return -ENOMEM;
1842     -
1843     while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
1844     msleep(1);
1845    
1846     - if (new_tx_count != adapter->tx_ring->count) {
1847     + temp_tx_ring = kcalloc(adapter->num_tx_queues,
1848     + sizeof(struct ixgbe_ring), GFP_KERNEL);
1849     + if (!temp_tx_ring) {
1850     + err = -ENOMEM;
1851     + goto err_setup;
1852     + }
1853     +
1854     + if (new_tx_count != adapter->tx_ring_count) {
1855     + memcpy(temp_tx_ring, adapter->tx_ring,
1856     + adapter->num_tx_queues * sizeof(struct ixgbe_ring));
1857     for (i = 0; i < adapter->num_tx_queues; i++) {
1858     - temp_ring[i].count = new_tx_count;
1859     - err = ixgbe_setup_tx_resources(adapter, &temp_ring[i]);
1860     + temp_tx_ring[i].count = new_tx_count;
1861     + err = ixgbe_setup_tx_resources(adapter,
1862     + &temp_tx_ring[i]);
1863     if (err) {
1864     while (i) {
1865     i--;
1866     ixgbe_free_tx_resources(adapter,
1867     - &temp_ring[i]);
1868     + &temp_tx_ring[i]);
1869     }
1870     goto err_setup;
1871     }
1872     - temp_ring[i].v_idx = adapter->tx_ring[i].v_idx;
1873     + temp_tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
1874     }
1875     - if (netif_running(netdev))
1876     - netdev->netdev_ops->ndo_stop(netdev);
1877     - ixgbe_reset_interrupt_capability(adapter);
1878     - ixgbe_napi_del_all(adapter);
1879     - INIT_LIST_HEAD(&netdev->napi_list);
1880     - kfree(adapter->tx_ring);
1881     - adapter->tx_ring = temp_ring;
1882     - temp_ring = NULL;
1883     - adapter->tx_ring_count = new_tx_count;
1884     + need_update = true;
1885     }
1886    
1887     - temp_ring = kcalloc(adapter->num_rx_queues,
1888     - sizeof(struct ixgbe_ring), GFP_KERNEL);
1889     - if (!temp_ring) {
1890     - if (netif_running(netdev))
1891     - netdev->netdev_ops->ndo_open(netdev);
1892     - return -ENOMEM;
1893     + temp_rx_ring = kcalloc(adapter->num_rx_queues,
1894     + sizeof(struct ixgbe_ring), GFP_KERNEL);
1895     + if ((!temp_rx_ring) && (need_update)) {
1896     + for (i = 0; i < adapter->num_tx_queues; i++)
1897     + ixgbe_free_tx_resources(adapter, &temp_tx_ring[i]);
1898     + kfree(temp_tx_ring);
1899     + err = -ENOMEM;
1900     + goto err_setup;
1901     }
1902    
1903     - if (new_rx_count != adapter->rx_ring->count) {
1904     + if (new_rx_count != adapter->rx_ring_count) {
1905     + memcpy(temp_rx_ring, adapter->rx_ring,
1906     + adapter->num_rx_queues * sizeof(struct ixgbe_ring));
1907     for (i = 0; i < adapter->num_rx_queues; i++) {
1908     - temp_ring[i].count = new_rx_count;
1909     - err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
1910     + temp_rx_ring[i].count = new_rx_count;
1911     + err = ixgbe_setup_rx_resources(adapter,
1912     + &temp_rx_ring[i]);
1913     if (err) {
1914     while (i) {
1915     i--;
1916     ixgbe_free_rx_resources(adapter,
1917     - &temp_ring[i]);
1918     + &temp_rx_ring[i]);
1919     }
1920     goto err_setup;
1921     }
1922     - temp_ring[i].v_idx = adapter->rx_ring[i].v_idx;
1923     + temp_rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
1924     }
1925     + need_update = true;
1926     + }
1927     +
1928     + /* if rings need to be updated, here's the place to do it in one shot */
1929     + if (need_update) {
1930     if (netif_running(netdev))
1931     - netdev->netdev_ops->ndo_stop(netdev);
1932     - ixgbe_reset_interrupt_capability(adapter);
1933     - ixgbe_napi_del_all(adapter);
1934     - INIT_LIST_HEAD(&netdev->napi_list);
1935     - kfree(adapter->rx_ring);
1936     - adapter->rx_ring = temp_ring;
1937     - temp_ring = NULL;
1938     -
1939     - adapter->rx_ring_count = new_rx_count;
1940     + ixgbe_down(adapter);
1941     +
1942     + /* tx */
1943     + if (new_tx_count != adapter->tx_ring_count) {
1944     + kfree(adapter->tx_ring);
1945     + adapter->tx_ring = temp_tx_ring;
1946     + temp_tx_ring = NULL;
1947     + adapter->tx_ring_count = new_tx_count;
1948     + }
1949     +
1950     + /* rx */
1951     + if (new_rx_count != adapter->rx_ring_count) {
1952     + kfree(adapter->rx_ring);
1953     + adapter->rx_ring = temp_rx_ring;
1954     + temp_rx_ring = NULL;
1955     + adapter->rx_ring_count = new_rx_count;
1956     + }
1957     }
1958    
1959     /* success! */
1960     err = 0;
1961     -err_setup:
1962     - ixgbe_init_interrupt_scheme(adapter);
1963     if (netif_running(netdev))
1964     - netdev->netdev_ops->ndo_open(netdev);
1965     + ixgbe_up(adapter);
1966    
1967     +err_setup:
1968     clear_bit(__IXGBE_RESETTING, &adapter->state);
1969     return err;
1970     }
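
The reworked ixgbe_set_ringparam() is a stage-then-swap: both replacement ring arrays are fully allocated and set up off to the side, and only if everything succeeded is the device taken down for the brief pointer swap, so any failure leaves the running configuration untouched. Stripped to its shape (helper names invented):

    new_rings = kcalloc(n, sizeof(*new_rings), GFP_KERNEL);
    if (!new_rings)
        goto err;
    if (setup_all(new_rings, n)) {         /* frees its own partial work */
        kfree(new_rings);
        goto err;
    }

    if (netif_running(netdev))
        device_down(adapter);              /* offline only for the swap */
    old = adapter->rings;
    adapter->rings = new_rings;
    kfree(old);
    if (netif_running(netdev))
        device_up(adapter);

Using ixgbe_down()/ixgbe_up() instead of ndo_stop()/ndo_open() also avoids tearing down and rebuilding the interrupt and NAPI state just to resize rings.
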
1971     diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
1972     index 43fedb9..9201e5a 100644
1973     --- a/drivers/net/r8169.c
1974     +++ b/drivers/net/r8169.c
1975     @@ -2075,8 +2075,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1976     if (!tp->pcie_cap && netif_msg_probe(tp))
1977     dev_info(&pdev->dev, "no PCI Express capability\n");
1978    
1979     - /* Unneeded ? Don't mess with Mrs. Murphy. */
1980     - rtl8169_irq_mask_and_ack(ioaddr);
1981     + RTL_W16(IntrMask, 0x0000);
1982    
1983     /* Soft reset the chip. */
1984     RTL_W8(ChipCmd, CmdReset);
1985     @@ -2088,6 +2087,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1986     msleep_interruptible(1);
1987     }
1988    
1989     + RTL_W16(IntrStatus, 0xffff);
1990     +
1991     /* Identify chip attached to board */
1992     rtl8169_get_mac_version(tp, ioaddr);
1993    
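
The r8169 change makes the reset sequence explicit about interrupt state: mask every source before the soft reset, then acknowledge whatever latched while masked, so no stale status bit fires the instant interrupts are re-enabled. The register names are the driver's own; the ordering is the point:

    RTL_W16(IntrMask, 0x0000);     /* 1: no new interrupts delivered */
    RTL_W8(ChipCmd, CmdReset);     /* 2: soft-reset the chip */
    /* ... poll until the reset bit clears ... */
    RTL_W16(IntrStatus, 0xffff);   /* 3: ack anything that latched */
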
1994     diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
1995     index ab0e09b..655e9b2 100644
1996     --- a/drivers/net/sfc/efx.c
1997     +++ b/drivers/net/sfc/efx.c
1998     @@ -424,10 +424,6 @@ static void efx_start_channel(struct efx_channel *channel)
1999    
2000     EFX_LOG(channel->efx, "starting chan %d\n", channel->channel);
2001    
2002     - if (!(channel->efx->net_dev->flags & IFF_UP))
2003     - netif_napi_add(channel->napi_dev, &channel->napi_str,
2004     - efx_poll, napi_weight);
2005     -
2006     /* The interrupt handler for this channel may set work_pending
2007     * as soon as we enable it. Make sure it's cleared before
2008     * then. Similarly, make sure it sees the enabled flag set. */
2009     @@ -1273,6 +1269,8 @@ static int efx_init_napi(struct efx_nic *efx)
2010    
2011     efx_for_each_channel(channel, efx) {
2012     channel->napi_dev = efx->net_dev;
2013     + netif_napi_add(channel->napi_dev, &channel->napi_str,
2014     + efx_poll, napi_weight);
2015     rc = efx_lro_init(&channel->lro_mgr, efx);
2016     if (rc)
2017     goto err;
2018     @@ -1289,6 +1287,8 @@ static void efx_fini_napi(struct efx_nic *efx)
2019    
2020     efx_for_each_channel(channel, efx) {
2021     efx_lro_fini(&channel->lro_mgr);
2022     + if (channel->napi_dev)
2023     + netif_napi_del(&channel->napi_str);
2024     channel->napi_dev = NULL;
2025     }
2026     }
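
The sfc fix moves netif_napi_add() out of the channel start path, where it ran only conditionally and could be repeated, into efx_init_napi(), with a matching netif_napi_del() in efx_fini_napi(). NAPI registration then has the usual symmetric init/fini shape (the weight value here is invented):

    efx_for_each_channel(channel, efx) {
        channel->napi_dev = efx->net_dev;
        netif_napi_add(channel->napi_dev, &channel->napi_str,
                       efx_poll, 64);
    }
    /* ... and on teardown ... */
    efx_for_each_channel(channel, efx) {
        netif_napi_del(&channel->napi_str);
        channel->napi_dev = NULL;
    }
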
2027     diff --git a/drivers/net/skge.c b/drivers/net/skge.c
2028     index c9dbb06..2bbb44b 100644
2029     --- a/drivers/net/skge.c
2030     +++ b/drivers/net/skge.c
2031     @@ -2674,7 +2674,7 @@ static int skge_down(struct net_device *dev)
2032     if (netif_msg_ifdown(skge))
2033     printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
2034    
2035     - netif_stop_queue(dev);
2036     + netif_tx_disable(dev);
2037    
2038     if (hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC)
2039     del_timer_sync(&skge->link_timer);
2040     @@ -2881,7 +2881,6 @@ static void skge_tx_clean(struct net_device *dev)
2041     }
2042    
2043     skge->tx_ring.to_clean = e;
2044     - netif_wake_queue(dev);
2045     }
2046    
2047     static void skge_tx_timeout(struct net_device *dev)
2048     @@ -2893,6 +2892,7 @@ static void skge_tx_timeout(struct net_device *dev)
2049    
2050     skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP);
2051     skge_tx_clean(dev);
2052     + netif_wake_queue(dev);
2053     }
2054    
2055     static int skge_change_mtu(struct net_device *dev, int new_mtu)
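
The skge change swaps two primitives with different guarantees: netif_stop_queue() only marks the queue stopped, so a transmit already running on another CPU can still be inside the xmit path when skge_down() starts freeing buffers, whereas netif_tx_disable() takes the tx lock while stopping the queue, so once it returns no transmit is in flight:

    netif_tx_disable(dev);    /* stop AND synchronize against tx path */

Moving netif_wake_queue() from skge_tx_clean() to skge_tx_timeout() keeps the wakeup where it is wanted (timeout recovery) and out of the ifdown path, which must not re-enable the queue it just disabled.
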
2056     diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
2057     index 39ecf3b..820fdb2 100644
2058     --- a/drivers/net/wireless/rt2x00/rt2x00.h
2059     +++ b/drivers/net/wireless/rt2x00/rt2x00.h
2060     @@ -687,8 +687,7 @@ struct rt2x00_dev {
2061     */
2062     #ifdef CONFIG_RT2X00_LIB_RFKILL
2063     unsigned long rfkill_state;
2064     -#define RFKILL_STATE_ALLOCATED 1
2065     -#define RFKILL_STATE_REGISTERED 2
2066     +#define RFKILL_STATE_REGISTERED 1
2067     struct rfkill *rfkill;
2068     struct delayed_work rfkill_work;
2069     #endif /* CONFIG_RT2X00_LIB_RFKILL */
2070     diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
2071     index 87c0f2c..e694bb7 100644
2072     --- a/drivers/net/wireless/rt2x00/rt2x00dev.c
2073     +++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
2074     @@ -1105,7 +1105,6 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
2075     * Register extra components.
2076     */
2077     rt2x00leds_register(rt2x00dev);
2078     - rt2x00rfkill_allocate(rt2x00dev);
2079     rt2x00debug_register(rt2x00dev);
2080    
2081     set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
2082     @@ -1137,7 +1136,6 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
2083     * Free extra components
2084     */
2085     rt2x00debug_deregister(rt2x00dev);
2086     - rt2x00rfkill_free(rt2x00dev);
2087     rt2x00leds_unregister(rt2x00dev);
2088    
2089     /*
2090     diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
2091     index 86cd26f..49309d4 100644
2092     --- a/drivers/net/wireless/rt2x00/rt2x00lib.h
2093     +++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
2094     @@ -260,8 +260,6 @@ static inline void rt2x00crypto_rx_insert_iv(struct sk_buff *skb,
2095     #ifdef CONFIG_RT2X00_LIB_RFKILL
2096     void rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev);
2097     void rt2x00rfkill_unregister(struct rt2x00_dev *rt2x00dev);
2098     -void rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev);
2099     -void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev);
2100     #else
2101     static inline void rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev)
2102     {
2103     @@ -270,14 +268,6 @@ static inline void rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev)
2104     static inline void rt2x00rfkill_unregister(struct rt2x00_dev *rt2x00dev)
2105     {
2106     }
2107     -
2108     -static inline void rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev)
2109     -{
2110     -}
2111     -
2112     -static inline void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev)
2113     -{
2114     -}
2115     #endif /* CONFIG_RT2X00_LIB_RFKILL */
2116    
2117     /*
2118     diff --git a/drivers/net/wireless/rt2x00/rt2x00rfkill.c b/drivers/net/wireless/rt2x00/rt2x00rfkill.c
2119     index 3298cae..08ffc6d 100644
2120     --- a/drivers/net/wireless/rt2x00/rt2x00rfkill.c
2121     +++ b/drivers/net/wireless/rt2x00/rt2x00rfkill.c
2122     @@ -94,14 +94,50 @@ static void rt2x00rfkill_poll(struct work_struct *work)
2123     &rt2x00dev->rfkill_work, RFKILL_POLL_INTERVAL);
2124     }
2125    
2126     +static int rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev)
2127     +{
2128     + struct device *dev = wiphy_dev(rt2x00dev->hw->wiphy);
2129     +
2130     + rt2x00dev->rfkill = rfkill_allocate(dev, RFKILL_TYPE_WLAN);
2131     + if (!rt2x00dev->rfkill)
2132     + return -ENOMEM;
2133     +
2134     + rt2x00dev->rfkill->name = rt2x00dev->ops->name;
2135     + rt2x00dev->rfkill->data = rt2x00dev;
2136     + rt2x00dev->rfkill->toggle_radio = rt2x00rfkill_toggle_radio;
2137     + if (test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags)) {
2138     + rt2x00dev->rfkill->get_state = rt2x00rfkill_get_state;
2139     + rt2x00dev->rfkill->state =
2140     + rt2x00dev->ops->lib->rfkill_poll(rt2x00dev) ?
2141     + RFKILL_STATE_SOFT_BLOCKED : RFKILL_STATE_UNBLOCKED;
2142     + } else {
2143     + rt2x00dev->rfkill->state = RFKILL_STATE_UNBLOCKED;
2144     + }
2145     +
2146     + INIT_DELAYED_WORK(&rt2x00dev->rfkill_work, rt2x00rfkill_poll);
2147     +
2148     + return 0;
2149     +}
2150     +
2151     +static void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev)
2152     +{
2153     + rfkill_free(rt2x00dev->rfkill);
2154     + rt2x00dev->rfkill = NULL;
2155     +}
2156     +
2157     void rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev)
2158     {
2159     - if (!test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state) ||
2160     - test_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state))
2161     + if (test_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state))
2162     + return;
2163     +
2164     + if (rt2x00rfkill_allocate(rt2x00dev)) {
2165     + ERROR(rt2x00dev, "Failed to allocate rfkill handler.\n");
2166     return;
2167     + }
2168    
2169     if (rfkill_register(rt2x00dev->rfkill)) {
2170     ERROR(rt2x00dev, "Failed to register rfkill handler.\n");
2171     + rt2x00rfkill_free(rt2x00dev);
2172     return;
2173     }
2174    
2175     @@ -117,8 +153,7 @@ void rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev)
2176    
2177     void rt2x00rfkill_unregister(struct rt2x00_dev *rt2x00dev)
2178     {
2179     - if (!test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state) ||
2180     - !test_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state))
2181     + if (!test_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state))
2182     return;
2183    
2184     cancel_delayed_work_sync(&rt2x00dev->rfkill_work);
2185     @@ -127,46 +162,3 @@ void rt2x00rfkill_unregister(struct rt2x00_dev *rt2x00dev)
2186    
2187     __clear_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state);
2188     }
2189     -
2190     -void rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev)
2191     -{
2192     - struct device *dev = wiphy_dev(rt2x00dev->hw->wiphy);
2193     -
2194     - if (test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state))
2195     - return;
2196     -
2197     - rt2x00dev->rfkill = rfkill_allocate(dev, RFKILL_TYPE_WLAN);
2198     - if (!rt2x00dev->rfkill) {
2199     - ERROR(rt2x00dev, "Failed to allocate rfkill handler.\n");
2200     - return;
2201     - }
2202     -
2203     - __set_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state);
2204     -
2205     - rt2x00dev->rfkill->name = rt2x00dev->ops->name;
2206     - rt2x00dev->rfkill->data = rt2x00dev;
2207     - rt2x00dev->rfkill->toggle_radio = rt2x00rfkill_toggle_radio;
2208     - if (test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags)) {
2209     - rt2x00dev->rfkill->get_state = rt2x00rfkill_get_state;
2210     - rt2x00dev->rfkill->state =
2211     - rt2x00dev->ops->lib->rfkill_poll(rt2x00dev) ?
2212     - RFKILL_STATE_SOFT_BLOCKED : RFKILL_STATE_UNBLOCKED;
2213     - } else {
2214     - rt2x00dev->rfkill->state = RFKILL_STATE_UNBLOCKED;
2215     - }
2216     -
2217     - INIT_DELAYED_WORK(&rt2x00dev->rfkill_work, rt2x00rfkill_poll);
2218     -
2219     - return;
2220     -}
2221     -
2222     -void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev)
2223     -{
2224     - if (!test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state))
2225     - return;
2226     -
2227     - cancel_delayed_work_sync(&rt2x00dev->rfkill_work);
2228     -
2229     - rfkill_free(rt2x00dev->rfkill);
2230     - rt2x00dev->rfkill = NULL;
2231     -}
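
Across the four rt2x00 files, the separately exported allocate/free steps and their RFKILL_STATE_ALLOCATED flag disappear; allocation becomes an internal detail of registration and is unwound on any failure, so callers see a single registered/unregistered state. The new shape, compressed (wrapper names shortened for illustration):

    void register_rfkill(struct rt2x00_dev *dev)
    {
        if (test_bit(RFKILL_STATE_REGISTERED, &dev->rfkill_state))
            return;
        if (allocate_rfkill(dev))          /* was a separate public step */
            return;
        if (rfkill_register(dev->rfkill)) {
            free_rfkill(dev);              /* unwind the allocation */
            return;
        }
        __set_bit(RFKILL_STATE_REGISTERED, &dev->rfkill_state);
    }

Collapsing two lifetime flags into one removes the states in which the old code could leak an allocated-but-never-registered rfkill structure.
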
2232     diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
2233     index 55ec44a..31cfd86 100644
2234     --- a/drivers/pci/probe.c
2235     +++ b/drivers/pci/probe.c
2236     @@ -847,6 +847,11 @@ int pci_cfg_space_size(struct pci_dev *dev)
2237     {
2238     int pos;
2239     u32 status;
2240     + u16 class;
2241     +
2242     + class = dev->class >> 8;
2243     + if (class == PCI_CLASS_BRIDGE_HOST)
2244     + return pci_cfg_space_size_ext(dev);
2245    
2246     pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
2247     if (!pos) {
2248     @@ -936,7 +941,6 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
2249     dev->multifunction = !!(hdr_type & 0x80);
2250     dev->vendor = l & 0xffff;
2251     dev->device = (l >> 16) & 0xffff;
2252     - dev->cfg_size = pci_cfg_space_size(dev);
2253     dev->error_state = pci_channel_io_normal;
2254     set_pcie_port_type(dev);
2255    
2256     @@ -952,6 +956,9 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
2257     return NULL;
2258     }
2259    
2260     + /* need to have dev->class ready */
2261     + dev->cfg_size = pci_cfg_space_size(dev);
2262     +
2263     return dev;
2264     }
2265    
2266     diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
2267     index a6a42e8..60fbef2 100644
2268     --- a/drivers/platform/x86/acer-wmi.c
2269     +++ b/drivers/platform/x86/acer-wmi.c
2270     @@ -225,6 +225,25 @@ static struct quirk_entry quirk_fujitsu_amilo_li_1718 = {
2271     .wireless = 2,
2272     };
2273    
2274     +/* The Aspire One has a dummy ACPI-WMI interface - disable it */
2275     +static struct dmi_system_id __devinitdata acer_blacklist[] = {
2276     + {
2277     + .ident = "Acer Aspire One (SSD)",
2278     + .matches = {
2279     + DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
2280     + DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"),
2281     + },
2282     + },
2283     + {
2284     + .ident = "Acer Aspire One (HDD)",
2285     + .matches = {
2286     + DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
2287     + DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"),
2288     + },
2289     + },
2290     + {}
2291     +};
2292     +
2293     static struct dmi_system_id acer_quirks[] = {
2294     {
2295     .callback = dmi_matched,
2296     @@ -1254,6 +1273,12 @@ static int __init acer_wmi_init(void)
2297    
2298     printk(ACER_INFO "Acer Laptop ACPI-WMI Extras\n");
2299    
2300     + if (dmi_check_system(acer_blacklist)) {
2301     + printk(ACER_INFO "Blacklisted hardware detected - "
2302     + "not loading\n");
2303     + return -ENODEV;
2304     + }
2305     +
2306     find_quirks();
2307    
2308     /*
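
The blacklist uses the standard DMI idiom: a zero-terminated dmi_system_id table matched on vendor plus product name, checked once at module init so the driver backs off before touching the hardware. Generic form (the strings are invented):

    static struct dmi_system_id blacklist[] = {
        {
            .ident = "Some machine",
            .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "Vendor"),
                DMI_MATCH(DMI_PRODUCT_NAME, "Model"),
            },
        },
        { }                     /* terminator -- do not omit */
    };

    if (dmi_check_system(blacklist))
        return -ENODEV;

dmi_check_system() returns the number of entries that matched, so any nonzero result means the running machine is on the list.
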
2309     diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
2310     index 809d32d..ca4467c 100644
2311     --- a/drivers/scsi/libiscsi.c
2312     +++ b/drivers/scsi/libiscsi.c
2313     @@ -1944,12 +1944,14 @@ iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
2314     num_arrays++;
2315     q->pool = kzalloc(num_arrays * max * sizeof(void*), GFP_KERNEL);
2316     if (q->pool == NULL)
2317     - goto enomem;
2318     + return -ENOMEM;
2319    
2320     q->queue = kfifo_init((void*)q->pool, max * sizeof(void*),
2321     GFP_KERNEL, NULL);
2322     - if (q->queue == ERR_PTR(-ENOMEM))
2323     + if (IS_ERR(q->queue)) {
2324     + q->queue = NULL;
2325     goto enomem;
2326     + }
2327    
2328     for (i = 0; i < max; i++) {
2329     q->pool[i] = kzalloc(item_size, GFP_KERNEL);
2330     @@ -1979,8 +1981,7 @@ void iscsi_pool_free(struct iscsi_pool *q)
2331    
2332     for (i = 0; i < q->max; i++)
2333     kfree(q->pool[i]);
2334     - if (q->pool)
2335     - kfree(q->pool);
2336     + kfree(q->pool);
2337     kfree(q->queue);
2338     }
2339     EXPORT_SYMBOL_GPL(iscsi_pool_free);
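
Three small idioms in one hunk: return the error directly when there is nothing yet to unwind; test error pointers with IS_ERR() instead of comparing against one hand-picked ERR_PTR value; and rely on kfree(NULL) being a no-op rather than guarding it:

    q->queue = kfifo_init((void *)q->pool, max * sizeof(void *),
                          GFP_KERNEL, NULL);
    if (IS_ERR(q->queue)) {
        q->queue = NULL;     /* don't leave an error pointer behind */
        goto enomem;
    }
    /* ... */
    kfree(q->pool);          /* safe even if q->pool is NULL */

Nulling q->queue before the goto matters because the enomem path calls iscsi_pool_free(), which would otherwise hand the error pointer to kfree().
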
2340     diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
2341     index 516925d..5e390d2 100644
2342     --- a/drivers/scsi/sg.c
2343     +++ b/drivers/scsi/sg.c
2344     @@ -101,6 +101,7 @@ static int scatter_elem_sz_prev = SG_SCATTER_SZ;
2345     #define SG_SECTOR_MSK (SG_SECTOR_SZ - 1)
2346    
2347     static int sg_add(struct device *, struct class_interface *);
2348     +static void sg_device_destroy(struct kref *kref);
2349     static void sg_remove(struct device *, struct class_interface *);
2350    
2351     static DEFINE_IDR(sg_index_idr);
2352     @@ -137,6 +138,7 @@ typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
2353     volatile char done; /* 0->before bh, 1->before read, 2->read */
2354     struct request *rq;
2355     struct bio *bio;
2356     + struct execute_work ew;
2357     } Sg_request;
2358    
2359     typedef struct sg_fd { /* holds the state of a file descriptor */
2360     @@ -158,6 +160,8 @@ typedef struct sg_fd { /* holds the state of a file descriptor */
2361     char next_cmd_len; /* 0 -> automatic (def), >0 -> use on next write() */
2362     char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */
2363     char mmap_called; /* 0 -> mmap() never called on this fd */
2364     + struct kref f_ref;
2365     + struct execute_work ew;
2366     } Sg_fd;
2367    
2368     typedef struct sg_device { /* holds the state of each scsi generic device */
2369     @@ -171,6 +175,7 @@ typedef struct sg_device { /* holds the state of each scsi generic device */
2370     char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
2371     struct gendisk *disk;
2372     struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg<n>] */
2373     + struct kref d_ref;
2374     } Sg_device;
2375    
2376     static int sg_fasync(int fd, struct file *filp, int mode);
2377     @@ -185,7 +190,7 @@ static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count,
2378     Sg_request * srp);
2379     static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
2380     const char __user *buf, size_t count, int blocking,
2381     - int read_only, Sg_request **o_srp);
2382     + int read_only, int sg_io_owned, Sg_request **o_srp);
2383     static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
2384     unsigned char *cmnd, int timeout, int blocking);
2385     static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
2386     @@ -194,13 +199,14 @@ static void sg_build_reserve(Sg_fd * sfp, int req_size);
2387     static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
2388     static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
2389     static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev);
2390     -static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
2391     -static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
2392     +static void sg_remove_sfp(struct kref *);
2393     static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
2394     static Sg_request *sg_add_request(Sg_fd * sfp);
2395     static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
2396     static int sg_res_in_use(Sg_fd * sfp);
2397     +static Sg_device *sg_lookup_dev(int dev);
2398     static Sg_device *sg_get_dev(int dev);
2399     +static void sg_put_dev(Sg_device *sdp);
2400     #ifdef CONFIG_SCSI_PROC_FS
2401     static int sg_last_dev(void);
2402     #endif
2403     @@ -237,22 +243,17 @@ sg_open(struct inode *inode, struct file *filp)
2404     nonseekable_open(inode, filp);
2405     SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));
2406     sdp = sg_get_dev(dev);
2407     - if ((!sdp) || (!sdp->device)) {
2408     - unlock_kernel();
2409     - return -ENXIO;
2410     - }
2411     - if (sdp->detached) {
2412     - unlock_kernel();
2413     - return -ENODEV;
2414     + if (IS_ERR(sdp)) {
2415     + retval = PTR_ERR(sdp);
2416     + sdp = NULL;
2417     + goto sg_put;
2418     }
2419    
2420     /* This driver's module count bumped by fops_get in <linux/fs.h> */
2421     /* Prevent the device driver from vanishing while we sleep */
2422     retval = scsi_device_get(sdp->device);
2423     - if (retval) {
2424     - unlock_kernel();
2425     - return retval;
2426     - }
2427     + if (retval)
2428     + goto sg_put;
2429    
2430     if (!((flags & O_NONBLOCK) ||
2431     scsi_block_when_processing_errors(sdp->device))) {
2432     @@ -303,16 +304,20 @@ sg_open(struct inode *inode, struct file *filp)
2433     if ((sfp = sg_add_sfp(sdp, dev)))
2434     filp->private_data = sfp;
2435     else {
2436     - if (flags & O_EXCL)
2437     + if (flags & O_EXCL) {
2438     sdp->exclude = 0; /* undo if error */
2439     + wake_up_interruptible(&sdp->o_excl_wait);
2440     + }
2441     retval = -ENOMEM;
2442     goto error_out;
2443     }
2444     - unlock_kernel();
2445     - return 0;
2446     -
2447     - error_out:
2448     - scsi_device_put(sdp->device);
2449     + retval = 0;
2450     +error_out:
2451     + if (retval)
2452     + scsi_device_put(sdp->device);
2453     +sg_put:
2454     + if (sdp)
2455     + sg_put_dev(sdp);
2456     unlock_kernel();
2457     return retval;
2458     }
2459     @@ -327,13 +332,13 @@ sg_release(struct inode *inode, struct file *filp)
2460     if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
2461     return -ENXIO;
2462     SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
2463     - if (0 == sg_remove_sfp(sdp, sfp)) { /* Returns 1 when sdp gone */
2464     - if (!sdp->detached) {
2465     - scsi_device_put(sdp->device);
2466     - }
2467     - sdp->exclude = 0;
2468     - wake_up_interruptible(&sdp->o_excl_wait);
2469     - }
2470     +
2471     + sfp->closed = 1;
2472     +
2473     + sdp->exclude = 0;
2474     + wake_up_interruptible(&sdp->o_excl_wait);
2475     +
2476     + kref_put(&sfp->f_ref, sg_remove_sfp);
2477     return 0;
2478     }
2479    
2480     @@ -557,7 +562,8 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
2481     return -EFAULT;
2482     blocking = !(filp->f_flags & O_NONBLOCK);
2483     if (old_hdr.reply_len < 0)
2484     - return sg_new_write(sfp, filp, buf, count, blocking, 0, NULL);
2485     + return sg_new_write(sfp, filp, buf, count,
2486     + blocking, 0, 0, NULL);
2487     if (count < (SZ_SG_HEADER + 6))
2488     return -EIO; /* The minimum scsi command length is 6 bytes. */
2489    
2490     @@ -638,7 +644,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
2491    
2492     static ssize_t
2493     sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
2494     - size_t count, int blocking, int read_only,
2495     + size_t count, int blocking, int read_only, int sg_io_owned,
2496     Sg_request **o_srp)
2497     {
2498     int k;
2499     @@ -658,6 +664,7 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
2500     SCSI_LOG_TIMEOUT(1, printk("sg_new_write: queue full\n"));
2501     return -EDOM;
2502     }
2503     + srp->sg_io_owned = sg_io_owned;
2504     hp = &srp->header;
2505     if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) {
2506     sg_remove_request(sfp, srp);
2507     @@ -755,24 +762,13 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
2508     hp->duration = jiffies_to_msecs(jiffies);
2509    
2510     srp->rq->timeout = timeout;
2511     + kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
2512     blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
2513     srp->rq, 1, sg_rq_end_io);
2514     return 0;
2515     }
2516    
2517     static int
2518     -sg_srp_done(Sg_request *srp, Sg_fd *sfp)
2519     -{
2520     - unsigned long iflags;
2521     - int done;
2522     -
2523     - read_lock_irqsave(&sfp->rq_list_lock, iflags);
2524     - done = srp->done;
2525     - read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2526     - return done;
2527     -}
2528     -
2529     -static int
2530     sg_ioctl(struct inode *inode, struct file *filp,
2531     unsigned int cmd_in, unsigned long arg)
2532     {
2533     @@ -804,27 +800,26 @@ sg_ioctl(struct inode *inode, struct file *filp,
2534     return -EFAULT;
2535     result =
2536     sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
2537     - blocking, read_only, &srp);
2538     + blocking, read_only, 1, &srp);
2539     if (result < 0)
2540     return result;
2541     - srp->sg_io_owned = 1;
2542     while (1) {
2543     result = 0; /* following macro to beat race condition */
2544     __wait_event_interruptible(sfp->read_wait,
2545     - (sdp->detached || sfp->closed || sg_srp_done(srp, sfp)),
2546     - result);
2547     + (srp->done || sdp->detached),
2548     + result);
2549     if (sdp->detached)
2550     return -ENODEV;
2551     - if (sfp->closed)
2552     - return 0; /* request packet dropped already */
2553     - if (0 == result)
2554     + write_lock_irq(&sfp->rq_list_lock);
2555     + if (srp->done) {
2556     + srp->done = 2;
2557     + write_unlock_irq(&sfp->rq_list_lock);
2558     break;
2559     + }
2560     srp->orphan = 1;
2561     + write_unlock_irq(&sfp->rq_list_lock);
2562     return result; /* -ERESTARTSYS because signal hit process */
2563     }
2564     - write_lock_irqsave(&sfp->rq_list_lock, iflags);
2565     - srp->done = 2;
2566     - write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2567     result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
2568     return (result < 0) ? result : 0;
2569     }
2570     @@ -1240,6 +1235,15 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
2571     return 0;
2572     }
2573    
2574     +static void sg_rq_end_io_usercontext(struct work_struct *work)
2575     +{
2576     + struct sg_request *srp = container_of(work, struct sg_request, ew.work);
2577     + struct sg_fd *sfp = srp->parentfp;
2578     +
2579     + sg_finish_rem_req(srp);
2580     + kref_put(&sfp->f_ref, sg_remove_sfp);
2581     +}
2582     +
2583     /*
2584     * This function is a "bottom half" handler that is called by the mid
2585     * level when a command is completed (or has failed).
2586     @@ -1247,24 +1251,23 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
2587     static void sg_rq_end_io(struct request *rq, int uptodate)
2588     {
2589     struct sg_request *srp = rq->end_io_data;
2590     - Sg_device *sdp = NULL;
2591     + Sg_device *sdp;
2592     Sg_fd *sfp;
2593     unsigned long iflags;
2594     unsigned int ms;
2595     char *sense;
2596     - int result, resid;
2597     + int result, resid, done = 1;
2598    
2599     - if (NULL == srp) {
2600     - printk(KERN_ERR "sg_cmd_done: NULL request\n");
2601     + if (WARN_ON(srp->done != 0))
2602     return;
2603     - }
2604     +
2605     sfp = srp->parentfp;
2606     - if (sfp)
2607     - sdp = sfp->parentdp;
2608     - if ((NULL == sdp) || sdp->detached) {
2609     - printk(KERN_INFO "sg_cmd_done: device detached\n");
2610     + if (WARN_ON(sfp == NULL))
2611     return;
2612     - }
2613     +
2614     + sdp = sfp->parentdp;
2615     + if (unlikely(sdp->detached))
2616     + printk(KERN_INFO "sg_rq_end_io: device detached\n");
2617    
2618     sense = rq->sense;
2619     result = rq->errors;
2620     @@ -1303,32 +1306,26 @@ static void sg_rq_end_io(struct request *rq, int uptodate)
2621     }
2622     /* Rely on write phase to clean out srp status values, so no "else" */
2623    
2624     - if (sfp->closed) { /* whoops this fd already released, cleanup */
2625     - SCSI_LOG_TIMEOUT(1, printk("sg_cmd_done: already closed, freeing ...\n"));
2626     - sg_finish_rem_req(srp);
2627     - srp = NULL;
2628     - if (NULL == sfp->headrp) {
2629     - SCSI_LOG_TIMEOUT(1, printk("sg_cmd_done: already closed, final cleanup\n"));
2630     - if (0 == sg_remove_sfp(sdp, sfp)) { /* device still present */
2631     - scsi_device_put(sdp->device);
2632     - }
2633     - sfp = NULL;
2634     - }
2635     - } else if (srp && srp->orphan) {
2636     + write_lock_irqsave(&sfp->rq_list_lock, iflags);
2637     + if (unlikely(srp->orphan)) {
2638     if (sfp->keep_orphan)
2639     srp->sg_io_owned = 0;
2640     - else {
2641     - sg_finish_rem_req(srp);
2642     - srp = NULL;
2643     - }
2644     + else
2645     + done = 0;
2646     }
2647     - if (sfp && srp) {
2648     - /* Now wake up any sg_read() that is waiting for this packet. */
2649     - kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
2650     - write_lock_irqsave(&sfp->rq_list_lock, iflags);
2651     - srp->done = 1;
2652     + srp->done = done;
2653     + write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2654     +
2655     + if (likely(done)) {
2656     + /* Now wake up any sg_read() that is waiting for this
2657     + * packet.
2658     + */
2659     wake_up_interruptible(&sfp->read_wait);
2660     - write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2661     + kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
2662     + kref_put(&sfp->f_ref, sg_remove_sfp);
2663     + } else {
2664     + INIT_WORK(&srp->ew.work, sg_rq_end_io_usercontext);
2665     + schedule_work(&srp->ew.work);
2666     }
2667     }
2668    
2669     @@ -1364,17 +1361,18 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
2670     printk(KERN_WARNING "kmalloc Sg_device failure\n");
2671     return ERR_PTR(-ENOMEM);
2672     }
2673     - error = -ENOMEM;
2674     +
2675     if (!idr_pre_get(&sg_index_idr, GFP_KERNEL)) {
2676     printk(KERN_WARNING "idr expansion Sg_device failure\n");
2677     + error = -ENOMEM;
2678     goto out;
2679     }
2680    
2681     write_lock_irqsave(&sg_index_lock, iflags);
2682     - error = idr_get_new(&sg_index_idr, sdp, &k);
2683     - write_unlock_irqrestore(&sg_index_lock, iflags);
2684    
2685     + error = idr_get_new(&sg_index_idr, sdp, &k);
2686     if (error) {
2687     + write_unlock_irqrestore(&sg_index_lock, iflags);
2688     printk(KERN_WARNING "idr allocation Sg_device failure: %d\n",
2689     error);
2690     goto out;
2691     @@ -1391,6 +1389,9 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
2692     init_waitqueue_head(&sdp->o_excl_wait);
2693     sdp->sg_tablesize = min(q->max_hw_segments, q->max_phys_segments);
2694     sdp->index = k;
2695     + kref_init(&sdp->d_ref);
2696     +
2697     + write_unlock_irqrestore(&sg_index_lock, iflags);
2698    
2699     error = 0;
2700     out:
2701     @@ -1401,6 +1402,8 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
2702     return sdp;
2703    
2704     overflow:
2705     + idr_remove(&sg_index_idr, k);
2706     + write_unlock_irqrestore(&sg_index_lock, iflags);
2707     sdev_printk(KERN_WARNING, scsidp,
2708     "Unable to attach sg device type=%d, minor "
2709     "number exceeds %d\n", scsidp->type, SG_MAX_DEVS - 1);
2710     @@ -1488,49 +1491,46 @@ out:
2711     return error;
2712     }
2713    
2714     -static void
2715     -sg_remove(struct device *cl_dev, struct class_interface *cl_intf)
2716     +static void sg_device_destroy(struct kref *kref)
2717     +{
2718     + struct sg_device *sdp = container_of(kref, struct sg_device, d_ref);
2719     + unsigned long flags;
2720     +
2721     + /* CAUTION! Note that the device can still be found via idr_find()
2722     + * even though the refcount is 0. Therefore, do idr_remove() BEFORE
2723     + * any other cleanup.
2724     + */
2725     +
2726     + write_lock_irqsave(&sg_index_lock, flags);
2727     + idr_remove(&sg_index_idr, sdp->index);
2728     + write_unlock_irqrestore(&sg_index_lock, flags);
2729     +
2730     + SCSI_LOG_TIMEOUT(3,
2731     + printk("sg_device_destroy: %s\n",
2732     + sdp->disk->disk_name));
2733     +
2734     + put_disk(sdp->disk);
2735     + kfree(sdp);
2736     +}
2737     +
2738     +static void sg_remove(struct device *cl_dev, struct class_interface *cl_intf)
2739     {
2740     struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
2741     Sg_device *sdp = dev_get_drvdata(cl_dev);
2742     unsigned long iflags;
2743     Sg_fd *sfp;
2744     - Sg_fd *tsfp;
2745     - Sg_request *srp;
2746     - Sg_request *tsrp;
2747     - int delay;
2748    
2749     - if (!sdp)
2750     + if (!sdp || sdp->detached)
2751     return;
2752    
2753     - delay = 0;
2754     + SCSI_LOG_TIMEOUT(3, printk("sg_remove: %s\n", sdp->disk->disk_name));
2755     +
2756     + /* Need a write lock to set sdp->detached. */
2757     write_lock_irqsave(&sg_index_lock, iflags);
2758     - if (sdp->headfp) {
2759     - sdp->detached = 1;
2760     - for (sfp = sdp->headfp; sfp; sfp = tsfp) {
2761     - tsfp = sfp->nextfp;
2762     - for (srp = sfp->headrp; srp; srp = tsrp) {
2763     - tsrp = srp->nextrp;
2764     - if (sfp->closed || (0 == sg_srp_done(srp, sfp)))
2765     - sg_finish_rem_req(srp);
2766     - }
2767     - if (sfp->closed) {
2768     - scsi_device_put(sdp->device);
2769     - __sg_remove_sfp(sdp, sfp);
2770     - } else {
2771     - delay = 1;
2772     - wake_up_interruptible(&sfp->read_wait);
2773     - kill_fasync(&sfp->async_qp, SIGPOLL,
2774     - POLL_HUP);
2775     - }
2776     - }
2777     - SCSI_LOG_TIMEOUT(3, printk("sg_remove: dev=%d, dirty\n", sdp->index));
2778     - if (NULL == sdp->headfp) {
2779     - idr_remove(&sg_index_idr, sdp->index);
2780     - }
2781     - } else { /* nothing active, simple case */
2782     - SCSI_LOG_TIMEOUT(3, printk("sg_remove: dev=%d\n", sdp->index));
2783     - idr_remove(&sg_index_idr, sdp->index);
2784     + sdp->detached = 1;
2785     + for (sfp = sdp->headfp; sfp; sfp = sfp->nextfp) {
2786     + wake_up_interruptible(&sfp->read_wait);
2787     + kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP);
2788     }
2789     write_unlock_irqrestore(&sg_index_lock, iflags);
2790    
2791     @@ -1538,13 +1538,8 @@ sg_remove(struct device *cl_dev, struct class_interface *cl_intf)
2792     device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index));
2793     cdev_del(sdp->cdev);
2794     sdp->cdev = NULL;
2795     - put_disk(sdp->disk);
2796     - sdp->disk = NULL;
2797     - if (NULL == sdp->headfp)
2798     - kfree(sdp);
2799    
2800     - if (delay)
2801     - msleep(10); /* dirty detach so delay device destruction */
2802     + sg_put_dev(sdp);
2803     }
2804    
2805     module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR);
2806     @@ -1673,10 +1668,30 @@ static int sg_start_req(Sg_request *srp, unsigned char *cmd)
2807     md->null_mapped = hp->dxferp ? 0 : 1;
2808     }
2809    
2810     - if (iov_count)
2811     - res = blk_rq_map_user_iov(q, rq, md, hp->dxferp, iov_count,
2812     - hp->dxfer_len, GFP_ATOMIC);
2813     - else
2814     + if (iov_count) {
2815     + int len, size = sizeof(struct sg_iovec) * iov_count;
2816     + struct iovec *iov;
2817     +
2818     + iov = kmalloc(size, GFP_ATOMIC);
2819     + if (!iov)
2820     + return -ENOMEM;
2821     +
2822     + if (copy_from_user(iov, hp->dxferp, size)) {
2823     + kfree(iov);
2824     + return -EFAULT;
2825     + }
2826     +
2827     + len = iov_length(iov, iov_count);
2828     + if (hp->dxfer_len < len) {
2829     + iov_count = iov_shorten(iov, iov_count, hp->dxfer_len);
2830     + len = hp->dxfer_len;
2831     + }
2832     +
2833     + res = blk_rq_map_user_iov(q, rq, md, (struct sg_iovec *)iov,
2834     + iov_count,
2835     + len, GFP_ATOMIC);
2836     + kfree(iov);
2837     + } else
2838     res = blk_rq_map_user(q, rq, md, hp->dxferp,
2839     hp->dxfer_len, GFP_ATOMIC);
2840    
2841     @@ -1941,22 +1956,6 @@ sg_get_rq_mark(Sg_fd * sfp, int pack_id)
2842     return resp;
2843     }
2844    
2845     -#ifdef CONFIG_SCSI_PROC_FS
2846     -static Sg_request *
2847     -sg_get_nth_request(Sg_fd * sfp, int nth)
2848     -{
2849     - Sg_request *resp;
2850     - unsigned long iflags;
2851     - int k;
2852     -
2853     - read_lock_irqsave(&sfp->rq_list_lock, iflags);
2854     - for (k = 0, resp = sfp->headrp; resp && (k < nth);
2855     - ++k, resp = resp->nextrp) ;
2856     - read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2857     - return resp;
2858     -}
2859     -#endif
2860     -
2861     /* always adds to end of list */
2862     static Sg_request *
2863     sg_add_request(Sg_fd * sfp)
2864     @@ -2032,22 +2031,6 @@ sg_remove_request(Sg_fd * sfp, Sg_request * srp)
2865     return res;
2866     }
2867    
2868     -#ifdef CONFIG_SCSI_PROC_FS
2869     -static Sg_fd *
2870     -sg_get_nth_sfp(Sg_device * sdp, int nth)
2871     -{
2872     - Sg_fd *resp;
2873     - unsigned long iflags;
2874     - int k;
2875     -
2876     - read_lock_irqsave(&sg_index_lock, iflags);
2877     - for (k = 0, resp = sdp->headfp; resp && (k < nth);
2878     - ++k, resp = resp->nextfp) ;
2879     - read_unlock_irqrestore(&sg_index_lock, iflags);
2880     - return resp;
2881     -}
2882     -#endif
2883     -
2884     static Sg_fd *
2885     sg_add_sfp(Sg_device * sdp, int dev)
2886     {
2887     @@ -2062,6 +2045,7 @@ sg_add_sfp(Sg_device * sdp, int dev)
2888     init_waitqueue_head(&sfp->read_wait);
2889     rwlock_init(&sfp->rq_list_lock);
2890    
2891     + kref_init(&sfp->f_ref);
2892     sfp->timeout = SG_DEFAULT_TIMEOUT;
2893     sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
2894     sfp->force_packid = SG_DEF_FORCE_PACK_ID;
2895     @@ -2089,15 +2073,54 @@ sg_add_sfp(Sg_device * sdp, int dev)
2896     sg_build_reserve(sfp, bufflen);
2897     SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
2898     sfp->reserve.bufflen, sfp->reserve.k_use_sg));
2899     +
2900     + kref_get(&sdp->d_ref);
2901     + __module_get(THIS_MODULE);
2902     return sfp;
2903     }
2904    
2905     -static void
2906     -__sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
2907     +static void sg_remove_sfp_usercontext(struct work_struct *work)
2908     +{
2909     + struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
2910     + struct sg_device *sdp = sfp->parentdp;
2911     +
2912     + /* Cleanup any responses which were never read(). */
2913     + while (sfp->headrp)
2914     + sg_finish_rem_req(sfp->headrp);
2915     +
2916     + if (sfp->reserve.bufflen > 0) {
2917     + SCSI_LOG_TIMEOUT(6,
2918     + printk("sg_remove_sfp: bufflen=%d, k_use_sg=%d\n",
2919     + (int) sfp->reserve.bufflen,
2920     + (int) sfp->reserve.k_use_sg));
2921     + sg_remove_scat(&sfp->reserve);
2922     + }
2923     +
2924     + SCSI_LOG_TIMEOUT(6,
2925     + printk("sg_remove_sfp: %s, sfp=0x%p\n",
2926     + sdp->disk->disk_name,
2927     + sfp));
2928     + kfree(sfp);
2929     +
2930     + scsi_device_put(sdp->device);
2931     + sg_put_dev(sdp);
2932     + module_put(THIS_MODULE);
2933     +}
2934     +
2935     +static void sg_remove_sfp(struct kref *kref)
2936     {
2937     + struct sg_fd *sfp = container_of(kref, struct sg_fd, f_ref);
2938     + struct sg_device *sdp = sfp->parentdp;
2939     Sg_fd *fp;
2940     Sg_fd *prev_fp;
2941     + unsigned long iflags;
2942    
2943     + /* CAUTION! Note that sfp can still be found by walking sdp->headfp
2944     + * even though the refcount is now 0. Therefore, unlink sfp from
2945     + * sdp->headfp BEFORE doing any other cleanup.
2946     + */
2947     +
2948     + write_lock_irqsave(&sg_index_lock, iflags);
2949     prev_fp = sdp->headfp;
2950     if (sfp == prev_fp)
2951     sdp->headfp = prev_fp->nextfp;
2952     @@ -2110,54 +2133,11 @@ __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
2953     prev_fp = fp;
2954     }
2955     }
2956     - if (sfp->reserve.bufflen > 0) {
2957     - SCSI_LOG_TIMEOUT(6,
2958     - printk("__sg_remove_sfp: bufflen=%d, k_use_sg=%d\n",
2959     - (int) sfp->reserve.bufflen, (int) sfp->reserve.k_use_sg));
2960     - sg_remove_scat(&sfp->reserve);
2961     - }
2962     - sfp->parentdp = NULL;
2963     - SCSI_LOG_TIMEOUT(6, printk("__sg_remove_sfp: sfp=0x%p\n", sfp));
2964     - kfree(sfp);
2965     -}
2966     -
2967     -/* Returns 0 in normal case, 1 when detached and sdp object removed */
2968     -static int
2969     -sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
2970     -{
2971     - Sg_request *srp;
2972     - Sg_request *tsrp;
2973     - int dirty = 0;
2974     - int res = 0;
2975     -
2976     - for (srp = sfp->headrp; srp; srp = tsrp) {
2977     - tsrp = srp->nextrp;
2978     - if (sg_srp_done(srp, sfp))
2979     - sg_finish_rem_req(srp);
2980     - else
2981     - ++dirty;
2982     - }
2983     - if (0 == dirty) {
2984     - unsigned long iflags;
2985     + write_unlock_irqrestore(&sg_index_lock, iflags);
2986     + wake_up_interruptible(&sdp->o_excl_wait);
2987    
2988     - write_lock_irqsave(&sg_index_lock, iflags);
2989     - __sg_remove_sfp(sdp, sfp);
2990     - if (sdp->detached && (NULL == sdp->headfp)) {
2991     - idr_remove(&sg_index_idr, sdp->index);
2992     - kfree(sdp);
2993     - res = 1;
2994     - }
2995     - write_unlock_irqrestore(&sg_index_lock, iflags);
2996     - } else {
2997     - /* MOD_INC's to inhibit unloading sg and associated adapter driver */
2998     - /* only bump the access_count if we actually succeeded in
2999     - * throwing another counter on the host module */
3000     - scsi_device_get(sdp->device); /* XXX: retval ignored? */
3001     - sfp->closed = 1; /* flag dirty state on this fd */
3002     - SCSI_LOG_TIMEOUT(1, printk("sg_remove_sfp: worrisome, %d writes pending\n",
3003     - dirty));
3004     - }
3005     - return res;
3006     + INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext);
3007     + schedule_work(&sfp->ew.work);
3008     }
3009    
3010     static int
3011     @@ -2199,19 +2179,38 @@ sg_last_dev(void)
3012     }
3013     #endif
3014    
3015     -static Sg_device *
3016     -sg_get_dev(int dev)
3017     +/* must be called with sg_index_lock held */
3018     +static Sg_device *sg_lookup_dev(int dev)
3019     {
3020     - Sg_device *sdp;
3021     - unsigned long iflags;
3022     + return idr_find(&sg_index_idr, dev);
3023     +}
3024    
3025     - read_lock_irqsave(&sg_index_lock, iflags);
3026     - sdp = idr_find(&sg_index_idr, dev);
3027     - read_unlock_irqrestore(&sg_index_lock, iflags);
3028     +static Sg_device *sg_get_dev(int dev)
3029     +{
3030     + struct sg_device *sdp;
3031     + unsigned long flags;
3032     +
3033     + read_lock_irqsave(&sg_index_lock, flags);
3034     + sdp = sg_lookup_dev(dev);
3035     + if (!sdp)
3036     + sdp = ERR_PTR(-ENXIO);
3037     + else if (sdp->detached) {
3038     + /* If sdp->detached, then the refcount may already be 0, in
3039     + * which case it would be a bug to do kref_get().
3040     + */
3041     + sdp = ERR_PTR(-ENODEV);
3042     + } else
3043     + kref_get(&sdp->d_ref);
3044     + read_unlock_irqrestore(&sg_index_lock, flags);
3045    
3046     return sdp;
3047     }
3048    
3049     +static void sg_put_dev(struct sg_device *sdp)
3050     +{
3051     + kref_put(&sdp->d_ref, sg_device_destroy);
3052     +}
3053     +
3054     #ifdef CONFIG_SCSI_PROC_FS
3055    
3056     static struct proc_dir_entry *sg_proc_sgp = NULL;
3057     @@ -2468,8 +2467,10 @@ static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
3058     struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
3059     Sg_device *sdp;
3060     struct scsi_device *scsidp;
3061     + unsigned long iflags;
3062    
3063     - sdp = it ? sg_get_dev(it->index) : NULL;
3064     + read_lock_irqsave(&sg_index_lock, iflags);
3065     + sdp = it ? sg_lookup_dev(it->index) : NULL;
3066     if (sdp && (scsidp = sdp->device) && (!sdp->detached))
3067     seq_printf(s, "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
3068     scsidp->host->host_no, scsidp->channel,
3069     @@ -2480,6 +2481,7 @@ static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
3070     (int) scsi_device_online(scsidp));
3071     else
3072     seq_printf(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
3073     + read_unlock_irqrestore(&sg_index_lock, iflags);
3074     return 0;
3075     }
3076    
3077     @@ -2493,16 +2495,20 @@ static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
3078     struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
3079     Sg_device *sdp;
3080     struct scsi_device *scsidp;
3081     + unsigned long iflags;
3082    
3083     - sdp = it ? sg_get_dev(it->index) : NULL;
3084     + read_lock_irqsave(&sg_index_lock, iflags);
3085     + sdp = it ? sg_lookup_dev(it->index) : NULL;
3086     if (sdp && (scsidp = sdp->device) && (!sdp->detached))
3087     seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n",
3088     scsidp->vendor, scsidp->model, scsidp->rev);
3089     else
3090     seq_printf(s, "<no active device>\n");
3091     + read_unlock_irqrestore(&sg_index_lock, iflags);
3092     return 0;
3093     }
3094    
3095     +/* must be called while holding sg_index_lock */
3096     static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
3097     {
3098     int k, m, new_interface, blen, usg;
3099     @@ -2512,7 +2518,8 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
3100     const char * cp;
3101     unsigned int ms;
3102    
3103     - for (k = 0; (fp = sg_get_nth_sfp(sdp, k)); ++k) {
3104     + for (k = 0, fp = sdp->headfp; fp != NULL; ++k, fp = fp->nextfp) {
3105     + read_lock(&fp->rq_list_lock); /* irqs already disabled */
3106     seq_printf(s, " FD(%d): timeout=%dms bufflen=%d "
3107     "(res)sgat=%d low_dma=%d\n", k + 1,
3108     jiffies_to_msecs(fp->timeout),
3109     @@ -2522,7 +2529,9 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
3110     seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=%d\n",
3111     (int) fp->cmd_q, (int) fp->force_packid,
3112     (int) fp->keep_orphan, (int) fp->closed);
3113     - for (m = 0; (srp = sg_get_nth_request(fp, m)); ++m) {
3114     + for (m = 0, srp = fp->headrp;
3115     + srp != NULL;
3116     + ++m, srp = srp->nextrp) {
3117     hp = &srp->header;
3118     new_interface = (hp->interface_id == '\0') ? 0 : 1;
3119     if (srp->res_used) {
3120     @@ -2559,6 +2568,7 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
3121     }
3122     if (0 == m)
3123     seq_printf(s, " No requests active\n");
3124     + read_unlock(&fp->rq_list_lock);
3125     }
3126     }
3127    
3128     @@ -2571,39 +2581,34 @@ static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
3129     {
3130     struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
3131     Sg_device *sdp;
3132     + unsigned long iflags;
3133    
3134     if (it && (0 == it->index)) {
3135     seq_printf(s, "max_active_device=%d(origin 1)\n",
3136     (int)it->max);
3137     seq_printf(s, " def_reserved_size=%d\n", sg_big_buff);
3138     }
3139     - sdp = it ? sg_get_dev(it->index) : NULL;
3140     - if (sdp) {
3141     - struct scsi_device *scsidp = sdp->device;
3142    
3143     - if (NULL == scsidp) {
3144     - seq_printf(s, "device %d detached ??\n",
3145     - (int)it->index);
3146     - return 0;
3147     - }
3148     + read_lock_irqsave(&sg_index_lock, iflags);
3149     + sdp = it ? sg_lookup_dev(it->index) : NULL;
3150     + if (sdp && sdp->headfp) {
3151     + struct scsi_device *scsidp = sdp->device;
3152    
3153     - if (sg_get_nth_sfp(sdp, 0)) {
3154     - seq_printf(s, " >>> device=%s ",
3155     - sdp->disk->disk_name);
3156     - if (sdp->detached)
3157     - seq_printf(s, "detached pending close ");
3158     - else
3159     - seq_printf
3160     - (s, "scsi%d chan=%d id=%d lun=%d em=%d",
3161     - scsidp->host->host_no,
3162     - scsidp->channel, scsidp->id,
3163     - scsidp->lun,
3164     - scsidp->host->hostt->emulated);
3165     - seq_printf(s, " sg_tablesize=%d excl=%d\n",
3166     - sdp->sg_tablesize, sdp->exclude);
3167     - }
3168     + seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
3169     + if (sdp->detached)
3170     + seq_printf(s, "detached pending close ");
3171     + else
3172     + seq_printf
3173     + (s, "scsi%d chan=%d id=%d lun=%d em=%d",
3174     + scsidp->host->host_no,
3175     + scsidp->channel, scsidp->id,
3176     + scsidp->lun,
3177     + scsidp->host->hostt->emulated);
3178     + seq_printf(s, " sg_tablesize=%d excl=%d\n",
3179     + sdp->sg_tablesize, sdp->exclude);
3180     sg_proc_debug_helper(s, sdp);
3181     }
3182     + read_unlock_irqrestore(&sg_index_lock, iflags);
3183     return 0;
3184     }
3185    
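
Note: the sg rework above follows the common kref-plus-workqueue lifetime pattern. The final kref_put() can happen in atomic context, so sg_remove_sfp() only unlinks the fd under sg_index_lock and defers the cleanup that needs process context (freeing the reserve buffer, scsi_device_put(), module_put()) to a work item. A minimal sketch of the pattern, with hypothetical names (struct foo, foo_release, foo_free) rather than the sg ones:

    struct foo {
            struct kref ref;
            struct work_struct free_work;
    };

    static void foo_free(struct work_struct *work)
    {
            struct foo *f = container_of(work, struct foo, free_work);
            /* process context: safe to sleep, drop device/module refs */
            kfree(f);
    }

    static void foo_release(struct kref *kref)
    {
            struct foo *f = container_of(kref, struct foo, ref);
            /* may run with spinlocks held: unlink only, defer the rest */
            INIT_WORK(&f->free_work, foo_free);
            schedule_work(&f->free_work);
    }

    /* users: kref_init() once, then kref_get(&f->ref) and
     * kref_put(&f->ref, foo_release); the last put schedules foo_free(). */
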
3186     diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
3187     index 643908b..8eba98c 100644
3188     --- a/drivers/spi/spi.c
3189     +++ b/drivers/spi/spi.c
3190     @@ -658,7 +658,7 @@ int spi_write_then_read(struct spi_device *spi,
3191    
3192     int status;
3193     struct spi_message message;
3194     - struct spi_transfer x;
3195     + struct spi_transfer x[2];
3196     u8 *local_buf;
3197    
3198     /* Use preallocated DMA-safe buffer. We can't avoid copying here,
3199     @@ -669,9 +669,15 @@ int spi_write_then_read(struct spi_device *spi,
3200     return -EINVAL;
3201    
3202     spi_message_init(&message);
3203     - memset(&x, 0, sizeof x);
3204     - x.len = n_tx + n_rx;
3205     - spi_message_add_tail(&x, &message);
3206     + memset(x, 0, sizeof x);
3207     + if (n_tx) {
3208     + x[0].len = n_tx;
3209     + spi_message_add_tail(&x[0], &message);
3210     + }
3211     + if (n_rx) {
3212     + x[1].len = n_rx;
3213     + spi_message_add_tail(&x[1], &message);
3214     + }
3215    
3216     /* ... unless someone else is using the pre-allocated buffer */
3217     if (!mutex_trylock(&lock)) {
3218     @@ -682,15 +688,15 @@ int spi_write_then_read(struct spi_device *spi,
3219     local_buf = buf;
3220    
3221     memcpy(local_buf, txbuf, n_tx);
3222     - x.tx_buf = local_buf;
3223     - x.rx_buf = local_buf;
3224     + x[0].tx_buf = local_buf;
3225     + x[1].rx_buf = local_buf + n_tx;
3226    
3227     /* do the i/o */
3228     status = spi_sync(spi, &message);
3229     if (status == 0)
3230     - memcpy(rxbuf, x.rx_buf + n_tx, n_rx);
3231     + memcpy(rxbuf, x[1].rx_buf, n_rx);
3232    
3233     - if (x.tx_buf == buf)
3234     + if (x[0].tx_buf == buf)
3235     mutex_unlock(&lock);
3236     else
3237     kfree(local_buf);
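
Note: for context, spi_write_then_read() is the half-duplex helper being fixed; after this change the n_tx and n_rx bytes travel as two chained transfers sharing one DMA-safe bounce buffer instead of a single full-duplex transfer of n_tx + n_rx bytes. A hypothetical caller reading a two-byte ID after a one-byte command (the 0x9f opcode is only an example, not from this patch):

    u8 cmd = 0x9f;
    u8 id[2];
    int ret;

    ret = spi_write_then_read(spi, &cmd, 1, id, sizeof(id));
    if (ret)
            dev_err(&spi->dev, "id read failed: %d\n", ret);
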
3238     diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
3239     index 3771d6e..34e6108 100644
3240     --- a/drivers/usb/class/cdc-wdm.c
3241     +++ b/drivers/usb/class/cdc-wdm.c
3242     @@ -652,7 +652,7 @@ next_desc:
3243    
3244     iface = &intf->altsetting[0];
3245     ep = &iface->endpoint[0].desc;
3246     - if (!usb_endpoint_is_int_in(ep)) {
3247     + if (!ep || !usb_endpoint_is_int_in(ep)) {
3248     rv = -EINVAL;
3249     goto err;
3250     }
3251     diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
3252     index 96d65ca..4007770 100644
3253     --- a/drivers/usb/gadget/u_ether.c
3254     +++ b/drivers/usb/gadget/u_ether.c
3255     @@ -175,12 +175,6 @@ static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
3256     strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof p->bus_info);
3257     }
3258    
3259     -static u32 eth_get_link(struct net_device *net)
3260     -{
3261     - struct eth_dev *dev = netdev_priv(net);
3262     - return dev->gadget->speed != USB_SPEED_UNKNOWN;
3263     -}
3264     -
3265     /* REVISIT can also support:
3266     * - WOL (by tracking suspends and issuing remote wakeup)
3267     * - msglevel (implies updated messaging)
3268     @@ -189,7 +183,7 @@ static u32 eth_get_link(struct net_device *net)
3269    
3270     static struct ethtool_ops ops = {
3271     .get_drvinfo = eth_get_drvinfo,
3272     - .get_link = eth_get_link
3273     + .get_link = ethtool_op_get_link,
3274     };
3275    
3276     static void defer_kevent(struct eth_dev *dev, int flag)
3277     diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
3278     index ae84c32..bb3143e 100644
3279     --- a/drivers/usb/serial/ftdi_sio.c
3280     +++ b/drivers/usb/serial/ftdi_sio.c
3281     @@ -668,6 +668,7 @@ static struct usb_device_id id_table_combined [] = {
3282     { USB_DEVICE(DE_VID, WHT_PID) },
3283     { USB_DEVICE(ADI_VID, ADI_GNICE_PID),
3284     .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
3285     + { USB_DEVICE(JETI_VID, JETI_SPC1201_PID) },
3286     { }, /* Optional parameter entry */
3287     { } /* Terminating entry */
3288     };
3289     diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
3290     index daaf63d..c09f658 100644
3291     --- a/drivers/usb/serial/ftdi_sio.h
3292     +++ b/drivers/usb/serial/ftdi_sio.h
3293     @@ -913,6 +913,13 @@
3294     #define ADI_GNICE_PID 0xF000
3295    
3296     /*
3297     + * JETI SPECTROMETER SPECBOS 1201
3298     + * http://www.jeti.com/products/sys/scb/scb1201.php
3299     + */
3300     +#define JETI_VID 0x0c6c
3301     +#define JETI_SPC1201_PID 0x04b2
3302     +
3303     +/*
3304     * BmRequestType: 1100 0000b
3305     * bRequest: FTDI_E2_READ
3306     * wValue: 0
3307     diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
3308     index 2620bf6..9c4c700 100644
3309     --- a/drivers/usb/serial/ti_usb_3410_5052.c
3310     +++ b/drivers/usb/serial/ti_usb_3410_5052.c
3311     @@ -1215,20 +1215,22 @@ static void ti_bulk_in_callback(struct urb *urb)
3312     }
3313    
3314     tty = tty_port_tty_get(&port->port);
3315     - if (tty && urb->actual_length) {
3316     - usb_serial_debug_data(debug, dev, __func__,
3317     - urb->actual_length, urb->transfer_buffer);
3318     -
3319     - if (!tport->tp_is_open)
3320     - dbg("%s - port closed, dropping data", __func__);
3321     - else
3322     - ti_recv(&urb->dev->dev, tty,
3323     + if (tty) {
3324     + if (urb->actual_length) {
3325     + usb_serial_debug_data(debug, dev, __func__,
3326     + urb->actual_length, urb->transfer_buffer);
3327     +
3328     + if (!tport->tp_is_open)
3329     + dbg("%s - port closed, dropping data",
3330     + __func__);
3331     + else
3332     + ti_recv(&urb->dev->dev, tty,
3333     urb->transfer_buffer,
3334     urb->actual_length);
3335     -
3336     - spin_lock(&tport->tp_lock);
3337     - tport->tp_icount.rx += urb->actual_length;
3338     - spin_unlock(&tport->tp_lock);
3339     + spin_lock(&tport->tp_lock);
3340     + tport->tp_icount.rx += urb->actual_length;
3341     + spin_unlock(&tport->tp_lock);
3342     + }
3343     tty_kref_put(tty);
3344     }
3345    
3346     diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
3347     index cfde74a..0f54399 100644
3348     --- a/drivers/usb/storage/unusual_devs.h
3349     +++ b/drivers/usb/storage/unusual_devs.h
3350     @@ -1218,12 +1218,14 @@ UNUSUAL_DEV( 0x07c4, 0xa400, 0x0000, 0xffff,
3351     US_SC_DEVICE, US_PR_DEVICE, NULL,
3352     US_FL_FIX_INQUIRY | US_FL_FIX_CAPACITY ),
3353    
3354     -/* Reported by Rauch Wolke <rauchwolke@gmx.net> */
3355     +/* Reported by Rauch Wolke <rauchwolke@gmx.net>
3356     + * and augmented by binbin <binbinsh@gmail.com> (Bugzilla #12882)
3357     + */
3358     UNUSUAL_DEV( 0x07c4, 0xa4a5, 0x0000, 0xffff,
3359     "Simple Tech/Datafab",
3360     "CF+SM Reader",
3361     US_SC_DEVICE, US_PR_DEVICE, NULL,
3362     - US_FL_IGNORE_RESIDUE ),
3363     + US_FL_IGNORE_RESIDUE | US_FL_MAX_SECTORS_64 ),
3364    
3365     /* Casio QV 2x00/3x00/4000/8000 digital still cameras are not conformant
3366     * to the USB storage specification in two ways:
3367     diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
3368     index 1657b96..471a9a6 100644
3369     --- a/drivers/video/console/fbcon.c
3370     +++ b/drivers/video/console/fbcon.c
3371     @@ -2263,9 +2263,12 @@ static void fbcon_generic_blank(struct vc_data *vc, struct fb_info *info,
3372     }
3373    
3374    
3375     + if (!lock_fb_info(info))
3376     + return;
3377     event.info = info;
3378     event.data = &blank;
3379     fb_notifier_call_chain(FB_EVENT_CONBLANK, &event);
3380     + unlock_fb_info(info);
3381     }
3382    
3383     static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
3384     @@ -2954,8 +2957,9 @@ static int fbcon_fb_unbind(int idx)
3385    
3386     static int fbcon_fb_unregistered(struct fb_info *info)
3387     {
3388     - int i, idx = info->node;
3389     + int i, idx;
3390    
3391     + idx = info->node;
3392     for (i = first_fb_vc; i <= last_fb_vc; i++) {
3393     if (con2fb_map[i] == idx)
3394     con2fb_map[i] = -1;
3395     @@ -2979,13 +2983,12 @@ static int fbcon_fb_unregistered(struct fb_info *info)
3396     }
3397     }
3398    
3399     - if (!num_registered_fb)
3400     - unregister_con_driver(&fb_con);
3401     -
3402     -
3403     if (primary_device == idx)
3404     primary_device = -1;
3405    
3406     + if (!num_registered_fb)
3407     + unregister_con_driver(&fb_con);
3408     +
3409     return 0;
3410     }
3411    
3412     @@ -3021,8 +3024,9 @@ static inline void fbcon_select_primary(struct fb_info *info)
3413    
3414     static int fbcon_fb_registered(struct fb_info *info)
3415     {
3416     - int ret = 0, i, idx = info->node;
3417     + int ret = 0, i, idx;
3418    
3419     + idx = info->node;
3420     fbcon_select_primary(info);
3421    
3422     if (info_idx == -1) {
3423     @@ -3124,7 +3128,7 @@ static void fbcon_get_requirement(struct fb_info *info,
3424     }
3425     }
3426    
3427     -static int fbcon_event_notify(struct notifier_block *self,
3428     +static int fbcon_event_notify(struct notifier_block *self,
3429     unsigned long action, void *data)
3430     {
3431     struct fb_event *event = data;
3432     @@ -3132,7 +3136,7 @@ static int fbcon_event_notify(struct notifier_block *self,
3433     struct fb_videomode *mode;
3434     struct fb_con2fbmap *con2fb;
3435     struct fb_blit_caps *caps;
3436     - int ret = 0;
3437     + int idx, ret = 0;
3438    
3439     /*
3440     * ignore all events except driver registration and deregistration
3441     @@ -3160,7 +3164,8 @@ static int fbcon_event_notify(struct notifier_block *self,
3442     ret = fbcon_mode_deleted(info, mode);
3443     break;
3444     case FB_EVENT_FB_UNBIND:
3445     - ret = fbcon_fb_unbind(info->node);
3446     + idx = info->node;
3447     + ret = fbcon_fb_unbind(idx);
3448     break;
3449     case FB_EVENT_FB_REGISTERED:
3450     ret = fbcon_fb_registered(info);
3451     @@ -3188,7 +3193,6 @@ static int fbcon_event_notify(struct notifier_block *self,
3452     fbcon_get_requirement(info, caps);
3453     break;
3454     }
3455     -
3456     done:
3457     return ret;
3458     }
3459     diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
3460     index cfd9dce..1d6fb41 100644
3461     --- a/drivers/video/fbmem.c
3462     +++ b/drivers/video/fbmem.c
3463     @@ -1086,13 +1086,11 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
3464     return -EINVAL;
3465     con2fb.framebuffer = -1;
3466     event.data = &con2fb;
3467     -
3468     if (!lock_fb_info(info))
3469     return -ENODEV;
3470     event.info = info;
3471     fb_notifier_call_chain(FB_EVENT_GET_CONSOLE_MAP, &event);
3472     unlock_fb_info(info);
3473     -
3474     ret = copy_to_user(argp, &con2fb, sizeof(con2fb)) ? -EFAULT : 0;
3475     break;
3476     case FBIOPUT_CON2FBMAP:
3477     @@ -1112,8 +1110,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
3478     if (!lock_fb_info(info))
3479     return -ENODEV;
3480     event.info = info;
3481     - ret = fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP,
3482     - &event);
3483     + ret = fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP, &event);
3484     unlock_fb_info(info);
3485     break;
3486     case FBIOBLANK:
3487     @@ -1519,7 +1516,10 @@ register_framebuffer(struct fb_info *fb_info)
3488     registered_fb[i] = fb_info;
3489    
3490     event.info = fb_info;
3491     + if (!lock_fb_info(fb_info))
3492     + return -ENODEV;
3493     fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event);
3494     + unlock_fb_info(fb_info);
3495     return 0;
3496     }
3497    
3498     @@ -1553,8 +1553,12 @@ unregister_framebuffer(struct fb_info *fb_info)
3499     goto done;
3500     }
3501    
3502     +
3503     + if (!lock_fb_info(fb_info))
3504     + return -ENODEV;
3505     event.info = fb_info;
3506     ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event);
3507     + unlock_fb_info(fb_info);
3508    
3509     if (ret) {
3510     ret = -EINVAL;
3511     @@ -1588,6 +1592,8 @@ void fb_set_suspend(struct fb_info *info, int state)
3512     {
3513     struct fb_event event;
3514    
3515     + if (!lock_fb_info(info))
3516     + return;
3517     event.info = info;
3518     if (state) {
3519     fb_notifier_call_chain(FB_EVENT_SUSPEND, &event);
3520     @@ -1596,6 +1602,7 @@ void fb_set_suspend(struct fb_info *info, int state)
3521     info->state = FBINFO_STATE_RUNNING;
3522     fb_notifier_call_chain(FB_EVENT_RESUME, &event);
3523     }
3524     + unlock_fb_info(info);
3525     }
3526    
3527     /**
3528     @@ -1665,8 +1672,11 @@ int fb_new_modelist(struct fb_info *info)
3529     err = 1;
3530    
3531     if (!list_empty(&info->modelist)) {
3532     + if (!lock_fb_info(info))
3533     + return -ENODEV;
3534     event.info = info;
3535     err = fb_notifier_call_chain(FB_EVENT_NEW_MODELIST, &event);
3536     + unlock_fb_info(info);
3537     }
3538    
3539     return err;
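
Note: the fbcon/fbmem hunks above all apply one rule: every fb_notifier_call_chain() call is now bracketed by lock_fb_info()/unlock_fb_info(), so notifier callbacks cannot race with the framebuffer being unregistered; a lock_fb_info() failure means the device is already gone. The recurring shape, as used at these call sites:

    if (!lock_fb_info(info))
            return -ENODEV;         /* fb_info already torn down */
    event.info = info;
    ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event);
    unlock_fb_info(info);
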
3540     diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
3541     index 5926826..9c76a06 100644
3542     --- a/drivers/virtio/virtio_balloon.c
3543     +++ b/drivers/virtio/virtio_balloon.c
3544     @@ -190,7 +190,8 @@ static int balloon(void *_vballoon)
3545     try_to_freeze();
3546     wait_event_interruptible(vb->config_change,
3547     (diff = towards_target(vb)) != 0
3548     - || kthread_should_stop());
3549     + || kthread_should_stop()
3550     + || freezing(current));
3551     if (diff > 0)
3552     fill_balloon(vb, diff);
3553     else if (diff < 0)
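
Note: the balloon change is the standard kthread freezer idiom: try_to_freeze() at the top of the loop only runs after the wait returns, so the wait condition itself must include freezing(current) or the thread sleeps through a freeze request. The shape of the idiom, with a hypothetical have_work() condition standing in for towards_target():

    while (!kthread_should_stop()) {
            try_to_freeze();
            wait_event_interruptible(wq, have_work() ||
                                         kthread_should_stop() ||
                                         freezing(current));
            /* ... do the work ... */
    }
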
3554     diff --git a/fs/dquot.c b/fs/dquot.c
3555     index bca3cac..5a0059d 100644
3556     --- a/fs/dquot.c
3557     +++ b/fs/dquot.c
3558     @@ -793,7 +793,7 @@ static void add_dquot_ref(struct super_block *sb, int type)
3559     continue;
3560     if (!dqinit_needed(inode, type))
3561     continue;
3562     - if (inode->i_state & (I_FREEING|I_WILL_FREE))
3563     + if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE))
3564     continue;
3565    
3566     __iget(inode);
3567     diff --git a/fs/drop_caches.c b/fs/drop_caches.c
3568     index 3e5637f..f7e66c0 100644
3569     --- a/fs/drop_caches.c
3570     +++ b/fs/drop_caches.c
3571     @@ -18,7 +18,7 @@ static void drop_pagecache_sb(struct super_block *sb)
3572    
3573     spin_lock(&inode_lock);
3574     list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
3575     - if (inode->i_state & (I_FREEING|I_WILL_FREE))
3576     + if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE))
3577     continue;
3578     if (inode->i_mapping->nrpages == 0)
3579     continue;
3580     diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
3581     index 9f61e62..27b3741 100644
3582     --- a/fs/ext4/mballoc.c
3583     +++ b/fs/ext4/mballoc.c
3584     @@ -2693,7 +2693,7 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
3585     i = (sb->s_blocksize_bits + 2) * sizeof(unsigned int);
3586     sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
3587     if (sbi->s_mb_maxs == NULL) {
3588     - kfree(sbi->s_mb_maxs);
3589     + kfree(sbi->s_mb_offsets);
3590     return -ENOMEM;
3591     }
3592    
3593     @@ -4439,7 +4439,7 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
3594     pa_inode_list) {
3595     spin_lock(&tmp_pa->pa_lock);
3596     if (tmp_pa->pa_deleted) {
3597     - spin_unlock(&pa->pa_lock);
3598     + spin_unlock(&tmp_pa->pa_lock);
3599     continue;
3600     }
3601     if (!added && pa->pa_free < tmp_pa->pa_free) {
3602     diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
3603     index e3fe991..f81f9e7 100644
3604     --- a/fs/fs-writeback.c
3605     +++ b/fs/fs-writeback.c
3606     @@ -538,7 +538,8 @@ void generic_sync_sb_inodes(struct super_block *sb,
3607     list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
3608     struct address_space *mapping;
3609    
3610     - if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
3611     + if (inode->i_state &
3612     + (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
3613     continue;
3614     mapping = inode->i_mapping;
3615     if (mapping->nrpages == 0)
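
Note: the dquot, drop_caches and fs-writeback hunks are one and the same fix: code walking sb->s_inodes and taking references with __iget() must also skip inodes in I_CLEAR, the state an inode enters once clear_inode() has finished, exactly as it already skipped I_FREEING and I_WILL_FREE. The guard all three converge on:

    if (inode->i_state & (I_FREEING | I_CLEAR | I_WILL_FREE))
            continue;       /* being destroyed, do not take a reference */
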
3616     diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
3617     index 9b800d9..c91a818 100644
3618     --- a/fs/hugetlbfs/inode.c
3619     +++ b/fs/hugetlbfs/inode.c
3620     @@ -26,7 +26,6 @@
3621     #include <linux/pagevec.h>
3622     #include <linux/parser.h>
3623     #include <linux/mman.h>
3624     -#include <linux/quotaops.h>
3625     #include <linux/slab.h>
3626     #include <linux/dnotify.h>
3627     #include <linux/statfs.h>
3628     @@ -842,7 +841,7 @@ hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
3629     bad_val:
3630     printk(KERN_ERR "hugetlbfs: Bad value '%s' for mount option '%s'\n",
3631     args[0].from, p);
3632     - return 1;
3633     + return -EINVAL;
3634     }
3635    
3636     static int
3637     diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
3638     index 6cdeacf..4bd49c1 100644
3639     --- a/fs/nfs/nfs3xdr.c
3640     +++ b/fs/nfs/nfs3xdr.c
3641     @@ -716,7 +716,8 @@ nfs3_xdr_setaclargs(struct rpc_rqst *req, __be32 *p,
3642     if (args->npages != 0)
3643     xdr_encode_pages(buf, args->pages, 0, args->len);
3644     else
3645     - req->rq_slen += args->len;
3646     + req->rq_slen = xdr_adjust_iovec(req->rq_svec,
3647     + p + XDR_QUADLEN(args->len));
3648    
3649     err = nfsacl_encode(buf, base, args->inode,
3650     (args->mask & NFS_ACL) ?
3651     diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
3652     index a5887df..8672b95 100644
3653     --- a/fs/ocfs2/file.c
3654     +++ b/fs/ocfs2/file.c
3655     @@ -1926,7 +1926,7 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
3656     out->f_path.dentry->d_name.len,
3657     out->f_path.dentry->d_name.name);
3658    
3659     - inode_double_lock(inode, pipe->inode);
3660     + mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
3661    
3662     ret = ocfs2_rw_lock(inode, 1);
3663     if (ret < 0) {
3664     @@ -1941,12 +1941,16 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
3665     goto out_unlock;
3666     }
3667    
3668     + if (pipe->inode)
3669     + mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_CHILD);
3670     ret = generic_file_splice_write_nolock(pipe, out, ppos, len, flags);
3671     + if (pipe->inode)
3672     + mutex_unlock(&pipe->inode->i_mutex);
3673    
3674     out_unlock:
3675     ocfs2_rw_unlock(inode, 1);
3676     out:
3677     - inode_double_unlock(inode, pipe->inode);
3678     + mutex_unlock(&inode->i_mutex);
3679    
3680     mlog_exit(ret);
3681     return ret;
3682     diff --git a/fs/splice.c b/fs/splice.c
3683     index 4ed0ba4..4c1029a 100644
3684     --- a/fs/splice.c
3685     +++ b/fs/splice.c
3686     @@ -736,10 +736,19 @@ ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
3687     * ->write_end. Most of the time, these expect i_mutex to
3688     * be held. Since this may result in an ABBA deadlock with
3689     * pipe->inode, we have to order lock acquiry here.
3690     + *
3691     + * Outer lock must be inode->i_mutex, as pipe_wait() will
3692     + * release and reacquire pipe->inode->i_mutex, AND inode must
3693     + * never be a pipe.
3694     */
3695     - inode_double_lock(inode, pipe->inode);
3696     + WARN_ON(S_ISFIFO(inode->i_mode));
3697     + mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
3698     + if (pipe->inode)
3699     + mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_CHILD);
3700     ret = __splice_from_pipe(pipe, &sd, actor);
3701     - inode_double_unlock(inode, pipe->inode);
3702     + if (pipe->inode)
3703     + mutex_unlock(&pipe->inode->i_mutex);
3704     + mutex_unlock(&inode->i_mutex);
3705    
3706     return ret;
3707     }
3708     @@ -830,11 +839,17 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
3709     };
3710     ssize_t ret;
3711    
3712     - inode_double_lock(inode, pipe->inode);
3713     + WARN_ON(S_ISFIFO(inode->i_mode));
3714     + mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
3715     ret = file_remove_suid(out);
3716     - if (likely(!ret))
3717     + if (likely(!ret)) {
3718     + if (pipe->inode)
3719     + mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_CHILD);
3720     ret = __splice_from_pipe(pipe, &sd, pipe_to_file);
3721     - inode_double_unlock(inode, pipe->inode);
3722     + if (pipe->inode)
3723     + mutex_unlock(&pipe->inode->i_mutex);
3724     + }
3725     + mutex_unlock(&inode->i_mutex);
3726     if (ret > 0) {
3727     unsigned long nr_pages;
3728    
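
Note: the splice changes replace inode_double_lock(), which ordered the two mutexes by address, with a fixed hierarchy: the file inode is always the outer lock (I_MUTEX_PARENT) and the pipe inode the inner one (I_MUTEX_CHILD), because pipe_wait() drops and retakes the pipe mutex and the file inode can never be a pipe. The consolidated locking shape these hunks introduce:

    WARN_ON(S_ISFIFO(inode->i_mode));       /* file side must not be a pipe */
    mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
    if (pipe->inode)
            mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_CHILD);
    /* ... move the data ... */
    if (pipe->inode)
            mutex_unlock(&pipe->inode->i_mutex);
    mutex_unlock(&inode->i_mutex);
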
3729     diff --git a/include/linux/capability.h b/include/linux/capability.h
3730     index 4864a43..c302110 100644
3731     --- a/include/linux/capability.h
3732     +++ b/include/linux/capability.h
3733     @@ -377,7 +377,21 @@ struct cpu_vfs_cap_data {
3734     #define CAP_FOR_EACH_U32(__capi) \
3735     for (__capi = 0; __capi < _KERNEL_CAPABILITY_U32S; ++__capi)
3736    
3737     +/*
3738     + * CAP_FS_MASK and CAP_NFSD_MASKS:
3739     + *
3740     + * The fs mask is all the privileges that fsuid==0 historically meant.
3741     + * At one time in the past, that included CAP_MKNOD and CAP_LINUX_IMMUTABLE.
3742     + *
3743     + * It has never meant setting security.* and trusted.* xattrs.
3744     + *
3745     + * We could also define fsmask as follows:
3746     + * 1. CAP_FS_MASK is the privilege to bypass all fs-related DAC permissions
3747     + * 2. The security.* and trusted.* xattrs are fs-related MAC permissions
3748     + */
3749     +
3750     # define CAP_FS_MASK_B0 (CAP_TO_MASK(CAP_CHOWN) \
3751     + | CAP_TO_MASK(CAP_MKNOD) \
3752     | CAP_TO_MASK(CAP_DAC_OVERRIDE) \
3753     | CAP_TO_MASK(CAP_DAC_READ_SEARCH) \
3754     | CAP_TO_MASK(CAP_FOWNER) \
3755     @@ -392,11 +406,12 @@ struct cpu_vfs_cap_data {
3756     # define CAP_EMPTY_SET ((kernel_cap_t){{ 0, 0 }})
3757     # define CAP_FULL_SET ((kernel_cap_t){{ ~0, ~0 }})
3758     # define CAP_INIT_EFF_SET ((kernel_cap_t){{ ~CAP_TO_MASK(CAP_SETPCAP), ~0 }})
3759     -# define CAP_FS_SET ((kernel_cap_t){{ CAP_FS_MASK_B0, CAP_FS_MASK_B1 } })
3760     +# define CAP_FS_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \
3761     + | CAP_TO_MASK(CAP_LINUX_IMMUTABLE), \
3762     + CAP_FS_MASK_B1 } })
3763     # define CAP_NFSD_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \
3764     - | CAP_TO_MASK(CAP_SYS_RESOURCE) \
3765     - | CAP_TO_MASK(CAP_MKNOD), \
3766     - CAP_FS_MASK_B1 } })
3767     + | CAP_TO_MASK(CAP_SYS_RESOURCE), \
3768     + CAP_FS_MASK_B1 } })
3769    
3770     #endif /* _KERNEL_CAPABILITY_U32S != 2 */
3771    
3772     diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
3773     index bd37078..0d2f7c8 100644
3774     --- a/include/linux/hrtimer.h
3775     +++ b/include/linux/hrtimer.h
3776     @@ -336,6 +336,11 @@ extern int hrtimer_start(struct hrtimer *timer, ktime_t tim,
3777     const enum hrtimer_mode mode);
3778     extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
3779     unsigned long range_ns, const enum hrtimer_mode mode);
3780     +extern int
3781     +__hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
3782     + unsigned long delta_ns,
3783     + const enum hrtimer_mode mode, int wakeup);
3784     +
3785     extern int hrtimer_cancel(struct hrtimer *timer);
3786     extern int hrtimer_try_to_cancel(struct hrtimer *timer);
3787    
3788     diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
3789     index 9127f6b..564d1c0 100644
3790     --- a/include/linux/interrupt.h
3791     +++ b/include/linux/interrupt.h
3792     @@ -274,6 +274,7 @@ extern void softirq_init(void);
3793     #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
3794     extern void raise_softirq_irqoff(unsigned int nr);
3795     extern void raise_softirq(unsigned int nr);
3796     +extern void wakeup_softirqd(void);
3797    
3798     /* This is the worklist that queues up per-cpu softirq work.
3799     *
3800     diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
3801     index bf6f703..552ef4f 100644
3802     --- a/include/linux/kvm_host.h
3803     +++ b/include/linux/kvm_host.h
3804     @@ -127,6 +127,10 @@ struct kvm {
3805     struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
3806     #endif
3807    
3808     +#ifdef CONFIG_HAVE_KVM_IRQCHIP
3809     + struct hlist_head mask_notifier_list;
3810     +#endif
3811     +
3812     #ifdef KVM_ARCH_WANT_MMU_NOTIFIER
3813     struct mmu_notifier mmu_notifier;
3814     unsigned long mmu_notifier_seq;
3815     @@ -321,6 +325,19 @@ struct kvm_assigned_dev_kernel {
3816     struct pci_dev *dev;
3817     struct kvm *kvm;
3818     };
3819     +
3820     +struct kvm_irq_mask_notifier {
3821     + void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
3822     + int irq;
3823     + struct hlist_node link;
3824     +};
3825     +
3826     +void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
3827     + struct kvm_irq_mask_notifier *kimn);
3828     +void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
3829     + struct kvm_irq_mask_notifier *kimn);
3830     +void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask);
3831     +
3832     void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level);
3833     void kvm_notify_acked_irq(struct kvm *kvm, unsigned gsi);
3834     void kvm_register_irq_ack_notifier(struct kvm *kvm,
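
Note: the new mask-notifier API lets in-kernel users find out when the guest masks or unmasks an interrupt line. A hypothetical consumer (the names my_mask_notify and my_kimn are illustrative, not from this patch) embeds the notifier and registers it for one irq:

    static void my_mask_notify(struct kvm_irq_mask_notifier *kimn, bool masked)
    {
            /* called from the irqchip write path with the new mask state */
    }

    static struct kvm_irq_mask_notifier my_kimn = {
            .func = my_mask_notify,
    };

    /* at setup / teardown: */
    kvm_register_irq_mask_notifier(kvm, irq, &my_kimn);
    kvm_unregister_irq_mask_notifier(kvm, irq, &my_kimn);
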
3835     diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
3836     index 01ca085..076a7dc 100644
3837     --- a/include/linux/pagemap.h
3838     +++ b/include/linux/pagemap.h
3839     @@ -18,9 +18,14 @@
3840     * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page
3841     * allocation mode flags.
3842     */
3843     -#define AS_EIO (__GFP_BITS_SHIFT + 0) /* IO error on async write */
3844     -#define AS_ENOSPC (__GFP_BITS_SHIFT + 1) /* ENOSPC on async write */
3845     -#define AS_MM_ALL_LOCKS (__GFP_BITS_SHIFT + 2) /* under mm_take_all_locks() */
3846     +enum mapping_flags {
3847     + AS_EIO = __GFP_BITS_SHIFT + 0, /* IO error on async write */
3848     + AS_ENOSPC = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */
3849     + AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */
3850     +#ifdef CONFIG_UNEVICTABLE_LRU
3851     + AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */
3852     +#endif
3853     +};
3854    
3855     static inline void mapping_set_error(struct address_space *mapping, int error)
3856     {
3857     @@ -33,7 +38,6 @@ static inline void mapping_set_error(struct address_space *mapping, int error)
3858     }
3859    
3860     #ifdef CONFIG_UNEVICTABLE_LRU
3861     -#define AS_UNEVICTABLE (__GFP_BITS_SHIFT + 2) /* e.g., ramdisk, SHM_LOCK */
3862    
3863     static inline void mapping_set_unevictable(struct address_space *mapping)
3864     {
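
Note: the enum conversion fixes a genuine bit collision visible in the removed lines: AS_MM_ALL_LOCKS and the old AS_UNEVICTABLE were both defined as __GFP_BITS_SHIFT + 2, so SHM_LOCK and mm_take_all_locks() shared one flag bit. Side by side:

    /* before: two names, one bit */
    #define AS_MM_ALL_LOCKS (__GFP_BITS_SHIFT + 2)
    #define AS_UNEVICTABLE  (__GFP_BITS_SHIFT + 2)   /* collision */

    /* after: the enum hands out distinct values */
    AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2,
    AS_UNEVICTABLE  = __GFP_BITS_SHIFT + 3,
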
3865     diff --git a/include/linux/sched.h b/include/linux/sched.h
3866     index 011db2f..f8af167 100644
3867     --- a/include/linux/sched.h
3868     +++ b/include/linux/sched.h
3869     @@ -202,7 +202,8 @@ extern unsigned long long time_sync_thresh;
3870     #define task_is_stopped_or_traced(task) \
3871     ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
3872     #define task_contributes_to_load(task) \
3873     - ((task->state & TASK_UNINTERRUPTIBLE) != 0)
3874     + ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
3875     + (task->flags & PF_FROZEN) == 0)
3876    
3877     #define __set_task_state(tsk, state_value) \
3878     do { (tsk)->state = (state_value); } while (0)
3879     diff --git a/kernel/fork.c b/kernel/fork.c
3880     index 4854c2c..9b51a1b 100644
3881     --- a/kernel/fork.c
3882     +++ b/kernel/fork.c
3883     @@ -808,6 +808,12 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
3884     sig->cputime_expires.virt_exp = cputime_zero;
3885     sig->cputime_expires.sched_exp = 0;
3886    
3887     + if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
3888     + sig->cputime_expires.prof_exp =
3889     + secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
3890     + sig->cputimer.running = 1;
3891     + }
3892     +
3893     /* The timer lists. */
3894     INIT_LIST_HEAD(&sig->cpu_timers[0]);
3895     INIT_LIST_HEAD(&sig->cpu_timers[1]);
3896     @@ -823,11 +829,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
3897     atomic_inc(&current->signal->live);
3898     return 0;
3899     }
3900     - sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
3901     -
3902     - if (sig)
3903     - posix_cpu_timers_init_group(sig);
3904    
3905     + sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
3906     tsk->signal = sig;
3907     if (!sig)
3908     return -ENOMEM;
3909     @@ -865,6 +868,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
3910     memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
3911     task_unlock(current->group_leader);
3912    
3913     + posix_cpu_timers_init_group(sig);
3914     +
3915     acct_init_pacct(&sig->pacct);
3916    
3917     tty_audit_fork(sig);
3918     diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
3919     index f394d2a..cb8a15c 100644
3920     --- a/kernel/hrtimer.c
3921     +++ b/kernel/hrtimer.c
3922     @@ -651,14 +651,20 @@ static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
3923     * and expiry check is done in the hrtimer_interrupt or in the softirq.
3924     */
3925     static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
3926     - struct hrtimer_clock_base *base)
3927     + struct hrtimer_clock_base *base,
3928     + int wakeup)
3929     {
3930     if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
3931     - spin_unlock(&base->cpu_base->lock);
3932     - raise_softirq_irqoff(HRTIMER_SOFTIRQ);
3933     - spin_lock(&base->cpu_base->lock);
3934     + if (wakeup) {
3935     + spin_unlock(&base->cpu_base->lock);
3936     + raise_softirq_irqoff(HRTIMER_SOFTIRQ);
3937     + spin_lock(&base->cpu_base->lock);
3938     + } else
3939     + __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
3940     +
3941     return 1;
3942     }
3943     +
3944     return 0;
3945     }
3946    
3947     @@ -703,7 +709,8 @@ static inline int hrtimer_is_hres_enabled(void) { return 0; }
3948     static inline int hrtimer_switch_to_hres(void) { return 0; }
3949     static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { }
3950     static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
3951     - struct hrtimer_clock_base *base)
3952     + struct hrtimer_clock_base *base,
3953     + int wakeup)
3954     {
3955     return 0;
3956     }
3957     @@ -886,20 +893,9 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
3958     return 0;
3959     }
3960    
3961     -/**
3962     - * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
3963     - * @timer: the timer to be added
3964     - * @tim: expiry time
3965     - * @delta_ns: "slack" range for the timer
3966     - * @mode: expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL)
3967     - *
3968     - * Returns:
3969     - * 0 on success
3970     - * 1 when the timer was active
3971     - */
3972     -int
3973     -hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_ns,
3974     - const enum hrtimer_mode mode)
3975     +int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
3976     + unsigned long delta_ns, const enum hrtimer_mode mode,
3977     + int wakeup)
3978     {
3979     struct hrtimer_clock_base *base, *new_base;
3980     unsigned long flags;
3981     @@ -940,12 +936,29 @@ hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_n
3982     * XXX send_remote_softirq() ?
3983     */
3984     if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
3985     - hrtimer_enqueue_reprogram(timer, new_base);
3986     + hrtimer_enqueue_reprogram(timer, new_base, wakeup);
3987    
3988     unlock_hrtimer_base(timer, &flags);
3989    
3990     return ret;
3991     }
3992     +
3993     +/**
3994     + * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
3995     + * @timer: the timer to be added
3996     + * @tim: expiry time
3997     + * @delta_ns: "slack" range for the timer
3998     + * @mode: expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL)
3999     + *
4000     + * Returns:
4001     + * 0 on success
4002     + * 1 when the timer was active
4003     + */
4004     +int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
4005     + unsigned long delta_ns, const enum hrtimer_mode mode)
4006     +{
4007     + return __hrtimer_start_range_ns(timer, tim, delta_ns, mode, 1);
4008     +}
4009     EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
4010    
4011     /**
4012     @@ -961,7 +974,7 @@ EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
4013     int
4014     hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
4015     {
4016     - return hrtimer_start_range_ns(timer, tim, 0, mode);
4017     + return __hrtimer_start_range_ns(timer, tim, 0, mode, 1);
4018     }
4019     EXPORT_SYMBOL_GPL(hrtimer_start);
4020    
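
Note: the new wakeup parameter exists for callers that already hold a runqueue lock: waking ksoftirqd from there could recurse into the same lock, so they pass wakeup=0 and the HRTIMER_SOFTIRQ is only marked pending. The two entry points side by side (timer and expires are placeholders):

    /* ordinary contexts: may raise the softirq and wake softirqd */
    hrtimer_start(&timer, expires, HRTIMER_MODE_REL);

    /* scheduler paths holding rq->lock: mark pending only (wakeup == 0) */
    __hrtimer_start_range_ns(&timer, expires, 0, HRTIMER_MODE_REL, 0);
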
4021     diff --git a/kernel/kprobes.c b/kernel/kprobes.c
4022     index 7ba8cd9..6589776 100644
4023     --- a/kernel/kprobes.c
4024     +++ b/kernel/kprobes.c
4025     @@ -912,10 +912,8 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
4026     ri->rp = rp;
4027     ri->task = current;
4028    
4029     - if (rp->entry_handler && rp->entry_handler(ri, regs)) {
4030     - spin_unlock_irqrestore(&rp->lock, flags);
4031     + if (rp->entry_handler && rp->entry_handler(ri, regs))
4032     return 0;
4033     - }
4034    
4035     arch_prepare_kretprobe(ri, regs);
4036    
4037     diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
4038     index e976e50..68647c1 100644
4039     --- a/kernel/posix-cpu-timers.c
4040     +++ b/kernel/posix-cpu-timers.c
4041     @@ -18,7 +18,7 @@ void update_rlimit_cpu(unsigned long rlim_new)
4042    
4043     cputime = secs_to_cputime(rlim_new);
4044     if (cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
4045     - cputime_lt(current->signal->it_prof_expires, cputime)) {
4046     + cputime_gt(current->signal->it_prof_expires, cputime)) {
4047     spin_lock_irq(&current->sighand->siglock);
4048     set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
4049     spin_unlock_irq(&current->sighand->siglock);
4050     @@ -224,7 +224,7 @@ static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
4051     cpu->cpu = virt_ticks(p);
4052     break;
4053     case CPUCLOCK_SCHED:
4054     - cpu->sched = p->se.sum_exec_runtime + task_delta_exec(p);
4055     + cpu->sched = task_sched_runtime(p);
4056     break;
4057     }
4058     return 0;
4059     @@ -305,18 +305,19 @@ static int cpu_clock_sample_group(const clockid_t which_clock,
4060     {
4061     struct task_cputime cputime;
4062    
4063     - thread_group_cputime(p, &cputime);
4064     switch (CPUCLOCK_WHICH(which_clock)) {
4065     default:
4066     return -EINVAL;
4067     case CPUCLOCK_PROF:
4068     + thread_group_cputime(p, &cputime);
4069     cpu->cpu = cputime_add(cputime.utime, cputime.stime);
4070     break;
4071     case CPUCLOCK_VIRT:
4072     + thread_group_cputime(p, &cputime);
4073     cpu->cpu = cputime.utime;
4074     break;
4075     case CPUCLOCK_SCHED:
4076     - cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
4077     + cpu->sched = thread_group_sched_runtime(p);
4078     break;
4079     }
4080     return 0;
4081     diff --git a/kernel/sched.c b/kernel/sched.c
4082     index 8e2558c..5e80629 100644
4083     --- a/kernel/sched.c
4084     +++ b/kernel/sched.c
4085     @@ -231,13 +231,20 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
4086    
4087     spin_lock(&rt_b->rt_runtime_lock);
4088     for (;;) {
4089     + unsigned long delta;
4090     + ktime_t soft, hard;
4091     +
4092     if (hrtimer_active(&rt_b->rt_period_timer))
4093     break;
4094    
4095     now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
4096     hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);
4097     - hrtimer_start_expires(&rt_b->rt_period_timer,
4098     - HRTIMER_MODE_ABS);
4099     +
4100     + soft = hrtimer_get_softexpires(&rt_b->rt_period_timer);
4101     + hard = hrtimer_get_expires(&rt_b->rt_period_timer);
4102     + delta = ktime_to_ns(ktime_sub(hard, soft));
4103     + __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
4104     + HRTIMER_MODE_ABS, 0);
4105     }
4106     spin_unlock(&rt_b->rt_runtime_lock);
4107     }
4108     @@ -1129,7 +1136,8 @@ static __init void init_hrtick(void)
4109     */
4110     static void hrtick_start(struct rq *rq, u64 delay)
4111     {
4112     - hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), HRTIMER_MODE_REL);
4113     + __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
4114     + HRTIMER_MODE_REL, 0);
4115     }
4116    
4117     static inline void init_hrtick(void)
4118     @@ -4134,9 +4142,25 @@ DEFINE_PER_CPU(struct kernel_stat, kstat);
4119     EXPORT_PER_CPU_SYMBOL(kstat);
4120    
4121     /*
4122     - * Return any ns on the sched_clock that have not yet been banked in
4123     + * Return any ns on the sched_clock that have not yet been accounted in
4124     * @p in case that task is currently running.
4125     + *
4126     + * Called with task_rq_lock() held on @rq.
4127     */
4128     +static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
4129     +{
4130     + u64 ns = 0;
4131     +
4132     + if (task_current(rq, p)) {
4133     + update_rq_clock(rq);
4134     + ns = rq->clock - p->se.exec_start;
4135     + if ((s64)ns < 0)
4136     + ns = 0;
4137     + }
4138     +
4139     + return ns;
4140     +}
4141     +
4142     unsigned long long task_delta_exec(struct task_struct *p)
4143     {
4144     unsigned long flags;
4145     @@ -4144,16 +4168,49 @@ unsigned long long task_delta_exec(struct task_struct *p)
4146     u64 ns = 0;
4147    
4148     rq = task_rq_lock(p, &flags);
4149     + ns = do_task_delta_exec(p, rq);
4150     + task_rq_unlock(rq, &flags);
4151    
4152     - if (task_current(rq, p)) {
4153     - u64 delta_exec;
4154     + return ns;
4155     +}
4156    
4157     - update_rq_clock(rq);
4158     - delta_exec = rq->clock - p->se.exec_start;
4159     - if ((s64)delta_exec > 0)
4160     - ns = delta_exec;
4161     - }
4162     +/*
4163     + * Return accounted runtime for the task.
4164     + * In case the task is currently running, return the runtime plus current's
4165     + * pending runtime that has not been accounted yet.
4166     + */
4167     +unsigned long long task_sched_runtime(struct task_struct *p)
4168     +{
4169     + unsigned long flags;
4170     + struct rq *rq;
4171     + u64 ns = 0;
4172    
4173     + rq = task_rq_lock(p, &flags);
4174     + ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
4175     + task_rq_unlock(rq, &flags);
4176     +
4177     + return ns;
4178     +}
4179     +
4180     +/*
4181     + * Return sum_exec_runtime for the thread group.
4182     + * In case the task is currently running, return the sum plus current's
4183     + * pending runtime that has not been accounted yet.
4184     + *
4185     + * Note that the thread group might have other running tasks as well,
4186     + * so the return value does not include other pending runtime that other
4187     + * running tasks might have.
4188     + */
4189     +unsigned long long thread_group_sched_runtime(struct task_struct *p)
4190     +{
4191     + struct task_cputime totals;
4192     + unsigned long flags;
4193     + struct rq *rq;
4194     + u64 ns;
4195     +
4196     + rq = task_rq_lock(p, &flags);
4197     + thread_group_cputime(p, &totals);
4198     + ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
4199     task_rq_unlock(rq, &flags);
4200    
4201     return ns;
4202     diff --git a/kernel/softirq.c b/kernel/softirq.c
4203     index 9041ea7..d2b183e 100644
4204     --- a/kernel/softirq.c
4205     +++ b/kernel/softirq.c
4206     @@ -58,7 +58,7 @@ static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
4207     * to the pending events, so lets the scheduler to balance
4208     * the softirq load for us.
4209     */
4210     -static inline void wakeup_softirqd(void)
4211     +void wakeup_softirqd(void)
4212     {
4213     /* Interrupts are disabled: no need to stop preemption */
4214     struct task_struct *tsk = __get_cpu_var(ksoftirqd);
4215     diff --git a/kernel/sysctl.c b/kernel/sysctl.c
4216     index c5ef44f..7755ae7 100644
4217     --- a/kernel/sysctl.c
4218     +++ b/kernel/sysctl.c
4219     @@ -95,12 +95,9 @@ static int sixty = 60;
4220     static int neg_one = -1;
4221     #endif
4222    
4223     -#if defined(CONFIG_MMU) && defined(CONFIG_FILE_LOCKING)
4224     -static int two = 2;
4225     -#endif
4226     -
4227     static int zero;
4228     static int one = 1;
4229     +static int two = 2;
4230     static unsigned long one_ul = 1;
4231     static int one_hundred = 100;
4232    
4233     @@ -1373,10 +1370,7 @@ static struct ctl_table fs_table[] = {
4234     .data = &lease_break_time,
4235     .maxlen = sizeof(int),
4236     .mode = 0644,
4237     - .proc_handler = &proc_dointvec_minmax,
4238     - .strategy = &sysctl_intvec,
4239     - .extra1 = &zero,
4240     - .extra2 = &two,
4241     + .proc_handler = &proc_dointvec,
4242     },
4243     #endif
4244     #ifdef CONFIG_AIO
4245     @@ -1417,7 +1411,10 @@ static struct ctl_table fs_table[] = {
4246     .data = &suid_dumpable,
4247     .maxlen = sizeof(int),
4248     .mode = 0644,
4249     - .proc_handler = &proc_dointvec,
4250     + .proc_handler = &proc_dointvec_minmax,
4251     + .strategy = &sysctl_intvec,
4252     + .extra1 = &zero,
4253     + .extra2 = &two,
4254     },
4255     #if defined(CONFIG_BINFMT_MISC) || defined(CONFIG_BINFMT_MISC_MODULE)
4256     {
4257     diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
4258     index 17bb88d..b2387c0 100644
4259     --- a/kernel/trace/trace.c
4260     +++ b/kernel/trace/trace.c
4261     @@ -3886,7 +3886,8 @@ __init static int tracer_alloc_buffers(void)
4262     &trace_panic_notifier);
4263    
4264     register_die_notifier(&trace_die_notifier);
4265     - ret = 0;
4266     +
4267     + return 0;
4268    
4269     out_free_cpumask:
4270     free_cpumask_var(tracing_cpumask);
4271     diff --git a/lib/cpumask.c b/lib/cpumask.c
4272     index 3389e24..1f71b97 100644
4273     --- a/lib/cpumask.c
4274     +++ b/lib/cpumask.c
4275     @@ -109,10 +109,10 @@ bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
4276     #endif
4277     /* FIXME: Bandaid to save us from old primitives which go to NR_CPUS. */
4278     if (*mask) {
4279     + unsigned char *ptr = (unsigned char *)cpumask_bits(*mask);
4280     unsigned int tail;
4281     tail = BITS_TO_LONGS(NR_CPUS - nr_cpumask_bits) * sizeof(long);
4282     - memset(cpumask_bits(*mask) + cpumask_size() - tail,
4283     - 0, tail);
4284     + memset(ptr + cpumask_size() - tail, 0, tail);
4285     }
4286    
4287     return *mask != NULL;
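
Note: the cpumask fix is a pointer-arithmetic scaling bug: cpumask_bits() yields an unsigned long *, and cpumask_size() returns a size in bytes, so adding the byte offset to the long pointer stepped sizeof(long) times too far. Doing the arithmetic on a byte pointer, as the fixed code does:

    unsigned char *ptr = (unsigned char *)cpumask_bits(*mask);
    unsigned int tail = BITS_TO_LONGS(NR_CPUS - nr_cpumask_bits) * sizeof(long);

    /* ptr + n advances n bytes; (unsigned long *)p + n would advance
     * n * sizeof(long) bytes, zeroing memory far past the mask. */
    memset(ptr + cpumask_size() - tail, 0, tail);
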
4288     diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
4289     index 0c04615..427dfe3 100644
4290     --- a/mm/filemap_xip.c
4291     +++ b/mm/filemap_xip.c
4292     @@ -89,8 +89,8 @@ do_xip_mapping_read(struct address_space *mapping,
4293     }
4294     }
4295     nr = nr - offset;
4296     - if (nr > len)
4297     - nr = len;
4298     + if (nr > len - copied)
4299     + nr = len - copied;
4300    
4301     error = mapping->a_ops->get_xip_mem(mapping, index, 0,
4302     &xip_mem, &xip_pfn);
4303     diff --git a/mm/mmap.c b/mm/mmap.c
4304     index 00ced3e..f1aa6f9 100644
4305     --- a/mm/mmap.c
4306     +++ b/mm/mmap.c
4307     @@ -1571,7 +1571,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
4308     * Overcommit.. This must be the final test, as it will
4309     * update security statistics.
4310     */
4311     - if (security_vm_enough_memory(grow))
4312     + if (security_vm_enough_memory_mm(mm, grow))
4313     return -ENOMEM;
4314    
4315     /* Ok, everything looks good - let it rip */
4316     diff --git a/net/core/skbuff.c b/net/core/skbuff.c
4317     index c6a6b16..eae6954 100644
4318     --- a/net/core/skbuff.c
4319     +++ b/net/core/skbuff.c
4320     @@ -2496,7 +2496,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
4321     skb_network_header_len(skb));
4322     skb_copy_from_linear_data(skb, nskb->data, doffset);
4323    
4324     - if (pos >= offset + len)
4325     + if (fskb != skb_shinfo(skb)->frag_list)
4326     continue;
4327    
4328     if (!sg) {
4329     diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
4330     index 7ea88b6..39879ae 100644
4331     --- a/net/ipv4/netfilter/arp_tables.c
4332     +++ b/net/ipv4/netfilter/arp_tables.c
4333     @@ -374,7 +374,9 @@ static int mark_source_chains(struct xt_table_info *newinfo,
4334     && unconditional(&e->arp)) || visited) {
4335     unsigned int oldpos, size;
4336    
4337     - if (t->verdict < -NF_MAX_VERDICT - 1) {
4338     + if ((strcmp(t->target.u.user.name,
4339     + ARPT_STANDARD_TARGET) == 0) &&
4340     + t->verdict < -NF_MAX_VERDICT - 1) {
4341     duprintf("mark_source_chains: bad "
4342     "negative verdict (%i)\n",
4343     t->verdict);
4344     diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
4345     index ef8b6ca..ec362a3 100644
4346     --- a/net/ipv4/netfilter/ip_tables.c
4347     +++ b/net/ipv4/netfilter/ip_tables.c
4348     @@ -496,7 +496,9 @@ mark_source_chains(struct xt_table_info *newinfo,
4349     && unconditional(&e->ip)) || visited) {
4350     unsigned int oldpos, size;
4351    
4352     - if (t->verdict < -NF_MAX_VERDICT - 1) {
4353     + if ((strcmp(t->target.u.user.name,
4354     + IPT_STANDARD_TARGET) == 0) &&
4355     + t->verdict < -NF_MAX_VERDICT - 1) {
4356     duprintf("mark_source_chains: bad "
4357     "negative verdict (%i)\n",
4358     t->verdict);
4359     diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
4360     index a33485d..def375b 100644
4361     --- a/net/ipv6/netfilter/ip6_tables.c
4362     +++ b/net/ipv6/netfilter/ip6_tables.c
4363     @@ -525,7 +525,9 @@ mark_source_chains(struct xt_table_info *newinfo,
4364     && unconditional(&e->ipv6)) || visited) {
4365     unsigned int oldpos, size;
4366    
4367     - if (t->verdict < -NF_MAX_VERDICT - 1) {
4368     + if ((strcmp(t->target.u.user.name,
4369     + IP6T_STANDARD_TARGET) == 0) &&
4370     + t->verdict < -NF_MAX_VERDICT - 1) {
4371     duprintf("mark_source_chains: bad "
4372     "negative verdict (%i)\n",
4373     t->verdict);
4374     diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
4375     index e9c05b8..dcce778 100644
4376     --- a/net/netrom/af_netrom.c
4377     +++ b/net/netrom/af_netrom.c
4378     @@ -1082,7 +1082,13 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
4379    
4380     SOCK_DEBUG(sk, "NET/ROM: sendto: Addresses built.\n");
4381    
4382     - /* Build a packet */
4383     + /* Build a packet - the conventional user limit is 236 bytes. We can
4384     + do ludicrously large NetROM frames but must not overflow */
4385     + if (len > 65536) {
4386     + err = -EMSGSIZE;
4387     + goto out;
4388     + }
4389     +
4390     SOCK_DEBUG(sk, "NET/ROM: sendto: building packet.\n");
4391     size = len + NR_NETWORK_LEN + NR_TRANSPORT_LEN;
4392    
4393     diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
4394     index 0139264..5e75bbf 100644
4395     --- a/net/rose/af_rose.c
4396     +++ b/net/rose/af_rose.c
4397     @@ -1124,6 +1124,10 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
4398    
4399     /* Build a packet */
4400     SOCK_DEBUG(sk, "ROSE: sendto: building packet.\n");
4401     + /* Sanity check the packet size */
4402     + if (len > 65535)
4403     + return -EMSGSIZE;
4404     +
4405     size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN;
4406    
4407     if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 9fc5b02..88d80f5 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -1037,6 +1037,12 @@ static int x25_sendmsg(struct kiocb *iocb, struct socket *sock,
 		sx25.sx25_addr = x25->dest_addr;
 	}

+	/* Sanity check the packet size */
+	if (len > 65535) {
+		rc = -EMSGSIZE;
+		goto out;
+	}
+
 	SOCK_DEBUG(sk, "x25_sendmsg: sendto: Addresses built.\n");

 	/* Build a packet */
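
The NET/ROM, ROSE and X.25 hunks plug the same class of hole: each sendmsg() computes an skb allocation size of the form len + <fixed header lengths> from a caller-supplied length before calling sock_alloc_send_skb(). The new guards bound len first (65536 for NET/ROM, 65535 for ROSE and X.25) and return -EMSGSIZE, so a huge len can no longer wrap the size arithmetic into an undersized allocation. A compilable sketch of the pattern, with HEADER_LEN standing in for the protocol header sizes:

#include <stdio.h>

#define HEADER_LEN 40U          /* stand-in for the protocol headers */

/* Bound len before the addition so the size sum cannot wrap. */
static int build_size(unsigned int len, unsigned int *size)
{
        if (len > 65535)
                return -1;      /* -EMSGSIZE in the kernel */
        *size = len + HEADER_LEN;
        return 0;
}

int main(void)
{
        unsigned int size;

        /* Near-UINT_MAX lengths would wrap to a tiny size without the
         * guard; with it, the send is refused up front. */
        if (build_size(0xffffffffU, &size) < 0)
                printf("oversized send rejected\n");
        return 0;
}
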
diff --git a/security/commoncap.c b/security/commoncap.c
index 7cd61a5..beac025 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -916,7 +916,6 @@ changed:
 	return commit_creds(new);

 no_change:
-	error = 0;
 error:
 	abort_creds(new);
 	return error;
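
Dropping the assignment matters because error doubles as the function's result on the no_change path: forcing it to 0 before the shared cleanup clobbered whatever value was about to be returned (this appears to be the capability prctl handler, where query operations such as PR_GET_KEEPCAPS report their result through error). A toy illustration of why the label has to leave error alone:

#include <stdio.h>

/* Model of the goto-based error path: `error` carries the result of a
 * query operation, so the shared exit label must not reset it. */
static long do_op(int query_only)
{
        long error = 42;        /* result of a query, e.g. a GET op */

        if (query_only)
                goto no_change;
        error = 0;              /* commit path succeeded */
no_change:
        /* An unconditional `error = 0;` here - the line the hunk
         * removes - would turn every query result into 0. */
        return error;
}

int main(void)
{
        printf("query returns %ld\n", do_op(1));
        return 0;
}
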
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index e7ded13..c1c5f36 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -607,6 +607,8 @@ static int smack_inode_setxattr(struct dentry *dentry, const char *name,
 		   strcmp(name, XATTR_NAME_SMACKIPOUT) == 0) {
 		if (!capable(CAP_MAC_ADMIN))
 			rc = -EPERM;
+		if (size == 0)
+			rc = -EINVAL;
 	} else
 		rc = cap_inode_setxattr(dentry, name, value, size, flags);

@@ -1430,7 +1432,7 @@ static int smack_inode_setsecurity(struct inode *inode, const char *name,
 	struct socket *sock;
 	int rc = 0;

-	if (value == NULL || size > SMK_LABELLEN)
+	if (value == NULL || size > SMK_LABELLEN || size == 0)
 		return -EACCES;

 	sp = smk_import(value, size);
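
Both Smack hunks reject empty labels at the boundary: smack_inode_setxattr() now answers -EINVAL when a Smack attribute is set with size == 0, and smack_inode_setsecurity() folds size == 0 into its existing -EACCES test, so a zero-length value can no longer reach the label import path. The combined validation in plain C, with SMK_LABELLEN as a stand-in constant:

#include <stddef.h>
#include <stdio.h>

#define SMK_LABELLEN 24         /* stand-in for the kernel constant */

/* Mirror of the fixed checks: non-NULL, non-empty, bounded length. */
static int label_ok(const void *value, size_t size)
{
        return value != NULL && size != 0 && size <= SMK_LABELLEN;
}

int main(void)
{
        printf("%d %d %d\n",
               label_ok("foo", 3),      /* 1: valid label */
               label_ok("", 0),         /* 0: empty labels now refused */
               label_ok(NULL, 3));      /* 0: missing value */
        return 0;
}
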
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index d03f992..cef1ce0 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -2003,7 +2003,11 @@ int snd_hda_codec_write_cache(struct hda_codec *codec, hda_nid_t nid,
 	err = bus->ops.command(bus, res);
 	if (!err) {
 		struct hda_cache_head *c;
-		u32 key = build_cmd_cache_key(nid, verb);
+		u32 key;
+		/* parm may contain the verb stuff for get/set amp */
+		verb = verb | (parm >> 8);
+		parm &= 0xff;
+		key = build_cmd_cache_key(nid, verb);
 		c = get_alloc_hash(&codec->cmd_cache, key);
 		if (c)
 			c->val = parm;
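
For the get/set-amplifier verbs the upper bits of parm encode which amp is addressed (direction, channel, index), so caching on (nid, verb) alone let two different amp writes collide on one cache slot and overwrite each other's value. The hunk folds parm >> 8 into the verb before building the key and keeps only the low byte as the cached value. A sketch of the idea; build_cmd_cache_key()'s real bit layout is not visible in the patch, so the packing below is illustrative only:

#include <stdio.h>

typedef unsigned int u32;

/* Illustrative key packing; the kernel's layout may differ. */
static u32 build_cmd_cache_key(u32 nid, u32 verb)
{
        return (verb << 8) | nid;
}

int main(void)
{
        u32 nid = 0x14, verb = 0x300, parm = 0xb07f; /* hypothetical amp write */

        /* As in the hunk: the high bits of parm select the amp, so they
         * belong in the key; only the low byte is the value worth caching. */
        verb |= parm >> 8;
        parm &= 0xff;

        printf("key=%#x cached=%#x\n", build_cmd_cache_key(nid, verb), parm);
        return 0;
}
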
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index e486123..5a6d6d8 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -3239,7 +3239,7 @@ static const char *ad1884_slave_vols[] = {
 	"Mic Playback Volume",
 	"CD Playback Volume",
 	"Internal Mic Playback Volume",
-	"Docking Mic Playback Volume"
+	"Docking Mic Playback Volume",
 	"Beep Playback Volume",
 	"IEC958 Playback Volume",
 	NULL
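
The one-character fix matters because C concatenates adjacent string literals: without the comma, ad1884_slave_vols[] held the single entry "Docking Mic Playback VolumeBeep Playback Volume", which matches no real control, and the beep entry vanished from the array entirely. A self-contained demonstration of the bug class:

#include <stdio.h>

int main(void)
{
        /* Missing comma: the two literals merge into one element. */
        const char *broken[] = {
                "Docking Mic Playback Volume"
                "Beep Playback Volume",
                NULL
        };
        const char *fixed[] = {
                "Docking Mic Playback Volume",
                "Beep Playback Volume",
                NULL
        };

        printf("broken[0] = %s\n", broken[0]);
        printf("fixed[0]  = %s\n", fixed[0]);
        printf("fixed[1]  = %s\n", fixed[1]);
        return 0;
}
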
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 23b81cf..e85a2bc 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -101,6 +101,7 @@ static void ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
 static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 {
 	unsigned index;
+	bool mask_before, mask_after;

 	switch (ioapic->ioregsel) {
 	case IOAPIC_REG_VERSION:
@@ -120,6 +121,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 		ioapic_debug("change redir index %x val %x\n", index, val);
 		if (index >= IOAPIC_NUM_PINS)
 			return;
+		mask_before = ioapic->redirtbl[index].fields.mask;
 		if (ioapic->ioregsel & 1) {
 			ioapic->redirtbl[index].bits &= 0xffffffff;
 			ioapic->redirtbl[index].bits |= (u64) val << 32;
@@ -128,6 +130,9 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 			ioapic->redirtbl[index].bits |= (u32) val;
 			ioapic->redirtbl[index].fields.remote_irr = 0;
 		}
+		mask_after = ioapic->redirtbl[index].fields.mask;
+		if (mask_before != mask_after)
+			kvm_fire_mask_notifiers(ioapic->kvm, index, mask_after);
 		if (ioapic->irr & (1 << index))
 			ioapic_service(ioapic, index);
 		break;
@@ -426,3 +431,4 @@ int kvm_ioapic_init(struct kvm *kvm)
 	kvm_io_bus_register_dev(&kvm->mmio_bus, &ioapic->dev);
 	return 0;
 }
+
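
The ioapic change snapshots the redirection-table entry's mask bit before and after the guest write and calls the new kvm_fire_mask_notifiers() only when the bit actually toggled, so listeners observe mask/unmask edges rather than every register update (the last hunk merely appends a blank line at end of file). The edge-detection pattern in miniature:

#include <stdbool.h>
#include <stdio.h>

static bool mask_bit;           /* stands in for redirtbl[i].fields.mask */

/* Fire the notification only when the write changes the mask state. */
static void write_entry(bool new_mask)
{
        bool mask_before = mask_bit;

        mask_bit = new_mask;    /* the register write */
        if (mask_before != mask_bit)
                printf("notify: mask now %d\n", mask_bit);
}

int main(void)
{
        write_entry(true);      /* fires */
        write_entry(true);      /* silent: no change */
        write_entry(false);     /* fires */
        return 0;
}
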
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index aa5d1e5..5162a41 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -99,3 +99,27 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
 		clear_bit(irq_source_id, &kvm->arch.irq_states[i]);
 	clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
 }
+
+void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
+				    struct kvm_irq_mask_notifier *kimn)
+{
+	kimn->irq = irq;
+	hlist_add_head(&kimn->link, &kvm->mask_notifier_list);
+}
+
+void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
+				      struct kvm_irq_mask_notifier *kimn)
+{
+	hlist_del(&kimn->link);
+}
+
+void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask)
+{
+	struct kvm_irq_mask_notifier *kimn;
+	struct hlist_node *n;
+
+	hlist_for_each_entry(kimn, n, &kvm->mask_notifier_list, link)
+		if (kimn->irq == irq)
+			kimn->func(kimn, mask);
+}
+
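
irq_comm.c supplies the registry those notifications need: a per-VM list of kvm_irq_mask_notifier entries with register/unregister helpers, plus a fire routine that walks the list and invokes every callback registered for the given IRQ. A userspace model of the same API, using a plain singly-linked list where the kernel uses an hlist:

#include <stdbool.h>
#include <stdio.h>

struct mask_notifier {
        int irq;
        void (*func)(struct mask_notifier *n, bool masked);
        struct mask_notifier *next;
};

static struct mask_notifier *notifier_list;     /* per-VM in the kernel */

static void register_notifier(struct mask_notifier *n, int irq)
{
        n->irq = irq;
        n->next = notifier_list;
        notifier_list = n;
}

static void fire_notifiers(int irq, bool masked)
{
        struct mask_notifier *n;

        for (n = notifier_list; n; n = n->next)
                if (n->irq == irq)
                        n->func(n, masked);
}

static void on_mask(struct mask_notifier *n, bool masked)
{
        printf("irq %d %s\n", n->irq, masked ? "masked" : "unmasked");
}

int main(void)
{
        struct mask_notifier n = { 0, on_mask, NULL };

        register_notifier(&n, 10);
        fire_notifiers(10, true);       /* on_mask runs */
        fire_notifiers(11, true);       /* nobody registered: no output */
        return 0;
}
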
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 29a667c..6723411 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -563,7 +563,7 @@ static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
 		goto out;
 	}

-	if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
+	if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
 		kvm_deassign_device(kvm, match);

 	kvm_free_assigned_device(kvm, match);
@@ -581,8 +581,10 @@ static inline int valid_vcpu(int n)

 inline int kvm_is_mmio_pfn(pfn_t pfn)
 {
-	if (pfn_valid(pfn))
-		return PageReserved(pfn_to_page(pfn));
+	if (pfn_valid(pfn)) {
+		struct page *page = compound_head(pfn_to_page(pfn));
+		return PageReserved(page);
+	}

 	return true;
 }
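
Two separate fixes above: device deassignment now tests match->flags, the flags recorded when the device was actually assigned, instead of trusting the caller-supplied assigned_dev->flags; and kvm_is_mmio_pfn() resolves the page to its compound head before testing PageReserved, since for huge pages that flag lives on the head page and a tail page would misreport. The compound-head lookup, modelled with a toy struct page:

#include <stdbool.h>
#include <stdio.h>

/* Toy struct page: per-page state lives on the head page only. */
struct page {
        bool reserved;
        struct page *head;      /* points to self for non-compound pages */
};

static bool page_reserved(struct page *pg)
{
        return pg->head->reserved;      /* compound_head(), then the flag */
}

int main(void)
{
        struct page head = { true, NULL };
        struct page tail = { false, NULL };

        head.head = &head;
        tail.head = &head;      /* tail of the same huge page */

        /* Reading the tail's own flag would wrongly say "not reserved". */
        printf("tail reserved? %d\n", page_reserved(&tail));
        return 0;
}
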
@@ -828,6 +830,9 @@ static struct kvm *kvm_create_vm(void)

 	if (IS_ERR(kvm))
 		goto out;
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
+	INIT_HLIST_HEAD(&kvm->mask_notifier_list);
+#endif

 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
 	page = alloc_page(GFP_KERNEL | __GFP_ZERO);