Magellan Linux

Annotation of /trunk/kernel26-alx/patches-2.6.27-r3/0131-2.6.27.32-all-fixes.patch



Revision 1176
Thu Oct 14 15:11:06 2010 UTC by niro
File size: 59270 bytes
-2.6.27-alx-r3: new magellan 0.5.2 kernel
1 niro 1176 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
2     index 3da2508..95c65e3 100644
3     --- a/arch/x86/kvm/mmu.c
4     +++ b/arch/x86/kvm/mmu.c
5     @@ -135,13 +135,6 @@ module_param(dbg, bool, 0644);
6     #define ACC_USER_MASK PT_USER_MASK
7     #define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
8    
9     -struct kvm_pv_mmu_op_buffer {
10     - void *ptr;
11     - unsigned len;
12     - unsigned processed;
13     - char buf[512] __aligned(sizeof(long));
14     -};
15     -
16     struct kvm_rmap_desc {
17     u64 *shadow_ptes[RMAP_EXT];
18     struct kvm_rmap_desc *more;
19     @@ -305,7 +298,7 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
20     if (r)
21     goto out;
22     r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
23     - rmap_desc_cache, 1);
24     + rmap_desc_cache, 4);
25     if (r)
26     goto out;
27     r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
28     @@ -1162,7 +1155,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
29     */
30     spte = shadow_base_present_pte | shadow_dirty_mask;
31     if (!speculative)
32     - pte_access |= PT_ACCESSED_MASK;
33     + spte |= shadow_accessed_mask;
34     if (!dirty)
35     pte_access &= ~ACC_WRITE_MASK;
36     if (pte_access & ACC_EXEC_MASK)
37     @@ -1357,7 +1350,19 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
38     vcpu->arch.mmu.root_hpa = INVALID_PAGE;
39     }
40    
41     -static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
42     +static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
43     +{
44     + int ret = 0;
45     +
46     + if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
47     + set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
48     + ret = 1;
49     + }
50     +
51     + return ret;
52     +}
53     +
54     +static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
55     {
56     int i;
57     gfn_t root_gfn;
58     @@ -1372,13 +1377,15 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
59     ASSERT(!VALID_PAGE(root));
60     if (tdp_enabled)
61     metaphysical = 1;
62     + if (mmu_check_root(vcpu, root_gfn))
63     + return 1;
64     sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
65     PT64_ROOT_LEVEL, metaphysical,
66     ACC_ALL, NULL);
67     root = __pa(sp->spt);
68     ++sp->root_count;
69     vcpu->arch.mmu.root_hpa = root;
70     - return;
71     + return 0;
72     }
73     metaphysical = !is_paging(vcpu);
74     if (tdp_enabled)
75     @@ -1395,6 +1402,8 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
76     root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
77     } else if (vcpu->arch.mmu.root_level == 0)
78     root_gfn = 0;
79     + if (mmu_check_root(vcpu, root_gfn))
80     + return 1;
81     sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
82     PT32_ROOT_LEVEL, metaphysical,
83     ACC_ALL, NULL);
84     @@ -1403,6 +1412,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
85     vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
86     }
87     vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
88     + return 0;
89     }
90    
91     static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
92     @@ -1646,8 +1656,10 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
93     goto out;
94     spin_lock(&vcpu->kvm->mmu_lock);
95     kvm_mmu_free_some_pages(vcpu);
96     - mmu_alloc_roots(vcpu);
97     + r = mmu_alloc_roots(vcpu);
98     spin_unlock(&vcpu->kvm->mmu_lock);
99     + if (r)
100     + goto out;
101     kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
102     kvm_mmu_flush_tlb(vcpu);
103     out:
104     @@ -1983,14 +1995,6 @@ EXPORT_SYMBOL_GPL(kvm_disable_tdp);
105    
106     static void free_mmu_pages(struct kvm_vcpu *vcpu)
107     {
108     - struct kvm_mmu_page *sp;
109     -
110     - while (!list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
111     - sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
112     - struct kvm_mmu_page, link);
113     - kvm_mmu_zap_page(vcpu->kvm, sp);
114     - cond_resched();
115     - }
116     free_page((unsigned long)vcpu->arch.mmu.pae_root);
117     }
118    
119     @@ -2068,6 +2072,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
120     if (pt[i] & PT_WRITABLE_MASK)
121     pt[i] &= ~PT_WRITABLE_MASK;
122     }
123     + kvm_flush_remote_tlbs(kvm);
124     }
125    
126     void kvm_mmu_zap_all(struct kvm *kvm)
127     @@ -2237,7 +2242,7 @@ static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
128    
129     static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
130     {
131     - kvm_x86_ops->tlb_flush(vcpu);
132     + kvm_set_cr3(vcpu, vcpu->arch.cr3);
133     return 1;
134     }
135    
136     @@ -2291,18 +2296,18 @@ int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
137     gpa_t addr, unsigned long *ret)
138     {
139     int r;
140     - struct kvm_pv_mmu_op_buffer buffer;
141     + struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;
142    
143     - buffer.ptr = buffer.buf;
144     - buffer.len = min_t(unsigned long, bytes, sizeof buffer.buf);
145     - buffer.processed = 0;
146     + buffer->ptr = buffer->buf;
147     + buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
148     + buffer->processed = 0;
149    
150     - r = kvm_read_guest(vcpu->kvm, addr, buffer.buf, buffer.len);
151     + r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
152     if (r)
153     goto out;
154    
155     - while (buffer.len) {
156     - r = kvm_pv_mmu_op_one(vcpu, &buffer);
157     + while (buffer->len) {
158     + r = kvm_pv_mmu_op_one(vcpu, buffer);
159     if (r < 0)
160     goto out;
161     if (r == 0)
162     @@ -2311,7 +2316,7 @@ int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
163    
164     r = 1;
165     out:
166     - *ret = buffer.processed;
167     + *ret = buffer->processed;
168     return r;
169     }
170    
171     diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
172     index 8233b86..77cae01 100644
173     --- a/arch/x86/kvm/svm.c
174     +++ b/arch/x86/kvm/svm.c
175     @@ -429,7 +429,6 @@ static __init int svm_hardware_setup(void)
176    
177     iopm_va = page_address(iopm_pages);
178     memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
179     - clear_bit(0x80, iopm_va); /* allow direct access to PC debug port */
180     iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
181    
182     if (boot_cpu_has(X86_FEATURE_NX))
183     diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
184     index 7041cc5..4cee61a 100644
185     --- a/arch/x86/kvm/vmx.c
186     +++ b/arch/x86/kvm/vmx.c
187     @@ -898,11 +898,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
188     int ret = 0;
189    
190     switch (msr_index) {
191     -#ifdef CONFIG_X86_64
192     case MSR_EFER:
193     vmx_load_host_state(vmx);
194     ret = kvm_set_msr_common(vcpu, msr_index, data);
195     break;
196     +#ifdef CONFIG_X86_64
197     case MSR_FS_BASE:
198     vmcs_writel(GUEST_FS_BASE, data);
199     break;
200     @@ -1789,7 +1789,7 @@ static void seg_setup(int seg)
201     vmcs_write16(sf->selector, 0);
202     vmcs_writel(sf->base, 0);
203     vmcs_write32(sf->limit, 0xffff);
204     - vmcs_write32(sf->ar_bytes, 0x93);
205     + vmcs_write32(sf->ar_bytes, 0xf3);
206     }
207    
208     static int alloc_apic_access_page(struct kvm *kvm)
209     @@ -2036,6 +2036,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
210    
211     fx_init(&vmx->vcpu);
212    
213     + seg_setup(VCPU_SREG_CS);
214     /*
215     * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
216     * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4. Sigh.
217     @@ -2047,8 +2048,6 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
218     vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.arch.sipi_vector << 8);
219     vmcs_writel(GUEST_CS_BASE, vmx->vcpu.arch.sipi_vector << 12);
220     }
221     - vmcs_write32(GUEST_CS_LIMIT, 0xffff);
222     - vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
223    
224     seg_setup(VCPU_SREG_DS);
225     seg_setup(VCPU_SREG_ES);
226     @@ -2583,6 +2582,12 @@ static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
227     return 1;
228     }
229    
230     +static int handle_vmx_insn(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
231     +{
232     + kvm_queue_exception(vcpu, UD_VECTOR);
233     + return 1;
234     +}
235     +
236     static int handle_wbinvd(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
237     {
238     skip_emulated_instruction(vcpu);
239     @@ -2715,6 +2720,15 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
240     [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window,
241     [EXIT_REASON_HLT] = handle_halt,
242     [EXIT_REASON_VMCALL] = handle_vmcall,
243     + [EXIT_REASON_VMCLEAR] = handle_vmx_insn,
244     + [EXIT_REASON_VMLAUNCH] = handle_vmx_insn,
245     + [EXIT_REASON_VMPTRLD] = handle_vmx_insn,
246     + [EXIT_REASON_VMPTRST] = handle_vmx_insn,
247     + [EXIT_REASON_VMREAD] = handle_vmx_insn,
248     + [EXIT_REASON_VMRESUME] = handle_vmx_insn,
249     + [EXIT_REASON_VMWRITE] = handle_vmx_insn,
250     + [EXIT_REASON_VMOFF] = handle_vmx_insn,
251     + [EXIT_REASON_VMON] = handle_vmx_insn,
252     [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,
253     [EXIT_REASON_APIC_ACCESS] = handle_apic_access,
254     [EXIT_REASON_WBINVD] = handle_wbinvd,
255     @@ -3300,7 +3314,8 @@ static int __init vmx_init(void)
256     bypass_guest_pf = 0;
257     kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
258     VMX_EPT_WRITABLE_MASK |
259     - VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
260     + VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT |
261     + VMX_EPT_IGMT_BIT);
262     kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
263     VMX_EPT_EXECUTABLE_MASK);
264     kvm_enable_tdp();
265     diff --git a/arch/x86/kvm/vmx.h b/arch/x86/kvm/vmx.h
266     index 23e8373..198cdf3 100644
267     --- a/arch/x86/kvm/vmx.h
268     +++ b/arch/x86/kvm/vmx.h
269     @@ -370,6 +370,7 @@ enum vmcs_field {
270     #define VMX_EPT_READABLE_MASK 0x1ull
271     #define VMX_EPT_WRITABLE_MASK 0x2ull
272     #define VMX_EPT_EXECUTABLE_MASK 0x4ull
273     +#define VMX_EPT_IGMT_BIT (1ull << 6)
274    
275     #define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul
276    
277     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
278     index 0d682fc..f7c7142 100644
279     --- a/arch/x86/kvm/x86.c
280     +++ b/arch/x86/kvm/x86.c
281     @@ -318,6 +318,9 @@ EXPORT_SYMBOL_GPL(kvm_lmsw);
282    
283     void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
284     {
285     + unsigned long old_cr4 = vcpu->arch.cr4;
286     + unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
287     +
288     if (cr4 & CR4_RESERVED_BITS) {
289     printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
290     kvm_inject_gp(vcpu, 0);
291     @@ -331,7 +334,8 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
292     kvm_inject_gp(vcpu, 0);
293     return;
294     }
295     - } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
296     + } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
297     + && ((cr4 ^ old_cr4) & pdptr_bits)
298     && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
299     printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
300     kvm_inject_gp(vcpu, 0);
301     @@ -752,6 +756,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
302     case MSR_IA32_MC0_MISC+8:
303     case MSR_IA32_MC0_MISC+12:
304     case MSR_IA32_MC0_MISC+16:
305     + case MSR_IA32_MC0_MISC+20:
306     case MSR_IA32_UCODE_REV:
307     case MSR_IA32_EBL_CR_POWERON:
308     data = 0;
309     @@ -982,9 +987,9 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
310    
311     static int is_efer_nx(void)
312     {
313     - u64 efer;
314     + unsigned long long efer = 0;
315    
316     - rdmsrl(MSR_EFER, efer);
317     + rdmsrl_safe(MSR_EFER, &efer);
318     return efer & EFER_NX;
319     }
320    
321     @@ -1303,28 +1308,33 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
322     struct kvm_vcpu *vcpu = filp->private_data;
323     void __user *argp = (void __user *)arg;
324     int r;
325     + struct kvm_lapic_state *lapic = NULL;
326    
327     switch (ioctl) {
328     case KVM_GET_LAPIC: {
329     - struct kvm_lapic_state lapic;
330     + lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
331    
332     - memset(&lapic, 0, sizeof lapic);
333     - r = kvm_vcpu_ioctl_get_lapic(vcpu, &lapic);
334     + r = -ENOMEM;
335     + if (!lapic)
336     + goto out;
337     + r = kvm_vcpu_ioctl_get_lapic(vcpu, lapic);
338     if (r)
339     goto out;
340     r = -EFAULT;
341     - if (copy_to_user(argp, &lapic, sizeof lapic))
342     + if (copy_to_user(argp, lapic, sizeof(struct kvm_lapic_state)))
343     goto out;
344     r = 0;
345     break;
346     }
347     case KVM_SET_LAPIC: {
348     - struct kvm_lapic_state lapic;
349     -
350     + lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
351     + r = -ENOMEM;
352     + if (!lapic)
353     + goto out;
354     r = -EFAULT;
355     - if (copy_from_user(&lapic, argp, sizeof lapic))
356     + if (copy_from_user(lapic, argp, sizeof(struct kvm_lapic_state)))
357     goto out;
358     - r = kvm_vcpu_ioctl_set_lapic(vcpu, &lapic);;
359     + r = kvm_vcpu_ioctl_set_lapic(vcpu, lapic);
360     if (r)
361     goto out;
362     r = 0;
363     @@ -1422,6 +1432,8 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
364     r = -EINVAL;
365     }
366     out:
367     + if (lapic)
368     + kfree(lapic);
369     return r;
370     }
371    
372     @@ -1442,10 +1454,12 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
373     return -EINVAL;
374    
375     down_write(&kvm->slots_lock);
376     + spin_lock(&kvm->mmu_lock);
377    
378     kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
379     kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
380    
381     + spin_unlock(&kvm->mmu_lock);
382     up_write(&kvm->slots_lock);
383     return 0;
384     }
385     @@ -1612,7 +1626,9 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
386    
387     /* If nothing is dirty, don't bother messing with page tables. */
388     if (is_dirty) {
389     + spin_lock(&kvm->mmu_lock);
390     kvm_mmu_slot_remove_write_access(kvm, log->slot);
391     + spin_unlock(&kvm->mmu_lock);
392     kvm_flush_remote_tlbs(kvm);
393     memslot = &kvm->memslots[log->slot];
394     n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
395     @@ -1630,6 +1646,15 @@ long kvm_arch_vm_ioctl(struct file *filp,
396     struct kvm *kvm = filp->private_data;
397     void __user *argp = (void __user *)arg;
398     int r = -EINVAL;
399     + /*
400     + * This union makes it completely explicit to gcc-3.x
401     + * that these two variables' stack usage should be
402     + * combined, not added together.
403     + */
404     + union {
405     + struct kvm_pit_state ps;
406     + struct kvm_memory_alias alias;
407     + } u;
408    
409     switch (ioctl) {
410     case KVM_SET_TSS_ADDR:
411     @@ -1661,17 +1686,14 @@ long kvm_arch_vm_ioctl(struct file *filp,
412     case KVM_GET_NR_MMU_PAGES:
413     r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
414     break;
415     - case KVM_SET_MEMORY_ALIAS: {
416     - struct kvm_memory_alias alias;
417     -
418     + case KVM_SET_MEMORY_ALIAS:
419     r = -EFAULT;
420     - if (copy_from_user(&alias, argp, sizeof alias))
421     + if (copy_from_user(&u.alias, argp, sizeof(struct kvm_memory_alias)))
422     goto out;
423     - r = kvm_vm_ioctl_set_memory_alias(kvm, &alias);
424     + r = kvm_vm_ioctl_set_memory_alias(kvm, &u.alias);
425     if (r)
426     goto out;
427     break;
428     - }
429     case KVM_CREATE_IRQCHIP:
430     r = -ENOMEM;
431     kvm->arch.vpic = kvm_create_pic(kvm);
432     @@ -1713,65 +1735,77 @@ long kvm_arch_vm_ioctl(struct file *filp,
433     }
434     case KVM_GET_IRQCHIP: {
435     /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
436     - struct kvm_irqchip chip;
437     + struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
438    
439     - r = -EFAULT;
440     - if (copy_from_user(&chip, argp, sizeof chip))
441     + r = -ENOMEM;
442     + if (!chip)
443     goto out;
444     + r = -EFAULT;
445     + if (copy_from_user(chip, argp, sizeof *chip))
446     + goto get_irqchip_out;
447     r = -ENXIO;
448     if (!irqchip_in_kernel(kvm))
449     - goto out;
450     - r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
451     + goto get_irqchip_out;
452     + r = kvm_vm_ioctl_get_irqchip(kvm, chip);
453     if (r)
454     - goto out;
455     + goto get_irqchip_out;
456     r = -EFAULT;
457     - if (copy_to_user(argp, &chip, sizeof chip))
458     - goto out;
459     + if (copy_to_user(argp, chip, sizeof *chip))
460     + goto get_irqchip_out;
461     r = 0;
462     + get_irqchip_out:
463     + kfree(chip);
464     + if (r)
465     + goto out;
466     break;
467     }
468     case KVM_SET_IRQCHIP: {
469     /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
470     - struct kvm_irqchip chip;
471     + struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
472    
473     - r = -EFAULT;
474     - if (copy_from_user(&chip, argp, sizeof chip))
475     + r = -ENOMEM;
476     + if (!chip)
477     goto out;
478     + r = -EFAULT;
479     + if (copy_from_user(chip, argp, sizeof *chip))
480     + goto set_irqchip_out;
481     r = -ENXIO;
482     if (!irqchip_in_kernel(kvm))
483     - goto out;
484     - r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
485     + goto set_irqchip_out;
486     + r = kvm_vm_ioctl_set_irqchip(kvm, chip);
487     if (r)
488     - goto out;
489     + goto set_irqchip_out;
490     r = 0;
491     + set_irqchip_out:
492     + kfree(chip);
493     + if (r)
494     + goto out;
495     break;
496     }
497     case KVM_GET_PIT: {
498     - struct kvm_pit_state ps;
499     r = -EFAULT;
500     - if (copy_from_user(&ps, argp, sizeof ps))
501     + if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
502     goto out;
503     r = -ENXIO;
504     if (!kvm->arch.vpit)
505     goto out;
506     - r = kvm_vm_ioctl_get_pit(kvm, &ps);
507     + r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
508     if (r)
509     goto out;
510     r = -EFAULT;
511     - if (copy_to_user(argp, &ps, sizeof ps))
512     + if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
513     goto out;
514     r = 0;
515     break;
516     }
517     case KVM_SET_PIT: {
518     - struct kvm_pit_state ps;
519     r = -EFAULT;
520     - if (copy_from_user(&ps, argp, sizeof ps))
521     + if (copy_from_user(&u.ps, argp, sizeof u.ps))
522     goto out;
523     r = -ENXIO;
524     if (!kvm->arch.vpit)
525     goto out;
526     - r = kvm_vm_ioctl_set_pit(kvm, &ps);
527     + r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
528     if (r)
529     goto out;
530     r = 0;
531     @@ -2813,10 +2847,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
532     down_read(&vcpu->kvm->slots_lock);
533     vapic_enter(vcpu);
534    
535     -preempted:
536     - if (vcpu->guest_debug.enabled)
537     - kvm_x86_ops->guest_debug_pre(vcpu);
538     -
539     again:
540     if (vcpu->requests)
541     if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
542     @@ -2870,6 +2900,9 @@ again:
543     goto out;
544     }
545    
546     + if (vcpu->guest_debug.enabled)
547     + kvm_x86_ops->guest_debug_pre(vcpu);
548     +
549     vcpu->guest_mode = 1;
550     /*
551     * Make sure that guest_mode assignment won't happen after
552     @@ -2944,7 +2977,7 @@ out:
553     if (r > 0) {
554     kvm_resched(vcpu);
555     down_read(&vcpu->kvm->slots_lock);
556     - goto preempted;
557     + goto again;
558     }
559    
560     post_kvm_run_save(vcpu, kvm_run);
561     @@ -3294,11 +3327,33 @@ static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
562     return 0;
563     }
564    
565     +int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
566     +{
567     + struct kvm_segment segvar = {
568     + .base = selector << 4,
569     + .limit = 0xffff,
570     + .selector = selector,
571     + .type = 3,
572     + .present = 1,
573     + .dpl = 3,
574     + .db = 0,
575     + .s = 1,
576     + .l = 0,
577     + .g = 0,
578     + .avl = 0,
579     + .unusable = 0,
580     + };
581     + kvm_x86_ops->set_segment(vcpu, &segvar, seg);
582     + return 0;
583     +}
584     +
585     int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
586     int type_bits, int seg)
587     {
588     struct kvm_segment kvm_seg;
589    
590     + if (!(vcpu->arch.cr0 & X86_CR0_PE))
591     + return kvm_load_realmode_segment(vcpu, selector, seg);
592     if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
593     return 1;
594     kvm_seg.type |= type_bits;
595     @@ -3981,7 +4036,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
596     userspace_addr = do_mmap(NULL, 0,
597     npages * PAGE_SIZE,
598     PROT_READ | PROT_WRITE,
599     - MAP_SHARED | MAP_ANONYMOUS,
600     + MAP_PRIVATE | MAP_ANONYMOUS,
601     0);
602     up_write(&current->mm->mmap_sem);
603    
604     @@ -4008,12 +4063,14 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
605     }
606     }
607    
608     + spin_lock(&kvm->mmu_lock);
609     if (!kvm->arch.n_requested_mmu_pages) {
610     unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
611     kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
612     }
613    
614     kvm_mmu_slot_remove_write_access(kvm, mem->slot);
615     + spin_unlock(&kvm->mmu_lock);
616     kvm_flush_remote_tlbs(kvm);
617    
618     return 0;
619     @@ -4022,6 +4079,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
620     void kvm_arch_flush_shadow(struct kvm *kvm)
621     {
622     kvm_mmu_zap_all(kvm);
623     + kvm_reload_remote_mmus(kvm);
624     }
625    
626     int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
627     diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
628     index ebda9a8..3340c62 100644
629     --- a/drivers/char/mxser.c
630     +++ b/drivers/char/mxser.c
631     @@ -1099,8 +1099,6 @@ static int mxser_open(struct tty_struct *tty, struct file *filp)
632     if (retval)
633     return retval;
634    
635     - /* unmark here for very high baud rate (ex. 921600 bps) used */
636     - tty->low_latency = 1;
637     return 0;
638     }
639    
640     diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
641     index 66a0f93..4dfb5a1 100644
642     --- a/drivers/char/nozomi.c
643     +++ b/drivers/char/nozomi.c
644     @@ -1584,7 +1584,6 @@ static int ntty_open(struct tty_struct *tty, struct file *file)
645    
646     /* Enable interrupt downlink for channel */
647     if (port->tty_open_count == 1) {
648     - tty->low_latency = 1;
649     tty->driver_data = port;
650     port->tty = tty;
651     DBG1("open: %d", port->token_dl);
652     diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
653     index a6e730f..682f411 100644
654     --- a/drivers/net/ehea/ehea_main.c
655     +++ b/drivers/net/ehea/ehea_main.c
656     @@ -1530,6 +1530,9 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
657     {
658     int ret, i;
659    
660     + if (pr->qp)
661     + netif_napi_del(&pr->napi);
662     +
663     ret = ehea_destroy_qp(pr->qp);
664    
665     if (!ret) {
666     diff --git a/drivers/parport/share.c b/drivers/parport/share.c
667     index a8a62bb..a592f29 100644
668     --- a/drivers/parport/share.c
669     +++ b/drivers/parport/share.c
670     @@ -614,7 +614,10 @@ parport_register_device(struct parport *port, const char *name,
671     * pardevice fields. -arca
672     */
673     port->ops->init_state(tmp, tmp->state);
674     - parport_device_proc_register(tmp);
675     + if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
676     + port->proc_device = tmp;
677     + parport_device_proc_register(tmp);
678     + }
679     return tmp;
680    
681     out_free_all:
682     @@ -646,10 +649,14 @@ void parport_unregister_device(struct pardevice *dev)
683     }
684     #endif
685    
686     - parport_device_proc_unregister(dev);
687     -
688     port = dev->port->physport;
689    
690     + if (port->proc_device == dev) {
691     + port->proc_device = NULL;
692     + clear_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags);
693     + parport_device_proc_unregister(dev);
694     + }
695     +
696     if (port->cad == dev) {
697     printk(KERN_DEBUG "%s: %s forgot to release port\n",
698     port->name, dev->name);
699     diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
700     index ae87d08..25f2008 100644
701     --- a/drivers/scsi/sr_ioctl.c
702     +++ b/drivers/scsi/sr_ioctl.c
703     @@ -309,6 +309,11 @@ int sr_drive_status(struct cdrom_device_info *cdi, int slot)
704     if (0 == sr_test_unit_ready(cd->device, &sshdr))
705     return CDS_DISC_OK;
706    
707     + /* SK/ASC/ASCQ of 2/4/1 means "unit is becoming ready" */
708     + if (scsi_sense_valid(&sshdr) && sshdr.sense_key == NOT_READY
709     + && sshdr.asc == 0x04 && sshdr.ascq == 0x01)
710     + return CDS_DRIVE_NOT_READY;
711     +
712     if (!cdrom_get_media_event(cdi, &med)) {
713     if (med.media_present)
714     return CDS_DISC_OK;
715     diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
716     index b4d7235..7b3df8e 100644
717     --- a/drivers/usb/serial/cyberjack.c
718     +++ b/drivers/usb/serial/cyberjack.c
719     @@ -174,13 +174,6 @@ static int cyberjack_open(struct tty_struct *tty,
720     dbg("%s - usb_clear_halt", __func__);
721     usb_clear_halt(port->serial->dev, port->write_urb->pipe);
722    
723     - /* force low_latency on so that our tty_push actually forces
724     - * the data through, otherwise it is scheduled, and with high
725     - * data rates (like with OHCI) data can get lost.
726     - */
727     - if (tty)
728     - tty->low_latency = 1;
729     -
730     priv = usb_get_serial_port_data(port);
731     spin_lock_irqsave(&priv->lock, flags);
732     priv->rdtodo = 0;
733     diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
734     index 22837a3..7eb473b 100644
735     --- a/drivers/usb/serial/cypress_m8.c
736     +++ b/drivers/usb/serial/cypress_m8.c
737     @@ -655,10 +655,6 @@ static int cypress_open(struct tty_struct *tty,
738     priv->rx_flags = 0;
739     spin_unlock_irqrestore(&priv->lock, flags);
740    
741     - /* setting to zero could cause data loss */
742     - if (tty)
743     - tty->low_latency = 1;
744     -
745     /* raise both lines and set termios */
746     spin_lock_irqsave(&priv->lock, flags);
747     priv->line_control = CONTROL_DTR | CONTROL_RTS;
748     diff --git a/drivers/usb/serial/empeg.c b/drivers/usb/serial/empeg.c
749     index a6ab5b5..28ee28c 100644
750     --- a/drivers/usb/serial/empeg.c
751     +++ b/drivers/usb/serial/empeg.c
752     @@ -478,12 +478,6 @@ static void empeg_set_termios(struct tty_struct *tty,
753     termios->c_cflag
754     |= CS8; /* character size 8 bits */
755    
756     - /*
757     - * Force low_latency on; otherwise the pushes are scheduled;
758     - * this is bad as it opens up the possibility of dropping bytes
759     - * on the floor. We don't want to drop bytes on the floor. :)
760     - */
761     - tty->low_latency = 1;
762     tty_encode_baud_rate(tty, 115200, 115200);
763     }
764    
765     diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
766     index d953820..d860071 100644
767     --- a/drivers/usb/serial/garmin_gps.c
768     +++ b/drivers/usb/serial/garmin_gps.c
769     @@ -972,14 +972,6 @@ static int garmin_open(struct tty_struct *tty,
770    
771     dbg("%s - port %d", __func__, port->number);
772    
773     - /*
774     - * Force low_latency on so that our tty_push actually forces the data
775     - * through, otherwise it is scheduled, and with high data rates (like
776     - * with OHCI) data can get lost.
777     - */
778     - if (tty)
779     - tty->low_latency = 1;
780     -
781     spin_lock_irqsave(&garmin_data_p->lock, flags);
782     garmin_data_p->mode = initial_mode;
783     garmin_data_p->count = 0;
784     diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
785     index fe84c88..aa7f08b 100644
786     --- a/drivers/usb/serial/generic.c
787     +++ b/drivers/usb/serial/generic.c
788     @@ -122,12 +122,6 @@ int usb_serial_generic_open(struct tty_struct *tty,
789    
790     dbg("%s - port %d", __func__, port->number);
791    
792     - /* force low_latency on so that our tty_push actually forces the data
793     - through, otherwise it is scheduled, and with high data rates (like
794     - with OHCI) data can get lost. */
795     - if (tty)
796     - tty->low_latency = 1;
797     -
798     /* clear the throttle flags */
799     spin_lock_irqsave(&port->lock, flags);
800     port->throttled = 0;
801     diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
802     index bfa508d..183045a 100644
803     --- a/drivers/usb/serial/io_edgeport.c
804     +++ b/drivers/usb/serial/io_edgeport.c
805     @@ -193,8 +193,6 @@ static const struct divisor_table_entry divisor_table[] = {
806     /* local variables */
807     static int debug;
808    
809     -static int low_latency = 1; /* tty low latency flag, on by default */
810     -
811     static atomic_t CmdUrbs; /* Number of outstanding Command Write Urbs */
812    
813    
814     @@ -861,9 +859,6 @@ static int edge_open(struct tty_struct *tty,
815     if (edge_port == NULL)
816     return -ENODEV;
817    
818     - if (tty)
819     - tty->low_latency = low_latency;
820     -
821     /* see if we've set up our endpoint info yet (can't set it up
822     in edge_startup as the structures were not set up at that time.) */
823     serial = port->serial;
824     @@ -3281,6 +3276,3 @@ MODULE_FIRMWARE("edgeport/down2.fw");
825    
826     module_param(debug, bool, S_IRUGO | S_IWUSR);
827     MODULE_PARM_DESC(debug, "Debug enabled or not");
828     -
829     -module_param(low_latency, bool, S_IRUGO | S_IWUSR);
830     -MODULE_PARM_DESC(low_latency, "Low latency enabled or not");
831     diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
832     index cb4c543..0d744f0 100644
833     --- a/drivers/usb/serial/io_ti.c
834     +++ b/drivers/usb/serial/io_ti.c
835     @@ -76,7 +76,6 @@ struct edgeport_uart_buf_desc {
836     #define EDGE_READ_URB_STOPPING 1
837     #define EDGE_READ_URB_STOPPED 2
838    
839     -#define EDGE_LOW_LATENCY 1
840     #define EDGE_CLOSING_WAIT 4000 /* in .01 sec */
841    
842     #define EDGE_OUT_BUF_SIZE 1024
843     @@ -232,7 +231,6 @@ static unsigned short OperationalBuildNumber;
844    
845     static int debug;
846    
847     -static int low_latency = EDGE_LOW_LATENCY;
848     static int closing_wait = EDGE_CLOSING_WAIT;
849     static int ignore_cpu_rev;
850     static int default_uart_mode; /* RS232 */
851     @@ -1838,9 +1836,6 @@ static int edge_open(struct tty_struct *tty,
852     if (edge_port == NULL)
853     return -ENODEV;
854    
855     - if (tty)
856     - tty->low_latency = low_latency;
857     -
858     port_number = port->number - port->serial->minor;
859     switch (port_number) {
860     case 0:
861     @@ -2995,9 +2990,6 @@ MODULE_FIRMWARE("edgeport/down3.bin");
862     module_param(debug, bool, S_IRUGO | S_IWUSR);
863     MODULE_PARM_DESC(debug, "Debug enabled or not");
864    
865     -module_param(low_latency, bool, S_IRUGO | S_IWUSR);
866     -MODULE_PARM_DESC(low_latency, "Low latency enabled or not");
867     -
868     module_param(closing_wait, int, S_IRUGO | S_IWUSR);
869     MODULE_PARM_DESC(closing_wait, "Maximum wait for data to drain, in .01 secs");
870    
871     diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
872     index cd9a2e1..ae0b0ff 100644
873     --- a/drivers/usb/serial/ipaq.c
874     +++ b/drivers/usb/serial/ipaq.c
875     @@ -635,13 +635,7 @@ static int ipaq_open(struct tty_struct *tty,
876     priv->free_len += PACKET_SIZE;
877     }
878    
879     - /*
880     - * Force low latency on. This will immediately push data to the line
881     - * discipline instead of queueing.
882     - */
883     -
884     if (tty) {
885     - tty->low_latency = 1;
886     /* FIXME: These two are bogus */
887     tty->raw = 1;
888     tty->real_raw = 1;
889     diff --git a/drivers/usb/serial/ipw.c b/drivers/usb/serial/ipw.c
890     index a842025..b1c0c9a 100644
891     --- a/drivers/usb/serial/ipw.c
892     +++ b/drivers/usb/serial/ipw.c
893     @@ -206,9 +206,6 @@ static int ipw_open(struct tty_struct *tty,
894     if (!buf_flow_init)
895     return -ENOMEM;
896    
897     - if (tty)
898     - tty->low_latency = 1;
899     -
900     /* --1: Tell the modem to initialize (we think) From sniffs this is
901     * always the first thing that gets sent to the modem during
902     * opening of the device */
903     diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
904     index ddff37f..d6da4c9 100644
905     --- a/drivers/usb/serial/iuu_phoenix.c
906     +++ b/drivers/usb/serial/iuu_phoenix.c
907     @@ -1046,7 +1046,6 @@ static int iuu_open(struct tty_struct *tty,
908     tty->termios->c_oflag = 0;
909     tty->termios->c_iflag = 0;
910     priv->termios_initialized = 1;
911     - tty->low_latency = 1;
912     priv->poll = 0;
913     }
914     spin_unlock_irqrestore(&priv->lock, flags);
915     diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
916     index deba28e..5326c59 100644
917     --- a/drivers/usb/serial/kobil_sct.c
918     +++ b/drivers/usb/serial/kobil_sct.c
919     @@ -231,13 +231,7 @@ static int kobil_open(struct tty_struct *tty,
920     /* someone sets the dev to 0 if the close method has been called */
921     port->interrupt_in_urb->dev = port->serial->dev;
922    
923     -
924     - /* force low_latency on so that our tty_push actually forces
925     - * the data through, otherwise it is scheduled, and with high
926     - * data rates (like with OHCI) data can get lost.
927     - */
928     if (tty) {
929     - tty->low_latency = 1;
930    
931     /* Default to echo off and other sane device settings */
932     tty->termios->c_lflag = 0;
933     diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
934     index 7c4917d..1c2402d 100644
935     --- a/drivers/usb/serial/mos7720.c
936     +++ b/drivers/usb/serial/mos7720.c
937     @@ -442,13 +442,6 @@ static int mos7720_open(struct tty_struct *tty,
938     data = 0x0c;
939     send_mos_cmd(serial, MOS_WRITE, port_number, 0x01, &data);
940    
941     - /* force low_latency on so that our tty_push actually forces *
942     - * the data through,otherwise it is scheduled, and with *
943     - * high data rates (like with OHCI) data can get lost. */
944     -
945     - if (tty)
946     - tty->low_latency = 1;
947     -
948     /* see if we've set up our endpoint info yet *
949     * (can't set it up in mos7720_startup as the *
950     * structures were not set up at that time.) */
951     diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
952     index 09d8206..8befcbb 100644
953     --- a/drivers/usb/serial/mos7840.c
954     +++ b/drivers/usb/serial/mos7840.c
955     @@ -990,12 +990,6 @@ static int mos7840_open(struct tty_struct *tty,
956     status = mos7840_set_reg_sync(port, mos7840_port->ControlRegOffset,
957     Data);
958    
959     - /* force low_latency on so that our tty_push actually forces *
960     - * the data through,otherwise it is scheduled, and with *
961     - * high data rates (like with OHCI) data can get lost. */
962     - if (tty)
963     - tty->low_latency = 1;
964     -
965     /* Check to see if we've set up our endpoint info yet *
966     * (can't set it up in mos7840_startup as the structures *
967     * were not set up at that time.) */
968     diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
969     index 211cd61..faa30ad 100644
970     --- a/drivers/usb/serial/option.c
971     +++ b/drivers/usb/serial/option.c
972     @@ -914,9 +914,6 @@ static int option_open(struct tty_struct *tty,
973     usb_pipeout(urb->pipe), 0); */
974     }
975    
976     - if (tty)
977     - tty->low_latency = 1;
978     -
979     option_send_setup(tty, port);
980    
981     return 0;
982     diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
983     index ea1a103..639328b 100644
984     --- a/drivers/usb/serial/sierra.c
985     +++ b/drivers/usb/serial/sierra.c
986     @@ -576,9 +576,6 @@ static int sierra_open(struct tty_struct *tty,
987     }
988     }
989    
990     - if (tty)
991     - tty->low_latency = 1;
992     -
993     sierra_send_setup(tty, port);
994    
995     /* start up the interrupt endpoint if we have one */
996     diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
997     index bc5e905..55b9d67 100644
998     --- a/drivers/usb/serial/ti_usb_3410_5052.c
999     +++ b/drivers/usb/serial/ti_usb_3410_5052.c
1000     @@ -101,11 +101,10 @@
1001    
1002     #define TI_TRANSFER_TIMEOUT 2
1003    
1004     -#define TI_DEFAULT_LOW_LATENCY 0
1005     #define TI_DEFAULT_CLOSING_WAIT 4000 /* in .01 secs */
1006    
1007     /* supported setserial flags */
1008     -#define TI_SET_SERIAL_FLAGS (ASYNC_LOW_LATENCY)
1009     +#define TI_SET_SERIAL_FLAGS 0
1010    
1011     /* read urb states */
1012     #define TI_READ_URB_RUNNING 0
1013     @@ -212,7 +211,6 @@ static int ti_buf_get(struct circ_buf *cb, char *buf, int count);
1014    
1015     /* module parameters */
1016     static int debug;
1017     -static int low_latency = TI_DEFAULT_LOW_LATENCY;
1018     static int closing_wait = TI_DEFAULT_CLOSING_WAIT;
1019     static ushort vendor_3410[TI_EXTRA_VID_PID_COUNT];
1020     static unsigned int vendor_3410_count;
1021     @@ -333,10 +331,6 @@ MODULE_FIRMWARE("ti_5052.fw");
1022     module_param(debug, bool, S_IRUGO | S_IWUSR);
1023     MODULE_PARM_DESC(debug, "Enable debugging, 0=no, 1=yes");
1024    
1025     -module_param(low_latency, bool, S_IRUGO | S_IWUSR);
1026     -MODULE_PARM_DESC(low_latency,
1027     - "TTY low_latency flag, 0=off, 1=on, default is off");
1028     -
1029     module_param(closing_wait, int, S_IRUGO | S_IWUSR);
1030     MODULE_PARM_DESC(closing_wait,
1031     "Maximum wait for data to drain in close, in .01 secs, default is 4000");
1032     @@ -480,7 +474,6 @@ static int ti_startup(struct usb_serial *serial)
1033     spin_lock_init(&tport->tp_lock);
1034     tport->tp_uart_base_addr = (i == 0 ?
1035     TI_UART1_BASE_ADDR : TI_UART2_BASE_ADDR);
1036     - tport->tp_flags = low_latency ? ASYNC_LOW_LATENCY : 0;
1037     tport->tp_closing_wait = closing_wait;
1038     init_waitqueue_head(&tport->tp_msr_wait);
1039     init_waitqueue_head(&tport->tp_write_wait);
1040     @@ -560,10 +553,6 @@ static int ti_open(struct tty_struct *tty,
1041     if (mutex_lock_interruptible(&tdev->td_open_close_lock))
1042     return -ERESTARTSYS;
1043    
1044     - if (tty)
1045     - tty->low_latency =
1046     - (tport->tp_flags & ASYNC_LOW_LATENCY) ? 1 : 0;
1047     -
1048     port_number = port->number - port->serial->minor;
1049    
1050     memset(&(tport->tp_icount), 0x00, sizeof(tport->tp_icount));
1051     @@ -1480,10 +1469,6 @@ static int ti_set_serial_info(struct ti_port *tport,
1052     return -EFAULT;
1053    
1054     tport->tp_flags = new_serial.flags & TI_SET_SERIAL_FLAGS;
1055     - /* FIXME */
1056     - if (port->port.tty)
1057     - port->port.tty->low_latency =
1058     - (tport->tp_flags & ASYNC_LOW_LATENCY) ? 1 : 0;
1059     tport->tp_closing_wait = new_serial.closing_wait;
1060    
1061     return 0;
1062     diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
1063     index cf8924f..ec33fa5 100644
1064     --- a/drivers/usb/serial/visor.c
1065     +++ b/drivers/usb/serial/visor.c
1066     @@ -296,14 +296,6 @@ static int visor_open(struct tty_struct *tty, struct usb_serial_port *port,
1067     priv->throttled = 0;
1068     spin_unlock_irqrestore(&priv->lock, flags);
1069    
1070     - /*
1071     - * Force low_latency on so that our tty_push actually forces the data
1072     - * through, otherwise it is scheduled, and with high data rates (like
1073     - * with OHCI) data can get lost.
1074     - */
1075     - if (tty)
1076     - tty->low_latency = 1;
1077     -
1078     /* Start reading from the device */
1079     usb_fill_bulk_urb(port->read_urb, serial->dev,
1080     usb_rcvbulkpipe(serial->dev,
1081     diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
1082     index a53da14..0f04b70 100644
1083     --- a/fs/ocfs2/aops.c
1084     +++ b/fs/ocfs2/aops.c
1085     @@ -908,18 +908,17 @@ struct ocfs2_write_cluster_desc {
1086     */
1087     unsigned c_new;
1088     unsigned c_unwritten;
1089     + unsigned c_needs_zero;
1090     };
1091    
1092     -static inline int ocfs2_should_zero_cluster(struct ocfs2_write_cluster_desc *d)
1093     -{
1094     - return d->c_new || d->c_unwritten;
1095     -}
1096     -
1097     struct ocfs2_write_ctxt {
1098     /* Logical cluster position / len of write */
1099     u32 w_cpos;
1100     u32 w_clen;
1101    
1102     + /* First cluster allocated in a nonsparse extend */
1103     + u32 w_first_new_cpos;
1104     +
1105     struct ocfs2_write_cluster_desc w_desc[OCFS2_MAX_CLUSTERS_PER_PAGE];
1106    
1107     /*
1108     @@ -997,6 +996,7 @@ static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
1109     return -ENOMEM;
1110    
1111     wc->w_cpos = pos >> osb->s_clustersize_bits;
1112     + wc->w_first_new_cpos = UINT_MAX;
1113     cend = (pos + len - 1) >> osb->s_clustersize_bits;
1114     wc->w_clen = cend - wc->w_cpos + 1;
1115     get_bh(di_bh);
1116     @@ -1239,13 +1239,11 @@ static int ocfs2_write_cluster(struct address_space *mapping,
1117     struct ocfs2_write_ctxt *wc, u32 cpos,
1118     loff_t user_pos, unsigned user_len)
1119     {
1120     - int ret, i, new, should_zero = 0;
1121     + int ret, i, new;
1122     u64 v_blkno, p_blkno;
1123     struct inode *inode = mapping->host;
1124    
1125     new = phys == 0 ? 1 : 0;
1126     - if (new || unwritten)
1127     - should_zero = 1;
1128    
1129     if (new) {
1130     u32 tmp_pos;
1131     @@ -1356,7 +1354,9 @@ static int ocfs2_write_cluster_by_desc(struct address_space *mapping,
1132     local_len = osb->s_clustersize - cluster_off;
1133    
1134     ret = ocfs2_write_cluster(mapping, desc->c_phys,
1135     - desc->c_unwritten, data_ac, meta_ac,
1136     + desc->c_unwritten,
1137     + desc->c_needs_zero,
1138     + data_ac, meta_ac,
1139     wc, desc->c_cpos, pos, local_len);
1140     if (ret) {
1141     mlog_errno(ret);
1142     @@ -1406,14 +1406,14 @@ static void ocfs2_set_target_boundaries(struct ocfs2_super *osb,
1143     * newly allocated cluster.
1144     */
1145     desc = &wc->w_desc[0];
1146     - if (ocfs2_should_zero_cluster(desc))
1147     + if (desc->c_needs_zero)
1148     ocfs2_figure_cluster_boundaries(osb,
1149     desc->c_cpos,
1150     &wc->w_target_from,
1151     NULL);
1152    
1153     desc = &wc->w_desc[wc->w_clen - 1];
1154     - if (ocfs2_should_zero_cluster(desc))
1155     + if (desc->c_needs_zero)
1156     ocfs2_figure_cluster_boundaries(osb,
1157     desc->c_cpos,
1158     NULL,
1159     @@ -1481,13 +1481,28 @@ static int ocfs2_populate_write_desc(struct inode *inode,
1160     phys++;
1161     }
1162    
1163     + /*
1164     + * If w_first_new_cpos is < UINT_MAX, we have a non-sparse
1165     + * file that got extended. w_first_new_cpos tells us
1166     + * where the newly allocated clusters are so we can
1167     + * zero them.
1168     + */
1169     + if (desc->c_cpos >= wc->w_first_new_cpos) {
1170     + BUG_ON(phys == 0);
1171     + desc->c_needs_zero = 1;
1172     + }
1173     +
1174     desc->c_phys = phys;
1175     if (phys == 0) {
1176     desc->c_new = 1;
1177     + desc->c_needs_zero = 1;
1178     *clusters_to_alloc = *clusters_to_alloc + 1;
1179     }
1180     - if (ext_flags & OCFS2_EXT_UNWRITTEN)
1181     +
1182     + if (ext_flags & OCFS2_EXT_UNWRITTEN) {
1183     desc->c_unwritten = 1;
1184     + desc->c_needs_zero = 1;
1185     + }
1186    
1187     num_clusters--;
1188     }
1189     @@ -1644,10 +1659,13 @@ static int ocfs2_expand_nonsparse_inode(struct inode *inode, loff_t pos,
1190     if (newsize <= i_size_read(inode))
1191     return 0;
1192    
1193     - ret = ocfs2_extend_no_holes(inode, newsize, newsize - len);
1194     + ret = ocfs2_extend_no_holes(inode, newsize, pos);
1195     if (ret)
1196     mlog_errno(ret);
1197    
1198     + wc->w_first_new_cpos =
1199     + ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode));
1200     +
1201     return ret;
1202     }
1203    
1204     @@ -1656,7 +1674,7 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
1205     struct page **pagep, void **fsdata,
1206     struct buffer_head *di_bh, struct page *mmap_page)
1207     {
1208     - int ret, credits = OCFS2_INODE_UPDATE_CREDITS;
1209     + int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS;
1210     unsigned int clusters_to_alloc, extents_to_split;
1211     struct ocfs2_write_ctxt *wc;
1212     struct inode *inode = mapping->host;
1213     @@ -1724,8 +1742,19 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
1214    
1215     }
1216    
1217     - ocfs2_set_target_boundaries(osb, wc, pos, len,
1218     - clusters_to_alloc + extents_to_split);
1219     + /*
1220     + * We have to zero sparse allocated clusters, unwritten extent clusters,
1221     + * and non-sparse clusters we just extended. For non-sparse writes,
1222     + * we know zeros will only be needed in the first and/or last cluster.
1223     + */
1224     + if (clusters_to_alloc || extents_to_split ||
1225     + (wc->w_clen && (wc->w_desc[0].c_needs_zero ||
1226     + wc->w_desc[wc->w_clen - 1].c_needs_zero)))
1227     + cluster_of_pages = 1;
1228     + else
1229     + cluster_of_pages = 0;
1230     +
1231     + ocfs2_set_target_boundaries(osb, wc, pos, len, cluster_of_pages);
1232    
1233     handle = ocfs2_start_trans(osb, credits);
1234     if (IS_ERR(handle)) {
1235     @@ -1753,8 +1782,7 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
1236     * extent.
1237     */
1238     ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos,
1239     - clusters_to_alloc + extents_to_split,
1240     - mmap_page);
1241     + cluster_of_pages, mmap_page);
1242     if (ret) {
1243     mlog_errno(ret);
1244     goto out_commit;
1245     diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
1246     index c2e34c2..cf7c887 100644
1247     --- a/include/asm-x86/kvm_host.h
1248     +++ b/include/asm-x86/kvm_host.h
1249     @@ -195,6 +195,13 @@ struct kvm_mmu_page {
1250     };
1251     };
1252    
1253     +struct kvm_pv_mmu_op_buffer {
1254     + void *ptr;
1255     + unsigned len;
1256     + unsigned processed;
1257     + char buf[512] __aligned(sizeof(long));
1258     +};
1259     +
1260     /*
1261     * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
1262     * 32-bit). The kvm_mmu structure abstracts the details of the current mmu
1263     @@ -237,6 +244,9 @@ struct kvm_vcpu_arch {
1264     bool tpr_access_reporting;
1265    
1266     struct kvm_mmu mmu;
1267     + /* only needed in kvm_pv_mmu_op() path, but it's hot so
1268     + * put it here to avoid allocation */
1269     + struct kvm_pv_mmu_op_buffer mmu_op_buffer;
1270    
1271     struct kvm_mmu_memory_cache mmu_pte_chain_cache;
1272     struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
1273     diff --git a/include/linux/parport.h b/include/linux/parport.h
1274     index 6a0d7cd..986252e 100644
1275     --- a/include/linux/parport.h
1276     +++ b/include/linux/parport.h
1277     @@ -326,6 +326,10 @@ struct parport {
1278     int spintime;
1279     atomic_t ref_count;
1280    
1281     + unsigned long devflags;
1282     +#define PARPORT_DEVPROC_REGISTERED 0
1283     + struct pardevice *proc_device; /* Currently register proc device */
1284     +
1285     struct list_head full_list;
1286     struct parport *slaves[3];
1287     };
1288     diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
1289     index 4d80a11..75a87fe 100644
1290     --- a/include/linux/sunrpc/xprt.h
1291     +++ b/include/linux/sunrpc/xprt.h
1292     @@ -260,6 +260,7 @@ void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie);
1293     #define XPRT_BOUND (4)
1294     #define XPRT_BINDING (5)
1295     #define XPRT_CLOSING (6)
1296     +#define XPRT_CONNECTION_CLOSE (8)
1297    
1298     static inline void xprt_set_connected(struct rpc_xprt *xprt)
1299     {
1300     diff --git a/kernel/fork.c b/kernel/fork.c
1301     index fcbd28c..3fdf3d5 100644
1302     --- a/kernel/fork.c
1303     +++ b/kernel/fork.c
1304     @@ -767,11 +767,9 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
1305     struct signal_struct *sig;
1306     int ret;
1307    
1308     - if (clone_flags & CLONE_THREAD) {
1309     - atomic_inc(&current->signal->count);
1310     - atomic_inc(&current->signal->live);
1311     + if (clone_flags & CLONE_THREAD)
1312     return 0;
1313     - }
1314     +
1315     sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
1316     tsk->signal = sig;
1317     if (!sig)
1318     @@ -844,16 +842,6 @@ void __cleanup_signal(struct signal_struct *sig)
1319     kmem_cache_free(signal_cachep, sig);
1320     }
1321    
1322     -static void cleanup_signal(struct task_struct *tsk)
1323     -{
1324     - struct signal_struct *sig = tsk->signal;
1325     -
1326     - atomic_dec(&sig->live);
1327     -
1328     - if (atomic_dec_and_test(&sig->count))
1329     - __cleanup_signal(sig);
1330     -}
1331     -
1332     static void copy_flags(unsigned long clone_flags, struct task_struct *p)
1333     {
1334     unsigned long new_flags = p->flags;
1335     @@ -1201,6 +1189,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1336     }
1337    
1338     if (clone_flags & CLONE_THREAD) {
1339     + atomic_inc(&current->signal->count);
1340     + atomic_inc(&current->signal->live);
1341     p->group_leader = current->group_leader;
1342     list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
1343    
1344     @@ -1261,7 +1251,8 @@ bad_fork_cleanup_mm:
1345     if (p->mm)
1346     mmput(p->mm);
1347     bad_fork_cleanup_signal:
1348     - cleanup_signal(p);
1349     + if (!(clone_flags & CLONE_THREAD))
1350     + __cleanup_signal(p->signal);
1351     bad_fork_cleanup_sighand:
1352     __cleanup_sighand(p->sighand);
1353     bad_fork_cleanup_fs:
1354     diff --git a/kernel/kthread.c b/kernel/kthread.c
1355     index 96cff2f..9548d52 100644
1356     --- a/kernel/kthread.c
1357     +++ b/kernel/kthread.c
1358     @@ -213,12 +213,12 @@ int kthread_stop(struct task_struct *k)
1359     /* Now set kthread_should_stop() to true, and wake it up. */
1360     kthread_stop_info.k = k;
1361     wake_up_process(k);
1362     - put_task_struct(k);
1363    
1364     /* Once it dies, reset stop ptr, gather result and we're done. */
1365     wait_for_completion(&kthread_stop_info.done);
1366     kthread_stop_info.k = NULL;
1367     ret = kthread_stop_info.err;
1368     + put_task_struct(k);
1369     mutex_unlock(&kthread_stop_lock);
1370    
1371     return ret;
1372     diff --git a/kernel/signal.c b/kernel/signal.c
1373     index 7d0a222..de2b649 100644
1374     --- a/kernel/signal.c
1375     +++ b/kernel/signal.c
1376     @@ -2353,11 +2353,9 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
1377     stack_t oss;
1378     int error;
1379    
1380     - if (uoss) {
1381     - oss.ss_sp = (void __user *) current->sas_ss_sp;
1382     - oss.ss_size = current->sas_ss_size;
1383     - oss.ss_flags = sas_ss_flags(sp);
1384     - }
1385     + oss.ss_sp = (void __user *) current->sas_ss_sp;
1386     + oss.ss_size = current->sas_ss_size;
1387     + oss.ss_flags = sas_ss_flags(sp);
1388    
1389     if (uss) {
1390     void __user *ss_sp;
1391     @@ -2400,13 +2398,16 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
1392     current->sas_ss_size = ss_size;
1393     }
1394    
1395     + error = 0;
1396     if (uoss) {
1397     error = -EFAULT;
1398     - if (copy_to_user(uoss, &oss, sizeof(oss)))
1399     + if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
1400     goto out;
1401     + error = __put_user(oss.ss_sp, &uoss->ss_sp) |
1402     + __put_user(oss.ss_size, &uoss->ss_size) |
1403     + __put_user(oss.ss_flags, &uoss->ss_flags);
1404     }
1405    
1406     - error = 0;
1407     out:
1408     return error;
1409     }
1410     diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
1411     index 0c85042..8067dc7 100644
1412     --- a/net/appletalk/ddp.c
1413     +++ b/net/appletalk/ddp.c
1414     @@ -1245,6 +1245,7 @@ static int atalk_getname(struct socket *sock, struct sockaddr *uaddr,
1415     return -ENOBUFS;
1416    
1417     *uaddr_len = sizeof(struct sockaddr_at);
1418     + memset(&sat.sat_zero, 0, sizeof(sat.sat_zero));
1419    
1420     if (peer) {
1421     if (sk->sk_state != TCP_ESTABLISHED)
1422     diff --git a/net/can/raw.c b/net/can/raw.c
1423     index 6e0663f..08f31d4 100644
1424     --- a/net/can/raw.c
1425     +++ b/net/can/raw.c
1426     @@ -396,6 +396,7 @@ static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
1427     if (peer)
1428     return -EOPNOTSUPP;
1429    
1430     + memset(addr, 0, sizeof(*addr));
1431     addr->can_family = AF_CAN;
1432     addr->can_ifindex = ro->ifindex;
1433    
1434     diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
1435     index 8789d2b..9aae86f 100644
1436     --- a/net/econet/af_econet.c
1437     +++ b/net/econet/af_econet.c
1438     @@ -520,6 +520,7 @@ static int econet_getname(struct socket *sock, struct sockaddr *uaddr,
1439     if (peer)
1440     return -EOPNOTSUPP;
1441    
1442     + memset(sec, 0, sizeof(*sec));
1443     mutex_lock(&econet_mutex);
1444    
1445     sk = sock->sk;
1446     diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
1447     index 3eb5bcc..b28409c 100644
1448     --- a/net/irda/af_irda.c
1449     +++ b/net/irda/af_irda.c
1450     @@ -714,6 +714,7 @@ static int irda_getname(struct socket *sock, struct sockaddr *uaddr,
1451     struct sock *sk = sock->sk;
1452     struct irda_sock *self = irda_sk(sk);
1453    
1454     + memset(&saddr, 0, sizeof(saddr));
1455     if (peer) {
1456     if (sk->sk_state != TCP_ESTABLISHED)
1457     return -ENOTCONN;
1458     diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
1459     index 5bcc452..90a55b1 100644
1460     --- a/net/llc/af_llc.c
1461     +++ b/net/llc/af_llc.c
1462     @@ -915,6 +915,7 @@ static int llc_ui_getname(struct socket *sock, struct sockaddr *uaddr,
1463     struct llc_sock *llc = llc_sk(sk);
1464     int rc = 0;
1465    
1466     + memset(&sllc, 0, sizeof(sllc));
1467     lock_sock(sk);
1468     if (sock_flag(sk, SOCK_ZAPPED))
1469     goto out;
1470     diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
1471     index db9e263..ad72bde 100644
1472     --- a/net/netrom/af_netrom.c
1473     +++ b/net/netrom/af_netrom.c
1474     @@ -848,6 +848,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
1475     sax->fsa_ax25.sax25_family = AF_NETROM;
1476     sax->fsa_ax25.sax25_ndigis = 1;
1477     sax->fsa_ax25.sax25_call = nr->user_addr;
1478     + memset(sax->fsa_digipeater, 0, sizeof(sax->fsa_digipeater));
1479     sax->fsa_digipeater[0] = nr->dest_addr;
1480     *uaddr_len = sizeof(struct full_sockaddr_ax25);
1481     } else {
1482     diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
1483     index c062361..f132243 100644
1484     --- a/net/rose/af_rose.c
1485     +++ b/net/rose/af_rose.c
1486     @@ -957,6 +957,7 @@ static int rose_getname(struct socket *sock, struct sockaddr *uaddr,
1487     struct rose_sock *rose = rose_sk(sk);
1488     int n;
1489    
1490     + memset(srose, 0, sizeof(*srose));
1491     if (peer != 0) {
1492     if (sk->sk_state != TCP_ESTABLISHED)
1493     return -ENOTCONN;
1494     diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
1495     index 66cfe88..860b1d4 100644
1496     --- a/net/sunrpc/clnt.c
1497     +++ b/net/sunrpc/clnt.c
1498     @@ -860,6 +860,7 @@ static inline void
1499     rpc_task_force_reencode(struct rpc_task *task)
1500     {
1501     task->tk_rqstp->rq_snd_buf.len = 0;
1502     + task->tk_rqstp->rq_bytes_sent = 0;
1503     }
1504    
1505     static inline void
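
The clnt.c one-liner above keeps two cursors consistent: rq_snd_buf.len is
the length of the encoded request, rq_bytes_sent is how much of it the
transport has already pushed out. Forcing a re-encode while only the length
was reset left a stale send offset behind, so the rewritten request could be
transmitted starting mid-buffer. A hedged sketch of the invariant, using a
simplified stand-in rather than the real struct rpc_rqst:

	struct fake_rqst {
		unsigned int rq_bytes_sent;              /* transmit cursor */
		struct { unsigned int len; } rq_snd_buf; /* encoded data    */
	};

	static void force_reencode(struct fake_rqst *req)
	{
		req->rq_snd_buf.len = 0;  /* discard the old encoding */
		req->rq_bytes_sent = 0;   /* and the old progress: the send
					   * loop transmits from offset
					   * rq_bytes_sent, so a stale value
					   * would skip the head of the
					   * freshly encoded request */
	}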
1506     diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
1507     index 99a52aa..b66be67 100644
1508     --- a/net/sunrpc/xprt.c
1509     +++ b/net/sunrpc/xprt.c
1510     @@ -645,10 +645,8 @@ xprt_init_autodisconnect(unsigned long data)
1511     if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
1512     goto out_abort;
1513     spin_unlock(&xprt->transport_lock);
1514     - if (xprt_connecting(xprt))
1515     - xprt_release_write(xprt, NULL);
1516     - else
1517     - queue_work(rpciod_workqueue, &xprt->task_cleanup);
1518     + set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
1519     + queue_work(rpciod_workqueue, &xprt->task_cleanup);
1520     return;
1521     out_abort:
1522     spin_unlock(&xprt->transport_lock);
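
xprt_init_autodisconnect() is a timer callback, so it is the wrong context
for tearing a socket down directly. The hunk drops the attempt to
special-case a connect in progress and instead always records the intent in
the transport's state word and defers the real work to rpciod. The pattern
in isolation (set_bit(), queue_work() and the identifiers around them are
exactly the ones used above):

	set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);      /* remember why */
	queue_work(rpciod_workqueue, &xprt->task_cleanup); /* act later, in
							    * process
							    * context */

The flag is consumed on the other side by the new xs_tcp_close() in the
xprtsock.c hunks below.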
1523     diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
1524     index 8f9295d..a283304 100644
1525     --- a/net/sunrpc/xprtsock.c
1526     +++ b/net/sunrpc/xprtsock.c
1527     @@ -748,6 +748,9 @@ out_release:
1528     *
1529     * This is used when all requests are complete; ie, no DRC state remains
1530     * on the server we want to save.
1531     + *
1532     + * The caller _must_ be holding XPRT_LOCKED in order to avoid issues with
1533     + * xs_reset_transport() zeroing the socket from underneath a writer.
1534     */
1535     static void xs_close(struct rpc_xprt *xprt)
1536     {
1537     @@ -781,6 +784,14 @@ clear_close_wait:
1538     xprt_disconnect_done(xprt);
1539     }
1540    
1541     +static void xs_tcp_close(struct rpc_xprt *xprt)
1542     +{
1543     + if (test_and_clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state))
1544     + xs_close(xprt);
1545     + else
1546     + xs_tcp_shutdown(xprt);
1547     +}
1548     +
1549     /**
1550     * xs_destroy - prepare to shutdown a transport
1551     * @xprt: doomed transport
1552     @@ -1676,11 +1687,21 @@ static void xs_tcp_connect_worker4(struct work_struct *work)
1553     goto out_clear;
1554     case -ECONNREFUSED:
1555     case -ECONNRESET:
1556     + case -ENETUNREACH:
1557     /* retry with existing socket, after a delay */
1558     - break;
1559     + goto out_clear;
1560     default:
1561     /* get rid of existing socket, and retry */
1562     xs_tcp_shutdown(xprt);
1563     + printk("%s: connect returned unhandled error %d\n",
1564     + __func__, status);
1565     + case -EADDRNOTAVAIL:
1566     + /* We're probably in TIME_WAIT. Get rid of existing socket,
1567     + * and retry
1568     + */
1569     + set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
1570     + xprt_force_disconnect(xprt);
1571     + status = -EAGAIN;
1572     }
1573     }
1574     out:
1575     @@ -1735,11 +1756,21 @@ static void xs_tcp_connect_worker6(struct work_struct *work)
1576     goto out_clear;
1577     case -ECONNREFUSED:
1578     case -ECONNRESET:
1579     + case -ENETUNREACH:
1580     /* retry with existing socket, after a delay */
1581     - break;
1582     + goto out_clear;
1583     default:
1584     /* get rid of existing socket, and retry */
1585     xs_tcp_shutdown(xprt);
1586     + printk("%s: connect returned unhandled error %d\n",
1587     + __func__, status);
1588     + case -EADDRNOTAVAIL:
1589     + /* We're probably in TIME_WAIT. Get rid of existing socket,
1590     + * and retry
1591     + */
1592     + set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
1593     + xprt_force_disconnect(xprt);
1594     + status = -EAGAIN;
1595     }
1596     }
1597     out:
1598     @@ -1871,7 +1902,7 @@ static struct rpc_xprt_ops xs_tcp_ops = {
1599     .buf_free = rpc_free,
1600     .send_request = xs_tcp_send_request,
1601     .set_retrans_timeout = xprt_set_retrans_timeout_def,
1602     - .close = xs_tcp_shutdown,
1603     + .close = xs_tcp_close,
1604     .destroy = xs_destroy,
1605     .print_stats = xs_tcp_print_stats,
1606     };
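
Three related changes land in xprtsock.c. First, xs_close() gains the
documented requirement that callers hold XPRT_LOCKED. Second, the new
xs_tcp_close() picks between a full teardown and an orderly shutdown based
on the XPRT_CONNECTION_CLOSE bit set elsewhere in this patch, and replaces
xs_tcp_shutdown as the transport's .close operation. Third, both the IPv4
and IPv6 connect workers reclassify their errors: -ENETUNREACH now retries
on the existing socket, while unknown errors are logged and then fall
through into the new -EADDRNOTAVAIL case, which assumes a connection stuck
in TIME_WAIT and forces a fresh socket. Condensed shape of the patched
switch (comments added, the surrounding function elided):

	switch (status) {
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ENETUNREACH:
		goto out_clear;          /* retry this socket later */
	default:
		/* get rid of existing socket, and retry */
		xs_tcp_shutdown(xprt);
		printk("%s: connect returned unhandled error %d\n",
		       __func__, status);
		/* deliberate fall-through */
	case -EADDRNOTAVAIL:
		set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
		xprt_force_disconnect(xprt);  /* new socket next time */
		status = -EAGAIN;
	}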
1607     diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
1608     index 1533f03..4cfbd79 100644
1609     --- a/sound/core/pcm_lib.c
1610     +++ b/sound/core/pcm_lib.c
1611     @@ -779,47 +779,24 @@ static int snd_interval_ratden(struct snd_interval *i,
1612     int snd_interval_list(struct snd_interval *i, unsigned int count, unsigned int *list, unsigned int mask)
1613     {
1614     unsigned int k;
1615     - int changed = 0;
1616     + struct snd_interval list_range;
1617    
1618     if (!count) {
1619     i->empty = 1;
1620     return -EINVAL;
1621     }
1622     + snd_interval_any(&list_range);
1623     + list_range.min = UINT_MAX;
1624     + list_range.max = 0;
1625     for (k = 0; k < count; k++) {
1626     if (mask && !(mask & (1 << k)))
1627     continue;
1628     - if (i->min == list[k] && !i->openmin)
1629     - goto _l1;
1630     - if (i->min < list[k]) {
1631     - i->min = list[k];
1632     - i->openmin = 0;
1633     - changed = 1;
1634     - goto _l1;
1635     - }
1636     - }
1637     - i->empty = 1;
1638     - return -EINVAL;
1639     - _l1:
1640     - for (k = count; k-- > 0;) {
1641     - if (mask && !(mask & (1 << k)))
1642     + if (!snd_interval_test(i, list[k]))
1643     continue;
1644     - if (i->max == list[k] && !i->openmax)
1645     - goto _l2;
1646     - if (i->max > list[k]) {
1647     - i->max = list[k];
1648     - i->openmax = 0;
1649     - changed = 1;
1650     - goto _l2;
1651     - }
1652     + list_range.min = min(list_range.min, list[k]);
1653     + list_range.max = max(list_range.max, list[k]);
1654     }
1655     - i->empty = 1;
1656     - return -EINVAL;
1657     - _l2:
1658     - if (snd_interval_checkempty(i)) {
1659     - i->empty = 1;
1660     - return -EINVAL;
1661     - }
1662     - return changed;
1663     + return snd_interval_refine(i, &list_range);
1664     }
1665    
1666     EXPORT_SYMBOL(snd_interval_list);
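
The snd_interval_list() rewrite replaces a goto-heavy scan that moved i->min
up and i->max down by hand with a two-step formulation: build list_range,
the tightest interval spanning every list value that already passes
snd_interval_test(), then let snd_interval_refine() intersect it with *i.
That delegates the empty-interval and open-endpoint bookkeeping to one
well-tested helper instead of duplicating it. A standalone analogue in plain
C (closed endpoints only; the ALSA version also tracks openmin/openmax and
honours the entry bitmask):

	#include <limits.h>

	struct range { unsigned int min, max; int empty; };

	/* Intersect *i with *v; report whether *i changed. */
	static int refine(struct range *i, const struct range *v)
	{
		int changed = 0;

		if (i->min < v->min) { i->min = v->min; changed = 1; }
		if (i->max > v->max) { i->max = v->max; changed = 1; }
		if (i->min > i->max) { i->empty = 1; return -1; }
		return changed;
	}

	static int range_list(struct range *i, unsigned int count,
			      const unsigned int *list)
	{
		struct range span = { UINT_MAX, 0, 0 };
		unsigned int k;

		for (k = 0; k < count; k++) {
			if (list[k] < i->min || list[k] > i->max)
				continue;        /* value not admissible */
			if (list[k] < span.min) span.min = list[k];
			if (list[k] > span.max) span.max = list[k];
		}
		return refine(i, &span); /* no survivors: span stays
					  * inverted and refine() marks
					  * the interval empty */
	}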
1667     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
1668     index 07f8dcc..2581b8c 100644
1669     --- a/sound/pci/hda/patch_realtek.c
1670     +++ b/sound/pci/hda/patch_realtek.c
1671     @@ -5580,9 +5580,9 @@ static struct hda_verb alc885_mbp_ch2_init[] = {
1672     };
1673    
1674     /*
1675     - * 6ch mode
1676     + * 4ch mode
1677     */
1678     -static struct hda_verb alc885_mbp_ch6_init[] = {
1679     +static struct hda_verb alc885_mbp_ch4_init[] = {
1680     { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
1681     { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
1682     { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
1683     @@ -5591,9 +5591,9 @@ static struct hda_verb alc885_mbp_ch6_init[] = {
1684     { } /* end */
1685     };
1686    
1687     -static struct hda_channel_mode alc885_mbp_6ch_modes[2] = {
1688     +static struct hda_channel_mode alc885_mbp_4ch_modes[2] = {
1689     { 2, alc885_mbp_ch2_init },
1690     - { 6, alc885_mbp_ch6_init },
1691     + { 4, alc885_mbp_ch4_init },
1692     };
1693    
1694    
1695     @@ -5628,10 +5628,11 @@ static struct snd_kcontrol_new alc882_base_mixer[] = {
1696     };
1697    
1698     static struct snd_kcontrol_new alc885_mbp3_mixer[] = {
1699     - HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x00, HDA_OUTPUT),
1700     - HDA_BIND_MUTE ("Front Playback Switch", 0x0c, 0x02, HDA_INPUT),
1701     - HDA_CODEC_MUTE ("Speaker Playback Switch", 0x14, 0x00, HDA_OUTPUT),
1702     - HDA_CODEC_VOLUME("Line-Out Playback Volume", 0x0d, 0x00, HDA_OUTPUT),
1703     + HDA_CODEC_VOLUME("Speaker Playback Volume", 0x0c, 0x00, HDA_OUTPUT),
1704     + HDA_BIND_MUTE ("Speaker Playback Switch", 0x0c, 0x02, HDA_INPUT),
1705     + HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0e, 0x00, HDA_OUTPUT),
1706     + HDA_BIND_MUTE ("Headphone Playback Switch", 0x0e, 0x02, HDA_INPUT),
1707     + HDA_CODEC_VOLUME("Surround Playback Volume", 0x0d, 0x00, HDA_OUTPUT),
1708     HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
1709     HDA_CODEC_MUTE ("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
1710     HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x00, HDA_INPUT),
1711     @@ -5879,14 +5880,18 @@ static struct hda_verb alc885_mbp3_init_verbs[] = {
1712     {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
1713     {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
1714     {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
1715     + /* HP mixer */
1716     + {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
1717     + {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
1718     + {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
1719     /* Front Pin: output 0 (0x0c) */
1720     {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
1721     {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
1722     {0x14, AC_VERB_SET_CONNECT_SEL, 0x00},
1723     - /* HP Pin: output 0 (0x0d) */
1724     + /* HP Pin: output 0 (0x0e) */
1725     {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc4},
1726     - {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
1727     - {0x15, AC_VERB_SET_CONNECT_SEL, 0x00},
1728     + {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
1729     + {0x15, AC_VERB_SET_CONNECT_SEL, 0x02},
1730     {0x15, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN},
1731     /* Mic (rear) pin: input vref at 80% */
1732     {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
1733     @@ -6326,10 +6331,11 @@ static struct alc_config_preset alc882_presets[] = {
1734     .mixers = { alc885_mbp3_mixer, alc882_chmode_mixer },
1735     .init_verbs = { alc885_mbp3_init_verbs,
1736     alc880_gpio1_init_verbs },
1737     - .num_dacs = ARRAY_SIZE(alc882_dac_nids),
1738     + .num_dacs = 2,
1739     .dac_nids = alc882_dac_nids,
1740     - .channel_mode = alc885_mbp_6ch_modes,
1741     - .num_channel_mode = ARRAY_SIZE(alc885_mbp_6ch_modes),
1742     + .hp_nid = 0x04,
1743     + .channel_mode = alc885_mbp_4ch_modes,
1744     + .num_channel_mode = ARRAY_SIZE(alc885_mbp_4ch_modes),
1745     .input_mux = &alc882_capture_source,
1746     .dig_out_nid = ALC882_DIGOUT_NID,
1747     .dig_in_nid = ALC882_DIGIN_NID,
1748     @@ -11634,6 +11640,8 @@ static int patch_alc269(struct hda_codec *codec)
1749     spec->num_adc_nids = ARRAY_SIZE(alc269_adc_nids);
1750     spec->capsrc_nids = alc269_capsrc_nids;
1751    
1752     + spec->vmaster_nid = 0x02;
1753     +
1754     codec->patch_ops = alc_patch_ops;
1755     if (board_config == ALC269_AUTO)
1756     spec->init_hook = alc269_auto_init;
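
The Realtek hunks are board fixes for the ALC885 in the MacBook Pro 3,1 plus
one ALC269 line. Reading them together: the bogus 6-channel mode is replaced
by the 4-channel mode the hardware actually offers, the mixer controls are
renamed after the physical outputs (speakers behind DAC 0x0c, headphones
moved from 0x0d to their own DAC 0x0e, with the 0x15 headphone pin re-routed
via connection 0x02 and unmuted), and the preset is narrowed to two DACs
with hp_nid 0x04. For the ALC269, setting spec->vmaster_nid lets the driver
attach a virtual "Master" control on top of NID 0x02. The edited init
tables all follow the usual HDA verb-table layout; an illustrative entry
(values invented, not taken from the patch):

	/* Each entry sends one verb to one widget NID; the empty
	 * entry terminates the table. */
	static struct hda_verb example_init[] = {
		{ 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
		{ 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
		{ } /* end */
	};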
1757     diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
1758     index 7dd9b0b..db062b5 100644
1759     --- a/virt/kvm/kvm_main.c
1760     +++ b/virt/kvm/kvm_main.c
1761     @@ -406,6 +406,8 @@ static void kvm_destroy_vm(struct kvm *kvm)
1762     #endif
1763     #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
1764     mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
1765     +#else
1766     + kvm_arch_flush_shadow(kvm);
1767     #endif
1768     kvm_arch_destroy_vm(kvm);
1769     mmdrop(mm);
1770     @@ -548,6 +550,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
1771     if (!new.dirty_bitmap)
1772     goto out_free;
1773     memset(new.dirty_bitmap, 0, dirty_bytes);
1774     + if (old.npages)
1775     + kvm_arch_flush_shadow(kvm);
1776     }
1777     #endif /* not defined CONFIG_S390 */
1778    
1779     @@ -726,7 +730,7 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
1780     return page_to_pfn(bad_page);
1781     }
1782    
1783     - npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
1784     + npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
1785     NULL);
1786    
1787     if (unlikely(npages != 1)) {
1788     @@ -1074,12 +1078,11 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
1789    
1790     r = kvm_arch_vcpu_setup(vcpu);
1791     if (r)
1792     - goto vcpu_destroy;
1793     + return r;
1794    
1795     mutex_lock(&kvm->lock);
1796     if (kvm->vcpus[n]) {
1797     r = -EEXIST;
1798     - mutex_unlock(&kvm->lock);
1799     goto vcpu_destroy;
1800     }
1801     kvm->vcpus[n] = vcpu;
1802     @@ -1095,8 +1098,8 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
1803     unlink:
1804     mutex_lock(&kvm->lock);
1805     kvm->vcpus[n] = NULL;
1806     - mutex_unlock(&kvm->lock);
1807     vcpu_destroy:
1808     + mutex_unlock(&kvm->lock);
1809     kvm_arch_vcpu_destroy(vcpu);
1810     return r;
1811     }
1812     @@ -1118,6 +1121,8 @@ static long kvm_vcpu_ioctl(struct file *filp,
1813     struct kvm_vcpu *vcpu = filp->private_data;
1814     void __user *argp = (void __user *)arg;
1815     int r;
1816     + struct kvm_fpu *fpu = NULL;
1817     + struct kvm_sregs *kvm_sregs = NULL;
1818    
1819     if (vcpu->kvm->mm != current->mm)
1820     return -EIO;
1821     @@ -1165,25 +1170,28 @@ out_free2:
1822     break;
1823     }
1824     case KVM_GET_SREGS: {
1825     - struct kvm_sregs kvm_sregs;
1826     -
1827     - memset(&kvm_sregs, 0, sizeof kvm_sregs);
1828     - r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
1829     + kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
1830     + r = -ENOMEM;
1831     + if (!kvm_sregs)
1832     + goto out;
1833     + r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
1834     if (r)
1835     goto out;
1836     r = -EFAULT;
1837     - if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
1838     + if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
1839     goto out;
1840     r = 0;
1841     break;
1842     }
1843     case KVM_SET_SREGS: {
1844     - struct kvm_sregs kvm_sregs;
1845     -
1846     + kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
1847     + r = -ENOMEM;
1848     + if (!kvm_sregs)
1849     + goto out;
1850     r = -EFAULT;
1851     - if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
1852     + if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
1853     goto out;
1854     - r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
1855     + r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
1856     if (r)
1857     goto out;
1858     r = 0;
1859     @@ -1264,25 +1272,28 @@ out_free2:
1860     break;
1861     }
1862     case KVM_GET_FPU: {
1863     - struct kvm_fpu fpu;
1864     -
1865     - memset(&fpu, 0, sizeof fpu);
1866     - r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, &fpu);
1867     + fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
1868     + r = -ENOMEM;
1869     + if (!fpu)
1870     + goto out;
1871     + r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
1872     if (r)
1873     goto out;
1874     r = -EFAULT;
1875     - if (copy_to_user(argp, &fpu, sizeof fpu))
1876     + if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
1877     goto out;
1878     r = 0;
1879     break;
1880     }
1881     case KVM_SET_FPU: {
1882     - struct kvm_fpu fpu;
1883     -
1884     + fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
1885     + r = -ENOMEM;
1886     + if (!fpu)
1887     + goto out;
1888     r = -EFAULT;
1889     - if (copy_from_user(&fpu, argp, sizeof fpu))
1890     + if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
1891     goto out;
1892     - r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, &fpu);
1893     + r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
1894     if (r)
1895     goto out;
1896     r = 0;
1897     @@ -1292,6 +1303,8 @@ out_free2:
1898     r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
1899     }
1900     out:
1901     + kfree(fpu);
1902     + kfree(kvm_sregs);
1903     return r;
1904     }
1905
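
The kvm_main.c hunks bundle four independent fixes: kvm_destroy_vm() and
__kvm_set_memory_region() flush shadow page tables on configurations where
no MMU notifier would do it, gfn_to_pfn() drops the force flag from
get_user_pages() so writes are no longer forced through read-only mappings,
kvm_vm_ioctl_create_vcpu() rebalances its error paths so kvm->lock is
released exactly once, and kvm_vcpu_ioctl() moves struct kvm_sregs and
struct kvm_fpu off the kernel stack. Those two structures are large enough
to threaten the fixed-size kernel stack in a deep ioctl path, hence the
kzalloc()/kmalloc() conversion with a single cleanup point. Generic shape of
that conversion; struct big_args and do_thing() are hypothetical:

	static long example_ioctl(void __user *argp)
	{
		struct big_args *a;
		long r;

		r = -ENOMEM;
		a = kzalloc(sizeof(*a), GFP_KERNEL); /* zeroed, so no
						      * uninitialized bytes
						      * can leak back to
						      * user space */
		if (!a)
			goto out;
		r = -EFAULT;
		if (copy_from_user(a, argp, sizeof(*a)))
			goto out;
		r = do_thing(a);
	out:
		kfree(a);   /* kfree(NULL) is a no-op, so one label
			     * covers every exit path */
		return r;
	}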