diff --git a/Documentation/hwmon/ltc4245 b/Documentation/hwmon/ltc4245 index 02838a4..86b5880 100644 --- a/Documentation/hwmon/ltc4245 +++ b/Documentation/hwmon/ltc4245 @@ -72,9 +72,7 @@ in6_min_alarm 5v output undervoltage alarm in7_min_alarm 3v output undervoltage alarm in8_min_alarm Vee (-12v) output undervoltage alarm -in9_input GPIO #1 voltage data -in10_input GPIO #2 voltage data -in11_input GPIO #3 voltage data +in9_input GPIO voltage data power1_input 12v power usage (mW) power2_input 5v power usage (mW) diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c index 8ba7044..b07bfee 100644 --- a/arch/arm/common/sa1111.c +++ b/arch/arm/common/sa1111.c @@ -887,8 +887,6 @@ static int sa1111_resume(struct platform_device *dev) if (!save) return 0; - spin_lock_irqsave(&sachip->lock, flags); - /* * Ensure that the SA1111 is still here. * FIXME: shouldn't do this here. @@ -905,6 +903,13 @@ static int sa1111_resume(struct platform_device *dev) * First of all, wake up the chip. */ sa1111_wake(sachip); + + /* + * Only lock for write ops. Also, sa1111_wake must be called with + * released spinlock! + */ + spin_lock_irqsave(&sachip->lock, flags); + sa1111_writel(0, sachip->base + SA1111_INTC + SA1111_INTEN0); sa1111_writel(0, sachip->base + SA1111_INTC + SA1111_INTEN1); diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c index 70997d5..dd9598b 100644 --- a/arch/arm/mm/copypage-feroceon.c +++ b/arch/arm/mm/copypage-feroceon.c @@ -18,7 +18,7 @@ feroceon_copy_user_page(void *kto, const void *kfrom) { asm("\ stmfd sp!, {r4-r9, lr} \n\ - mov ip, %0 \n\ + mov ip, %2 \n\ 1: mov lr, r1 \n\ ldmia r1!, {r2 - r9} \n\ pld [lr, #32] \n\ @@ -64,7 +64,7 @@ feroceon_copy_user_page(void *kto, const void *kfrom) mcr p15, 0, ip, c7, c10, 4 @ drain WB\n\ ldmfd sp!, {r4-r9, pc}" : - : "I" (PAGE_SIZE)); + : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE)); } void feroceon_copy_user_highpage(struct page *to, struct page *from, diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c index 9ab0984..7bc0ac7 100644 --- a/arch/arm/mm/copypage-v4wb.c +++ b/arch/arm/mm/copypage-v4wb.c @@ -27,7 +27,7 @@ v4wb_copy_user_page(void *kto, const void *kfrom) { asm("\ stmfd sp!, {r4, lr} @ 2\n\ - mov r2, %0 @ 1\n\ + mov r2, %2 @ 1\n\ ldmia r1!, {r3, r4, ip, lr} @ 4\n\ 1: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\ stmia r0!, {r3, r4, ip, lr} @ 4\n\ @@ -44,7 +44,7 @@ v4wb_copy_user_page(void *kto, const void *kfrom) mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB\n\ ldmfd sp!, {r4, pc} @ 3" : - : "I" (PAGE_SIZE / 64)); + : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64)); } void v4wb_copy_user_highpage(struct page *to, struct page *from, diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c index 300efaf..35bf609 100644 --- a/arch/arm/mm/copypage-v4wt.c +++ b/arch/arm/mm/copypage-v4wt.c @@ -25,7 +25,7 @@ v4wt_copy_user_page(void *kto, const void *kfrom) { asm("\ stmfd sp!, {r4, lr} @ 2\n\ - mov r2, %0 @ 1\n\ + mov r2, %2 @ 1\n\ ldmia r1!, {r3, r4, ip, lr} @ 4\n\ 1: stmia r0!, {r3, r4, ip, lr} @ 4\n\ ldmia r1!, {r3, r4, ip, lr} @ 4+1\n\ @@ -40,7 +40,7 @@ v4wt_copy_user_page(void *kto, const void *kfrom) mcr p15, 0, r2, c7, c7, 0 @ flush ID cache\n\ ldmfd sp!, {r4, pc} @ 3" : - : "I" (PAGE_SIZE / 64)); + : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64)); } void v4wt_copy_user_highpage(struct page *to, struct page *from, diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c index bc4525f..27dc363 100644 --- a/arch/arm/mm/copypage-xsc3.c +++ 
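A note on the arch/arm/mm/copypage-*.c hunks above: GCC extended-asm operands are numbered across the output list first and then the input list, left to right. Once kto and kfrom are declared as "r" inputs they become %0 and %1, and the PAGE_SIZE immediate shifts to %2, which is what the "mov ip, %0" to "mov ip, %2" edits track. A minimal, compile-only userspace sketch of the numbering (the empty template does no real work, and the generic "i" constraint stands in for the ARM-specific "I" used in the kernel code):

#include <stdio.h>

int main(void)
{
        int page[4] = { 1, 2, 3, 4 };
        void *kto = page;
        const void *kfrom = page;

        /*
         * Operands are numbered across the output list first, then the
         * inputs, left to right.  With no outputs, %0 = kto, %1 = kfrom
         * and %2 = the immediate, which is why the templates above now
         * read "mov ip, %2" for PAGE_SIZE.
         */
        asm volatile("" : : "r" (kto), "r" (kfrom), "i" (4096));

        printf("%p %p\n", kto, (const void *)kfrom);
        return 0;
}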
b/arch/arm/mm/copypage-xsc3.c @@ -34,7 +34,7 @@ xsc3_mc_copy_user_page(void *kto, const void *kfrom) { asm("\ stmfd sp!, {r4, r5, lr} \n\ - mov lr, %0 \n\ + mov lr, %2 \n\ \n\ pld [r1, #0] \n\ pld [r1, #32] \n\ @@ -67,7 +67,7 @@ xsc3_mc_copy_user_page(void *kto, const void *kfrom) \n\ ldmfd sp!, {r4, r5, pc}" : - : "I" (PAGE_SIZE / 64 - 1)); + : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64 - 1)); } void xsc3_mc_copy_user_highpage(struct page *to, struct page *from, diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 10e0680..3191cd6 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -386,6 +386,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr, if (addr < TASK_SIZE) return do_page_fault(addr, fsr, regs); + if (user_mode(regs)) + goto bad_area; + index = pgd_index(addr); /* diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index a04ffbb..3cbdd5c 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -632,10 +632,10 @@ void __init mem_init(void) void free_initmem(void) { #ifdef CONFIG_HAVE_TCM - extern char *__tcm_start, *__tcm_end; + extern char __tcm_start, __tcm_end; - totalram_pages += free_area(__phys_to_pfn(__pa(__tcm_start)), - __phys_to_pfn(__pa(__tcm_end)), + totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)), + __phys_to_pfn(__pa(&__tcm_end)), "TCM link"); #endif diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S index 66dc2d0..d66cead 100644 --- a/arch/arm/vfp/vfphw.S +++ b/arch/arm/vfp/vfphw.S @@ -277,7 +277,7 @@ ENTRY(vfp_put_double) #ifdef CONFIG_VFPv3 @ d16 - d31 registers .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 -1: mcrr p11, 3, r1, r2, c\dr @ fmdrr r1, r2, d\dr +1: mcrr p11, 3, r0, r1, c\dr @ fmdrr r0, r1, d\dr mov pc, lr .org 1b + 8 .endr diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h index 8542bc3..93f6c63 100644 --- a/arch/blackfin/include/asm/cache.h +++ b/arch/blackfin/include/asm/cache.h @@ -15,6 +15,8 @@ #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) #define SMP_CACHE_BYTES L1_CACHE_BYTES +#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES + #ifdef CONFIG_SMP #define __cacheline_aligned #else diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h index 2797163..7dc0f0f 100644 --- a/arch/frv/include/asm/cache.h +++ b/arch/frv/include/asm/cache.h @@ -17,6 +17,8 @@ #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT) #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) +#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES + #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES))) #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES))) diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h index fed3fd3..ecafbe1 100644 --- a/arch/m68k/include/asm/cache.h +++ b/arch/m68k/include/asm/cache.h @@ -8,4 +8,6 @@ #define L1_CACHE_SHIFT 4 #define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT) +#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES + #endif diff --git a/arch/mn10300/include/asm/cache.h b/arch/mn10300/include/asm/cache.h index e03cfa2..6e2fe28 100644 --- a/arch/mn10300/include/asm/cache.h +++ b/arch/mn10300/include/asm/cache.h @@ -21,6 +21,8 @@ #define L1_CACHE_DISPARITY L1_CACHE_NENTRIES * L1_CACHE_BYTES #endif +#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES + /* data cache purge registers * - read from the register to unconditionally purge that cache line * - write address & 0xffffff00 to conditionally purge that cache line diff --git a/arch/parisc/math-emu/decode_exc.c b/arch/parisc/math-emu/decode_exc.c index 3ca1c61..27a7492 100644 --- 
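A note on the ARCH_KMALLOC_MINALIGN defines added above (blackfin, frv, m68k, mn10300, and xtensa later in this patch): raising the minimum kmalloc() alignment to L1_CACHE_BYTES keeps every allocation on its own cache line(s), which matters on architectures whose DMA is not cache-coherent, since the cache maintenance done around a DMA transfer would otherwise corrupt a neighbouring object sharing the line. A userspace sketch of the same rounding idea (aligned_alloc stands in for the slab allocator, and the cache line size is invented):

#include <stdio.h>
#include <stdlib.h>

#define L1_CACHE_BYTES 32       /* hypothetical cache line size */

/* Round the size up to whole cache lines, as a minimum-alignment
 * allocator would, so the buffer never shares a line with another object. */
static void *dma_safe_alloc(size_t size)
{
        size_t rounded = (size + L1_CACHE_BYTES - 1) &
                         ~(size_t)(L1_CACHE_BYTES - 1);

        return aligned_alloc(L1_CACHE_BYTES, rounded);
}

int main(void)
{
        void *buf = dma_safe_alloc(100);

        printf("buffer at %p, padded to %d-byte lines\n", buf, L1_CACHE_BYTES);
        free(buf);
        return 0;
}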
a/arch/parisc/math-emu/decode_exc.c +++ b/arch/parisc/math-emu/decode_exc.c @@ -342,6 +342,7 @@ decode_fpu(unsigned int Fpu_register[], unsigned int trap_counts[]) return SIGNALCODE(SIGFPE, FPE_FLTINV); case DIVISIONBYZEROEXCEPTION: update_trap_counts(Fpu_register, aflags, bflags, trap_counts); + Clear_excp_register(exception_index); return SIGNALCODE(SIGFPE, FPE_FLTDIV); case INEXACTEXCEPTION: update_trap_counts(Fpu_register, aflags, bflags, trap_counts); diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index e6dc595..0ca0b99 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -766,6 +766,8 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); int i; + vcpu_load(vcpu); + sregs->pvr = vcpu->arch.pvr; sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1; @@ -784,6 +786,9 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw; } } + + vcpu_put(vcpu); + return 0; } @@ -793,6 +798,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); int i; + vcpu_load(vcpu); + kvmppc_set_pvr(vcpu, sregs->pvr); vcpu3s->sdr1 = sregs->u.s.sdr1; @@ -819,6 +826,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, /* Flush the MMU after messing with the segments */ kvmppc_mmu_pte_flush(vcpu, 0, 0); + + vcpu_put(vcpu); + return 0; } diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 06f5a9e..0a5263e 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -443,6 +443,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { int i; + vcpu_load(vcpu); + regs->pc = vcpu->arch.pc; regs->cr = vcpu->arch.cr; regs->ctr = vcpu->arch.ctr; @@ -463,6 +465,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) regs->gpr[i] = vcpu->arch.gpr[i]; + vcpu_put(vcpu); + return 0; } @@ -470,6 +474,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { int i; + vcpu_load(vcpu); + vcpu->arch.pc = regs->pc; vcpu->arch.cr = regs->cr; vcpu->arch.ctr = regs->ctr; @@ -489,6 +495,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) for (i = 0; i < ARRAY_SIZE(vcpu->arch.gpr); i++) vcpu->arch.gpr[i] = regs->gpr[i]; + vcpu_put(vcpu); + return 0; } @@ -517,7 +525,12 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr) { - return kvmppc_core_vcpu_translate(vcpu, tr); + int r; + + vcpu_load(vcpu); + r = kvmppc_core_vcpu_translate(vcpu, tr); + vcpu_put(vcpu); + return r; } int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index f06cf93..82098ae 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -181,7 +181,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) { struct kvm_vcpu *vcpu; vcpu = kvmppc_core_vcpu_create(kvm, id); - kvmppc_create_vcpu_debugfs(vcpu, id); + if (!IS_ERR(vcpu)) + kvmppc_create_vcpu_debugfs(vcpu, id); return vcpu; } diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S index 64e2e49..3ac0cd3 100644 --- a/arch/powerpc/lib/string.S +++ b/arch/powerpc/lib/string.S @@ -71,7 +71,7 @@ _GLOBAL(strcmp) _GLOBAL(strncmp) PPC_LCMPI r5,0 - beqlr + ble- 2f mtctr r5 addi r5,r3,-1 
addi r4,r4,-1 @@ -82,6 +82,8 @@ _GLOBAL(strncmp) beqlr 1 bdnzt eq,1b blr +2: li r3,0 + blr _GLOBAL(strlen) addi r4,r3,-1 diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c index 2c9e522..7fd90d0 100644 --- a/arch/powerpc/oprofile/op_model_cell.c +++ b/arch/powerpc/oprofile/op_model_cell.c @@ -1077,7 +1077,7 @@ static int calculate_lfsr(int n) index = ENTRIES-1; /* make sure index is valid */ - if ((index > ENTRIES) || (index < 0)) + if ((index >= ENTRIES) || (index < 0)) index = ENTRIES-1; return initial_lfsr[index]; diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index b842378..da684a7 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -154,30 +154,6 @@ static void pseries_mach_cpu_die(void) for(;;); } -static int qcss_tok; /* query-cpu-stopped-state token */ - -/* Get state of physical CPU. - * Return codes: - * 0 - The processor is in the RTAS stopped state - * 1 - stop-self is in progress - * 2 - The processor is not in the RTAS stopped state - * -1 - Hardware Error - * -2 - Hardware Busy, Try again later. - */ -static int query_cpu_stopped(unsigned int pcpu) -{ - int cpu_status, status; - - status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu); - if (status != 0) { - printk(KERN_ERR - "RTAS query-cpu-stopped-state failed: %i\n", status); - return status; - } - - return cpu_status; -} - static int pseries_cpu_disable(void) { int cpu = smp_processor_id(); @@ -224,8 +200,9 @@ static void pseries_cpu_die(unsigned int cpu) } else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) { for (tries = 0; tries < 25; tries++) { - cpu_status = query_cpu_stopped(pcpu); - if (cpu_status == 0 || cpu_status == -1) + cpu_status = smp_query_cpu_stopped(pcpu); + if (cpu_status == QCSS_STOPPED || + cpu_status == QCSS_HARDWARE_ERROR) break; cpu_relax(); } @@ -400,6 +377,7 @@ static int __init pseries_cpu_hotplug_init(void) struct device_node *np; const char *typep; int cpu; + int qcss_tok; for_each_node_by_name(np, "interrupt-controller") { typep = of_get_property(np, "compatible", NULL); diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h index 0603c91..e724ef8 100644 --- a/arch/powerpc/platforms/pseries/plpar_wrappers.h +++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h @@ -4,6 +4,14 @@ #include #include +/* Get state of physical CPU from query_cpu_stopped */ +int smp_query_cpu_stopped(unsigned int pcpu); +#define QCSS_STOPPED 0 +#define QCSS_STOPPING 1 +#define QCSS_NOT_STOPPED 2 +#define QCSS_HARDWARE_ERROR -1 +#define QCSS_HARDWARE_BUSY -2 + static inline long poll_pending(void) { return plpar_hcall_norets(H_POLL_PENDING); diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index b488663..5530b4b 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c @@ -57,6 +57,28 @@ */ static cpumask_t of_spin_map; +/* Query where a cpu is now. 
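A note on the arch/powerpc/lib/string.S change above: for n == 0, strncmp() must report equality without touching either string, but the old `beqlr` returned with r3 still holding the first pointer argument, so callers saw a junk non-zero result. The new `ble- 2f` path loads an explicit 0 into r3 before returning. A plain C restatement of the required behaviour (a reference sketch, not the kernel implementation):

#include <stddef.h>
#include <stdio.h>

static int my_strncmp(const char *s1, const char *s2, size_t n)
{
        if (n == 0)
                return 0;       /* explicit zero, the job of "li r3,0" above */

        while (--n && *s1 && *s1 == *s2) {
                s1++;
                s2++;
        }
        return (unsigned char)*s1 - (unsigned char)*s2;
}

int main(void)
{
        printf("%d\n", my_strncmp("abc", "xyz", 0));    /* 0, nothing read */
        printf("%d\n", my_strncmp("abc", "abd", 3));    /* negative */
        return 0;
}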
Return codes #defined in plpar_wrappers.h */ +int smp_query_cpu_stopped(unsigned int pcpu) +{ + int cpu_status, status; + int qcss_tok = rtas_token("query-cpu-stopped-state"); + + if (qcss_tok == RTAS_UNKNOWN_SERVICE) { + printk(KERN_INFO "Firmware doesn't support " + "query-cpu-stopped-state\n"); + return QCSS_HARDWARE_ERROR; + } + + status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu); + if (status != 0) { + printk(KERN_ERR + "RTAS query-cpu-stopped-state failed: %i\n", status); + return status; + } + + return cpu_status; +} + /** * smp_startup_cpu() - start the given cpu * @@ -82,6 +104,12 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu) pcpu = get_hard_smp_processor_id(lcpu); + /* Check to see if the CPU out of FW already for kexec */ + if (smp_query_cpu_stopped(pcpu) == QCSS_NOT_STOPPED){ + cpu_set(lcpu, of_spin_map); + return 1; + } + /* Fixup atomic count: it exited inside IRQ handler. */ task_thread_info(paca[lcpu].__current)->preempt_count = 0; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index f8bcaef..e06d18c 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -339,11 +339,13 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, rc = kvm_vcpu_init(vcpu, kvm, id); if (rc) - goto out_free_cpu; + goto out_free_sie_block; VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu, vcpu->arch.sie_block); return vcpu; +out_free_sie_block: + free_page((unsigned long)(vcpu->arch.sie_block)); out_free_cpu: kfree(vcpu); out_nomem: diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 4f865e8..e62b4b9 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -193,6 +193,7 @@ union kvm_mmu_page_role { unsigned invalid:1; unsigned cr4_pge:1; unsigned nxe:1; + unsigned cr0_wp:1; }; }; @@ -533,6 +534,8 @@ struct kvm_x86_ops { u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio); bool (*gb_page_enable)(void); + void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry); + const struct trace_print_flags *exit_reasons_str; }; diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 4604e6a..d86da72 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -199,8 +199,9 @@ #define MSR_IA32_EBL_CR_POWERON 0x0000002a #define MSR_IA32_FEATURE_CONTROL 0x0000003a -#define FEATURE_CONTROL_LOCKED (1<<0) -#define FEATURE_CONTROL_VMXON_ENABLED (1<<2) +#define FEATURE_CONTROL_LOCKED (1<<0) +#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1<<1) +#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2) #define MSR_IA32_APICBASE 0x0000001b #define MSR_IA32_APICBASE_BSP (1<<8) diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 2e77516..ca15b93 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -1419,6 +1419,7 @@ static int __attach_device(struct device *dev, struct protection_domain *domain) { struct iommu_dev_data *dev_data, *alias_data; + int ret; dev_data = get_dev_data(dev); alias_data = get_dev_data(dev_data->alias); @@ -1430,13 +1431,14 @@ static int __attach_device(struct device *dev, spin_lock(&domain->lock); /* Some sanity checks */ + ret = -EBUSY; if (alias_data->domain != NULL && alias_data->domain != domain) - return -EBUSY; + goto out_unlock; if (dev_data->domain != NULL && dev_data->domain != domain) - return -EBUSY; + goto out_unlock; /* Do real assignment */ if (dev_data->alias != dev) { @@ -1452,10 +1454,14 @@ static int 
__attach_device(struct device *dev, atomic_inc(&dev_data->bind); + ret = 0; + +out_unlock: + /* ready */ spin_unlock(&domain->lock); - return 0; + return ret; } /* @@ -2256,10 +2262,6 @@ int __init amd_iommu_init_dma_ops(void) iommu_detected = 1; swiotlb = 0; -#ifdef CONFIG_GART_IOMMU - gart_iommu_aperture_disabled = 1; - gart_iommu_aperture = 0; -#endif /* Make the driver finally visible to the drivers */ dma_ops = &amd_iommu_dma_ops; diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c index 883d619..86a5a11 100644 --- a/arch/x86/kernel/amd_iommu_init.c +++ b/arch/x86/kernel/amd_iommu_init.c @@ -286,8 +286,12 @@ static u8 * __init iommu_map_mmio_space(u64 address) { u8 *ret; - if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) + if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) { + pr_err("AMD-Vi: Can not reserve memory region %llx for mmio\n", + address); + pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n"); return NULL; + } ret = ioremap_nocache(address, MMIO_REGION_LENGTH); if (ret != NULL) @@ -1296,7 +1300,7 @@ static int __init amd_iommu_init(void) ret = amd_iommu_init_dma_ops(); if (ret) - goto free; + goto free_disable; amd_iommu_init_api(); @@ -1314,9 +1318,10 @@ static int __init amd_iommu_init(void) out: return ret; -free: +free_disable: disable_iommus(); +free: amd_iommu_uninit_devices(); free_pages((unsigned long)amd_iommu_pd_alloc_bitmap, @@ -1335,6 +1340,15 @@ free: free_unity_maps(); +#ifdef CONFIG_GART_IOMMU + /* + * We failed to initialize the AMD IOMMU - try fallback to GART + * if possible. + */ + gart_iommu_init(); + +#endif + goto out; } diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index c7ca8e2..16307ff 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1060,8 +1060,11 @@ static int __hw_perf_event_init(struct perf_event *event) if (atomic_read(&active_events) == 0) { if (!reserve_pmc_hardware()) err = -EBUSY; - else + else { err = reserve_bts_hardware(); + if (err) + release_pmc_hardware(); + } } if (!err) atomic_inc(&active_events); diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c index 03801f2..dfdfe46 100644 --- a/arch/x86/kernel/pvclock.c +++ b/arch/x86/kernel/pvclock.c @@ -109,11 +109,14 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src) return pv_tsc_khz; } +static atomic64_t last_value = ATOMIC64_INIT(0); + cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src) { struct pvclock_shadow_time shadow; unsigned version; cycle_t ret, offset; + u64 last; do { version = pvclock_get_time_values(&shadow, src); @@ -123,6 +126,27 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src) barrier(); } while (version != src->version); + /* + * Assumption here is that last_value, a global accumulator, always goes + * forward. If we are less than that, we should not be much smaller. + * We assume there is an error marging we're inside, and then the correction + * does not sacrifice accuracy. + * + * For reads: global may have changed between test and return, + * but this means someone else updated poked the clock at a later time. + * We just need to make sure we are not seeing a backwards event. + * + * For updates: last_value = ret is not enough, since two vcpus could be + * updating at the same time, and one of them could be slightly behind, + * making the assumption that last_value always go forward fail to hold. 
+ */ + last = atomic64_read(&last_value); + do { + if (ret < last) + return last; + last = atomic64_cmpxchg(&last_value, last, ret); + } while (unlikely(last != ret)); + return ret; } diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 5d9e40c..4772a9f 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -663,6 +663,17 @@ static struct dmi_system_id __initdata bad_bios_dmi_table[] = { DMI_MATCH(DMI_BOARD_NAME, "DG45FC"), }, }, + /* + * The Dell Inspiron Mini 1012 has DMI_BIOS_VENDOR = "Dell Inc.", so + * match on the product name. + */ + { + .callback = dmi_low_memory_corruption, + .ident = "Phoenix BIOS", + .matches = { + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"), + }, + }, #endif {} }; diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c index 86c9f91..46b8277 100644 --- a/arch/x86/kernel/tboot.c +++ b/arch/x86/kernel/tboot.c @@ -46,6 +46,7 @@ /* Global pointer to shared data; NULL means no measured launch. */ struct tboot *tboot __read_mostly; +EXPORT_SYMBOL(tboot); /* timeout for APs (in secs) to enter wait-for-SIPI state during shutdown */ #define AP_WAIT_TIMEOUT 1 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 28c3d81..8822021 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -227,7 +227,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, } EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes); -static int is_write_protection(struct kvm_vcpu *vcpu) +static bool is_write_protection(struct kvm_vcpu *vcpu) { return vcpu->arch.cr0 & X86_CR0_WP; } @@ -2097,11 +2097,14 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu) direct = 1; if (mmu_check_root(vcpu, root_gfn)) return 1; + + spin_lock(&vcpu->kvm->mmu_lock); sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_LEVEL, direct, ACC_ALL, NULL); root = __pa(sp->spt); ++sp->root_count; + spin_unlock(&vcpu->kvm->mmu_lock); vcpu->arch.mmu.root_hpa = root; return 0; } @@ -2123,11 +2126,15 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu) root_gfn = 0; if (mmu_check_root(vcpu, root_gfn)) return 1; + + spin_lock(&vcpu->kvm->mmu_lock); sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, PT32_ROOT_LEVEL, direct, ACC_ALL, NULL); root = __pa(sp->spt); ++sp->root_count; + spin_unlock(&vcpu->kvm->mmu_lock); + vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK; } vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root); @@ -2448,6 +2455,7 @@ static int init_kvm_softmmu(struct kvm_vcpu *vcpu) r = paging32_init_context(vcpu); vcpu->arch.mmu.base_role.glevels = vcpu->arch.mmu.root_level; + vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu); return r; } @@ -2487,7 +2495,9 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu) goto out; spin_lock(&vcpu->kvm->mmu_lock); kvm_mmu_free_some_pages(vcpu); + spin_unlock(&vcpu->kvm->mmu_lock); r = mmu_alloc_roots(vcpu); + spin_lock(&vcpu->kvm->mmu_lock); mmu_sync_roots(vcpu); spin_unlock(&vcpu->kvm->mmu_lock); if (r) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index d42e191..a2f839c 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -128,6 +128,7 @@ static void svm_flush_tlb(struct kvm_vcpu *vcpu); static void svm_complete_interrupts(struct vcpu_svm *svm); static int nested_svm_exit_handled(struct vcpu_svm *svm); +static int nested_svm_intercept(struct vcpu_svm *svm); static int nested_svm_vmexit(struct vcpu_svm *svm); static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, bool has_error_code, u32 error_code); @@ -1359,6 +1360,8 @@ static int nested_svm_check_permissions(struct vcpu_svm *svm) static int 
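A note on the arch/x86/kernel/pvclock.c change above: each reader publishes its timestamp into the global last_value with a compare-and-swap loop and returns the published value whenever its own reading is behind it, so the clock can never appear to jump backwards between vcpus. A standalone userspace sketch of the same loop using C11 atomics (function and variable names invented for illustration):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t last_value;     /* global high-water mark */

/* Return a value that never goes backwards, even if individual readers
 * (vcpus in the pvclock case) produce slightly out-of-order timestamps. */
static uint64_t monotonic_publish(uint64_t ret)
{
        uint64_t last = atomic_load(&last_value);

        do {
                if (ret < last)
                        return last;    /* someone already saw a later time */
        } while (!atomic_compare_exchange_weak(&last_value, &last, ret));

        return ret;
}

int main(void)
{
        printf("%llu\n", (unsigned long long)monotonic_publish(100));
        printf("%llu\n", (unsigned long long)monotonic_publish(90)); /* still 100 */
        return 0;
}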
nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, bool has_error_code, u32 error_code) { + int vmexit; + if (!is_nested(svm)) return 0; @@ -1367,19 +1370,24 @@ static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, svm->vmcb->control.exit_info_1 = error_code; svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2; - return nested_svm_exit_handled(svm); + vmexit = nested_svm_intercept(svm); + if (vmexit == NESTED_EXIT_DONE) + svm->nested.exit_required = true; + + return vmexit; } -static inline int nested_svm_intr(struct vcpu_svm *svm) +/* This function returns true if it is save to enable the irq window */ +static inline bool nested_svm_intr(struct vcpu_svm *svm) { if (!is_nested(svm)) - return 0; + return true; if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK)) - return 0; + return true; if (!(svm->vcpu.arch.hflags & HF_HIF_MASK)) - return 0; + return false; svm->vmcb->control.exit_code = SVM_EXIT_INTR; @@ -1392,13 +1400,13 @@ static inline int nested_svm_intr(struct vcpu_svm *svm) */ svm->nested.exit_required = true; trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip); - return 1; + return false; } - return 0; + return true; } -static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, enum km_type idx) +static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page) { struct page *page; @@ -1406,7 +1414,9 @@ static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, enum km_type idx) if (is_error_page(page)) goto error; - return kmap_atomic(page, idx); + *_page = page; + + return kmap(page); error: kvm_release_page_clean(page); @@ -1415,16 +1425,9 @@ error: return NULL; } -static void nested_svm_unmap(void *addr, enum km_type idx) +static void nested_svm_unmap(struct page *page) { - struct page *page; - - if (!addr) - return; - - page = kmap_atomic_to_page(addr); - - kunmap_atomic(addr, idx); + kunmap(page); kvm_release_page_dirty(page); } @@ -1434,16 +1437,11 @@ static bool nested_svm_exit_handled_msr(struct vcpu_svm *svm) u32 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX]; bool ret = false; u32 t0, t1; - u8 *msrpm; + u8 val; if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT))) return false; - msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, KM_USER0); - - if (!msrpm) - goto out; - switch (msr) { case 0 ... 
0x1fff: t0 = (msr * 2) % 8; @@ -1464,11 +1462,10 @@ static bool nested_svm_exit_handled_msr(struct vcpu_svm *svm) goto out; } - ret = msrpm[t1] & ((1 << param) << t0); + if (!kvm_read_guest(svm->vcpu.kvm, svm->nested.vmcb_msrpm + t1, &val, 1)) + ret = val & ((1 << param) << t0); out: - nested_svm_unmap(msrpm, KM_USER0); - return ret; } @@ -1500,7 +1497,7 @@ static int nested_svm_exit_special(struct vcpu_svm *svm) /* * If this function returns true, this #vmexit was already handled */ -static int nested_svm_exit_handled(struct vcpu_svm *svm) +static int nested_svm_intercept(struct vcpu_svm *svm) { u32 exit_code = svm->vmcb->control.exit_code; int vmexit = NESTED_EXIT_HOST; @@ -1546,9 +1543,17 @@ static int nested_svm_exit_handled(struct vcpu_svm *svm) } } - if (vmexit == NESTED_EXIT_DONE) { + return vmexit; +} + +static int nested_svm_exit_handled(struct vcpu_svm *svm) +{ + int vmexit; + + vmexit = nested_svm_intercept(svm); + + if (vmexit == NESTED_EXIT_DONE) nested_svm_vmexit(svm); - } return vmexit; } @@ -1590,6 +1595,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm) struct vmcb *nested_vmcb; struct vmcb *hsave = svm->nested.hsave; struct vmcb *vmcb = svm->vmcb; + struct page *page; trace_kvm_nested_vmexit_inject(vmcb->control.exit_code, vmcb->control.exit_info_1, @@ -1597,7 +1603,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm) vmcb->control.exit_int_info, vmcb->control.exit_int_info_err); - nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, KM_USER0); + nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page); if (!nested_vmcb) return 1; @@ -1610,9 +1616,13 @@ static int nested_svm_vmexit(struct vcpu_svm *svm) nested_vmcb->save.ds = vmcb->save.ds; nested_vmcb->save.gdtr = vmcb->save.gdtr; nested_vmcb->save.idtr = vmcb->save.idtr; + nested_vmcb->save.cr0 = svm->vcpu.arch.cr0; if (npt_enabled) nested_vmcb->save.cr3 = vmcb->save.cr3; + else + nested_vmcb->save.cr3 = svm->vcpu.arch.cr3; nested_vmcb->save.cr2 = vmcb->save.cr2; + nested_vmcb->save.cr4 = svm->vcpu.arch.cr4; nested_vmcb->save.rflags = vmcb->save.rflags; nested_vmcb->save.rip = vmcb->save.rip; nested_vmcb->save.rsp = vmcb->save.rsp; @@ -1687,7 +1697,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm) /* Exit nested SVM mode */ svm->nested.vmcb = 0; - nested_svm_unmap(nested_vmcb, KM_USER0); + nested_svm_unmap(page); kvm_mmu_reset_context(&svm->vcpu); kvm_mmu_load(&svm->vcpu); @@ -1698,9 +1708,10 @@ static int nested_svm_vmexit(struct vcpu_svm *svm) static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm) { u32 *nested_msrpm; + struct page *page; int i; - nested_msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, KM_USER0); + nested_msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, &page); if (!nested_msrpm) return false; @@ -1709,7 +1720,7 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm) svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm); - nested_svm_unmap(nested_msrpm, KM_USER0); + nested_svm_unmap(page); return true; } @@ -1719,8 +1730,9 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm) struct vmcb *nested_vmcb; struct vmcb *hsave = svm->nested.hsave; struct vmcb *vmcb = svm->vmcb; + struct page *page; - nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0); + nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page); if (!nested_vmcb) return false; @@ -1794,21 +1806,6 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm) svm->vmcb->save.dr6 = nested_vmcb->save.dr6; svm->vmcb->save.cpl = nested_vmcb->save.cpl; - /* We don't want a nested guest to be more powerful 
than the guest, - so all intercepts are ORed */ - svm->vmcb->control.intercept_cr_read |= - nested_vmcb->control.intercept_cr_read; - svm->vmcb->control.intercept_cr_write |= - nested_vmcb->control.intercept_cr_write; - svm->vmcb->control.intercept_dr_read |= - nested_vmcb->control.intercept_dr_read; - svm->vmcb->control.intercept_dr_write |= - nested_vmcb->control.intercept_dr_write; - svm->vmcb->control.intercept_exceptions |= - nested_vmcb->control.intercept_exceptions; - - svm->vmcb->control.intercept |= nested_vmcb->control.intercept; - svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa; /* cache intercepts */ @@ -1826,13 +1823,40 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm) else svm->vcpu.arch.hflags &= ~HF_VINTR_MASK; + if (svm->vcpu.arch.hflags & HF_VINTR_MASK) { + /* We only want the cr8 intercept bits of the guest */ + svm->vmcb->control.intercept_cr_read &= ~INTERCEPT_CR8_MASK; + svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK; + } + + /* We don't want to see VMMCALLs from a nested guest */ + svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMMCALL); + + /* + * We don't want a nested guest to be more powerful than the guest, so + * all intercepts are ORed + */ + svm->vmcb->control.intercept_cr_read |= + nested_vmcb->control.intercept_cr_read; + svm->vmcb->control.intercept_cr_write |= + nested_vmcb->control.intercept_cr_write; + svm->vmcb->control.intercept_dr_read |= + nested_vmcb->control.intercept_dr_read; + svm->vmcb->control.intercept_dr_write |= + nested_vmcb->control.intercept_dr_write; + svm->vmcb->control.intercept_exceptions |= + nested_vmcb->control.intercept_exceptions; + + svm->vmcb->control.intercept |= nested_vmcb->control.intercept; + + svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl; svm->vmcb->control.int_vector = nested_vmcb->control.int_vector; svm->vmcb->control.int_state = nested_vmcb->control.int_state; svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset; svm->vmcb->control.event_inj = nested_vmcb->control.event_inj; svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err; - nested_svm_unmap(nested_vmcb, KM_USER0); + nested_svm_unmap(page); enable_gif(svm); @@ -1858,6 +1882,7 @@ static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb) static int vmload_interception(struct vcpu_svm *svm) { struct vmcb *nested_vmcb; + struct page *page; if (nested_svm_check_permissions(svm)) return 1; @@ -1865,12 +1890,12 @@ static int vmload_interception(struct vcpu_svm *svm) svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; skip_emulated_instruction(&svm->vcpu); - nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0); + nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page); if (!nested_vmcb) return 1; nested_svm_vmloadsave(nested_vmcb, svm->vmcb); - nested_svm_unmap(nested_vmcb, KM_USER0); + nested_svm_unmap(page); return 1; } @@ -1878,6 +1903,7 @@ static int vmload_interception(struct vcpu_svm *svm) static int vmsave_interception(struct vcpu_svm *svm) { struct vmcb *nested_vmcb; + struct page *page; if (nested_svm_check_permissions(svm)) return 1; @@ -1885,12 +1911,12 @@ static int vmsave_interception(struct vcpu_svm *svm) svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; skip_emulated_instruction(&svm->vcpu); - nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0); + nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page); if (!nested_vmcb) return 1; nested_svm_vmloadsave(svm->vmcb, nested_vmcb); - nested_svm_unmap(nested_vmcb, KM_USER0); + 
nested_svm_unmap(page); return 1; } @@ -2487,6 +2513,9 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) { struct vcpu_svm *svm = to_svm(vcpu); + if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK)) + return; + if (irr == -1) return; @@ -2544,13 +2573,11 @@ static void enable_irq_window(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); - nested_svm_intr(svm); - /* In case GIF=0 we can't rely on the CPU to tell us when * GIF becomes 1, because that's a separate STGI/VMRUN intercept. * The next time we get that intercept, this function will be * called again though and we'll get the vintr intercept. */ - if (gif_set(svm)) { + if (gif_set(svm) && nested_svm_intr(svm)) { svm_set_vintr(svm); svm_inject_irq(svm, 0x0); } @@ -2590,6 +2617,9 @@ static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); + if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK)) + return; + if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) { int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK; kvm_set_cr8(vcpu, cr8); @@ -2601,6 +2631,9 @@ static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu) struct vcpu_svm *svm = to_svm(vcpu); u64 cr8; + if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK)) + return; + cr8 = kvm_get_cr8(vcpu); svm->vmcb->control.int_ctl &= ~V_TPR_MASK; svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK; @@ -2857,6 +2890,20 @@ static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) return 0; } +static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry) +{ + switch (func) { + case 0x8000000A: + entry->eax = 1; /* SVM revision 1 */ + entry->ebx = 8; /* Lets support 8 ASIDs in case we add proper + ASID emulation to nested SVM */ + entry->ecx = 0; /* Reserved */ + entry->edx = 0; /* Do not support any additional features */ + + break; + } +} + static const struct trace_print_flags svm_exit_reasons_str[] = { { SVM_EXIT_READ_CR0, "read_cr0" }, { SVM_EXIT_READ_CR3, "read_cr3" }, @@ -2981,6 +3028,7 @@ static struct kvm_x86_ops svm_x86_ops = { .exit_reasons_str = svm_exit_reasons_str, .gb_page_enable = svm_gb_page_enable, + .set_supported_cpuid = svm_set_supported_cpuid, }; static int __init svm_init(void) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 3acbe19..2840568 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -26,6 +26,7 @@ #include #include #include +#include #include "kvm_cache_regs.h" #include "x86.h" @@ -1125,9 +1126,16 @@ static __init int vmx_disabled_by_bios(void) u64 msr; rdmsrl(MSR_IA32_FEATURE_CONTROL, msr); - return (msr & (FEATURE_CONTROL_LOCKED | - FEATURE_CONTROL_VMXON_ENABLED)) - == FEATURE_CONTROL_LOCKED; + if (msr & FEATURE_CONTROL_LOCKED) { + if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX) + && tboot_enabled()) + return 1; + if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX) + && !tboot_enabled()) + return 1; + } + + return 0; /* locked but not enabled */ } @@ -1135,21 +1143,23 @@ static int hardware_enable(void *garbage) { int cpu = raw_smp_processor_id(); u64 phys_addr = __pa(per_cpu(vmxarea, cpu)); - u64 old; + u64 old, test_bits; if (read_cr4() & X86_CR4_VMXE) return -EBUSY; INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu)); rdmsrl(MSR_IA32_FEATURE_CONTROL, old); - if ((old & (FEATURE_CONTROL_LOCKED | - FEATURE_CONTROL_VMXON_ENABLED)) - != (FEATURE_CONTROL_LOCKED | - FEATURE_CONTROL_VMXON_ENABLED)) + + test_bits = FEATURE_CONTROL_LOCKED; + test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; + if 
(tboot_enabled()) + test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX; + + if ((old & test_bits) != test_bits) { /* enable and lock */ - wrmsrl(MSR_IA32_FEATURE_CONTROL, old | - FEATURE_CONTROL_LOCKED | - FEATURE_CONTROL_VMXON_ENABLED); + wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits); + } write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */ asm volatile (ASM_VMX_VMXON_RAX : : "a"(&phys_addr), "m"(phys_addr) @@ -3993,6 +4003,10 @@ static bool vmx_gb_page_enable(void) return false; } +static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry) +{ +} + static struct kvm_x86_ops vmx_x86_ops = { .cpu_has_kvm_support = cpu_has_kvm_support, .disabled_by_bios = vmx_disabled_by_bios, @@ -4057,6 +4071,7 @@ static struct kvm_x86_ops vmx_x86_ops = { .exit_reasons_str = vmx_exit_reasons_str, .gb_page_enable = vmx_gb_page_enable, + .set_supported_cpuid = vmx_set_supported_cpuid, }; static int __init vmx_init(void) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index dd78927..07750de 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -432,7 +432,7 @@ EXPORT_SYMBOL_GPL(kvm_set_cr0); void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw) { - kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f)); + kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0eul) | (msw & 0x0f)); } EXPORT_SYMBOL_GPL(kvm_lmsw); @@ -570,48 +570,42 @@ static u32 emulated_msrs[] = { MSR_IA32_MISC_ENABLE, }; -static void set_efer(struct kvm_vcpu *vcpu, u64 efer) +static int set_efer(struct kvm_vcpu *vcpu, u64 efer) { - if (efer & efer_reserved_bits) { - kvm_inject_gp(vcpu, 0); - return; - } + if (efer & efer_reserved_bits) + return 1; if (is_paging(vcpu) - && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) { - kvm_inject_gp(vcpu, 0); - return; - } + && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) + return 1; if (efer & EFER_FFXSR) { struct kvm_cpuid_entry2 *feat; feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); - if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) { - kvm_inject_gp(vcpu, 0); - return; - } + if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) + return 1; } if (efer & EFER_SVME) { struct kvm_cpuid_entry2 *feat; feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); - if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) { - kvm_inject_gp(vcpu, 0); - return; - } + if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) + return 1; } - kvm_x86_ops->set_efer(vcpu, efer); - efer &= ~EFER_LMA; efer |= vcpu->arch.shadow_efer & EFER_LMA; + kvm_x86_ops->set_efer(vcpu, efer); + vcpu->arch.shadow_efer = efer; vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled; kvm_mmu_reset_context(vcpu); + + return 0; } void kvm_enable_efer_bits(u64 mask) @@ -641,14 +635,22 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) { - static int version; + int version; + int r; struct pvclock_wall_clock wc; struct timespec boot; if (!wall_clock) return; - version++; + r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version)); + if (r) + return; + + if (version & 1) + ++version; /* first time write, random junk */ + + ++version; kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); @@ -938,8 +940,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) { switch (msr) { case MSR_EFER: - set_efer(vcpu, data); - break; + return set_efer(vcpu, data); case MSR_K7_HWCR: data &= ~(u64)0x40; /* ignore flush filter disable */ if (data != 0) { @@ -1542,6 +1543,7 @@ 
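A note on the FEATURE_CONTROL changes above (arch/x86/include/asm/msr-index.h and arch/x86/kvm/vmx.c): IA32_FEATURE_CONTROL carries separate VMXON-enable bits for launches inside SMX (tboot) and outside it, so both the BIOS-disabled test and hardware_enable() now check the bit that matches how the kernel was actually booted. A userspace restatement of the new check (the MSR value and the tboot state are passed in as parameters rather than read from hardware):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FEATURE_CONTROL_LOCKED                    (1 << 0)
#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX  (1 << 1)
#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1 << 2)

/* Mirror of the new vmx_disabled_by_bios() logic: which VMXON-enable bit
 * matters depends on whether the kernel came up through tboot (SMX). */
static bool vmx_disabled_by_bios(uint64_t msr, bool tboot_enabled)
{
        if (msr & FEATURE_CONTROL_LOCKED) {
                if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX) &&
                    tboot_enabled)
                        return true;
                if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX) &&
                    !tboot_enabled)
                        return true;
        }
        return false;   /* unlocked, or locked with the matching bit set */
}

int main(void)
{
        /* locked + outside-SMX enable only: usable without tboot, not with it */
        uint64_t msr = FEATURE_CONTROL_LOCKED |
                       FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;

        printf("no tboot: %d, tboot: %d\n",
               vmx_disabled_by_bios(msr, false),
               vmx_disabled_by_bios(msr, true));
        return 0;
}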
static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu, { int r; + vcpu_load(vcpu); r = -E2BIG; if (cpuid->nent < vcpu->arch.cpuid_nent) goto out; @@ -1553,6 +1555,7 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu, out: cpuid->nent = vcpu->arch.cpuid_nent; + vcpu_put(vcpu); return r; } @@ -1688,6 +1691,9 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, entry->ecx &= kvm_supported_word6_x86_features; break; } + + kvm_x86_ops->set_supported_cpuid(function, entry); + put_cpu(); } @@ -1802,6 +1808,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, int r; unsigned bank_num = mcg_cap & 0xff, bank; + vcpu_load(vcpu); r = -EINVAL; if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS) goto out; @@ -1816,6 +1823,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, for (bank = 0; bank < bank_num; bank++) vcpu->arch.mce_banks[bank*4] = ~(u64)0; out: + vcpu_put(vcpu); return r; } @@ -2083,7 +2091,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&mce, argp, sizeof mce)) goto out; + vcpu_load(vcpu); r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce); + vcpu_put(vcpu); break; } case KVM_GET_VCPU_EVENTS: { diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 2c505ee..f1fb411 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -95,7 +95,10 @@ static void nmi_cpu_save_registers(struct op_msrs *msrs) static void nmi_cpu_start(void *dummy) { struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs); - model->start(msrs); + if (!msrs->controls) + WARN_ON_ONCE(1); + else + model->start(msrs); } static int nmi_start(void) @@ -107,7 +110,10 @@ static int nmi_start(void) static void nmi_cpu_stop(void *dummy) { struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs); - model->stop(msrs); + if (!msrs->controls) + WARN_ON_ONCE(1); + else + model->stop(msrs); } static void nmi_stop(void) diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c index 987267f..a9c6611 100644 --- a/arch/x86/xen/suspend.c +++ b/arch/x86/xen/suspend.c @@ -60,6 +60,6 @@ static void xen_vcpu_notify_restore(void *data) void xen_arch_resume(void) { - smp_call_function(xen_vcpu_notify_restore, - (void *)CLOCK_EVT_NOTIFY_RESUME, 1); + on_each_cpu(xen_vcpu_notify_restore, + (void *)CLOCK_EVT_NOTIFY_RESUME, 1); } diff --git a/arch/xtensa/include/asm/cache.h b/arch/xtensa/include/asm/cache.h index f04c989..ed8cd3c 100644 --- a/arch/xtensa/include/asm/cache.h +++ b/arch/xtensa/include/asm/cache.h @@ -29,5 +29,6 @@ # define CACHE_WAY_SIZE ICACHE_WAY_SIZE #endif +#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES #endif /* _XTENSA_CACHE_H */ diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 023f4e6..d0f8146 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -2481,15 +2481,10 @@ static void cfq_free_io_context(struct io_context *ioc) __call_for_each_cic(ioc, cic_free_func); } -static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq) +static void cfq_put_cooperator(struct cfq_queue *cfqq) { struct cfq_queue *__cfqq, *next; - if (unlikely(cfqq == cfqd->active_queue)) { - __cfq_slice_expired(cfqd, cfqq, 0); - cfq_schedule_dispatch(cfqd); - } - /* * If this queue was scheduled to merge with another queue, be * sure to drop the reference taken on that queue (and others in @@ -2505,6 +2500,16 @@ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq) cfq_put_queue(__cfqq); __cfqq = next; } +} + +static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + 
if (unlikely(cfqq == cfqd->active_queue)) { + __cfq_slice_expired(cfqd, cfqq, 0); + cfq_schedule_dispatch(cfqd); + } + + cfq_put_cooperator(cfqq); cfq_put_queue(cfqq); } @@ -3459,6 +3464,9 @@ split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq) } cic_set_cfqq(cic, NULL, 1); + + cfq_put_cooperator(cfqq); + cfq_put_queue(cfqq); return NULL; } diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index fc2f26b..c5fef01 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -250,7 +250,7 @@ static int __init acpi_backlight(char *str) ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR; if (!strcmp("video", str)) acpi_video_support |= - ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO; + ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO; } return 1; } diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 2401c9c..8e9b132 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -159,6 +159,10 @@ int libata_allow_tpm = 0; module_param_named(allow_tpm, libata_allow_tpm, int, 0444); MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)"); +static int atapi_an; +module_param(atapi_an, int, 0444); +MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=0ff [default], 1=on)"); + MODULE_AUTHOR("Jeff Garzik"); MODULE_DESCRIPTION("Library module for ATA devices"); MODULE_LICENSE("GPL"); @@ -2570,7 +2574,8 @@ int ata_dev_configure(struct ata_device *dev) * to enable ATAPI AN to discern between PHY status * changed notifications and ATAPI ANs. */ - if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && + if (atapi_an && + (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && (!sata_pmp_attached(ap) || sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) { unsigned int err_mask; diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 730ef3c..06e7204 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -893,7 +893,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) do_write); } - if (!do_write) + if (!do_write && !PageSlab(page)) flush_dcache_page(page); qc->curbytes += qc->sect_size; diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index 0c82d33..952641e 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c @@ -1673,7 +1673,6 @@ static void nv_mcp55_freeze(struct ata_port *ap) mask = readl(mmio_base + NV_INT_ENABLE_MCP55); mask &= ~(NV_INT_ALL_MCP55 << shift); writel(mask, mmio_base + NV_INT_ENABLE_MCP55); - ata_sff_freeze(ap); } static void nv_mcp55_thaw(struct ata_port *ap) @@ -1687,7 +1686,6 @@ static void nv_mcp55_thaw(struct ata_port *ap) mask = readl(mmio_base + NV_INT_ENABLE_MCP55); mask |= (NV_INT_MASK_MCP55 << shift); writel(mask, mmio_base + NV_INT_ENABLE_MCP55); - ata_sff_thaw(ap); } static void nv_adma_error_handler(struct ata_port *ap) @@ -2478,8 +2476,7 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } pci_set_master(pdev); - return ata_host_activate(host, pdev->irq, ipriv->irq_handler, - IRQF_SHARED, ipriv->sht); + return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht); } #ifdef CONFIG_PM diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c index 02efd9a..e35596b 100644 --- a/drivers/ata/sata_via.c +++ b/drivers/ata/sata_via.c @@ -558,6 +558,19 @@ static void svia_configure(struct pci_dev *pdev) tmp8 |= NATIVE_MODE_ALL; pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8); } + + /* + * vt6421 has problems talking to some drives. 
The following + * is the magic fix from Joseph Chan . + * Please add proper documentation if possible. + * + * https://bugzilla.kernel.org/show_bug.cgi?id=15173 + */ + if (pdev->device == 0x3249) { + pci_read_config_byte(pdev, 0x52, &tmp8); + tmp8 |= 1 << 2; + pci_write_config_byte(pdev, 0x52, tmp8); + } } static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 958bd15..7b71a15 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -183,7 +183,7 @@ static ssize_t print_cpus_offline(struct sysdev_class *class, char *buf) /* display offline cpus < nr_cpu_ids */ if (!alloc_cpumask_var(&offline, GFP_KERNEL)) return -ENOMEM; - cpumask_complement(offline, cpu_online_mask); + cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask); n = cpulist_scnprintf(buf, len, offline); free_cpumask_var(offline); diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 176f175..aa79cfd 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -311,9 +311,14 @@ static void deliver_recv_msg(struct smi_info *smi_info, { /* Deliver the message to the upper layer with the lock released. */ - spin_unlock(&(smi_info->si_lock)); - ipmi_smi_msg_received(smi_info->intf, msg); - spin_lock(&(smi_info->si_lock)); + + if (smi_info->run_to_completion) { + ipmi_smi_msg_received(smi_info->intf, msg); + } else { + spin_unlock(&(smi_info->si_lock)); + ipmi_smi_msg_received(smi_info->intf, msg); + spin_lock(&(smi_info->si_lock)); + } } static void return_hosed_msg(struct smi_info *smi_info, int cCode) diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index 6fe4f77..234d9f6 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c @@ -413,18 +413,10 @@ static cycle_t sh_cmt_clocksource_read(struct clocksource *cs) static int sh_cmt_clocksource_enable(struct clocksource *cs) { struct sh_cmt_priv *p = cs_to_sh_cmt(cs); - int ret; p->total_cycles = 0; - ret = sh_cmt_start(p, FLAG_CLOCKSOURCE); - if (ret) - return ret; - - /* TODO: calculate good shift from rate and counter bit width */ - cs->shift = 0; - cs->mult = clocksource_hz2mult(p->rate, cs->shift); - return 0; + return sh_cmt_start(p, FLAG_CLOCKSOURCE); } static void sh_cmt_clocksource_disable(struct clocksource *cs) @@ -444,7 +436,18 @@ static int sh_cmt_register_clocksource(struct sh_cmt_priv *p, cs->disable = sh_cmt_clocksource_disable; cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8); cs->flags = CLOCK_SOURCE_IS_CONTINUOUS; + + /* clk_get_rate() needs an enabled clock */ + clk_enable(p->clk); + p->rate = clk_get_rate(p->clk) / (p->width == 16) ? 
512 : 8; + clk_disable(p->clk); + + /* TODO: calculate good shift from rate and counter bit width */ + cs->shift = 10; + cs->mult = clocksource_hz2mult(p->rate, cs->shift); + pr_info("sh_cmt: %s used as clock source\n", cs->name); + clocksource_register(cs); return 0; } diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c index 961f5b5..c073246 100644 --- a/drivers/clocksource/sh_tmu.c +++ b/drivers/clocksource/sh_tmu.c @@ -199,16 +199,8 @@ static cycle_t sh_tmu_clocksource_read(struct clocksource *cs) static int sh_tmu_clocksource_enable(struct clocksource *cs) { struct sh_tmu_priv *p = cs_to_sh_tmu(cs); - int ret; - - ret = sh_tmu_enable(p); - if (ret) - return ret; - /* TODO: calculate good shift from rate and counter bit width */ - cs->shift = 10; - cs->mult = clocksource_hz2mult(p->rate, cs->shift); - return 0; + return sh_tmu_enable(p); } static void sh_tmu_clocksource_disable(struct clocksource *cs) @@ -228,6 +220,16 @@ static int sh_tmu_register_clocksource(struct sh_tmu_priv *p, cs->disable = sh_tmu_clocksource_disable; cs->mask = CLOCKSOURCE_MASK(32); cs->flags = CLOCK_SOURCE_IS_CONTINUOUS; + + /* clk_get_rate() needs an enabled clock */ + clk_enable(p->clk); + /* channel will be configured at parent clock / 4 */ + p->rate = clk_get_rate(p->clk) / 4; + clk_disable(p->clk); + /* TODO: calculate good shift from rate and counter bit width */ + cs->shift = 10; + cs->mult = clocksource_hz2mult(p->rate, cs->shift); + pr_info("sh_tmu: %s used as clock source\n", cs->name); clocksource_register(cs); return 0; diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c index 5045156..991447b 100644 --- a/drivers/firewire/core-card.c +++ b/drivers/firewire/core-card.c @@ -231,7 +231,7 @@ void fw_schedule_bm_work(struct fw_card *card, unsigned long delay) static void fw_card_bm_work(struct work_struct *work) { struct fw_card *card = container_of(work, struct fw_card, work.work); - struct fw_device *root_device; + struct fw_device *root_device, *irm_device; struct fw_node *root_node; unsigned long flags; int root_id, new_root_id, irm_id, local_id; @@ -239,6 +239,7 @@ static void fw_card_bm_work(struct work_struct *work) bool do_reset = false; bool root_device_is_running; bool root_device_is_cmc; + bool irm_is_1394_1995_only; spin_lock_irqsave(&card->lock, flags); @@ -248,12 +249,18 @@ static void fw_card_bm_work(struct work_struct *work) } generation = card->generation; + root_node = card->root_node; fw_node_get(root_node); root_device = root_node->data; root_device_is_running = root_device && atomic_read(&root_device->state) == FW_DEVICE_RUNNING; root_device_is_cmc = root_device && root_device->cmc; + + irm_device = card->irm_node->data; + irm_is_1394_1995_only = irm_device && irm_device->config_rom && + (irm_device->config_rom[2] & 0x000000f0) == 0; + root_id = root_node->node_id; irm_id = card->irm_node->node_id; local_id = card->local_node->node_id; @@ -276,8 +283,15 @@ static void fw_card_bm_work(struct work_struct *work) if (!card->irm_node->link_on) { new_root_id = local_id; - fw_notify("IRM has link off, making local node (%02x) root.\n", - new_root_id); + fw_notify("%s, making local node (%02x) root.\n", + "IRM has link off", new_root_id); + goto pick_me; + } + + if (irm_is_1394_1995_only) { + new_root_id = local_id; + fw_notify("%s, making local node (%02x) root.\n", + "IRM is not 1394a compliant", new_root_id); goto pick_me; } @@ -316,8 +330,8 @@ static void fw_card_bm_work(struct work_struct *work) * root, and thus, IRM. 
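A note on the drivers/clocksource/sh_cmt.c and sh_tmu.c changes above: the clock rate, mult and shift are now computed once at registration time, with the clock briefly enabled so clk_get_rate() returns a real value, instead of being recomputed in the enable hook. One caveat about the sh_cmt expression as it appears in this hunk: `clk_get_rate(p->clk) / (p->width == 16) ? 512 : 8` parses as `(clk_get_rate(p->clk) / (p->width == 16)) ? 512 : 8`, because division binds tighter than the conditional operator; the intended rate is presumably `clk_get_rate(p->clk) / ((p->width == 16) ? 512 : 8)`. The sketch below shows the mult/shift conversion itself (hz2mult is my userspace approximation of clocksource_hz2mult(), and the 66 MHz input clock is an invented example):

#include <stdint.h>
#include <stdio.h>

/* Pick mult for a given shift so that nanoseconds = (cycles * mult) >> shift. */
static uint32_t hz2mult(uint32_t hz, uint32_t shift)
{
        uint64_t tmp = (uint64_t)1000000000 << shift;

        tmp += hz / 2;                  /* round to nearest */
        return (uint32_t)(tmp / hz);
}

int main(void)
{
        uint32_t shift = 10;
        /* hypothetical 66 MHz peripheral clock, divided by 512 as in the
         * 16-bit CMT case (using the intended parenthesization) */
        uint32_t rate = 66000000 / 512;
        uint32_t mult = hz2mult(rate, shift);
        uint64_t cycles = rate;         /* one second worth of cycles */

        printf("mult=%u, 1s -> ~%llu ns\n", mult,
               (unsigned long long)((cycles * mult) >> shift));
        return 0;
}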
*/ new_root_id = local_id; - fw_notify("BM lock failed, making local node (%02x) root.\n", - new_root_id); + fw_notify("%s, making local node (%02x) root.\n", + "BM lock failed", new_root_id); goto pick_me; } } else if (card->bm_generation != generation) { diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index bfd0e4a..48b07ef 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -334,7 +334,7 @@ static struct drm_display_mode drm_dmt_modes[] = { DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@85Hz */ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072, - 1072, 1376, 0, 768, 769, 772, 808, 0, + 1168, 1376, 0, 768, 769, 772, 808, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 4746bfe..930664c 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -68,7 +68,8 @@ const static struct intel_device_info intel_845g_info = { }; const static struct intel_device_info intel_i85x_info = { - .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1, + .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1, + .cursor_needs_physical = 1, }; const static struct intel_device_info intel_i865g_info = { @@ -140,7 +141,7 @@ const static struct pci_device_id pciidlist[] = { INTEL_VGA_DEVICE(0x3577, &intel_i830_info), INTEL_VGA_DEVICE(0x2562, &intel_845g_info), INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), - INTEL_VGA_DEVICE(0x35e8, &intel_i85x_info), + INTEL_VGA_DEVICE(0x358e, &intel_i85x_info), INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 0b33757..6679741 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -175,6 +175,7 @@ struct intel_overlay; struct intel_device_info { u8 is_mobile : 1; u8 is_i8xx : 1; + u8 is_i85x : 1; u8 is_i915g : 1; u8 is_i9xx : 1; u8 is_i945gm : 1; @@ -1027,7 +1028,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); #define IS_I830(dev) ((dev)->pci_device == 0x3577) #define IS_845G(dev) ((dev)->pci_device == 0x2562) -#define IS_I85X(dev) ((dev)->pci_device == 0x3582) +#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) #define IS_I865G(dev) ((dev)->pci_device == 0x2572) #define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx) #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index c00c978..4a44de4 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2641,6 +2641,14 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) return -EINVAL; } + /* If the object is bigger than the entire aperture, reject it early + * before evicting everything in a vain attempt to find space. 
+ */ + if (obj->size > dev->gtt_total) { + DRM_ERROR("Attempting to bind an object larger than the aperture\n"); + return -E2BIG; + } + search_free: free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, obj->size, alignment, 0); @@ -4175,6 +4183,17 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) int ret; i915_verify_inactive(dev, __FILE__, __LINE__); + + if (obj_priv->gtt_space != NULL) { + if (alignment == 0) + alignment = i915_gem_get_gtt_alignment(obj); + if (obj_priv->gtt_offset & (alignment - 1)) { + ret = i915_gem_object_unbind(obj); + if (ret) + return ret; + } + } + if (obj_priv->gtt_space == NULL) { ret = i915_gem_object_bind_to_gtt(obj, alignment); if (ret) diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 4d88315..ff02664 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -514,6 +514,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) } /* look up gpio for ddc, hpd */ + ddc_bus.valid = false; + hpd.hpd = RADEON_HPD_NONE; if ((le16_to_cpu(path->usDeviceTag) & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) { for (j = 0; j < con_obj->ucNumberOfObjects; j++) { @@ -569,9 +571,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) break; } } - } else { - hpd.hpd = RADEON_HPD_NONE; - ddc_bus.valid = false; } conn_id = le16_to_cpu(path->usConnObjectId); @@ -1137,7 +1136,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct lvds->native_mode.vtotal = lvds->native_mode.vdisplay + le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time); lvds->native_mode.vsync_start = lvds->native_mode.vdisplay + - le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth); + le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset); lvds->native_mode.vsync_end = lvds->native_mode.vsync_start + le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth); lvds->panel_pwr_delay = diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c index f129bbb..419630d 100644 --- a/drivers/gpu/drm/radeon/radeon_cp.c +++ b/drivers/gpu/drm/radeon/radeon_cp.c @@ -1646,6 +1646,7 @@ static int radeon_do_resume_cp(struct drm_device *dev, struct drm_file *file_pri radeon_cp_load_microcode(dev_priv); radeon_cp_init_ring_buffer(dev, dev_priv, file_priv); + dev_priv->have_z_offset = 0; radeon_do_engine_reset(dev); radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1); diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index c57ad60..ebaee61 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h @@ -268,6 +268,8 @@ typedef struct drm_radeon_private { u32 scratch_ages[5]; + int have_z_offset; + /* starting from here on, data is preserved accross an open */ uint32_t flags; /* see radeon_chip_flags */ resource_size_t fb_aper_offset; diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c index 067167c..16436bb 100644 --- a/drivers/gpu/drm/radeon/radeon_state.c +++ b/drivers/gpu/drm/radeon/radeon_state.c @@ -101,6 +101,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * DRM_ERROR("Invalid depth buffer offset\n"); return -EINVAL; } + dev_priv->have_z_offset = 1; break; case RADEON_EMIT_PP_CNTL: @@ -876,6 +877,12 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev, if (tmp & RADEON_BACK) flags |= RADEON_FRONT; } + if (flags & (RADEON_DEPTH|RADEON_STENCIL)) { + if 
(!dev_priv->have_z_offset) { + printk_once(KERN_ERR "radeon: illegal depth clear request. Buggy mesa detected - please update.\n"); + flags &= ~(RADEON_DEPTH | RADEON_STENCIL); + } + } if (flags & (RADEON_FRONT | RADEON_BACK)) { diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 8455f3d..0a404db 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1305,6 +1305,7 @@ static const struct hid_device_id hid_blacklist[] = { { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) }, { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) }, { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) }, { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) }, { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) }, diff --git a/drivers/hid/hid-gyration.c b/drivers/hid/hid-gyration.c index 62416e6..3975e03 100644 --- a/drivers/hid/hid-gyration.c +++ b/drivers/hid/hid-gyration.c @@ -73,6 +73,7 @@ static int gyration_event(struct hid_device *hdev, struct hid_field *field, static const struct hid_device_id gyration_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) }, { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) }, { } }; MODULE_DEVICE_TABLE(hid, gyration_devices); diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 793691f..5ec8a79 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -256,6 +256,7 @@ #define USB_VENDOR_ID_GYRATION 0x0c16 #define USB_DEVICE_ID_GYRATION_REMOTE 0x0002 #define USB_DEVICE_ID_GYRATION_REMOTE_2 0x0003 +#define USB_DEVICE_ID_GYRATION_REMOTE_3 0x0008 #define USB_VENDOR_ID_HAPP 0x078b #define USB_DEVICE_ID_UGCI_DRIVING 0x0010 diff --git a/drivers/hwmon/ltc4245.c b/drivers/hwmon/ltc4245.c index 65c232a..21d201b 100644 --- a/drivers/hwmon/ltc4245.c +++ b/drivers/hwmon/ltc4245.c @@ -45,9 +45,7 @@ enum ltc4245_cmd { LTC4245_VEEIN = 0x19, LTC4245_VEESENSE = 0x1a, LTC4245_VEEOUT = 0x1b, - LTC4245_GPIOADC1 = 0x1c, - LTC4245_GPIOADC2 = 0x1d, - LTC4245_GPIOADC3 = 0x1e, + LTC4245_GPIOADC = 0x1c, }; struct ltc4245_data { @@ -61,7 +59,7 @@ struct ltc4245_data { u8 cregs[0x08]; /* Voltage registers */ - u8 vregs[0x0f]; + u8 vregs[0x0d]; }; static struct ltc4245_data *ltc4245_update_device(struct device *dev) @@ -86,7 +84,7 @@ static struct ltc4245_data *ltc4245_update_device(struct device *dev) data->cregs[i] = val; } - /* Read voltage registers -- 0x10 to 0x1f */ + /* Read voltage registers -- 0x10 to 0x1c */ for (i = 0; i < ARRAY_SIZE(data->vregs); i++) { val = i2c_smbus_read_byte_data(client, i+0x10); if (unlikely(val < 0)) @@ -128,9 +126,7 @@ static int ltc4245_get_voltage(struct device *dev, u8 reg) case LTC4245_VEEOUT: voltage = regval * -55; break; - case LTC4245_GPIOADC1: - case LTC4245_GPIOADC2: - case LTC4245_GPIOADC3: + case LTC4245_GPIOADC: voltage = regval * 10; break; default: @@ -297,9 +293,7 @@ LTC4245_ALARM(in7_min_alarm, (1 << 2), LTC4245_FAULT2); LTC4245_ALARM(in8_min_alarm, (1 << 3), LTC4245_FAULT2); /* GPIO voltages */ -LTC4245_VOLTAGE(in9_input, LTC4245_GPIOADC1); -LTC4245_VOLTAGE(in10_input, LTC4245_GPIOADC2); -LTC4245_VOLTAGE(in11_input, LTC4245_GPIOADC3); +LTC4245_VOLTAGE(in9_input, LTC4245_GPIOADC); /* Power Consumption (virtual) */ 
LTC4245_POWER(power1_input, LTC4245_12VSENSE); @@ -342,8 +336,6 @@ static struct attribute *ltc4245_attributes[] = { &sensor_dev_attr_in8_min_alarm.dev_attr.attr, &sensor_dev_attr_in9_input.dev_attr.attr, - &sensor_dev_attr_in10_input.dev_attr.attr, - &sensor_dev_attr_in11_input.dev_attr.attr, &sensor_dev_attr_power1_input.dev_attr.attr, &sensor_dev_attr_power2_input.dev_attr.attr, diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c index d8c0c8d..6cda023 100644 --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c @@ -1382,6 +1382,7 @@ static int psmouse_reconnect(struct serio *serio) struct psmouse *psmouse = serio_get_drvdata(serio); struct psmouse *parent = NULL; struct serio_driver *drv = serio->drv; + unsigned char type; int rc = -1; if (!drv || !psmouse) { @@ -1401,10 +1402,15 @@ static int psmouse_reconnect(struct serio *serio) if (psmouse->reconnect) { if (psmouse->reconnect(psmouse)) goto out; - } else if (psmouse_probe(psmouse) < 0 || - psmouse->type != psmouse_extensions(psmouse, - psmouse_max_proto, false)) { - goto out; + } else { + psmouse_reset(psmouse); + + if (psmouse_probe(psmouse) < 0) + goto out; + + type = psmouse_extensions(psmouse, psmouse_max_proto, false); + if (psmouse->type != type) + goto out; } /* ok, the device type (and capabilities) match the old one, diff --git a/drivers/md/linear.c b/drivers/md/linear.c index 001317b..12f4acb 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c @@ -281,6 +281,7 @@ static int linear_stop (mddev_t *mddev) rcu_barrier(); blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ kfree(conf); + mddev->private = NULL; return 0; } diff --git a/drivers/md/md.c b/drivers/md/md.c index 2ecd1d5..ec10d74 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -507,9 +507,36 @@ static inline int mddev_trylock(mddev_t * mddev) return mutex_trylock(&mddev->reconfig_mutex); } -static inline void mddev_unlock(mddev_t * mddev) -{ - mutex_unlock(&mddev->reconfig_mutex); +static struct attribute_group md_redundancy_group; + +static void mddev_unlock(mddev_t * mddev) +{ + if (mddev->to_remove) { + /* These cannot be removed under reconfig_mutex as + * an access to the files will try to take reconfig_mutex + * while holding the file unremovable, which leads to + * a deadlock. + * So hold open_mutex instead - we are allowed to take + * it while holding reconfig_mutex, and md_run can + * use it to wait for the remove to complete. 
+ */ + struct attribute_group *to_remove = mddev->to_remove; + mddev->to_remove = NULL; + mutex_lock(&mddev->open_mutex); + mutex_unlock(&mddev->reconfig_mutex); + + if (to_remove != &md_redundancy_group) + sysfs_remove_group(&mddev->kobj, to_remove); + if (mddev->pers == NULL || + mddev->pers->sync_request == NULL) { + sysfs_remove_group(&mddev->kobj, &md_redundancy_group); + if (mddev->sysfs_action) + sysfs_put(mddev->sysfs_action); + mddev->sysfs_action = NULL; + } + mutex_unlock(&mddev->open_mutex); + } else + mutex_unlock(&mddev->reconfig_mutex); md_wakeup_thread(mddev->thread); } @@ -2979,6 +3006,23 @@ level_store(mddev_t *mddev, const char *buf, size_t len) /* Looks like we have a winner */ mddev_suspend(mddev); mddev->pers->stop(mddev); + + if (mddev->pers->sync_request == NULL && + pers->sync_request != NULL) { + /* need to add the md_redundancy_group */ + if (sysfs_create_group(&mddev->kobj, &md_redundancy_group)) + printk(KERN_WARNING + "md: cannot register extra attributes for %s\n", + mdname(mddev)); + mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action"); + } + if (mddev->pers->sync_request != NULL && + pers->sync_request == NULL) { + /* need to remove the md_redundancy_group */ + if (mddev->to_remove == NULL) + mddev->to_remove = &md_redundancy_group; + } + module_put(mddev->pers->owner); /* Invalidate devices that are now superfluous */ list_for_each_entry(rdev, &mddev->disks, same_set) @@ -4081,15 +4125,6 @@ static void mddev_delayed_delete(struct work_struct *ws) { mddev_t *mddev = container_of(ws, mddev_t, del_work); - if (mddev->private) { - sysfs_remove_group(&mddev->kobj, &md_redundancy_group); - if (mddev->private != (void*)1) - sysfs_remove_group(&mddev->kobj, mddev->private); - if (mddev->sysfs_action) - sysfs_put(mddev->sysfs_action); - mddev->sysfs_action = NULL; - mddev->private = NULL; - } sysfs_remove_group(&mddev->kobj, &md_bitmap_group); kobject_del(&mddev->kobj); kobject_put(&mddev->kobj); @@ -4247,6 +4282,13 @@ static int do_md_run(mddev_t * mddev) if (mddev->pers) return -EBUSY; + /* These two calls synchronise us with the + * sysfs_remove_group calls in mddev_unlock, + * so they must have completed. + */ + mutex_lock(&mddev->open_mutex); + mutex_unlock(&mddev->open_mutex); + /* * Analyze all RAID superblock(s) */ @@ -4535,8 +4577,8 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) mddev->queue->unplug_fn = NULL; mddev->queue->backing_dev_info.congested_fn = NULL; module_put(mddev->pers->owner); - if (mddev->pers->sync_request && mddev->private == NULL) - mddev->private = (void*)1; + if (mddev->pers->sync_request && mddev->to_remove == NULL) + mddev->to_remove = &md_redundancy_group; mddev->pers = NULL; /* tell userspace to handle 'inactive' */ sysfs_notify_dirent(mddev->sysfs_state); @@ -5495,6 +5537,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode, int err = 0; void __user *argp = (void __user *)arg; mddev_t *mddev = NULL; + int ro; if (!capable(CAP_SYS_ADMIN)) return -EACCES; @@ -5630,6 +5673,34 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode, err = do_md_stop(mddev, 1, 1); goto done_unlock; + case BLKROSET: + if (get_user(ro, (int __user *)(arg))) { + err = -EFAULT; + goto done_unlock; + } + err = -EINVAL; + + /* if the bdev is going readonly the value of mddev->ro + * does not matter, no writes are coming + */ + if (ro) + goto done_unlock; + + /* are we are already prepared for writes? 
*/ + if (mddev->ro != 1) + goto done_unlock; + + /* transitioning to readauto need only happen for + * arrays that call md_write_start + */ + if (mddev->pers) { + err = restart_array(mddev); + if (err == 0) { + mddev->ro = 2; + set_disk_ro(mddev->gendisk, 0); + } + } + goto done_unlock; } /* diff --git a/drivers/md/md.h b/drivers/md/md.h index 8e4c75c..722f5df 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -305,6 +305,7 @@ struct mddev_s atomic_t max_corr_read_errors; /* max read retries */ struct list_head all_mddevs; + struct attribute_group *to_remove; /* Generic barrier handling. * If there is a pending barrier request, all other * writes are blocked while the devices are flushed. diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 859bd3f..db2de5a 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -417,7 +417,7 @@ static void raid1_end_write_request(struct bio *bio, int error) */ static int read_balance(conf_t *conf, r1bio_t *r1_bio) { - const unsigned long this_sector = r1_bio->sector; + const sector_t this_sector = r1_bio->sector; int new_disk = conf->last_used, disk = new_disk; int wonly_disk = -1; const int sectors = r1_bio->sectors; @@ -433,7 +433,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio) retry: if (conf->mddev->recovery_cp < MaxSector && (this_sector + sectors >= conf->next_resync)) { - /* Choose the first operation device, for consistancy */ + /* Choose the first operational device, for consistancy */ new_disk = 0; for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev); @@ -911,9 +911,10 @@ static int make_request(struct request_queue *q, struct bio * bio) if (test_bit(Faulty, &rdev->flags)) { rdev_dec_pending(rdev, mddev); r1_bio->bios[i] = NULL; - } else + } else { r1_bio->bios[i] = bio; - targets++; + targets++; + } } else r1_bio->bios[i] = NULL; } diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 047c468..5fb1ad6 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -493,7 +493,7 @@ static int raid10_mergeable_bvec(struct request_queue *q, */ static int read_balance(conf_t *conf, r10bio_t *r10_bio) { - const unsigned long this_sector = r10_bio->sector; + const sector_t this_sector = r10_bio->sector; int disk, slot, nslot; const int sectors = r10_bio->sectors; sector_t new_distance, current_distance; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 0468f5b..105a3dc 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -5086,7 +5086,9 @@ static int run(mddev_t *mddev) } /* Ok, everything is just fine now */ - if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group)) + if (mddev->to_remove == &raid5_attrs_group) + mddev->to_remove = NULL; + else if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group)) printk(KERN_WARNING "raid5: failed to create sysfs attributes for %s\n", mdname(mddev)); @@ -5133,7 +5135,8 @@ static int stop(mddev_t *mddev) mddev->queue->backing_dev_info.congested_fn = NULL; blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ free_conf(conf); - mddev->private = &raid5_attrs_group; + mddev->private = NULL; + mddev->to_remove = &raid5_attrs_group; return 0; } diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.c b/drivers/media/video/gspca/stv06xx/stv06xx.c index 5d0241b..06e0e86 100644 --- a/drivers/media/video/gspca/stv06xx/stv06xx.c +++ b/drivers/media/video/gspca/stv06xx/stv06xx.c @@ -496,8 +496,6 @@ static const __devinitdata struct usb_device_id device_table[] = { {USB_DEVICE(0x046D, 0x08F5), .driver_info = BRIDGE_ST6422 }, /* QuickCam Messenger (new) */ 
{USB_DEVICE(0x046D, 0x08F6), .driver_info = BRIDGE_ST6422 }, - /* QuickCam Messenger (new) */ - {USB_DEVICE(0x046D, 0x08DA), .driver_info = BRIDGE_ST6422 }, {} }; MODULE_DEVICE_TABLE(usb, device_table); diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c index dbf4de3..69698e5 100644 --- a/drivers/net/arcnet/com20020-pci.c +++ b/drivers/net/arcnet/com20020-pci.c @@ -165,8 +165,8 @@ static struct pci_device_id com20020pci_id_table[] = { { 0x1571, 0xa204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT }, { 0x1571, 0xa205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT }, { 0x1571, 0xa206, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT }, - { 0x10B5, 0x9030, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT }, - { 0x10B5, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT }, + { 0x10B5, 0x9030, 0x10B5, 0x2978, 0, 0, ARC_CAN_10MBIT }, + { 0x10B5, 0x9050, 0x10B5, 0x2273, 0, 0, ARC_CAN_10MBIT }, { 0x14BA, 0x6000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT }, { 0x10B5, 0x2200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT }, {0,} diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index 542a4f7..2ee19d1 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -84,6 +84,20 @@ static struct can_bittiming_const sja1000_bittiming_const = { .brp_inc = 1, }; +static void sja1000_write_cmdreg(struct sja1000_priv *priv, u8 val) +{ + unsigned long flags; + + /* + * The command register needs some locking and time to settle + * the write_reg() operation - especially on SMP systems. + */ + spin_lock_irqsave(&priv->cmdreg_lock, flags); + priv->write_reg(priv, REG_CMR, val); + priv->read_reg(priv, REG_SR); + spin_unlock_irqrestore(&priv->cmdreg_lock, flags); +} + static int sja1000_probe_chip(struct net_device *dev) { struct sja1000_priv *priv = netdev_priv(dev); @@ -279,7 +293,7 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb, can_put_echo_skb(skb, dev, 0); - priv->write_reg(priv, REG_CMR, CMD_TR); + sja1000_write_cmdreg(priv, CMD_TR); return NETDEV_TX_OK; } @@ -328,7 +342,7 @@ static void sja1000_rx(struct net_device *dev) cf->can_id = id; /* release receive buffer */ - priv->write_reg(priv, REG_CMR, CMD_RRB); + sja1000_write_cmdreg(priv, CMD_RRB); netif_rx(skb); @@ -356,7 +370,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; stats->rx_over_errors++; stats->rx_errors++; - priv->write_reg(priv, REG_CMR, CMD_CDO); /* clear bit */ + sja1000_write_cmdreg(priv, CMD_CDO); /* clear bit */ } if (isrc & IRQ_EI) { diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h index 97a622b..de8e778 100644 --- a/drivers/net/can/sja1000/sja1000.h +++ b/drivers/net/can/sja1000/sja1000.h @@ -167,6 +167,7 @@ struct sja1000_priv { void __iomem *reg_base; /* ioremap'ed address to registers */ unsigned long irq_flags; /* for request_irq() */ + spinlock_t cmdreg_lock; /* lock for concurrent cmd register writes */ u16 flags; /* custom mode flags */ u8 ocr; /* output control register */ diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c index 04b382f..83eef8e 100644 --- a/drivers/net/mlx4/icm.c +++ b/drivers/net/mlx4/icm.c @@ -174,9 +174,10 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, if (chunk->nsg <= 0) goto fail; + } + if (chunk->npages == MLX4_ICM_CHUNK_LEN) chunk = NULL; - } npages -= 1 << cur_order; } else { diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c index 449a982..8015310 100644 
--- a/drivers/net/pppol2tp.c +++ b/drivers/net/pppol2tp.c @@ -977,7 +977,8 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh /* Calculate UDP checksum if configured to do so */ if (sk_tun->sk_no_check == UDP_CSUM_NOXMIT) skb->ip_summed = CHECKSUM_NONE; - else if (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) { + else if ((skb_dst(skb) && skb_dst(skb)->dev) && + (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM))) { skb->ip_summed = CHECKSUM_COMPLETE; csum = skb_checksum(skb, 0, udp_len, 0); uh->check = csum_tcpudp_magic(inet->inet_saddr, diff --git a/drivers/net/wireless/ath/ar9170/hw.h b/drivers/net/wireless/ath/ar9170/hw.h index 701ddb7..5e7e1bd 100644 --- a/drivers/net/wireless/ath/ar9170/hw.h +++ b/drivers/net/wireless/ath/ar9170/hw.h @@ -424,5 +424,6 @@ enum ar9170_txq { #define AR9170_TXQ_DEPTH 32 #define AR9170_TX_MAX_PENDING 128 +#define AR9170_RX_STREAM_MAX_SIZE 65535 #endif /* __AR9170_HW_H */ diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c index f9d6db8..9ea9845 100644 --- a/drivers/net/wireless/ath/ar9170/main.c +++ b/drivers/net/wireless/ath/ar9170/main.c @@ -2538,7 +2538,7 @@ void *ar9170_alloc(size_t priv_size) * tends to split the streams into seperate rx descriptors. */ - skb = __dev_alloc_skb(AR9170_MAX_RX_BUFFER_SIZE, GFP_KERNEL); + skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL); if (!skb) goto err_nomem; diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c index 0387658..db541d4 100644 --- a/drivers/net/wireless/ath/ar9170/usb.c +++ b/drivers/net/wireless/ath/ar9170/usb.c @@ -66,18 +66,28 @@ static struct usb_device_id ar9170_usb_ids[] = { { USB_DEVICE(0x0cf3, 0x1001) }, /* TP-Link TL-WN821N v2 */ { USB_DEVICE(0x0cf3, 0x1002) }, + /* 3Com Dual Band 802.11n USB Adapter */ + { USB_DEVICE(0x0cf3, 0x1010) }, + /* H3C Dual Band 802.11n USB Adapter */ + { USB_DEVICE(0x0cf3, 0x1011) }, /* Cace Airpcap NX */ { USB_DEVICE(0xcace, 0x0300) }, /* D-Link DWA 160 A1 */ { USB_DEVICE(0x07d1, 0x3c10) }, /* D-Link DWA 160 A2 */ { USB_DEVICE(0x07d1, 0x3a09) }, + /* Netgear WNA1000 */ + { USB_DEVICE(0x0846, 0x9040) }, /* Netgear WNDA3100 */ { USB_DEVICE(0x0846, 0x9010) }, /* Netgear WN111 v2 */ { USB_DEVICE(0x0846, 0x9001) }, /* Zydas ZD1221 */ { USB_DEVICE(0x0ace, 0x1221) }, + /* Proxim ORiNOCO 802.11n USB */ + { USB_DEVICE(0x1435, 0x0804) }, + /* WNC Generic 11n USB Dongle */ + { USB_DEVICE(0x1435, 0x0326) }, /* ZyXEL NWD271N */ { USB_DEVICE(0x0586, 0x3417) }, /* Z-Com UB81 BG */ diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index ced648b..d377809 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -1210,6 +1210,7 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf) struct ath5k_hw *ah = sc->ah; struct sk_buff *skb = bf->skb; struct ath5k_desc *ds; + int ret; if (!skb) { skb = ath5k_rx_skb_alloc(sc, &bf->skbaddr); @@ -1236,9 +1237,9 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf) ds = bf->desc; ds->ds_link = bf->daddr; /* link to self */ ds->ds_data = bf->skbaddr; - ah->ah_setup_rx_desc(ah, ds, - skb_tailroom(skb), /* buffer size */ - 0); + ret = ah->ah_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0); + if (ret) + return ret; if (sc->rxlink != NULL) *sc->rxlink = bf->daddr; @@ -2996,13 +2997,15 @@ static void ath5k_configure_filter(struct ieee80211_hw *hw, if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) { if (*new_flags & 
FIF_PROMISC_IN_BSS) { - rfilt |= AR5K_RX_FILTER_PROM; __set_bit(ATH_STAT_PROMISC, sc->status); } else { __clear_bit(ATH_STAT_PROMISC, sc->status); } } + if (test_bit(ATH_STAT_PROMISC, sc->status)) + rfilt |= AR5K_RX_FILTER_PROM; + /* Note, AR5K_RX_FILTER_MCAST is already enabled */ if (*new_flags & FIF_ALLMULTI) { mfilt[0] = ~0; diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c index 3534d86..2423068 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c @@ -2083,10 +2083,12 @@ static void rs_rate_scale_perform(struct iwl_priv *priv, } /* Else we have enough samples; calculate estimate of * actual average throughput */ - - /* Sanity-check TPT calculations */ - BUG_ON(window->average_tpt != ((window->success_ratio * - tbl->expected_tpt[index] + 64) / 128)); + if (window->average_tpt != ((window->success_ratio * + tbl->expected_tpt[index] + 64) / 128)) { + IWL_ERR(priv, "expected_tpt should have been calculated by now\n"); + window->average_tpt = ((window->success_ratio * + tbl->expected_tpt[index] + 64) / 128); + } /* If we are searching for better modulation mode, check success. */ if (lq_sta->search_better_tbl && diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index 5622a55..664dcd5 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -3365,6 +3365,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) */ spin_lock_init(&priv->reg_lock); spin_lock_init(&priv->lock); + + /* + * stop and reset the on-board processor just in case it is in a + * strange state ... like being left stranded by a primary kernel + * and this is now the kdump kernel trying to start up + */ + iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); + iwl_hw_detect(priv); IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s REV=0x%X\n", priv->cfg->name, priv->hw_rev); diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index adbb3ea..ca0fb8b 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c @@ -4022,6 +4022,13 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e spin_lock_init(&priv->reg_lock); spin_lock_init(&priv->lock); + /* + * stop and reset the on-board processor just in case it is in a + * strange state ... like being left stranded by a primary kernel + * and this is now the kdump kernel trying to start up + */ + iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); + /*********************** * 4. 
Read EEPROM * ********************/ diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c index b3c4fbd..805284d 100644 --- a/drivers/net/wireless/p54/p54usb.c +++ b/drivers/net/wireless/p54/p54usb.c @@ -78,6 +78,7 @@ static struct usb_device_id p54u_table[] __devinitdata = { {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */ {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */ {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */ + {USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */ {USB_DEVICE(0x413c, 0x8102)}, /* Spinnaker DUT */ {USB_DEVICE(0x413c, 0x8104)}, /* Cohiba Proto board */ {} diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c index 8a40a14..77b7657 100644 --- a/drivers/net/wireless/rtl818x/rtl8180_dev.c +++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c @@ -189,6 +189,7 @@ static void rtl8180_handle_tx(struct ieee80211_hw *dev, unsigned int prio) info->flags |= IEEE80211_TX_STAT_ACK; info->status.rates[0].count = (flags & 0xFF) + 1; + info->status.rates[1].idx = -1; ieee80211_tx_status_irqsafe(dev, skb); if (ring->entries - skb_queue_len(&ring->queue) == 2) diff --git a/drivers/net/wireless/wl12xx/wl1251_sdio.c b/drivers/net/wireless/wl12xx/wl1251_sdio.c index 9423f22..d74b89b 100644 --- a/drivers/net/wireless/wl12xx/wl1251_sdio.c +++ b/drivers/net/wireless/wl12xx/wl1251_sdio.c @@ -160,6 +160,7 @@ disable: sdio_disable_func(func); release: sdio_release_host(func); + wl1251_free_hw(wl); return ret; } diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c index 166b67e..de82183 100644 --- a/drivers/oprofile/cpu_buffer.c +++ b/drivers/oprofile/cpu_buffer.c @@ -30,23 +30,7 @@ #define OP_BUFFER_FLAGS 0 -/* - * Read and write access is using spin locking. Thus, writing to the - * buffer by NMI handler (x86) could occur also during critical - * sections when reading the buffer. To avoid this, there are 2 - * buffers for independent read and write access. Read access is in - * process context only, write access only in the NMI handler. If the - * read buffer runs empty, both buffers are swapped atomically. There - * is potentially a small window during swapping where the buffers are - * disabled and samples could be lost. - * - * Using 2 buffers is a little bit overhead, but the solution is clear - * and does not require changes in the ring buffer implementation. It - * can be changed to a single buffer solution when the ring buffer - * access is implemented as non-locking atomic code. 
- */ -static struct ring_buffer *op_ring_buffer_read; -static struct ring_buffer *op_ring_buffer_write; +static struct ring_buffer *op_ring_buffer; DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer); static void wq_sync_buffer(struct work_struct *work); @@ -68,12 +52,9 @@ void oprofile_cpu_buffer_inc_smpl_lost(void) void free_cpu_buffers(void) { - if (op_ring_buffer_read) - ring_buffer_free(op_ring_buffer_read); - op_ring_buffer_read = NULL; - if (op_ring_buffer_write) - ring_buffer_free(op_ring_buffer_write); - op_ring_buffer_write = NULL; + if (op_ring_buffer) + ring_buffer_free(op_ring_buffer); + op_ring_buffer = NULL; } #define RB_EVENT_HDR_SIZE 4 @@ -86,11 +67,8 @@ int alloc_cpu_buffers(void) unsigned long byte_size = buffer_size * (sizeof(struct op_sample) + RB_EVENT_HDR_SIZE); - op_ring_buffer_read = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS); - if (!op_ring_buffer_read) - goto fail; - op_ring_buffer_write = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS); - if (!op_ring_buffer_write) + op_ring_buffer = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS); + if (!op_ring_buffer) goto fail; for_each_possible_cpu(i) { @@ -162,16 +140,11 @@ struct op_sample *op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size) { entry->event = ring_buffer_lock_reserve - (op_ring_buffer_write, sizeof(struct op_sample) + + (op_ring_buffer, sizeof(struct op_sample) + size * sizeof(entry->sample->data[0])); - if (entry->event) - entry->sample = ring_buffer_event_data(entry->event); - else - entry->sample = NULL; - - if (!entry->sample) + if (!entry->event) return NULL; - + entry->sample = ring_buffer_event_data(entry->event); entry->size = size; entry->data = entry->sample->data; @@ -180,25 +153,16 @@ struct op_sample int op_cpu_buffer_write_commit(struct op_entry *entry) { - return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event); + return ring_buffer_unlock_commit(op_ring_buffer, entry->event); } struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu) { struct ring_buffer_event *e; - e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL); - if (e) - goto event; - if (ring_buffer_swap_cpu(op_ring_buffer_read, - op_ring_buffer_write, - cpu)) + e = ring_buffer_consume(op_ring_buffer, cpu, NULL); + if (!e) return NULL; - e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL); - if (e) - goto event; - return NULL; -event: entry->event = e; entry->sample = ring_buffer_event_data(e); entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample)) @@ -209,8 +173,7 @@ event: unsigned long op_cpu_buffer_entries(int cpu) { - return ring_buffer_entries_cpu(op_ring_buffer_read, cpu) - + ring_buffer_entries_cpu(op_ring_buffer_write, cpu); + return ring_buffer_entries_cpu(op_ring_buffer, cpu); } static int diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 456c265..197e38f 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -1461,7 +1461,8 @@ static void quirk_jmicron_ata(struct pci_dev *pdev) conf5 &= ~(1 << 24); /* Clear bit 24 */ switch (pdev->device) { - case PCI_DEVICE_ID_JMICRON_JMB360: + case PCI_DEVICE_ID_JMICRON_JMB360: /* SATA single port */ + case PCI_DEVICE_ID_JMICRON_JMB362: /* SATA dual ports */ /* The controller should be in single function ahci mode */ conf1 |= 0x0002A100; /* Set 8, 13, 15, 17 */ break; @@ -1497,12 +1498,14 @@ static void quirk_jmicron_ata(struct pci_dev *pdev) } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata); 
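As a quick cross-check of the JMicron quirk above (an illustrative aside, not part of the diff): the constant ORed into conf1, 0x0002A100, does match its "Set 8, 13, 15, 17" comment, as the minimal standalone check below shows.

#include <assert.h>

int main(void)
{
	/* bits named in the "Set 8, 13, 15, 17" comment of quirk_jmicron_ata() */
	unsigned int mask = (1u << 8) | (1u << 13) | (1u << 15) | (1u << 17);

	assert(mask == 0x0002A100);	/* 0x100 + 0x2000 + 0x8000 + 0x20000 */
	return 0;
}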
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata); @@ -2113,6 +2116,8 @@ static void __devinit quirk_disable_msi(struct pci_dev *dev) } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0xa238, quirk_disable_msi); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x5a3f, quirk_disable_msi); /* Go through the list of Hypertransport capabilities and * return 1 if a HT MSI capability is found and enabled */ @@ -2204,15 +2209,16 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS, DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, ht_enable_msi_mapping); -/* The P5N32-SLI Premium motherboard from Asus has a problem with msi +/* The P5N32-SLI motherboards from Asus have a problem with msi * for the MCP55 NIC. It is not yet determined whether the msi problem * also affects other devices. As for now, turn off msi for this device. */ static void __devinit nvenet_msi_disable(struct pci_dev *dev) { - if (dmi_name_in_vendors("P5N32-SLI PREMIUM")) { + if (dmi_name_in_vendors("P5N32-SLI PREMIUM") || + dmi_name_in_vendors("P5N32-E SLI")) { dev_info(&dev->dev, - "Disabling msi for MCP55 NIC on P5N32-SLI Premium\n"); + "Disabling msi for MCP55 NIC on P5N32-SLI\n"); dev->no_msi = 1; } } diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c index 6a47bb7..9a844ca 100644 --- a/drivers/platform/x86/eeepc-laptop.c +++ b/drivers/platform/x86/eeepc-laptop.c @@ -578,6 +578,8 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc) struct pci_dev *dev; struct pci_bus *bus; bool blocked = eeepc_wlan_rfkill_blocked(eeepc); + bool absent; + u32 l; if (eeepc->wlan_rfkill) rfkill_set_sw_state(eeepc->wlan_rfkill, blocked); @@ -591,6 +593,22 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc) goto out_unlock; } + if (pci_bus_read_config_dword(bus, 0, PCI_VENDOR_ID, &l)) { + pr_err("Unable to read PCI config space?\n"); + goto out_unlock; + } + absent = (l == 0xffffffff); + + if (blocked != absent) { + pr_warning("BIOS says wireless lan is %s, " + "but the pci device is %s\n", + blocked ? "blocked" : "unblocked", + absent ? 
"absent" : "present"); + pr_warning("skipped wireless hotplug as probably " + "inappropriate for this model\n"); + goto out_unlock; + } + if (!blocked) { dev = pci_get_slot(bus, 0); if (dev) { diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index e9aa814..aa13875 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -719,6 +719,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) } } + cmos_rtc.dev = dev; + dev_set_drvdata(dev, &cmos_rtc); + cmos_rtc.rtc = rtc_device_register(driver_name, dev, &cmos_rtc_ops, THIS_MODULE); if (IS_ERR(cmos_rtc.rtc)) { @@ -726,8 +729,6 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) goto cleanup0; } - cmos_rtc.dev = dev; - dev_set_drvdata(dev, &cmos_rtc); rename_region(ports, dev_name(&cmos_rtc.rtc->dev)); spin_lock_irq(&rtc_lock); diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index e0d7b99..43bfffe 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c @@ -456,8 +456,6 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev) pr_debug("s3c2410_rtc: RTCCON=%02x\n", readb(s3c_rtc_base + S3C2410_RTCCON)); - s3c_rtc_setfreq(&pdev->dev, 1); - device_init_wakeup(&pdev->dev, 1); /* register RTC and exit */ @@ -474,6 +472,9 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev) rtc->max_user_freq = 128; platform_set_drvdata(pdev, rtc); + + s3c_rtc_setfreq(&pdev->dev, 1); + return 0; err_nortc: diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 6fc63b9..40807e4 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -2169,30 +2169,28 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) /* If possible, enable MSI-X. */ if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && - !IS_QLA8432(ha) && !IS_QLA8001(ha)) - goto skip_msix; + !IS_QLA8432(ha) && !IS_QLA8001(ha)) + goto skip_msi; + + if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && + (ha->pdev->subsystem_device == 0x7040 || + ha->pdev->subsystem_device == 0x7041 || + ha->pdev->subsystem_device == 0x1705)) { + DEBUG2(qla_printk(KERN_WARNING, ha, + "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X,0x%X).\n", + ha->pdev->subsystem_vendor, + ha->pdev->subsystem_device)); + goto skip_msi; + } if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX || !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) { DEBUG2(qla_printk(KERN_WARNING, ha, "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n", ha->pdev->revision, ha->fw_attributes)); - goto skip_msix; } - if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && - (ha->pdev->subsystem_device == 0x7040 || - ha->pdev->subsystem_device == 0x7041 || - ha->pdev->subsystem_device == 0x1705)) { - DEBUG2(qla_printk(KERN_WARNING, ha, - "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X, 0x%X).\n", - ha->pdev->subsystem_vendor, - ha->pdev->subsystem_device)); - - goto skip_msi; - } - ret = qla24xx_enable_msix(ha, rsp); if (!ret) { DEBUG2(qla_printk(KERN_INFO, ha, diff --git a/drivers/staging/comedi/drivers/ni_mio_cs.c b/drivers/staging/comedi/drivers/ni_mio_cs.c index dc4849a..9855608 100644 --- a/drivers/staging/comedi/drivers/ni_mio_cs.c +++ b/drivers/staging/comedi/drivers/ni_mio_cs.c @@ -123,7 +123,7 @@ static const struct ni_board_struct ni_boards[] = { .adbits = 12, .ai_fifo_depth = 1024, .alwaysdither = 0, - .gainlkup = ai_gain_16, + .gainlkup = ai_gain_4, .ai_speed = 5000, .n_aochan = 2, .aobits = 12, diff --git a/drivers/staging/rt2860/usb_main_dev.c b/drivers/staging/rt2860/usb_main_dev.c index 
925a236..2ebc599 100644 --- a/drivers/staging/rt2860/usb_main_dev.c +++ b/drivers/staging/rt2860/usb_main_dev.c @@ -97,6 +97,7 @@ struct usb_device_id rtusb_usb_id[] = { {USB_DEVICE(0x5A57, 0x0282)}, /* Zinwell */ {USB_DEVICE(0x7392, 0x7718)}, {USB_DEVICE(0x7392, 0x7717)}, + {USB_DEVICE(0x0411, 0x016f)}, /* MelCo.,Inc. WLI-UC-G301N */ {USB_DEVICE(0x1737, 0x0070)}, /* Linksys WUSB100 */ {USB_DEVICE(0x1737, 0x0071)}, /* Linksys WUSB600N */ {USB_DEVICE(0x0411, 0x00e8)}, /* Buffalo WLI-UC-G300N */ diff --git a/drivers/staging/rtl8192su/r8192U_core.c b/drivers/staging/rtl8192su/r8192U_core.c index ccb9d5b..55337c8 100644 --- a/drivers/staging/rtl8192su/r8192U_core.c +++ b/drivers/staging/rtl8192su/r8192U_core.c @@ -120,6 +120,7 @@ static struct usb_device_id rtl8192_usb_id_tbl[] = { {USB_DEVICE(0x050d, 0x805E)}, /* Sitecom */ {USB_DEVICE(0x0df6, 0x0031)}, + {USB_DEVICE(0x0df6, 0x004b)}, /* WL-349 */ /* EnGenius */ {USB_DEVICE(0x1740, 0x9201)}, /* Dlink */ diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c index 0db8d7b..433c403 100644 --- a/drivers/staging/vt6655/device_main.c +++ b/drivers/staging/vt6655/device_main.c @@ -1089,11 +1089,13 @@ device_found1(struct pci_dev *pcid, const struct pci_device_id *ent) } //2008-07-21-01by MikeLiu //register wpadev +#if 0 if(wpa_set_wpadev(pDevice, 1)!=0) { printk("Fail to Register WPADEV?\n"); unregister_netdev(pDevice->dev); free_netdev(dev); } +#endif device_print_info(pDevice); pci_set_drvdata(pcid, pDevice); return 0; diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index db6b071..d1c2108 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -1201,7 +1201,7 @@ made_compressed_probe: if (rcv->urb == NULL) { dev_dbg(&intf->dev, "out of memory (read urbs usb_alloc_urb)\n"); - goto alloc_fail7; + goto alloc_fail6; } rcv->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; @@ -1225,7 +1225,7 @@ made_compressed_probe: if (snd->urb == NULL) { dev_dbg(&intf->dev, "out of memory (write urbs usb_alloc_urb)"); - goto alloc_fail7; + goto alloc_fail8; } if (usb_endpoint_xfer_int(epwrite)) @@ -1264,6 +1264,7 @@ made_compressed_probe: i = device_create_file(&intf->dev, &dev_attr_iCountryCodeRelDate); if (i < 0) { + device_remove_file(&intf->dev, &dev_attr_wCountryCodes); kfree(acm->country_codes); goto skip_countries; } @@ -1300,6 +1301,7 @@ alloc_fail8: usb_free_urb(acm->wb[i].urb); alloc_fail7: acm_read_buffers_free(acm); +alloc_fail6: for (i = 0; i < num_rx_buf; i++) usb_free_urb(acm->ru[i].urb); usb_free_urb(acm->ctrlurb); diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c index fa3d142..08a9a62 100644 --- a/drivers/usb/gadget/fsl_udc_core.c +++ b/drivers/usb/gadget/fsl_udc_core.c @@ -489,7 +489,7 @@ static int fsl_ep_enable(struct usb_ep *_ep, case USB_ENDPOINT_XFER_ISOC: /* Calculate transactions needed for high bandwidth iso */ mult = (unsigned char)(1 + ((max >> 11) & 0x03)); - max = max & 0x8ff; /* bit 0~10 */ + max = max & 0x7ff; /* bit 0~10 */ /* 3 transactions at most */ if (mult > 3) goto en_done; diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index 1937267..ec45293 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c @@ -294,6 +294,16 @@ static int ehci_bus_resume (struct usb_hcd *hcd) /* manually resume the ports we suspended during bus_suspend() */ i = HCS_N_PORTS (ehci->hcs_params); while (i--) { + /* clear phy low power mode before resume */ + if (ehci->has_hostpc) { + u32 __iomem 
*hostpc_reg = + (u32 __iomem *)((u8 *)ehci->regs + + HOSTPC0 + 4 * (i & 0xff)); + temp = ehci_readl(ehci, hostpc_reg); + ehci_writel(ehci, temp & ~HOSTPC_PHCD, + hostpc_reg); + mdelay(5); + } temp = ehci_readl(ehci, &ehci->regs->port_status [i]); temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS); if (test_bit(i, &ehci->bus_suspended) && @@ -678,6 +688,13 @@ static int ehci_hub_control ( if (temp & PORT_SUSPEND) { if ((temp & PORT_PE) == 0) goto error; + /* clear phy low power mode before resume */ + if (hostpc_reg) { + temp1 = ehci_readl(ehci, hostpc_reg); + ehci_writel(ehci, temp1 & ~HOSTPC_PHCD, + hostpc_reg); + mdelay(5); + } /* resume signaling for 20 msec */ temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS); ehci_writel(ehci, temp | PORT_RESUME, diff --git a/drivers/usb/host/fhci.h b/drivers/usb/host/fhci.h index 72dae1c..3b6e864 100644 --- a/drivers/usb/host/fhci.h +++ b/drivers/usb/host/fhci.h @@ -20,6 +20,7 @@ #include #include +#include #include #include #include @@ -515,9 +516,13 @@ static inline int cq_put(struct kfifo *kfifo, void *p) static inline void *cq_get(struct kfifo *kfifo) { - void *p = NULL; + unsigned int sz; + void *p; + + sz = kfifo_out(kfifo, (void *)&p, sizeof(p)); + if (sz != sizeof(p)) + return NULL; - kfifo_out(kfifo, (void *)&p, sizeof(p)); return p; } diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c index fa920c7..53d90ca 100644 --- a/drivers/usb/host/xhci-hcd.c +++ b/drivers/usb/host/xhci-hcd.c @@ -104,6 +104,33 @@ int xhci_halt(struct xhci_hcd *xhci) } /* + * Set the run bit and wait for the host to be running. + */ +int xhci_start(struct xhci_hcd *xhci) +{ + u32 temp; + int ret; + + temp = xhci_readl(xhci, &xhci->op_regs->command); + temp |= (CMD_RUN); + xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n", + temp); + xhci_writel(xhci, temp, &xhci->op_regs->command); + + /* + * Wait for the HCHalted Status bit to be 0 to indicate the host is + * running. + */ + ret = handshake(xhci, &xhci->op_regs->status, + STS_HALT, 0, XHCI_MAX_HALT_USEC); + if (ret == -ETIMEDOUT) + xhci_err(xhci, "Host took too long to start, " + "waited %u microseconds.\n", + XHCI_MAX_HALT_USEC); + return ret; +} + +/* * Reset a halted HC, and set the internal HC state to HC_STATE_HALT. * * This resets pipelines, timers, counters, state machines, etc. @@ -114,6 +141,7 @@ int xhci_reset(struct xhci_hcd *xhci) { u32 command; u32 state; + int ret; state = xhci_readl(xhci, &xhci->op_regs->status); if ((state & STS_HALT) == 0) { @@ -128,7 +156,17 @@ int xhci_reset(struct xhci_hcd *xhci) /* XXX: Why does EHCI set this here? Shouldn't other code do this? */ xhci_to_hcd(xhci)->state = HC_STATE_HALT; - return handshake(xhci, &xhci->op_regs->command, CMD_RESET, 0, 250 * 1000); + ret = handshake(xhci, &xhci->op_regs->command, + CMD_RESET, 0, 250 * 1000); + if (ret) + return ret; + + xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n"); + /* + * xHCI cannot write to any doorbells or operational registers other + * than status until the "Controller Not Ready" flag is cleared. 
+ */ + return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000); } @@ -451,13 +489,11 @@ int xhci_run(struct usb_hcd *hcd) if (NUM_TEST_NOOPS > 0) doorbell = xhci_setup_one_noop(xhci); - temp = xhci_readl(xhci, &xhci->op_regs->command); - temp |= (CMD_RUN); - xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n", - temp); - xhci_writel(xhci, temp, &xhci->op_regs->command); - /* Flush PCI posted writes */ - temp = xhci_readl(xhci, &xhci->op_regs->command); + if (xhci_start(xhci)) { + xhci_halt(xhci); + return -ENODEV; + } + xhci_dbg(xhci, "// @%p = 0x%x\n", &xhci->op_regs->command, temp); if (doorbell) (*doorbell)(xhci); @@ -1452,6 +1488,8 @@ void xhci_endpoint_reset(struct usb_hcd *hcd, kfree(virt_ep->stopped_td); xhci_ring_cmd_db(xhci); } + virt_ep->stopped_td = NULL; + virt_ep->stopped_trb = NULL; spin_unlock_irqrestore(&xhci->lock, flags); if (ret) diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index e097008..4361b2d 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -54,7 +54,7 @@ static int xhci_pci_setup(struct usb_hcd *hcd) struct pci_dev *pdev = to_pci_dev(hcd->self.controller); int retval; - hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 1; + hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 2; xhci->cap_regs = hcd->regs; xhci->op_regs = hcd->regs + diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index ee7bc7e..2b26b5e 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -241,10 +241,27 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring, int i; union xhci_trb *enq = ring->enqueue; struct xhci_segment *enq_seg = ring->enq_seg; + struct xhci_segment *cur_seg; + unsigned int left_on_ring; /* Check if ring is empty */ - if (enq == ring->dequeue) + if (enq == ring->dequeue) { + /* Can't use link trbs */ + left_on_ring = TRBS_PER_SEGMENT - 1; + for (cur_seg = enq_seg->next; cur_seg != enq_seg; + cur_seg = cur_seg->next) + left_on_ring += TRBS_PER_SEGMENT - 1; + + /* Always need one TRB free in the ring. */ + left_on_ring -= 1; + if (num_trbs > left_on_ring) { + xhci_warn(xhci, "Not enough room on ring; " + "need %u TRBs, %u TRBs left\n", + num_trbs, left_on_ring); + return 0; + } return 1; + } /* Make sure there's an extra empty TRB available */ for (i = 0; i <= num_trbs; ++i) { if (enq == ring->dequeue) @@ -333,7 +350,8 @@ static struct xhci_segment *find_trb_seg( while (cur_seg->trbs > trb || &cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) { generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic; - if (TRB_TYPE(generic_trb->field[3]) == TRB_LINK && + if ((generic_trb->field[3] & TRB_TYPE_BITMASK) == + TRB_TYPE(TRB_LINK) && (generic_trb->field[3] & LINK_TOGGLE)) *cycle_state = ~(*cycle_state) & 0x1; cur_seg = cur_seg->next; @@ -389,7 +407,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, BUG(); trb = &state->new_deq_ptr->generic; - if (TRB_TYPE(trb->field[3]) == TRB_LINK && + if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) && (trb->field[3] & LINK_TOGGLE)) state->new_cycle_state = ~(state->new_cycle_state) & 0x1; next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr); @@ -577,6 +595,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, /* Otherwise just ring the doorbell to restart the ring */ ring_ep_doorbell(xhci, slot_id, ep_index); } + ep->stopped_td = NULL; + ep->stopped_trb = NULL; /* * Drop the lock and complete the URBs in the cancelled TD list. 
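For reference on the room_on_ring() change above (an illustrative sketch, not part of the diff): with the empty-ring accounting added there, each segment loses one TRB to its link TRB and one further TRB is always kept free, so a ring of num_segs segments can accept at most num_segs * (TRBS_PER_SEGMENT - 1) - 1 TRBs in a single request. Assuming TRBS_PER_SEGMENT is 64, as in the xhci.h of this series, a two-segment ring tops out at 125 TRBs.

#include <stdio.h>

#define TRBS_PER_SEGMENT 64	/* assumed value, as in xhci.h of this series */

/* mirrors the empty-ring accounting in room_on_ring(): one link TRB
 * per segment is unusable, and one extra TRB is always left free */
static unsigned int usable_trbs(unsigned int num_segs)
{
	return num_segs * (TRBS_PER_SEGMENT - 1) - 1;
}

int main(void)
{
	printf("two-segment ring: %u usable TRBs\n", usable_trbs(2));	/* prints 125 */
	return 0;
}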
@@ -1049,8 +1069,13 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci, ep->ep_state |= EP_HALTED; ep->stopped_td = td; ep->stopped_trb = event_trb; + xhci_queue_reset_ep(xhci, slot_id, ep_index); xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index); + + ep->stopped_td = NULL; + ep->stopped_trb = NULL; + xhci_ring_cmd_db(xhci); } @@ -1370,8 +1395,10 @@ static int handle_tx_event(struct xhci_hcd *xhci, for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg; cur_trb != event_trb; next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { - if (TRB_TYPE(cur_trb->generic.field[3]) != TRB_TR_NOOP && - TRB_TYPE(cur_trb->generic.field[3]) != TRB_LINK) + if ((cur_trb->generic.field[3] & + TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) && + (cur_trb->generic.field[3] & + TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK)) td->urb->actual_length += TRB_LEN(cur_trb->generic.field[2]); } diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 7d920f2..e3e087e 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -61,6 +61,8 @@ static struct usb_device_id id_table [] = { { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */ { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */ + { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */ + { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */ { USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */ { USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */ { USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */ @@ -72,9 +74,12 @@ static struct usb_device_id id_table [] = { { USB_DEVICE(0x10C4, 0x1601) }, /* Arkham Technology DS101 Adapter */ { USB_DEVICE(0x10C4, 0x800A) }, /* SPORTident BSM7-D-USB main station */ { USB_DEVICE(0x10C4, 0x803B) }, /* Pololu USB-serial converter */ + { USB_DEVICE(0x10C4, 0x8044) }, /* Cygnal Debug Adapter */ + { USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME build-in converter */ { USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */ { USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */ { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */ + { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */ { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */ { USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */ { USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */ @@ -82,12 +87,15 @@ static struct usb_device_id id_table [] = { { USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */ { USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */ { USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */ + { USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */ { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */ { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */ { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */ + { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */ { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */ { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */ { USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */ + { USB_DEVICE(0x10C4, 0x81AD) }, /* INSYS USB Modem */ { USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */ { USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */ 
{ USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */ @@ -105,6 +113,7 @@ static struct usb_device_id id_table [] = { { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ + { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */ { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */ { USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */ { USB_DEVICE(0x10C4, 0xF003) }, /* Elan Digital Systems USBpulse100 */ @@ -115,6 +124,8 @@ static struct usb_device_id id_table [] = { { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */ { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */ { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */ + { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */ + { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */ { } /* Terminating Entry */ diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index ca9e3ba..8c19ad5 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -2312,6 +2312,8 @@ static void ftdi_set_termios(struct tty_struct *tty, "urb failed to set to rts/cts flow control\n"); } + /* raise DTR/RTS */ + set_mctrl(port, TIOCM_DTR | TIOCM_RTS); } else { /* * Xon/Xoff code @@ -2359,6 +2361,8 @@ static void ftdi_set_termios(struct tty_struct *tty, } } + /* lower DTR/RTS */ + clear_mctrl(port, TIOCM_DTR | TIOCM_RTS); } return; } diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c index 95d8d26..2e0497b 100644 --- a/drivers/usb/serial/ir-usb.c +++ b/drivers/usb/serial/ir-usb.c @@ -312,6 +312,7 @@ static int ir_open(struct tty_struct *tty, struct usb_serial_port *port) kfree(port->read_urb->transfer_buffer); port->read_urb->transfer_buffer = buffer; port->read_urb->transfer_buffer_length = buffer_size; + port->bulk_in_buffer = buffer; buffer = kmalloc(buffer_size, GFP_KERNEL); if (!buffer) { @@ -321,6 +322,7 @@ static int ir_open(struct tty_struct *tty, struct usb_serial_port *port) kfree(port->write_urb->transfer_buffer); port->write_urb->transfer_buffer = buffer; port->write_urb->transfer_buffer_length = buffer_size; + port->bulk_out_buffer = buffer; port->bulk_out_size = buffer_size; } diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c index 3a78738..6825082 100644 --- a/drivers/usb/serial/kl5kusb105.c +++ b/drivers/usb/serial/kl5kusb105.c @@ -310,6 +310,7 @@ err_cleanup: usb_free_urb(priv->write_urb_pool[j]); } } + kfree(priv); usb_set_serial_port_data(serial->port[i], NULL); } return -ENOMEM; diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c index 45ea694..9d99e68 100644 --- a/drivers/usb/serial/kobil_sct.c +++ b/drivers/usb/serial/kobil_sct.c @@ -345,7 +345,8 @@ static void kobil_close(struct usb_serial_port *port) /* FIXME: Add rts/dtr methods */ if (port->write_urb) { - usb_kill_urb(port->write_urb); + usb_poison_urb(port->write_urb); + kfree(port->write_urb->transfer_buffer); usb_free_urb(port->write_urb); port->write_urb = NULL; } diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index 2cfe245..fb472dd 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -717,7 +717,6 @@ static void 
mos7840_bulk_in_callback(struct urb *urb) mos7840_port = urb->context; if (!mos7840_port) { dbg("%s", "NULL mos7840_port pointer"); - mos7840_port->read_urb_busy = false; return; } diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index d93283d..64b50f6 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -334,6 +334,24 @@ static int option_resume(struct usb_serial *serial); #define ALCATEL_VENDOR_ID 0x1bbb #define ALCATEL_PRODUCT_X060S 0x0000 +#define PIRELLI_VENDOR_ID 0x1266 +#define PIRELLI_PRODUCT_C100_1 0x1002 +#define PIRELLI_PRODUCT_C100_2 0x1003 +#define PIRELLI_PRODUCT_1004 0x1004 +#define PIRELLI_PRODUCT_1005 0x1005 +#define PIRELLI_PRODUCT_1006 0x1006 +#define PIRELLI_PRODUCT_1007 0x1007 +#define PIRELLI_PRODUCT_1008 0x1008 +#define PIRELLI_PRODUCT_1009 0x1009 +#define PIRELLI_PRODUCT_100A 0x100a +#define PIRELLI_PRODUCT_100B 0x100b +#define PIRELLI_PRODUCT_100C 0x100c +#define PIRELLI_PRODUCT_100D 0x100d +#define PIRELLI_PRODUCT_100E 0x100e +#define PIRELLI_PRODUCT_100F 0x100f +#define PIRELLI_PRODUCT_1011 0x1011 +#define PIRELLI_PRODUCT_1012 0x1012 + /* Airplus products */ #define AIRPLUS_VENDOR_ID 0x1011 #define AIRPLUS_PRODUCT_MCD650 0x3198 @@ -346,6 +364,12 @@ static int option_resume(struct usb_serial *serial); #define HAIER_VENDOR_ID 0x201e #define HAIER_PRODUCT_CE100 0x2009 +#define CINTERION_VENDOR_ID 0x0681 + +/* Olivetti products */ +#define OLIVETTI_VENDOR_ID 0x0b3c +#define OLIVETTI_PRODUCT_OLICARD100 0xc000 + static struct usb_device_id option_ids[] = { { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, @@ -621,6 +645,180 @@ static struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0160, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1060, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1061, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1062, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1063, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1064, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1065, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1066, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1067, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1068, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1069, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1070, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1071, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1072, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1073, 0xff, 0xff, 0xff) }, + { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1074, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1075, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1076, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1077, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1078, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1079, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1080, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1081, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1082, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1083, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1084, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1085, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1086, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1087, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1088, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1089, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1090, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1091, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1092, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1093, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1094, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1095, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1096, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1097, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1098, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1099, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1100, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1101, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1102, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1103, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1104, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1105, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1106, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1107, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1108, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1109, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1110, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1111, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1112, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1113, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1114, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1115, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1116, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1117, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1118, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 
0x1119, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1120, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1121, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1122, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1123, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1124, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1125, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1126, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1127, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1128, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1129, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1130, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1131, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1132, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1133, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1134, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1135, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1136, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1137, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1138, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1139, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1140, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1141, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1142, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1143, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1144, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1145, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1146, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1147, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1148, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1149, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1150, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1151, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1152, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1153, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1154, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1155, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1156, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1157, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1158, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1159, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1160, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1161, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1162, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1163, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1164, 0xff, 0xff, 0xff) }, + { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1165, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1166, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1167, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1168, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1169, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1170, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1244, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1246, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1248, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1249, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1250, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1251, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1253, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1257, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1258, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1259, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1260, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1261, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1262, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1263, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1264, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1265, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1266, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1267, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1274, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1275, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1276, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1277, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1278, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1279, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1280, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1281, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1282, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 
0x1283, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1284, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1285, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1286, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1287, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1288, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1289, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1290, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1291, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1292, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1293, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1294, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1295, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1296, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1297, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) }, @@ -649,6 +847,27 @@ static struct usb_device_id option_ids[] = { { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) }, { USB_DEVICE(FOUR_G_SYSTEMS_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14) }, { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) }, + /* Pirelli */ + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_1)}, + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_2)}, + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1004)}, + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1005)}, + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1006)}, + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1007)}, + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1008)}, + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1009)}, + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100A)}, + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100B) }, + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100C) }, + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100D) }, + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100E) }, + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) }, + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)}, + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)}, + + { USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) }, + + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); @@ -771,12 +990,19 @@ static int option_probe(struct usb_serial *serial, const struct usb_device_id *id) { struct option_intf_private *data; + /* D-Link DWM 652 still exposes CD-Rom emulation interface in modem mode */ if (serial->dev->descriptor.idVendor == DLINK_VENDOR_ID && serial->dev->descriptor.idProduct == DLINK_PRODUCT_DWM_652 && serial->interface->cur_altsetting->desc.bInterfaceClass == 0x8) return -ENODEV; + /* Bandrich modem and AT command interface is 0xff */ + if ((serial->dev->descriptor.idVendor == BANDRICH_VENDOR_ID || + 
serial->dev->descriptor.idVendor == PIRELLI_VENDOR_ID) && + serial->interface->cur_altsetting->desc.bInterfaceClass != 0xff) + return -ENODEV; + data = serial->private = kzalloc(sizeof(struct option_intf_private), GFP_KERNEL); if (!data) return -ENOMEM; diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c index ad1f923..c140870 100644 --- a/drivers/usb/serial/visor.c +++ b/drivers/usb/serial/visor.c @@ -249,6 +249,7 @@ static struct usb_serial_driver clie_3_5_device = { .throttle = visor_throttle, .unthrottle = visor_unthrottle, .attach = clie_3_5_startup, + .release = visor_release, .write = visor_write, .write_room = visor_write_room, .write_bulk_callback = visor_write_bulk_callback, diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 98b549b..b6e0e0e 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -1858,6 +1858,21 @@ UNUSUAL_DEV( 0x1652, 0x6600, 0x0201, 0x0201, US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_IGNORE_RESIDUE ), +/* Reported by Hans de Goede + * These Appotech controllers are found in Picture Frames, they provide a + * (buggy) emulation of a cdrom drive which contains the windows software + * Uploading of pictures happens over the corresponding /dev/sg device. */ +UNUSUAL_DEV( 0x1908, 0x1315, 0x0000, 0x0000, + "BUILDWIN", + "Photo Frame", + US_SC_DEVICE, US_PR_DEVICE, NULL, + US_FL_BAD_SENSE ), +UNUSUAL_DEV( 0x1908, 0x1320, 0x0000, 0x0000, + "BUILDWIN", + "Photo Frame", + US_SC_DEVICE, US_PR_DEVICE, NULL, + US_FL_BAD_SENSE ), + UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001, "ST", "2A", diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c index eab33f1..7b547f5 100644 --- a/drivers/xen/xenbus/xenbus_xs.c +++ b/drivers/xen/xenbus/xenbus_xs.c @@ -499,7 +499,7 @@ int xenbus_printf(struct xenbus_transaction t, #define PRINTF_BUFFER_SIZE 4096 char *printf_buffer; - printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL); + printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_NOIO | __GFP_HIGH); if (printf_buffer == NULL) return -ENOMEM; diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index 6df6d6e..9be949a 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c @@ -159,6 +159,9 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name, int ret; struct posix_acl *acl = NULL; + if (!is_owner_or_cap(dentry->d_inode)) + return -EPERM; + if (value) { acl = posix_acl_from_xattr(value, size); if (acl == NULL) { diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 5646727..05a9b77 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -95,8 +95,10 @@ extern struct cifsFileInfo *cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle, struct file *file, struct vfsmount *mnt, unsigned int oflags); extern int cifs_posix_open(char *full_path, struct inode **pinode, - struct vfsmount *mnt, int mode, int oflags, - __u32 *poplock, __u16 *pnetfid, int xid); + struct vfsmount *mnt, + struct super_block *sb, + int mode, int oflags, + __u32 *poplock, __u16 *pnetfid, int xid); extern void cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, FILE_UNIX_BASIC_INFO *info, struct cifs_sb_info *cifs_sb); diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 6ccf726..9e9d48f 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -183,13 +183,14 @@ cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle, } int cifs_posix_open(char *full_path, struct inode **pinode, - struct vfsmount *mnt, int mode, int oflags, - __u32 *poplock, __u16 *pnetfid, int xid) + struct vfsmount *mnt, struct 
super_block *sb, + int mode, int oflags, + __u32 *poplock, __u16 *pnetfid, int xid) { int rc; FILE_UNIX_BASIC_INFO *presp_data; __u32 posix_flags = 0; - struct cifs_sb_info *cifs_sb = CIFS_SB(mnt->mnt_sb); + struct cifs_sb_info *cifs_sb = CIFS_SB(sb); struct cifs_fattr fattr; cFYI(1, ("posix open %s", full_path)); @@ -242,7 +243,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode, /* get new inode and set it up */ if (*pinode == NULL) { - *pinode = cifs_iget(mnt->mnt_sb, &fattr); + *pinode = cifs_iget(sb, &fattr); if (!*pinode) { rc = -ENOMEM; goto posix_open_ret; @@ -251,7 +252,8 @@ int cifs_posix_open(char *full_path, struct inode **pinode, cifs_fattr_to_inode(*pinode, &fattr); } - cifs_new_fileinfo(*pinode, *pnetfid, NULL, mnt, oflags); + if (mnt) + cifs_new_fileinfo(*pinode, *pnetfid, NULL, mnt, oflags); posix_open_ret: kfree(presp_data); @@ -315,13 +317,14 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, if (nd && (nd->flags & LOOKUP_OPEN)) oflags = nd->intent.open.flags; else - oflags = FMODE_READ; + oflags = FMODE_READ | SMB_O_CREAT; if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) && (CIFS_UNIX_POSIX_PATH_OPS_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))) { - rc = cifs_posix_open(full_path, &newinode, nd->path.mnt, - mode, oflags, &oplock, &fileHandle, xid); + rc = cifs_posix_open(full_path, &newinode, + nd ? nd->path.mnt : NULL, + inode->i_sb, mode, oflags, &oplock, &fileHandle, xid); /* EIO could indicate that (posix open) operation is not supported, despite what server claimed in capability negotation. EREMOTE indicates DFS junction, which is not @@ -678,6 +681,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, (nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open && (nd->intent.open.flags & O_CREAT)) { rc = cifs_posix_open(full_path, &newInode, nd->path.mnt, + parent_dir_inode->i_sb, nd->intent.open.create_mode, nd->intent.open.flags, &oplock, &fileHandle, xid); diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 057e1da..8639dd7 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -297,10 +297,12 @@ int cifs_open(struct inode *inode, struct file *file) (CIFS_UNIX_POSIX_PATH_OPS_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))) { int oflags = (int) cifs_posix_convert_flags(file->f_flags); + oflags |= SMB_O_CREAT; /* can not refresh inode info since size could be stale */ rc = cifs_posix_open(full_path, &inode, file->f_path.mnt, - cifs_sb->mnt_file_mode /* ignored */, - oflags, &oplock, &netfid, xid); + inode->i_sb, + cifs_sb->mnt_file_mode /* ignored */, + oflags, &oplock, &netfid, xid); if (rc == 0) { cFYI(1, ("posix open succeeded")); /* no need for special case handling of setting mode @@ -512,8 +514,9 @@ reopen_error_exit: int oflags = (int) cifs_posix_convert_flags(file->f_flags); /* can not refresh inode info since size could be stale */ rc = cifs_posix_open(full_path, NULL, file->f_path.mnt, - cifs_sb->mnt_file_mode /* ignored */, - oflags, &oplock, &netfid, xid); + inode->i_sb, + cifs_sb->mnt_file_mode /* ignored */, + oflags, &oplock, &netfid, xid); if (rc == 0) { cFYI(1, ("posix reopen succeeded")); goto reopen_success; diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c index 4cfab1c..d91e9d8 100644 --- a/fs/exofs/dir.c +++ b/fs/exofs/dir.c @@ -608,7 +608,7 @@ int exofs_make_empty(struct inode *inode, struct inode *parent) de->inode_no = cpu_to_le64(parent->i_ino); memcpy(de->name, PARENT_DIR, sizeof(PARENT_DIR)); exofs_set_de_type(de, inode); - kunmap_atomic(page, KM_USER0); + kunmap_atomic(kaddr, 
KM_USER0); err = exofs_commit_chunk(page, 0, chunk_size); fail: page_cache_release(page); diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c index 82c415b..e86f91e 100644 --- a/fs/ext4/move_extent.c +++ b/fs/ext4/move_extent.c @@ -964,6 +964,9 @@ mext_check_arguments(struct inode *orig_inode, return -EINVAL; } + if (IS_IMMUTABLE(donor_inode) || IS_APPEND(donor_inode)) + return -EPERM; + /* Ext4 move extent does not support swapfile */ if (IS_SWAPFILE(orig_inode) || IS_SWAPFILE(donor_inode)) { ext4_debug("ext4 move extent: The argument files should " diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index 3b2c554..433ea27 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c @@ -930,7 +930,8 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input) percpu_counter_add(&sbi->s_freeinodes_counter, EXT4_INODES_PER_GROUP(sb)); - if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) { + if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG) && + sbi->s_log_groups_per_flex) { ext4_group_t flex_group; flex_group = ext4_flex_group(sbi, input->group); atomic_add(input->free_blocks_count, diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 1a7c42c..e7fb19c 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -834,6 +834,12 @@ static long wb_check_old_data_flush(struct bdi_writeback *wb) unsigned long expired; long nr_pages; + /* + * When set to zero, disable periodic writeback + */ + if (!dirty_writeback_interval) + return 0; + expired = wb->last_old_flush + msecs_to_jiffies(dirty_writeback_interval * 10); if (time_before(jiffies, expired)) @@ -929,8 +935,12 @@ int bdi_writeback_task(struct bdi_writeback *wb) break; } - wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10); - schedule_timeout_interruptible(wait_jiffies); + if (dirty_writeback_interval) { + wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10); + schedule_timeout_interruptible(wait_jiffies); + } else + schedule(); + try_to_freeze(); } diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index e6dd2ae..b20bfcc 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -218,6 +218,11 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask) if (error) goto out_drop_write; + error = -EACCES; + if (!is_owner_or_cap(inode)) + goto out; + + error = 0; flags = ip->i_diskflags; new_flags = (flags & ~mask) | (reqflags & mask); if ((new_flags ^ flags) == 0) @@ -275,8 +280,10 @@ static int gfs2_set_flags(struct file *filp, u32 __user *ptr) { struct inode *inode = filp->f_path.dentry->d_inode; u32 fsflags, gfsflags; + if (get_user(fsflags, ptr)) return -EFAULT; + gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags); if (!S_ISDIR(inode->i_mode)) { if (gfsflags & GFS2_DIF_INHERIT_JDATA) diff --git a/fs/libfs.c b/fs/libfs.c index 6e8d17e..9e0fc76 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -415,7 +415,8 @@ int simple_write_end(struct file *file, struct address_space *mapping, * unique inode values later for this filesystem, then you must take care * to pass it an appropriate max_reserved value to avoid collisions. 
*/ -int simple_fill_super(struct super_block *s, int magic, struct tree_descr *files) +int simple_fill_super(struct super_block *s, unsigned long magic, + struct tree_descr *files) { struct inode *inode; struct dentry *root; diff --git a/fs/namespace.c b/fs/namespace.c index c768f73..bf6b6fe 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1121,8 +1121,15 @@ SYSCALL_DEFINE2(umount, char __user *, name, int, flags) { struct path path; int retval; + int lookup_flags = 0; - retval = user_path(name, &path); + if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW)) + return -EINVAL; + + if (!(flags & UMOUNT_NOFOLLOW)) + lookup_flags |= LOOKUP_FOLLOW; + + retval = user_path_at(AT_FDCWD, name, lookup_flags, &path); if (retval) goto out; retval = -EINVAL; diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index 171699e..06b2a26 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -120,7 +120,7 @@ u32 nfsd_supported_minorversion; int nfsd_vers(int vers, enum vers_op change) { if (vers < NFSD_MINVERS || vers >= NFSD_NRVERS) - return -1; + return 0; switch(change) { case NFSD_SET: nfsd_versions[vers] = nfsd_version[vers]; diff --git a/include/linux/fs.h b/include/linux/fs.h index 66b0705..899a4d6 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1308,6 +1308,8 @@ extern int send_sigurg(struct fown_struct *fown); #define MNT_FORCE 0x00000001 /* Attempt to forcibily umount */ #define MNT_DETACH 0x00000002 /* Just detach from the tree */ #define MNT_EXPIRE 0x00000004 /* Mark for expiry */ +#define UMOUNT_NOFOLLOW 0x00000008 /* Don't follow symlink on umount */ +#define UMOUNT_UNUSED 0x80000000 /* Flag guaranteed to be unused */ extern struct list_head super_blocks; extern spinlock_t sb_lock; @@ -2359,7 +2361,7 @@ extern const struct file_operations simple_dir_operations; extern const struct inode_operations simple_dir_inode_operations; struct tree_descr { char *name; const struct file_operations *ops; int mode; }; struct dentry *d_alloc_name(struct dentry *, const char *); -extern int simple_fill_super(struct super_block *, int, struct tree_descr *); +extern int simple_fill_super(struct super_block *, unsigned long, struct tree_descr *); extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count); extern void simple_release_fs(struct vfsmount **mount, int *count); diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 0be8243..be29806 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2322,6 +2322,7 @@ #define PCI_VENDOR_ID_JMICRON 0x197B #define PCI_DEVICE_ID_JMICRON_JMB360 0x2360 #define PCI_DEVICE_ID_JMICRON_JMB361 0x2361 +#define PCI_DEVICE_ID_JMICRON_JMB362 0x2362 #define PCI_DEVICE_ID_JMICRON_JMB363 0x2363 #define PCI_DEVICE_ID_JMICRON_JMB365 0x2365 #define PCI_DEVICE_ID_JMICRON_JMB366 0x2366 diff --git a/include/linux/swap.h b/include/linux/swap.h index a2602a8..d8029c2 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -223,21 +223,11 @@ static inline void lru_cache_add_anon(struct page *page) __lru_cache_add(page, LRU_INACTIVE_ANON); } -static inline void lru_cache_add_active_anon(struct page *page) -{ - __lru_cache_add(page, LRU_ACTIVE_ANON); -} - static inline void lru_cache_add_file(struct page *page) { __lru_cache_add(page, LRU_INACTIVE_FILE); } -static inline void lru_cache_add_active_file(struct page *page) -{ - __lru_cache_add(page, LRU_ACTIVE_FILE); -} - /* linux/mm/vmscan.c */ extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t 
*mask); diff --git a/include/linux/tboot.h b/include/linux/tboot.h index bf2a0c7..1dba6ee 100644 --- a/include/linux/tboot.h +++ b/include/linux/tboot.h @@ -150,6 +150,7 @@ extern int tboot_force_iommu(void); #else +#define tboot_enabled() 0 #define tboot_probe() do { } while (0) #define tboot_shutdown(shutdown_type) do { } while (0) #define tboot_sleep(sleep_state, pm1a_control, pm1b_control) \ diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index ff30177..597f8e2 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -778,6 +778,7 @@ int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len, struct iovec *data); void sctp_chunk_free(struct sctp_chunk *); void *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data); +void *sctp_addto_chunk_fixed(struct sctp_chunk *, int len, const void *data); struct sctp_chunk *sctp_chunkify(struct sk_buff *, const struct sctp_association *, struct sock *); diff --git a/include/trace/events/signal.h b/include/trace/events/signal.h index a510b75..32c0697 100644 --- a/include/trace/events/signal.h +++ b/include/trace/events/signal.h @@ -10,7 +10,8 @@ #define TP_STORE_SIGINFO(__entry, info) \ do { \ - if (info == SEND_SIG_NOINFO) { \ + if (info == SEND_SIG_NOINFO || \ + info == SEND_SIG_FORCED) { \ __entry->errno = 0; \ __entry->code = SI_USER; \ } else if (info == SEND_SIG_PRIV) { \ diff --git a/kernel/compat.c b/kernel/compat.c index f6c204f..180d188 100644 --- a/kernel/compat.c +++ b/kernel/compat.c @@ -494,29 +494,26 @@ asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len, { int ret; cpumask_var_t mask; - unsigned long *k; - unsigned int min_length = cpumask_size(); - - if (nr_cpu_ids <= BITS_PER_COMPAT_LONG) - min_length = sizeof(compat_ulong_t); - if (len < min_length) + if ((len * BITS_PER_BYTE) < nr_cpu_ids) + return -EINVAL; + if (len & (sizeof(compat_ulong_t)-1)) return -EINVAL; if (!alloc_cpumask_var(&mask, GFP_KERNEL)) return -ENOMEM; ret = sched_getaffinity(pid, mask); - if (ret < 0) - goto out; + if (ret == 0) { + size_t retlen = min_t(size_t, len, cpumask_size()); - k = cpumask_bits(mask); - ret = compat_put_bitmap(user_mask_ptr, k, min_length * 8); - if (ret == 0) - ret = min_length; - -out: + if (compat_put_bitmap(user_mask_ptr, cpumask_bits(mask), retlen * 8)) + ret = -EFAULT; + else + ret = retlen; + } free_cpumask_var(mask); + return ret; } diff --git a/kernel/mutex.c b/kernel/mutex.c index 632f04c..4c0b7b3 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c @@ -172,6 +172,13 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, struct thread_info *owner; /* + * If we own the BKL, then don't spin. The owner of + * the mutex might be waiting on us to release the BKL. + */ + if (unlikely(current->lock_depth >= 0)) + break; + + /* * If there's an owner, wait for it to either * release the lock or go to sleep. 
*/ diff --git a/kernel/perf_event.c b/kernel/perf_event.c index e928e1a..fbbe79b 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -1417,13 +1417,16 @@ do { \ divisor = nsec * frequency; } + if (!divisor) + return dividend; + return div64_u64(dividend, divisor); } static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count) { struct hw_perf_event *hwc = &event->hw; - u64 period, sample_period; + s64 period, sample_period; s64 delta; period = perf_calculate_period(event, nsec, count); @@ -4712,8 +4715,8 @@ SYSCALL_DEFINE5(perf_event_open, struct perf_event_context *ctx; struct file *event_file = NULL; struct file *group_file = NULL; + int event_fd; int fput_needed = 0; - int fput_needed2 = 0; int err; /* for future expandability... */ @@ -4734,12 +4737,18 @@ SYSCALL_DEFINE5(perf_event_open, return -EINVAL; } + event_fd = get_unused_fd_flags(O_RDWR); + if (event_fd < 0) + return event_fd; + /* * Get the target context (task or percpu): */ ctx = find_get_context(pid, cpu); - if (IS_ERR(ctx)) - return PTR_ERR(ctx); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto err_fd; + } /* * Look up the group leader (we will attach this event to it): @@ -4779,13 +4788,11 @@ SYSCALL_DEFINE5(perf_event_open, if (IS_ERR(event)) goto err_put_context; - err = anon_inode_getfd("[perf_event]", &perf_fops, event, O_RDWR); - if (err < 0) - goto err_free_put_context; - - event_file = fget_light(err, &fput_needed2); - if (!event_file) + event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR); + if (IS_ERR(event_file)) { + err = PTR_ERR(event_file); goto err_free_put_context; + } if (flags & PERF_FLAG_FD_OUTPUT) { err = perf_event_set_output(event, group_fd); @@ -4806,19 +4813,19 @@ SYSCALL_DEFINE5(perf_event_open, list_add_tail(&event->owner_entry, ¤t->perf_event_list); mutex_unlock(¤t->perf_event_mutex); -err_fput_free_put_context: - fput_light(event_file, fput_needed2); + fput_light(group_file, fput_needed); + fd_install(event_fd, event_file); + return event_fd; +err_fput_free_put_context: + fput(event_file); err_free_put_context: - if (err < 0) - free_event(event); - + free_event(event); err_put_context: - if (err < 0) - put_ctx(ctx); - fput_light(group_file, fput_needed); - + put_ctx(ctx); +err_fd: + put_unused_fd(event_fd); return err; } diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index 4954407..5e76d22 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c @@ -559,14 +559,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock, new_timer->it_id = (timer_t) new_timer_id; new_timer->it_clock = which_clock; new_timer->it_overrun = -1; - error = CLOCK_DISPATCH(which_clock, timer_create, (new_timer)); - if (error) - goto out; - /* - * return the timer_id now. The next step is hard to - * back out if there is an error. 
- */ if (copy_to_user(created_timer_id, &new_timer_id, sizeof (new_timer_id))) { error = -EFAULT; @@ -597,6 +590,10 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock, new_timer->sigq->info.si_tid = new_timer->it_id; new_timer->sigq->info.si_code = SI_TIMER; + error = CLOCK_DISPATCH(which_clock, timer_create, (new_timer)); + if (error) + goto out; + spin_lock_irq(&current->sighand->siglock); new_timer->it_signal = current->signal; list_add(&new_timer->list, &current->signal->posix_timers); diff --git a/kernel/signal.c b/kernel/signal.c index 934ae5e..6ca731b 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -625,7 +625,7 @@ static inline bool si_fromuser(const struct siginfo *info) static int check_kill_permission(int sig, struct siginfo *info, struct task_struct *t) { - const struct cred *cred = current_cred(), *tcred; + const struct cred *cred, *tcred; struct pid *sid; int error; @@ -639,8 +639,10 @@ static int check_kill_permission(int sig, struct siginfo *info, if (error) return error; + cred = current_cred(); tcred = __task_cred(t); - if ((cred->euid ^ tcred->suid) && + if (!same_thread_group(current, t) && + (cred->euid ^ tcred->suid) && (cred->euid ^ tcred->uid) && (cred->uid ^ tcred->suid) && (cred->uid ^ tcred->uid) && diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 45cfb6d..7d12c17 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -102,9 +103,6 @@ static inline void ftrace_enable_cpu(void) static cpumask_var_t __read_mostly tracing_buffer_mask; -/* Define which cpu buffers are currently read in trace_pipe */ -static cpumask_var_t tracing_reader_cpumask; - #define for_each_tracing_cpu(cpu) \ for_each_cpu(cpu, tracing_buffer_mask) @@ -243,12 +241,91 @@ static struct tracer *current_trace __read_mostly; /* * trace_types_lock is used to protect the trace_types list. - * This lock is also used to keep user access serialized. - * Accesses from userspace will grab this lock while userspace - * activities happen inside the kernel. */ static DEFINE_MUTEX(trace_types_lock); +/* + * serialize access to the ring buffer + * + * The ring buffer serializes readers, but that is only low-level protection. + * The validity of the events (as returned by ring_buffer_peek() etc.) + * is not protected by the ring buffer. + * + * The content of events may become garbage if we allow other processes to + * consume these events concurrently: + * A) the page of the consumed events may become a normal page + * (not a reader page) in the ring buffer, and this page will be rewritten + * by the events producer. + * B) The page of the consumed events may become a page for splice_read, + * and this page will be returned to the system. + * + * These primitives allow multiple processes to access different cpu ring + * buffers concurrently. + * + * These primitives don't distinguish read-only and read-consume access. + * Multiple read-only accesses are also serialized. + */ + +#ifdef CONFIG_SMP +static DECLARE_RWSEM(all_cpu_access_lock); +static DEFINE_PER_CPU(struct mutex, cpu_access_lock); + +static inline void trace_access_lock(int cpu) +{ + if (cpu == TRACE_PIPE_ALL_CPU) { + /* gain it for accessing the whole ring buffer. */ + down_write(&all_cpu_access_lock); + } else { + /* gain it for accessing a cpu ring buffer. */ + + /* Firstly block other trace_access_lock(TRACE_PIPE_ALL_CPU). */ + down_read(&all_cpu_access_lock); + + /* Secondly block other access to this @cpu ring buffer.
*/ + mutex_lock(&per_cpu(cpu_access_lock, cpu)); + } +} + +static inline void trace_access_unlock(int cpu) +{ + if (cpu == TRACE_PIPE_ALL_CPU) { + up_write(&all_cpu_access_lock); + } else { + mutex_unlock(&per_cpu(cpu_access_lock, cpu)); + up_read(&all_cpu_access_lock); + } +} + +static inline void trace_access_lock_init(void) +{ + int cpu; + + for_each_possible_cpu(cpu) + mutex_init(&per_cpu(cpu_access_lock, cpu)); +} + +#else + +static DEFINE_MUTEX(access_lock); + +static inline void trace_access_lock(int cpu) +{ + (void)cpu; + mutex_lock(&access_lock); +} + +static inline void trace_access_unlock(int cpu) +{ + (void)cpu; + mutex_unlock(&access_lock); +} + +static inline void trace_access_lock_init(void) +{ +} + +#endif + /* trace_wait is a waitqueue for tasks blocked on trace_poll */ static DECLARE_WAIT_QUEUE_HEAD(trace_wait); @@ -1601,12 +1678,6 @@ static void tracing_iter_reset(struct trace_iterator *iter, int cpu) } /* - * No necessary locking here. The worst thing which can - * happen is loosing events consumed at the same time - * by a trace_pipe reader. - * Other than that, we don't risk to crash the ring buffer - * because it serializes the readers. - * * The current tracer is copied to avoid a global locking * all around. */ @@ -1662,12 +1733,16 @@ static void *s_start(struct seq_file *m, loff_t *pos) } trace_event_read_lock(); + trace_access_lock(cpu_file); return p; } static void s_stop(struct seq_file *m, void *p) { + struct trace_iterator *iter = m->private; + atomic_dec(&trace_record_cmdline_disabled); + trace_access_unlock(iter->cpu_file); trace_event_read_unlock(); } @@ -2858,22 +2933,6 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp) mutex_lock(&trace_types_lock); - /* We only allow one reader per cpu */ - if (cpu_file == TRACE_PIPE_ALL_CPU) { - if (!cpumask_empty(tracing_reader_cpumask)) { - ret = -EBUSY; - goto out; - } - cpumask_setall(tracing_reader_cpumask); - } else { - if (!cpumask_test_cpu(cpu_file, tracing_reader_cpumask)) - cpumask_set_cpu(cpu_file, tracing_reader_cpumask); - else { - ret = -EBUSY; - goto out; - } - } - /* create a buffer to store the information to pass to userspace */ iter = kzalloc(sizeof(*iter), GFP_KERNEL); if (!iter) { @@ -2929,12 +2988,6 @@ static int tracing_release_pipe(struct inode *inode, struct file *file) mutex_lock(&trace_types_lock); - if (iter->cpu_file == TRACE_PIPE_ALL_CPU) - cpumask_clear(tracing_reader_cpumask); - else - cpumask_clear_cpu(iter->cpu_file, tracing_reader_cpumask); - - if (iter->trace->pipe_close) iter->trace->pipe_close(iter); @@ -3096,6 +3149,7 @@ waitagain: iter->pos = -1; trace_event_read_lock(); + trace_access_lock(iter->cpu_file); while (find_next_entry_inc(iter) != NULL) { enum print_line_t ret; int len = iter->seq.len; @@ -3112,6 +3166,7 @@ waitagain: if (iter->seq.len >= cnt) break; } + trace_access_unlock(iter->cpu_file); trace_event_read_unlock(); /* Now copy what we have to the user */ @@ -3237,6 +3292,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp, } trace_event_read_lock(); + trace_access_lock(iter->cpu_file); /* Fill as many pages as possible. 
*/ for (i = 0, rem = len; i < PIPE_BUFFERS && rem; i++) { @@ -3260,6 +3316,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp, trace_seq_init(&iter->seq); } + trace_access_unlock(iter->cpu_file); trace_event_read_unlock(); mutex_unlock(&iter->mutex); @@ -3561,10 +3618,12 @@ tracing_buffers_read(struct file *filp, char __user *ubuf, info->read = 0; + trace_access_lock(info->cpu); ret = ring_buffer_read_page(info->tr->buffer, &info->spare, count, info->cpu, 0); + trace_access_unlock(info->cpu); if (ret < 0) return 0; @@ -3692,6 +3751,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, len &= PAGE_MASK; } + trace_access_lock(info->cpu); entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu); for (i = 0; i < PIPE_BUFFERS && len && entries; i++, len -= PAGE_SIZE) { @@ -3739,6 +3799,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu); } + trace_access_unlock(info->cpu); spd.nr_pages = i; /* did we read anything? */ @@ -4175,6 +4236,8 @@ static __init int tracer_init_debugfs(void) struct dentry *d_tracer; int cpu; + trace_access_lock_init(); + d_tracer = tracing_init_dentry(); trace_create_file("tracing_enabled", 0644, d_tracer, @@ -4409,9 +4472,6 @@ __init static int tracer_alloc_buffers(void) if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL)) goto out_free_buffer_mask; - if (!zalloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL)) - goto out_free_tracing_cpumask; - /* To save memory, keep the ring buffer size to its minimum */ if (ring_buffer_expanded) ring_buf_size = trace_buf_size; @@ -4469,8 +4529,6 @@ __init static int tracer_alloc_buffers(void) return 0; out_free_cpumask: - free_cpumask_var(tracing_reader_cpumask); -out_free_tracing_cpumask: free_cpumask_var(tracing_cpumask); out_free_buffer_mask: free_cpumask_var(tracing_buffer_mask); diff --git a/mm/filemap.c b/mm/filemap.c index 698ea80..96398d3 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -441,7 +441,7 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping, /* * Splice_read and readahead add shmem/tmpfs pages into the page cache * before shmem_readpage has a chance to mark them as SwapBacked: they - * need to go on the active_anon lru below, and mem_cgroup_cache_charge + * need to go on the anon lru below, and mem_cgroup_cache_charge * (called in add_to_page_cache) needs to know where they're going too. */ if (mapping_cap_swap_backed(mapping)) @@ -452,7 +452,7 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping, if (page_is_file_cache(page)) lru_cache_add_file(page); else - lru_cache_add_active_anon(page); + lru_cache_add_anon(page); } return ret; } @@ -1099,6 +1099,12 @@ page_not_up_to_date_locked: } readpage: + /* + * A previous I/O error may have been due to temporary + * failures, eg. multipath errors. + * PG_error will be set again if readpage fails. + */ + ClearPageError(page); /* Start the actual read. The read will unlock the page. 
*/ error = mapping->a_ops->readpage(filp, page); diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig index a10d508..9f6f8d3 100644 --- a/net/mac80211/Kconfig +++ b/net/mac80211/Kconfig @@ -15,8 +15,12 @@ comment "CFG80211 needs to be enabled for MAC80211" if MAC80211 != n +config MAC80211_HAS_RC + def_bool n + config MAC80211_RC_PID bool "PID controller based rate control algorithm" if EMBEDDED + select MAC80211_HAS_RC ---help--- This option enables a TX rate control algorithm for mac80211 that uses a PID controller to select the TX @@ -24,12 +28,14 @@ config MAC80211_RC_PID config MAC80211_RC_MINSTREL bool "Minstrel" if EMBEDDED + select MAC80211_HAS_RC default y ---help--- This option enables the 'minstrel' TX rate control algorithm choice prompt "Default rate control algorithm" + depends on MAC80211_HAS_RC default MAC80211_RC_DEFAULT_MINSTREL ---help--- This option selects the default rate control algorithm @@ -62,6 +68,9 @@ config MAC80211_RC_DEFAULT endif +comment "Some wireless drivers require a rate control algorithm" + depends on MAC80211_HAS_RC=n + config MAC80211_MESH bool "Enable mac80211 mesh networking (pre-802.11s) support" depends on MAC80211 && EXPERIMENTAL diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 9ae1a47..e2b4345 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -98,9 +98,6 @@ static int ieee80211_change_iface(struct wiphy *wiphy, params->mesh_id_len, params->mesh_id); - if (sdata->vif.type != NL80211_IFTYPE_MONITOR || !flags) - return 0; - if (type == NL80211_IFTYPE_AP_VLAN && params && params->use_4addr == 0) rcu_assign_pointer(sdata->u.vlan.sta, NULL); @@ -108,7 +105,9 @@ static int ieee80211_change_iface(struct wiphy *wiphy, params && params->use_4addr >= 0) sdata->u.mgd.use_4addr = params->use_4addr; - sdata->u.mntr_flags = *flags; + if (sdata->vif.type == NL80211_IFTYPE_MONITOR && flags) + sdata->u.mntr_flags = *flags; + return 0; } diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index edfa036..48d8fdf 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1399,7 +1399,8 @@ ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc) (rx->key || rx->sdata->drop_unencrypted))) return -EACCES; if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) { - if (unlikely(ieee80211_is_unicast_robust_mgmt_frame(rx->skb) && + if (unlikely(!ieee80211_has_protected(fc) && + ieee80211_is_unicast_robust_mgmt_frame(rx->skb) && rx->key)) return -EACCES; /* BIP does not use Protected field, so need to check MMIE */ diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 1fdc0a5..9b50183 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -501,7 +501,8 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx) struct ieee80211_hdr *hdr = (void *)tx->skb->data; struct ieee80211_supported_band *sband; struct ieee80211_rate *rate; - int i, len; + int i; + u32 len; bool inval = false, rts = false, short_preamble = false; struct ieee80211_tx_rate_control txrc; u32 sta_flags; @@ -510,7 +511,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx) sband = tx->local->hw.wiphy->bands[tx->channel->band]; - len = min_t(int, tx->skb->len + FCS_LEN, + len = min_t(u32, tx->skb->len + FCS_LEN, tx->local->hw.wiphy->frag_threshold); /* set up the tx rate control struct we give the RC algo */ diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 224db01..35fa684 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -107,7 +107,7 @@ static const struct sctp_paramhdr prsctp_param = { cpu_to_be16(sizeof(struct 
sctp_paramhdr)), }; -/* A helper to initialize to initialize an op error inside a +/* A helper to initialize an op error inside a * provided chunk, as most cause codes will be embedded inside an * abort chunk. */ @@ -124,6 +124,29 @@ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code, chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err); } +/* A helper to initialize an op error inside a + * provided chunk, as most cause codes will be embedded inside an + * abort chunk. Differs from sctp_init_cause in that it won't oops + * if there isn't enough space in the op error chunk + */ +int sctp_init_cause_fixed(struct sctp_chunk *chunk, __be16 cause_code, + size_t paylen) +{ + sctp_errhdr_t err; + __u16 len; + + /* Cause code constants are now defined in network order. */ + err.cause = cause_code; + len = sizeof(sctp_errhdr_t) + paylen; + err.length = htons(len); + + if (skb_tailroom(chunk->skb) < len) + return -ENOSPC; + chunk->subh.err_hdr = sctp_addto_chunk_fixed(chunk, + sizeof(sctp_errhdr_t), + &err); + return 0; +} /* 3.3.2 Initiation (INIT) (1) * * This chunk is used to initiate a SCTP association between two @@ -1131,6 +1154,24 @@ nodata: return retval; } +/* Create an Operation Error chunk of a fixed size, + * specifically, max(asoc->pathmtu, SCTP_DEFAULT_MAXSEGMENT). + * This is a helper function to allocate an error chunk + * for those invalid parameter codes in which we may not want + * to report all the errors, if the incoming chunk is large + */ +static inline struct sctp_chunk *sctp_make_op_error_fixed( + const struct sctp_association *asoc, + const struct sctp_chunk *chunk) +{ + size_t size = asoc ? asoc->pathmtu : 0; + + if (!size) + size = SCTP_DEFAULT_MAXSEGMENT; + + return sctp_make_op_error_space(asoc, chunk, size); +} + /* Create an Operation Error chunk. */ struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc, const struct sctp_chunk *chunk, @@ -1373,6 +1414,18 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data) return target; } +/* Append bytes to the end of a chunk. Returns NULL if there isn't sufficient + * space in the chunk + */ +void *sctp_addto_chunk_fixed(struct sctp_chunk *chunk, + int len, const void *data) +{ + if (skb_tailroom(chunk->skb) >= len) + return sctp_addto_chunk(chunk, len, data); + else + return NULL; +} + /* Append bytes from user space to the end of a chunk. Will panic if * chunk is not big enough. * Returns a kernel err value. @@ -1976,13 +2029,12 @@ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc, * returning multiple unknown parameters.
*/ if (NULL == *errp) - *errp = sctp_make_op_error_space(asoc, chunk, - ntohs(chunk->chunk_hdr->length)); + *errp = sctp_make_op_error_fixed(asoc, chunk); if (*errp) { - sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM, + sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM, WORD_ROUND(ntohs(param.p->length))); - sctp_addto_chunk(*errp, + sctp_addto_chunk_fixed(*errp, WORD_ROUND(ntohs(param.p->length)), param.v); } else { diff --git a/net/wireless/core.h b/net/wireless/core.h index 4ef3efc..35b7121 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -378,6 +378,8 @@ int rdev_set_freq(struct cfg80211_registered_device *rdev, struct wireless_dev *for_wdev, int freq, enum nl80211_channel_type channel_type); +u16 cfg80211_calculate_bitrate(struct rate_info *rate); + #ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS #define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond) #else diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index a602843..7cb0d64 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -1637,39 +1637,6 @@ static int parse_station_flags(struct genl_info *info, return 0; } -static u16 nl80211_calculate_bitrate(struct rate_info *rate) -{ - int modulation, streams, bitrate; - - if (!(rate->flags & RATE_INFO_FLAGS_MCS)) - return rate->legacy; - - /* the formula below does only work for MCS values smaller than 32 */ - if (rate->mcs >= 32) - return 0; - - modulation = rate->mcs & 7; - streams = (rate->mcs >> 3) + 1; - - bitrate = (rate->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) ? - 13500000 : 6500000; - - if (modulation < 4) - bitrate *= (modulation + 1); - else if (modulation == 4) - bitrate *= (modulation + 2); - else - bitrate *= (modulation + 3); - - bitrate *= streams; - - if (rate->flags & RATE_INFO_FLAGS_SHORT_GI) - bitrate = (bitrate / 9) * 10; - - /* do NOT round down here */ - return (bitrate + 50000) / 100000; -} - static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq, int flags, struct net_device *dev, u8 *mac_addr, struct station_info *sinfo) @@ -1716,8 +1683,8 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq, if (!txrate) goto nla_put_failure; - /* nl80211_calculate_bitrate will return 0 for mcs >= 32 */ - bitrate = nl80211_calculate_bitrate(&sinfo->txrate); + /* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */ + bitrate = cfg80211_calculate_bitrate(&sinfo->txrate); if (bitrate > 0) NLA_PUT_U16(msg, NL80211_RATE_INFO_BITRATE, bitrate); diff --git a/net/wireless/util.c b/net/wireless/util.c index 59361fd..a3c841a 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -720,3 +720,36 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev, return err; } + +u16 cfg80211_calculate_bitrate(struct rate_info *rate) +{ + int modulation, streams, bitrate; + + if (!(rate->flags & RATE_INFO_FLAGS_MCS)) + return rate->legacy; + + /* the formula below does only work for MCS values smaller than 32 */ + if (rate->mcs >= 32) + return 0; + + modulation = rate->mcs & 7; + streams = (rate->mcs >> 3) + 1; + + bitrate = (rate->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) ? 
+			13500000 : 6500000;
+
+	if (modulation < 4)
+		bitrate *= (modulation + 1);
+	else if (modulation == 4)
+		bitrate *= (modulation + 2);
+	else
+		bitrate *= (modulation + 3);
+
+	bitrate *= streams;
+
+	if (rate->flags & RATE_INFO_FLAGS_SHORT_GI)
+		bitrate = (bitrate / 9) * 10;
+
+	/* do NOT round down here */
+	return (bitrate + 50000) / 100000;
+}
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 54face3..4198243 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -1257,10 +1257,7 @@ int cfg80211_wext_giwrate(struct net_device *dev,
 	if (!(sinfo.filled & STATION_INFO_TX_BITRATE))
 		return -EOPNOTSUPP;
 
-	rate->value = 0;
-
-	if (!(sinfo.txrate.flags & RATE_INFO_FLAGS_MCS))
-		rate->value = 100000 * sinfo.txrate.legacy;
+	rate->value = 100000 * cfg80211_calculate_bitrate(&sinfo.txrate);
 
 	return 0;
 }
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index 999e8a7..25a5183 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -198,7 +198,7 @@ HOSTCFLAGS_zconf.tab.o := -I$(src)
 HOSTLOADLIBES_qconf	= $(KC_QT_LIBS) -ldl
 HOSTCXXFLAGS_qconf.o	= $(KC_QT_CFLAGS) -D LKC_DIRECT_LINK
 
-HOSTLOADLIBES_gconf	= `pkg-config --libs gtk+-2.0 gmodule-2.0 libglade-2.0`
+HOSTLOADLIBES_gconf	= `pkg-config --libs gtk+-2.0 gmodule-2.0 libglade-2.0` -ldl
 HOSTCFLAGS_gconf.o	= `pkg-config --cflags gtk+-2.0 gmodule-2.0 libglade-2.0` \
 			  -D LKC_DIRECT_LINK
 
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 8ec0274..e031952 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -524,9 +524,8 @@ struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
 	struct key *keyring;
 	int bucket;
 
-	keyring = ERR_PTR(-EINVAL);
 	if (!name)
-		goto error;
+		return ERR_PTR(-EINVAL);
 
 	bucket = keyring_hash(name);
 
@@ -553,17 +552,18 @@ struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
 					   KEY_SEARCH) < 0)
 				continue;
 
-			/* we've got a match */
-			atomic_inc(&keyring->usage);
-			read_unlock(&keyring_name_lock);
-			goto error;
+			/* we've got a match but we might end up racing with
+			 * key_cleanup() if the keyring is currently 'dead'
+			 * (ie. it has a zero usage count) */
+			if (!atomic_inc_not_zero(&keyring->usage))
+				continue;
+			goto out;
 		}
 	}
 
-	read_unlock(&keyring_name_lock);
 	keyring = ERR_PTR(-ENOKEY);
-
- error:
+out:
+	read_unlock(&keyring_name_lock);
 	return keyring;
 
 } /* end find_keyring_by_name() */
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 5c23afb..931cfda 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -509,7 +509,7 @@ try_again:
 
 			ret = install_thread_keyring();
 			if (ret < 0) {
-				key = ERR_PTR(ret);
+				key_ref = ERR_PTR(ret);
 				goto error;
 			}
 			goto reget_creds;
@@ -527,7 +527,7 @@ try_again:
 
 			ret = install_process_keyring();
 			if (ret < 0) {
-				key = ERR_PTR(ret);
+				key_ref = ERR_PTR(ret);
 				goto error;
 			}
 			goto reget_creds;
@@ -586,7 +586,7 @@ try_again:
 
 	case KEY_SPEC_GROUP_KEYRING:
 		/* group keyrings are not yet supported */
-		key = ERR_PTR(-EINVAL);
+		key_ref = ERR_PTR(-EINVAL);
 		goto error;
 
 	case KEY_SPEC_REQKEY_AUTH_KEY:
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index a747871..0dc3418 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2263,16 +2263,23 @@ static int azx_dev_free(struct snd_device *device)
  * white/black-listing for position_fix
  */
 static struct snd_pci_quirk position_fix_list[] __devinitdata = {
+	SND_PCI_QUIRK(0x1025, 0x009f, "Acer Aspire 5110", POS_FIX_LPIB),
 	SND_PCI_QUIRK(0x1028, 0x01cc, "Dell D820", POS_FIX_LPIB),
 	SND_PCI_QUIRK(0x1028, 0x01de, "Dell Precision 390", POS_FIX_LPIB),
 	SND_PCI_QUIRK(0x1028, 0x01f6, "Dell Latitude 131L", POS_FIX_LPIB),
 	SND_PCI_QUIRK(0x103c, 0x306d, "HP dv3", POS_FIX_LPIB),
-	SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
 	SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
+	SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB),
+	SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB),
+	SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB),
+	SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
+	SND_PCI_QUIRK(0x1179, 0xff10, "Toshiba A100-259", POS_FIX_LPIB),
+	SND_PCI_QUIRK(0x1297, 0x3166, "Shuttle", POS_FIX_LPIB),
 	SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
 	SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
 	SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB),
 	SND_PCI_QUIRK(0x1565, 0x8218, "Biostar Microtech", POS_FIX_LPIB),
+	SND_PCI_QUIRK(0x1849, 0x0888, "775Dual-VSTA", POS_FIX_LPIB),
 	SND_PCI_QUIRK(0x8086, 0x2503, "DG965OT AAD63733-203", POS_FIX_LPIB),
 	SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB),
 	{}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index abfc558..187a467 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -9116,6 +9116,7 @@ static struct snd_pci_quirk alc882_ssid_cfg_tbl[] = {
 	SND_PCI_QUIRK(0x106b, 0x1000, "iMac 24", ALC885_IMAC24),
 	SND_PCI_QUIRK(0x106b, 0x2800, "AppleTV", ALC885_IMAC24),
 	SND_PCI_QUIRK(0x106b, 0x2c00, "MacbookPro rev3", ALC885_MBP3),
+	SND_PCI_QUIRK(0x106b, 0x3000, "iMac", ALC889A_MB31),
 	SND_PCI_QUIRK(0x106b, 0x3600, "Macbook 3,1", ALC889A_MB31),
 	SND_PCI_QUIRK(0x106b, 0x3800, "MacbookPro 4,1", ALC885_MBP3),
 	SND_PCI_QUIRK(0x106b, 0x3e00, "iMac 24 Aluminum", ALC885_IMAC24),
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index cb474c0..256ad3a 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -2070,12 +2070,12 @@ static struct snd_pci_quirk stac927x_cfg_tbl[] = {
 	SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_INTEL, 0xff00, 0x2000,
 			   "Intel D965", STAC_D965_3ST),
 	/* Dell 3 stack systems */
-	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f7, "Dell XPS M1730", STAC_DELL_3ST),
 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01dd, "Dell Dimension E520", STAC_DELL_3ST),
 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01ed, "Dell ", STAC_DELL_3ST),
 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f4, "Dell ", STAC_DELL_3ST),
 	/* Dell 3 stack systems with verb table in BIOS */
 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f3, "Dell Inspiron 1420", STAC_DELL_BIOS),
+	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f7, "Dell XPS M1730", STAC_DELL_BIOS),
 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0227, "Dell Vostro 1400 ", STAC_DELL_BIOS),
 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x022e, "Dell ", STAC_DELL_BIOS),
 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x022f, "Dell Inspiron 1525", STAC_DELL_BIOS),
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index 718ef91..b1be92a 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -423,8 +423,8 @@ static const struct soc_enum wm8350_enum[] = {
 	SOC_ENUM_SINGLE(WM8350_INPUT_MIXER_VOLUME, 15, 2, wm8350_lr),
 };
 
-static DECLARE_TLV_DB_LINEAR(pre_amp_tlv, -1200, 3525);
-static DECLARE_TLV_DB_LINEAR(out_pga_tlv, -5700, 600);
+static DECLARE_TLV_DB_SCALE(pre_amp_tlv, -1200, 3525, 0);
+static DECLARE_TLV_DB_SCALE(out_pga_tlv, -5700, 600, 0);
 static DECLARE_TLV_DB_SCALE(dac_pcm_tlv, -7163, 36, 1);
 static DECLARE_TLV_DB_SCALE(adc_pcm_tlv, -12700, 50, 1);
 static DECLARE_TLV_DB_SCALE(out_mix_tlv, -1500, 300, 1);
diff --git a/sound/soc/codecs/wm8400.c b/sound/soc/codecs/wm8400.c
index b432f4d..6390ffe 100644
--- a/sound/soc/codecs/wm8400.c
+++ b/sound/soc/codecs/wm8400.c
@@ -106,21 +106,21 @@ static void wm8400_codec_reset(struct snd_soc_codec *codec)
 	wm8400_reset_codec_reg_cache(wm8400->wm8400);
 }
 
-static const DECLARE_TLV_DB_LINEAR(rec_mix_tlv, -1500, 600);
+static const DECLARE_TLV_DB_SCALE(rec_mix_tlv, -1500, 600, 0);
 
-static const DECLARE_TLV_DB_LINEAR(in_pga_tlv, -1650, 3000);
+static const DECLARE_TLV_DB_SCALE(in_pga_tlv, -1650, 3000, 0);
 
-static const DECLARE_TLV_DB_LINEAR(out_mix_tlv, -2100, 0);
+static const DECLARE_TLV_DB_SCALE(out_mix_tlv, -2100, 0, 0);
 
-static const DECLARE_TLV_DB_LINEAR(out_pga_tlv, -7300, 600);
+static const DECLARE_TLV_DB_SCALE(out_pga_tlv, -7300, 600, 0);
 
-static const DECLARE_TLV_DB_LINEAR(out_omix_tlv, -600, 0);
+static const DECLARE_TLV_DB_SCALE(out_omix_tlv, -600, 0, 0);
 
-static const DECLARE_TLV_DB_LINEAR(out_dac_tlv, -7163, 0);
+static const DECLARE_TLV_DB_SCALE(out_dac_tlv, -7163, 0, 0);
 
-static const DECLARE_TLV_DB_LINEAR(in_adc_tlv, -7163, 1763);
+static const DECLARE_TLV_DB_SCALE(in_adc_tlv, -7163, 1763, 0);
 
-static const DECLARE_TLV_DB_LINEAR(out_sidetone_tlv, -3600, 0);
+static const DECLARE_TLV_DB_SCALE(out_sidetone_tlv, -3600, 0, 0);
 
 static int wm8400_outpga_put_volsw_vu(struct snd_kcontrol *kcontrol,
 	struct snd_ctl_elem_value *ucontrol)
@@ -439,7 +439,7 @@ static int outmixer_event (struct snd_soc_dapm_widget *w,
 /* INMIX dB values */
 static const unsigned int in_mix_tlv[] = {
 	TLV_DB_RANGE_HEAD(1),
-	0,7, TLV_DB_LINEAR_ITEM(-1200, 600),
+	0,7, TLV_DB_SCALE_ITEM(-1200, 600, 0),
 };
 
 /* Left In PGA Connections */
diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c
index 341481e..2f76f4d 100644
--- a/sound/soc/codecs/wm8990.c
+++ b/sound/soc/codecs/wm8990.c
@@ -110,21 +110,21 @@ static const u16 wm8990_reg[] = {
 
 #define wm8990_reset(c) snd_soc_write(c, WM8990_RESET, 0)
 
-static const DECLARE_TLV_DB_LINEAR(rec_mix_tlv, -1500, 600);
+static const DECLARE_TLV_DB_SCALE(rec_mix_tlv, -1500, 600, 0);
 
-static const DECLARE_TLV_DB_LINEAR(in_pga_tlv, -1650, 3000);
+static const DECLARE_TLV_DB_SCALE(in_pga_tlv, -1650, 3000, 0);
 
-static const DECLARE_TLV_DB_LINEAR(out_mix_tlv, 0, -2100);
+static const DECLARE_TLV_DB_SCALE(out_mix_tlv, 0, -2100, 0);
 
-static const DECLARE_TLV_DB_LINEAR(out_pga_tlv, -7300, 600);
+static const DECLARE_TLV_DB_SCALE(out_pga_tlv, -7300, 600, 0);
 
-static const DECLARE_TLV_DB_LINEAR(out_omix_tlv, -600, 0);
+static const DECLARE_TLV_DB_SCALE(out_omix_tlv, -600, 0, 0);
 
-static const DECLARE_TLV_DB_LINEAR(out_dac_tlv, -7163, 0);
+static const DECLARE_TLV_DB_SCALE(out_dac_tlv, -7163, 0, 0);
 
-static const DECLARE_TLV_DB_LINEAR(in_adc_tlv, -7163, 1763);
+static const DECLARE_TLV_DB_SCALE(in_adc_tlv, -7163, 1763, 0);
 
-static const DECLARE_TLV_DB_LINEAR(out_sidetone_tlv, -3600, 0);
+static const DECLARE_TLV_DB_SCALE(out_sidetone_tlv, -3600, 0, 0);
 
 static int wm899x_outpga_put_volsw_vu(struct snd_kcontrol *kcontrol,
 	struct snd_ctl_elem_value *ucontrol)
@@ -450,7 +450,7 @@ static int outmixer_event(struct snd_soc_dapm_widget *w,
 /* INMIX dB values */
 static const unsigned int in_mix_tlv[] = {
 	TLV_DB_RANGE_HEAD(1),
-	0, 7, TLV_DB_LINEAR_ITEM(-1200, 600),
+	0, 7, TLV_DB_SCALE_ITEM(-1200, 600, 0),
 };
 
 /* Left In PGA Connections */
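The cfg80211_calculate_bitrate() hunk at the top of this section turns an 802.11n MCS index into a rate in units of 100 kbit/s: it starts from a 6.5 or 13.5 Mbit/s base (20 MHz vs. 40 MHz channel), scales by the modulation order, multiplies by the number of spatial streams, and applies a 10/9 factor for a short guard interval. The standalone sketch below only mirrors that arithmetic for checking purposes; it is not part of the patch, and the example_bitrate() helper and its parameters are invented for illustration.

#include <stdio.h>

/* Mirrors the arithmetic of the cfg80211_calculate_bitrate() hunk above.
 * Hypothetical userspace helper, for illustration only. */
static int example_bitrate(int mcs, int is_40mhz, int short_gi)
{
	int modulation = mcs & 7;	/* modulation/coding index within one stream */
	int streams = (mcs >> 3) + 1;	/* spatial streams encoded in the MCS number */
	int bitrate = is_40mhz ? 13500000 : 6500000;	/* base rate in bit/s */

	if (modulation < 4)
		bitrate *= (modulation + 1);
	else if (modulation == 4)
		bitrate *= (modulation + 2);
	else
		bitrate *= (modulation + 3);

	bitrate *= streams;

	if (short_gi)
		bitrate = (bitrate / 9) * 10;	/* short guard interval: x 10/9 */

	/* round to the nearest unit of 100 kbit/s, as the kernel code does */
	return (bitrate + 50000) / 100000;
}

int main(void)
{
	/* MCS 15, 40 MHz, short GI: expect 3000, i.e. 300.0 Mbit/s */
	printf("%d\n", example_bitrate(15, 1, 1));
	return 0;
}

Run as written, the example prints 3000 for MCS 15 at 40 MHz with a short guard interval, i.e. 300 Mbit/s; multiplying by 100000, as the wext-compat hunk does, converts the result back to bit/s for the wireless-extensions ioctl.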