diff --git a/arch/arm/mach-pxa/include/mach/colibri.h b/arch/arm/mach-pxa/include/mach/colibri.h index 811743c..5f2ba8d 100644 --- a/arch/arm/mach-pxa/include/mach/colibri.h +++ b/arch/arm/mach-pxa/include/mach/colibri.h @@ -2,6 +2,7 @@ #define _COLIBRI_H_ #include +#include /* * common settings for all modules diff --git a/arch/mips/include/asm/mach-sibyte/war.h b/arch/mips/include/asm/mach-sibyte/war.h index 7950ef4..743385d 100644 --- a/arch/mips/include/asm/mach-sibyte/war.h +++ b/arch/mips/include/asm/mach-sibyte/war.h @@ -16,7 +16,11 @@ #if defined(CONFIG_SB1_PASS_1_WORKAROUNDS) || \ defined(CONFIG_SB1_PASS_2_WORKAROUNDS) -#define BCM1250_M3_WAR 1 +#ifndef __ASSEMBLY__ +extern int sb1250_m3_workaround_needed(void); +#endif + +#define BCM1250_M3_WAR sb1250_m3_workaround_needed() #define SIBYTE_1956_WAR 1 #else diff --git a/arch/mips/sibyte/sb1250/setup.c b/arch/mips/sibyte/sb1250/setup.c index 0444da1..92da315 100644 --- a/arch/mips/sibyte/sb1250/setup.c +++ b/arch/mips/sibyte/sb1250/setup.c @@ -87,6 +87,21 @@ static int __init setup_bcm1250(void) return ret; } +int sb1250_m3_workaround_needed(void) +{ + switch (soc_type) { + case K_SYS_SOC_TYPE_BCM1250: + case K_SYS_SOC_TYPE_BCM1250_ALT: + case K_SYS_SOC_TYPE_BCM1250_ALT2: + case K_SYS_SOC_TYPE_BCM1125: + case K_SYS_SOC_TYPE_BCM1125H: + return soc_pass < K_SYS_REVISION_BCM1250_C0; + + default: + return 0; + } +} + static int __init setup_bcm112x(void) { int ret = 0; diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 9258074..567cd57 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -615,6 +615,17 @@ _GLOBAL(start_secondary_prolog) std r3,0(r1) /* Zero the stack frame pointer */ bl .start_secondary b . +/* + * Reset stack pointer and call start_secondary + * to continue with online operation when woken up + * from cede in cpu offline. + */ +_GLOBAL(start_secondary_resume) + ld r1,PACAKSAVE(r13) /* Reload kernel stack pointer */ + li r3,0 + std r3,0(r1) /* Zero the stack frame pointer */ + bl .start_secondary + b . #endif /* diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c index c539472..1ce9dd5 100644 --- a/arch/powerpc/mm/fsl_booke_mmu.c +++ b/arch/powerpc/mm/fsl_booke_mmu.c @@ -155,15 +155,10 @@ static void settlbcam(int index, unsigned long virt, phys_addr_t phys, if (cur_cpu_spec->cpu_features & MMU_FTR_BIG_PHYS) TLBCAM[index].MAS7 = (u64)phys >> 32; -#ifndef CONFIG_KGDB /* want user access for breakpoints */ if (flags & _PAGE_USER) { TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR; TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0); } -#else - TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR; - TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? 
MAS3_UW : 0); -#endif tlbcam_addrs[index].start = virt; tlbcam_addrs[index].limit = virt + size - 1; diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index 6ea4698..b842378 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -122,44 +122,32 @@ static void pseries_mach_cpu_die(void) if (!get_lppaca()->shared_proc) get_lppaca()->donate_dedicated_cpu = 1; - printk(KERN_INFO - "cpu %u (hwid %u) ceding for offline with hint %d\n", - cpu, hwcpu, cede_latency_hint); while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) { extended_cede_processor(cede_latency_hint); - printk(KERN_INFO "cpu %u (hwid %u) returned from cede.\n", - cpu, hwcpu); - printk(KERN_INFO - "Decrementer value = %x Timebase value = %llx\n", - get_dec(), get_tb()); } - printk(KERN_INFO "cpu %u (hwid %u) got prodded to go online\n", - cpu, hwcpu); - if (!get_lppaca()->shared_proc) get_lppaca()->donate_dedicated_cpu = 0; get_lppaca()->idle = 0; - } - if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) { - unregister_slb_shadow(hwcpu, __pa(get_slb_shadow())); + if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) { + unregister_slb_shadow(hwcpu, __pa(get_slb_shadow())); - /* - * NOTE: Calling start_secondary() here for now to - * start new context. - * However, need to do it cleanly by resetting the - * stack pointer. - */ - start_secondary(); + /* + * Call to start_secondary_resume() will not return. + * Kernel stack will be reset and start_secondary() + * will be called to continue the online operation. + */ + start_secondary_resume(); + } + } - } else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) { + /* Requested state is CPU_STATE_OFFLINE at this point */ + WARN_ON(get_preferred_offline_state(cpu) != CPU_STATE_OFFLINE); - set_cpu_current_state(cpu, CPU_STATE_OFFLINE); - unregister_slb_shadow(hard_smp_processor_id(), - __pa(get_slb_shadow())); - rtas_stop_self(); - } + set_cpu_current_state(cpu, CPU_STATE_OFFLINE); + unregister_slb_shadow(hwcpu, __pa(get_slb_shadow())); + rtas_stop_self(); /* Should never get here... */ BUG(); diff --git a/arch/powerpc/platforms/pseries/offline_states.h b/arch/powerpc/platforms/pseries/offline_states.h index 202d869..75a6f48 100644 --- a/arch/powerpc/platforms/pseries/offline_states.h +++ b/arch/powerpc/platforms/pseries/offline_states.h @@ -35,4 +35,5 @@ static inline void set_default_offline_state(int cpu) extern enum cpu_state_vals get_preferred_offline_state(int cpu); extern int start_secondary(void); +extern void start_secondary_resume(void); #endif diff --git a/arch/sparc/include/asm/irqflags_64.h b/arch/sparc/include/asm/irqflags_64.h index 8b49bf9..bfa1ea4 100644 --- a/arch/sparc/include/asm/irqflags_64.h +++ b/arch/sparc/include/asm/irqflags_64.h @@ -76,9 +76,26 @@ static inline int raw_irqs_disabled(void) */ static inline unsigned long __raw_local_irq_save(void) { - unsigned long flags = __raw_local_save_flags(); - - raw_local_irq_disable(); + unsigned long flags, tmp; + + /* Disable interrupts to PIL_NORMAL_MAX unless we already + * are using PIL_NMI, in which case PIL_NMI is retained. + * + * The only values we ever program into the %pil are 0, + * PIL_NORMAL_MAX and PIL_NMI. + * + * Since PIL_NMI is the largest %pil value and all bits are + * set in it (0xf), it doesn't matter what PIL_NORMAL_MAX + * actually is. 
+ */ + __asm__ __volatile__( + "rdpr %%pil, %0\n\t" + "or %0, %2, %1\n\t" + "wrpr %1, 0x0, %%pil" + : "=r" (flags), "=r" (tmp) + : "i" (PIL_NORMAL_MAX) + : "memory" + ); return flags; } diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h index 39be9f2..3df02de 100644 --- a/arch/sparc/include/asm/thread_info_64.h +++ b/arch/sparc/include/asm/thread_info_64.h @@ -121,7 +121,7 @@ struct thread_info { #define THREAD_SHIFT PAGE_SHIFT #endif /* PAGE_SHIFT == 13 */ -#define PREEMPT_ACTIVE 0x4000000 +#define PREEMPT_ACTIVE 0x10000000 /* * macros/functions for gaining access to the thread information structure diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c index b775658..8a00058 100644 --- a/arch/sparc/kernel/pci_common.c +++ b/arch/sparc/kernel/pci_common.c @@ -371,14 +371,19 @@ static void pci_register_iommu_region(struct pci_pbm_info *pbm) struct resource *rp = kzalloc(sizeof(*rp), GFP_KERNEL); if (!rp) { - prom_printf("Cannot allocate IOMMU resource.\n"); - prom_halt(); + pr_info("%s: Cannot allocate IOMMU resource.\n", + pbm->name); + return; } rp->name = "IOMMU"; rp->start = pbm->mem_space.start + (unsigned long) vdma[0]; rp->end = rp->start + (unsigned long) vdma[1] - 1UL; rp->flags = IORESOURCE_BUSY; - request_resource(&pbm->mem_space, rp); + if (request_resource(&pbm->mem_space, rp)) { + pr_info("%s: Unable to request IOMMU resource.\n", + pbm->name); + kfree(rp); + } } } diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S index fd3cee4..c720f0c 100644 --- a/arch/sparc/kernel/rtrap_64.S +++ b/arch/sparc/kernel/rtrap_64.S @@ -172,7 +172,17 @@ rtrap_xcall: nop call trace_hardirqs_on nop - wrpr %l4, %pil + /* Do not actually set the %pil here. We will do that + * below after we clear PSTATE_IE in the %pstate register. + * If we re-enable interrupts here, we can recurse down + * the hardirq stack potentially endlessly, causing a + * stack overflow. + * + * It is tempting to put this test and trace_hardirqs_on + * call at the 'rt_continue' label, but that will not work + * as that path hits unconditionally and we do not want to + * execute this in NMI return paths, for example. + */ #endif rtrap_no_irq_enable: andcc %l1, TSTATE_PRIV, %l3 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index 10f7bb9..22cd475 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -2202,27 +2202,6 @@ void dump_stack(void) EXPORT_SYMBOL(dump_stack); -static inline int is_kernel_stack(struct task_struct *task, - struct reg_window *rw) -{ - unsigned long rw_addr = (unsigned long) rw; - unsigned long thread_base, thread_end; - - if (rw_addr < PAGE_OFFSET) { - if (task != &init_task) - return 0; - } - - thread_base = (unsigned long) task_stack_page(task); - thread_end = thread_base + sizeof(union thread_union); - if (rw_addr >= thread_base && - rw_addr < thread_end && - !(rw_addr & 0x7UL)) - return 1; - - return 0; -} - static inline struct reg_window *kernel_stack_up(struct reg_window *rw) { unsigned long fp = rw->ins[6]; @@ -2251,6 +2230,7 @@ void die_if_kernel(char *str, struct pt_regs *regs) show_regs(regs); add_taint(TAINT_DIE); if (regs->tstate & TSTATE_PRIV) { + struct thread_info *tp = current_thread_info(); struct reg_window *rw = (struct reg_window *) (regs->u_regs[UREG_FP] + STACK_BIAS); @@ -2258,8 +2238,8 @@ void die_if_kernel(char *str, struct pt_regs *regs) * find some badly aligned kernel stack. 
*/ while (rw && - count++ < 30&& - is_kernel_stack(current, rw)) { + count++ < 30 && + kstack_valid(tp, (unsigned long) rw)) { printk("Caller[%016lx]: %pS\n", rw->ins[7], (void *) rw->ins[7]); diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c index 378ca82..95a8e9a 100644 --- a/arch/sparc/kernel/unaligned_64.c +++ b/arch/sparc/kernel/unaligned_64.c @@ -49,7 +49,7 @@ static inline enum direction decode_direction(unsigned int insn) } /* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */ -static inline int decode_access_size(unsigned int insn) +static inline int decode_access_size(struct pt_regs *regs, unsigned int insn) { unsigned int tmp; @@ -65,7 +65,7 @@ static inline int decode_access_size(unsigned int insn) return 2; else { printk("Impossible unaligned trap. insn=%08x\n", insn); - die_if_kernel("Byte sized unaligned access?!?!", current_thread_info()->kregs); + die_if_kernel("Byte sized unaligned access?!?!", regs); /* GCC should never warn that control reaches the end * of this function without returning a value because @@ -289,7 +289,7 @@ static void log_unaligned(struct pt_regs *regs) asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) { enum direction dir = decode_direction(insn); - int size = decode_access_size(insn); + int size = decode_access_size(regs, insn); int orig_asi, asi; current_thread_info()->kern_una_regs = regs; diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index eb40925..ddb52b8 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -627,7 +627,7 @@ config GART_IOMMU bool "GART IOMMU support" if EMBEDDED default y select SWIOTLB - depends on X86_64 && PCI + depends on X86_64 && PCI && K8_NB ---help--- Support for full DMA access of devices with 32bit memory access only on systems with more than 3GB. This is usually needed for USB, @@ -2026,7 +2026,7 @@ endif # X86_32 config K8_NB def_bool y - depends on AGP_AMD64 || (X86_64 && (GART_IOMMU || (PCI && NUMA))) + depends on CPU_SUP_AMD && PCI source "drivers/pcmcia/Kconfig" diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index be37059..b35c160 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -2539,6 +2539,9 @@ void irq_force_complete_move(int irq) struct irq_desc *desc = irq_to_desc(irq); struct irq_cfg *cfg = desc->chip_data; + if (!cfg) + return; + __irq_complete_move(&desc, cfg->vector); } #else diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index 6e44519..3b5ea38 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c @@ -929,7 +929,8 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, powernow_table[i].index = index; /* Frequency may be rounded for these */ - if (boot_cpu_data.x86 == 0x10 || boot_cpu_data.x86 == 0x11) { + if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10) + || boot_cpu_data.x86 == 0x11) { powernow_table[i].frequency = freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7); } else diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 7e1cca1..1366c7c 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -47,6 +47,27 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) (c->x86 == 0x6 && c->x86_model >= 0x0e)) set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); + /* + * Atom erratum AAE44/AAF40/AAG38/AAH41: + * + * A race condition between speculative fetches and invalidating + * a large page. 
This is worked around in microcode, but we + * need the microcode to have already been loaded... so if it is + * not, recommend a BIOS update and disable large pages. + */ + if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2) { + u32 ucode, junk; + + wrmsr(MSR_IA32_UCODE_REV, 0, 0); + sync_core(); + rdmsr(MSR_IA32_UCODE_REV, junk, ucode); + + if (ucode < 0x20e) { + printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n"); + clear_cpu_cap(c, X86_FEATURE_PSE); + } + } + #ifdef CONFIG_X86_64 set_cpu_cap(c, X86_FEATURE_SYSENTER32); #else diff --git a/arch/x86/kernel/k8.c b/arch/x86/kernel/k8.c index cbc4332..9b89546 100644 --- a/arch/x86/kernel/k8.c +++ b/arch/x86/kernel/k8.c @@ -121,3 +121,17 @@ void k8_flush_garts(void) } EXPORT_SYMBOL_GPL(k8_flush_garts); +static __init int init_k8_nbs(void) +{ + int err = 0; + + err = cache_k8_northbridges(); + + if (err < 0) + printk(KERN_NOTICE "K8 NB: Cannot enumerate AMD northbridges.\n"); + + return err; +} + +/* This has to go after the PCI subsystem */ +fs_initcall(init_k8_nbs); diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index 4f41b29..0ae24d9 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c @@ -738,7 +738,7 @@ int __init gart_iommu_init(void) unsigned long scratch; long i; - if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) + if (num_k8_northbridges == 0) return 0; #ifndef CONFIG_AGP_AMD64 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 126f0b4..11d0702 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -282,12 +282,12 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, set_tsk_thread_flag(p, TIF_FORK); - p->thread.fs = me->thread.fs; - p->thread.gs = me->thread.gs; p->thread.io_bitmap_ptr = NULL; savesegment(gs, p->thread.gsindex); + p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs; savesegment(fs, p->thread.fsindex); + p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs; savesegment(es, p->thread.es); savesegment(ds, p->thread.ds); diff --git a/block/blk-timeout.c b/block/blk-timeout.c index 1ba7e0a..4f0c06c 100644 --- a/block/blk-timeout.c +++ b/block/blk-timeout.c @@ -109,6 +109,7 @@ void blk_rq_timed_out_timer(unsigned long data) struct request_queue *q = (struct request_queue *) data; unsigned long flags, next = 0; struct request *rq, *tmp; + int next_set = 0; spin_lock_irqsave(q->queue_lock, flags); @@ -122,16 +123,13 @@ void blk_rq_timed_out_timer(unsigned long data) if (blk_mark_rq_complete(rq)) continue; blk_rq_timed_out(rq); - } else if (!next || time_after(next, rq->deadline)) + } else if (!next_set || time_after(next, rq->deadline)) { next = rq->deadline; + next_set = 1; + } } - /* - * next can never be 0 here with the list non-empty, since we always - * bump ->deadline to 1 so we can detect if the timer was ever added - * or not. 
See comment in blk_add_timer() - */ - if (next) + if (next_set) mod_timer(&q->timeout, round_jiffies_up(next)); spin_unlock_irqrestore(q->queue_lock, flags); diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c index 943f2ab..ce038d8 100644 --- a/crypto/async_tx/async_raid6_recov.c +++ b/crypto/async_tx/async_raid6_recov.c @@ -324,6 +324,7 @@ struct dma_async_tx_descriptor * async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb, struct page **blocks, struct async_submit_ctl *submit) { + void *scribble = submit->scribble; int non_zero_srcs, i; BUG_ON(faila == failb); @@ -332,11 +333,13 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb, pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes); - /* we need to preserve the contents of 'blocks' for the async - * case, so punt to synchronous if a scribble buffer is not available + /* if a dma resource is not available or a scribble buffer is not + * available punt to the synchronous path. In the 'dma not + * available' case be sure to use the scribble buffer to + * preserve the content of 'blocks' as the caller intended. */ - if (!submit->scribble) { - void **ptrs = (void **) blocks; + if (!async_dma_find_channel(DMA_PQ) || !scribble) { + void **ptrs = scribble ? scribble : (void **) blocks; async_tx_quiesce(&submit->depend_tx); for (i = 0; i < disks; i++) @@ -406,11 +409,13 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila, pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes); - /* we need to preserve the contents of 'blocks' for the async - * case, so punt to synchronous if a scribble buffer is not available + /* if a dma resource is not available or a scribble buffer is not + * available punt to the synchronous path. In the 'dma not + * available' case be sure to use the scribble buffer to + * preserve the content of 'blocks' as the caller intended. */ - if (!scribble) { - void **ptrs = (void **) blocks; + if (!async_dma_find_channel(DMA_PQ) || !scribble) { + void **ptrs = scribble ? 
scribble : (void **) blocks; async_tx_quiesce(&submit->depend_tx); for (i = 0; i < disks; i++) diff --git a/drivers/Makefile b/drivers/Makefile index 6ee53c7..8b0b948 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -17,6 +17,7 @@ obj-$(CONFIG_SFI) += sfi/ obj-$(CONFIG_PNP) += pnp/ obj-$(CONFIG_ARM_AMBA) += amba/ +obj-$(CONFIG_VIRTIO) += virtio/ obj-$(CONFIG_XEN) += xen/ # regulators early, since some subsystems rely on them to initialize @@ -106,7 +107,6 @@ obj-$(CONFIG_HID) += hid/ obj-$(CONFIG_PPC_PS3) += ps3/ obj-$(CONFIG_OF) += of/ obj-$(CONFIG_SSB) += ssb/ -obj-$(CONFIG_VIRTIO) += virtio/ obj-$(CONFIG_VLYNQ) += vlynq/ obj-$(CONFIG_STAGING) += staging/ obj-y += platform/ diff --git a/drivers/acpi/power_meter.c b/drivers/acpi/power_meter.c index dc4ffad..e02d93c 100644 --- a/drivers/acpi/power_meter.c +++ b/drivers/acpi/power_meter.c @@ -34,7 +34,7 @@ #define ACPI_POWER_METER_NAME "power_meter" ACPI_MODULE_NAME(ACPI_POWER_METER_NAME); #define ACPI_POWER_METER_DEVICE_NAME "Power Meter" -#define ACPI_POWER_METER_CLASS "power_meter_resource" +#define ACPI_POWER_METER_CLASS "pwr_meter_resource" #define NUM_SENSORS 17 diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 79d33d9..7c85265 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -450,6 +450,126 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { }, }, { + .callback = init_set_sci_en_on_resume, + .ident = "Lenovo ThinkPad T410", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"), + }, + }, + { + .callback = init_set_sci_en_on_resume, + .ident = "Lenovo ThinkPad T510", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"), + }, + }, + { + .callback = init_set_sci_en_on_resume, + .ident = "Lenovo ThinkPad W510", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"), + }, + }, + { + .callback = init_set_sci_en_on_resume, + .ident = "Lenovo ThinkPad X201", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"), + }, + }, + { + .callback = init_set_sci_en_on_resume, + .ident = "Lenovo ThinkPad X201", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"), + }, + }, + { + .callback = init_set_sci_en_on_resume, + .ident = "Lenovo ThinkPad T410", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"), + }, + }, + { + .callback = init_set_sci_en_on_resume, + .ident = "Lenovo ThinkPad T510", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"), + }, + }, + { + .callback = init_set_sci_en_on_resume, + .ident = "Lenovo ThinkPad W510", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"), + }, + }, + { + .callback = init_set_sci_en_on_resume, + .ident = "Lenovo ThinkPad X201", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"), + }, + }, + { + .callback = init_set_sci_en_on_resume, + .ident = "Lenovo ThinkPad X201", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"), + }, + }, + { + .callback = init_set_sci_en_on_resume, + .ident = "Lenovo ThinkPad T410", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"), + }, + }, + { + .callback = 
init_set_sci_en_on_resume, + .ident = "Lenovo ThinkPad T510", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"), + }, + }, + { + .callback = init_set_sci_en_on_resume, + .ident = "Lenovo ThinkPad W510", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"), + }, + }, + { + .callback = init_set_sci_en_on_resume, + .ident = "Lenovo ThinkPad X201", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"), + }, + }, + { + .callback = init_set_sci_en_on_resume, + .ident = "Lenovo ThinkPad X201", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"), + }, + }, + { .callback = init_old_suspend_ordering, .ident = "Panasonic CF51-2L", .matches = { @@ -458,6 +578,30 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"), }, }, + { + .callback = init_set_sci_en_on_resume, + .ident = "Dell Studio 1558", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1558"), + }, + }, + { + .callback = init_set_sci_en_on_resume, + .ident = "Dell Studio 1557", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1557"), + }, + }, + { + .callback = init_set_sci_en_on_resume, + .ident = "Dell Studio 1555", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1555"), + }, + }, {}, }; #endif /* CONFIG_SUSPEND */ diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 9f6cfac..228740f 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -879,6 +879,8 @@ static void ata_eh_set_pending(struct ata_port *ap, int fastdrain) void ata_qc_schedule_eh(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; + struct request_queue *q = qc->scsicmd->device->request_queue; + unsigned long flags; WARN_ON(!ap->ops->error_handler); @@ -890,7 +892,9 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc) * Note that ATA_QCFLAG_FAILED is unconditionally set after * this function completes. */ + spin_lock_irqsave(q->queue_lock, flags); blk_abort_request(qc->scsicmd->request); + spin_unlock_irqrestore(q->queue_lock, flags); } /** @@ -1624,6 +1628,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link) } /* okay, this error is ours */ + memset(&tf, 0, sizeof(tf)); rc = ata_eh_read_log_10h(dev, &tag, &tf); if (rc) { ata_link_printk(link, KERN_ERR, "failed to read log page 10h " diff --git a/drivers/base/memory.c b/drivers/base/memory.c index bd02505..d7d77d4 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -311,7 +311,7 @@ static SYSDEV_ATTR(removable, 0444, show_mem_removable, NULL); static ssize_t print_block_size(struct class *class, char *buf) { - return sprintf(buf, "%#lx\n", (unsigned long)PAGES_PER_SECTION * PAGE_SIZE); + return sprintf(buf, "%lx\n", (unsigned long)PAGES_PER_SECTION * PAGE_SIZE); } static CLASS_ATTR(block_size_bytes, 0444, print_block_size, NULL); diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig index 2fb3a48..4b66c69 100644 --- a/drivers/char/agp/Kconfig +++ b/drivers/char/agp/Kconfig @@ -57,7 +57,7 @@ config AGP_AMD config AGP_AMD64 tristate "AMD Opteron/Athlon64 on-CPU GART support" - depends on AGP && X86 + depends on AGP && X86 && K8_NB help This option gives you AGP support for the GLX component of X using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs. 
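Note on the K8_NB rework spread across the x86 Kconfig, arch/x86/kernel/k8.c and the AGP_AMD64 entry above: northbridge enumeration now happens once, in init_k8_nbs() at fs_initcall time (after the PCI subsystem comes up), so GART and AGP code only consult the cached list. A minimal sketch of that consumer pattern follows; it is illustrative only, uses a hypothetical function name (example_nb_consumer_init), and assumes the 2.6.33-era asm/k8.h interface (num_k8_northbridges, k8_northbridges) rather than anything added by this patch.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <asm/k8.h>	/* num_k8_northbridges, k8_northbridges */

/* Hypothetical consumer: a device_initcall runs after
 * fs_initcall(init_k8_nbs), so the cached NB list is already valid. */
static int __init example_nb_consumer_init(void)
{
	int i;

	/* Mirrors the pci-gart_64.c change above: check the cached
	 * count instead of calling cache_k8_northbridges() directly. */
	if (num_k8_northbridges == 0)
		return -ENODEV;

	for (i = 0; i < num_k8_northbridges; i++) {
		struct pci_dev *nb = k8_northbridges[i];

		/* ... program per-northbridge GART/AGP registers here ... */
		dev_info(&nb->dev, "AMD northbridge %d cached\n", i);
	}
	return 0;
}
device_initcall(example_nb_consumer_init);

Because device_initcall ordering places this after fs_initcall, the sketch never races with the enumeration done by init_k8_nbs(); that is the same ordering argument the "This has to go after the PCI subsystem" comment in k8.c relies on.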
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 73655ae..f8e57c6 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -101,7 +101,6 @@ struct menu_device { unsigned int expected_us; u64 predicted_us; - unsigned int measured_us; unsigned int exit_us; unsigned int bucket; u64 correction_factor[BUCKETS]; @@ -187,14 +186,14 @@ static int menu_select(struct cpuidle_device *dev) int i; int multiplier; - data->last_state_idx = 0; - data->exit_us = 0; - if (data->needs_update) { menu_update(dev); data->needs_update = 0; } + data->last_state_idx = 0; + data->exit_us = 0; + /* Special case when user has set very strict latency requirement */ if (unlikely(latency_req == 0)) return 0; @@ -294,7 +293,7 @@ static void menu_update(struct cpuidle_device *dev) new_factor = data->correction_factor[data->bucket] * (DECAY - 1) / DECAY; - if (data->expected_us > 0 && data->measured_us < MAX_INTERESTING) + if (data->expected_us > 0 && measured_us < MAX_INTERESTING) new_factor += RESOLUTION * measured_us / data->expected_us; else /* diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c index f5b6d9f..97e64bc 100644 --- a/drivers/edac/edac_mce_amd.c +++ b/drivers/edac/edac_mce_amd.c @@ -294,7 +294,6 @@ wrong_ls_mce: void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors) { u32 ec = ERROR_CODE(regs->nbsl); - u32 xec = EXT_ERROR_CODE(regs->nbsl); if (!handle_errors) return; @@ -324,7 +323,7 @@ void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors) pr_cont("\n"); } - pr_emerg("%s.\n", EXT_ERR_MSG(xec)); + pr_emerg("%s.\n", EXT_ERR_MSG(regs->nbsl)); if (BUS_ERROR(ec) && nb_bus_decoder) nb_bus_decoder(node_id, regs); @@ -374,7 +373,7 @@ static int amd_decode_mce(struct notifier_block *nb, unsigned long val, ((m->status & MCI_STATUS_PCC) ? "yes" : "no")); /* do the two bits[14:13] together */ - ecc = m->status & (3ULL << 45); + ecc = (m->status >> 45) & 0x3; if (ecc) pr_cont(", %sECC Error", ((ecc == 2) ? "C" : "U")); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index a894ade..1372796 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -162,7 +162,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; - if (!IS_IRONLAKE(dev)) { + if (!HAS_PCH_SPLIT(dev)) { seq_printf(m, "Interrupt enable: %08x\n", I915_READ(IER)); seq_printf(m, "Interrupt identity: %08x\n", diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 2307f98..d642efd 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -978,15 +978,21 @@ static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size, * Some of the preallocated space is taken by the GTT * and popup. GTT is 1K per MB of aperture size, and popup is 4K. */ - if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev)) + if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) overhead = 4096; else overhead = (*aperture_size / 1024) + 4096; switch (tmp & INTEL_GMCH_GMS_MASK) { case INTEL_855_GMCH_GMS_DISABLED: - DRM_ERROR("video memory is disabled\n"); - return -1; + /* XXX: This is what my A1 silicon has. 
*/ + if (IS_GEN6(dev)) { + stolen = 64 * 1024 * 1024; + } else { + DRM_ERROR("video memory is disabled\n"); + return -1; + } + break; case INTEL_855_GMCH_GMS_STOLEN_1M: stolen = 1 * 1024 * 1024; break; @@ -1064,7 +1070,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev, int gtt_offset, gtt_size; if (IS_I965G(dev)) { - if (IS_G4X(dev) || IS_IRONLAKE(dev)) { + if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) { gtt_offset = 2*1024*1024; gtt_size = 2*1024*1024; } else { @@ -1445,7 +1451,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) dev->driver->get_vblank_counter = i915_get_vblank_counter; dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ - if (IS_G4X(dev) || IS_IRONLAKE(dev)) { + if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) { dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ dev->driver->get_vblank_counter = gm45_get_vblank_counter; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b99b6a8..16ce3ba 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1026,7 +1026,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); #define IS_845G(dev) ((dev)->pci_device == 0x2562) #define IS_I85X(dev) ((dev)->pci_device == 0x3582) #define IS_I865G(dev) ((dev)->pci_device == 0x2572) -#define IS_I8XX(dev) (INTEL_INFO(dev)->is_i8xx) +#define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx) #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) #define IS_I915GM(dev) ((dev)->pci_device == 0x2592) #define IS_I945G(dev) ((dev)->pci_device == 0x2772) @@ -1045,8 +1045,29 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); #define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx) #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) +#define IS_GEN3(dev) (IS_I915G(dev) || \ + IS_I915GM(dev) || \ + IS_I945G(dev) || \ + IS_I945GM(dev) || \ + IS_G33(dev) || \ + IS_PINEVIEW(dev)) +#define IS_GEN4(dev) ((dev)->pci_device == 0x2972 || \ + (dev)->pci_device == 0x2982 || \ + (dev)->pci_device == 0x2992 || \ + (dev)->pci_device == 0x29A2 || \ + (dev)->pci_device == 0x2A02 || \ + (dev)->pci_device == 0x2A12 || \ + (dev)->pci_device == 0x2E02 || \ + (dev)->pci_device == 0x2E12 || \ + (dev)->pci_device == 0x2E22 || \ + (dev)->pci_device == 0x2E32 || \ + (dev)->pci_device == 0x2A42 || \ + (dev)->pci_device == 0x2E42) + #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) +#define IS_GEN6(dev) ((dev)->pci_device == 0x0102) + /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte * rows, which changed the alignment requirements and fence programming. 
*/ @@ -1067,6 +1088,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) #define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6) +#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \ + IS_GEN6(dev)) + #define PRIMARY_RINGBUFFER_SIZE (128*1024) #endif diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index fd099a1..6458400 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1819,7 +1819,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) return -EIO; if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) ier = I915_READ(DEIER) | I915_READ(GTIER); else ier = I915_READ(IER); @@ -2316,6 +2316,12 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg) pitch_val = obj_priv->stride / tile_width; pitch_val = ffs(pitch_val) - 1; + if (obj_priv->tiling_mode == I915_TILING_Y && + HAS_128_BYTE_Y_TILING(dev)) + WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL); + else + WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL); + val = obj_priv->gtt_offset; if (obj_priv->tiling_mode == I915_TILING_Y) val |= 1 << I830_FENCE_TILING_Y_SHIFT; diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index df278b2..040e80c 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -209,7 +209,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; bool need_disable; - if (IS_IRONLAKE(dev)) { + if (IS_IRONLAKE(dev) || IS_GEN6(dev)) { /* On Ironlake whatever DRAM config, GPU always do * same swizzling setup. */ @@ -357,21 +357,17 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) * reg, so dont bother to check the size */ if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) return false; - } else if (IS_I9XX(dev)) { - uint32_t pitch_val = ffs(stride / tile_width) - 1; - - /* XXX: For Y tiling, FENCE_MAX_PITCH_VAL is actually 6 (8KB) - * instead of 4 (2KB) on 945s. 
- */ - if (pitch_val > I915_FENCE_MAX_PITCH_VAL || - size > (I830_FENCE_MAX_SIZE_VAL << 20)) + } else if (IS_GEN3(dev) || IS_GEN2(dev)) { + if (stride > 8192) return false; - } else { - uint32_t pitch_val = ffs(stride / tile_width) - 1; - if (pitch_val > I830_FENCE_MAX_PITCH_VAL || - size > (I830_FENCE_MAX_SIZE_VAL << 19)) - return false; + if (IS_GEN3(dev)) { + if (size > I830_FENCE_MAX_SIZE_VAL << 20) + return false; + } else { + if (size > I830_FENCE_MAX_SIZE_VAL << 19) + return false; + } } /* 965+ just needs multiples of tile width */ diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index a17d6bd..032f667 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -576,7 +576,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) atomic_inc(&dev_priv->irq_received); - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) return ironlake_irq_handler(dev); iir = I915_READ(IIR); @@ -737,7 +737,7 @@ void i915_user_irq_get(struct drm_device *dev) spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) { - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT); else i915_enable_irq(dev_priv, I915_USER_INTERRUPT); @@ -753,7 +753,7 @@ void i915_user_irq_put(struct drm_device *dev) spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) { - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT); else i915_disable_irq(dev_priv, I915_USER_INTERRUPT); @@ -861,7 +861,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe) return -EINVAL; spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) ironlake_enable_display_irq(dev_priv, (pipe == 0) ? DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); else if (IS_I965G(dev)) @@ -883,7 +883,7 @@ void i915_disable_vblank(struct drm_device *dev, int pipe) unsigned long irqflags; spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) ironlake_disable_display_irq(dev_priv, (pipe == 0) ? DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); else @@ -897,7 +897,7 @@ void i915_enable_interrupt (struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - if (!IS_IRONLAKE(dev)) + if (!HAS_PCH_SPLIT(dev)) opregion_enable_asle(dev); dev_priv->irq_enabled = 1; } @@ -1076,7 +1076,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev) INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); INIT_WORK(&dev_priv->error_work, i915_error_work_func); - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { ironlake_irq_preinstall(dev); return; } @@ -1108,7 +1108,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev) dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) return ironlake_irq_postinstall(dev); /* Unmask the interrupts that we always want on. 
*/ @@ -1196,7 +1196,7 @@ void i915_driver_irq_uninstall(struct drm_device * dev) dev_priv->vblank_pipe = 0; - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { ironlake_irq_uninstall(dev); return; } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index ab1bd2d..fd95bdf 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -221,7 +221,7 @@ #define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8) #define I830_FENCE_PITCH_SHIFT 4 #define I830_FENCE_REG_VALID (1<<0) -#define I915_FENCE_MAX_PITCH_VAL 0x10 +#define I915_FENCE_MAX_PITCH_VAL 4 #define I830_FENCE_MAX_PITCH_VAL 6 #define I830_FENCE_MAX_SIZE_VAL (1<<8) diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 15fbc1b..70c9d4b 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -247,6 +247,7 @@ static void parse_general_features(struct drm_i915_private *dev_priv, struct bdb_header *bdb) { + struct drm_device *dev = dev_priv->dev; struct bdb_general_features *general; /* Set sensible defaults in case we can't find the general block */ @@ -263,7 +264,7 @@ parse_general_features(struct drm_i915_private *dev_priv, if (IS_I85X(dev_priv->dev)) dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48; - else if (IS_IRONLAKE(dev_priv->dev)) + else if (IS_IRONLAKE(dev_priv->dev) || IS_GEN6(dev)) dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 120; else diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 79dd402..fccf074 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -39,7 +39,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode) struct drm_i915_private *dev_priv = dev->dev_private; u32 temp, reg; - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) reg = PCH_ADPA; else reg = ADPA; @@ -113,7 +113,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, else dpll_md_reg = DPLL_B_MD; - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) adpa_reg = PCH_ADPA; else adpa_reg = ADPA; @@ -122,7 +122,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, * Disable separate mode multiplier used when cloning SDVO to CRT * XXX this needs to be adjusted when we really are cloning */ - if (IS_I965G(dev) && !IS_IRONLAKE(dev)) { + if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { dpll_md = I915_READ(dpll_md_reg); I915_WRITE(dpll_md_reg, dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK); @@ -136,11 +136,11 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, if (intel_crtc->pipe == 0) { adpa |= ADPA_PIPE_A_SELECT; - if (!IS_IRONLAKE(dev)) + if (!HAS_PCH_SPLIT(dev)) I915_WRITE(BCLRPAT_A, 0); } else { adpa |= ADPA_PIPE_B_SELECT; - if (!IS_IRONLAKE(dev)) + if (!HAS_PCH_SPLIT(dev)) I915_WRITE(BCLRPAT_B, 0); } @@ -202,7 +202,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) u32 hotplug_en; int i, tries = 0; - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) return intel_ironlake_crt_detect_hotplug(connector); /* @@ -524,7 +524,7 @@ void intel_crt_init(struct drm_device *dev) &intel_output->enc); /* Set up the DDC bus. 
*/ - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) i2c_reg = PCH_GPIOA; else { i2c_reg = GPIOA; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index b27202d..4b2458d 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -232,7 +232,7 @@ struct intel_limit { #define G4X_P2_DISPLAY_PORT_FAST 10 #define G4X_P2_DISPLAY_PORT_LIMIT 0 -/* Ironlake */ +/* Ironlake / Sandybridge */ /* as we calculate clock using (register_value + 2) for N/M1/M2, so here the range value for them is (actual_value-2). */ @@ -690,7 +690,7 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; const intel_limit_t *limit; - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) limit = intel_ironlake_limit(crtc); else if (IS_G4X(dev)) { limit = intel_g4x_limit(crtc); @@ -1366,7 +1366,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, dspcntr &= ~DISPPLANE_TILED; } - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) /* must disable */ dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; @@ -1427,7 +1427,7 @@ static void i915_disable_vga (struct drm_device *dev) u8 sr1; u32 vga_reg; - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) vga_reg = CPU_VGACNTRL; else vga_reg = VGACNTRL; @@ -2111,7 +2111,7 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = crtc->dev; - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { /* FDI link clock is fixed at 2.7G */ if (mode->clock * 3 > 27000 * 4) return MODE_CLOCK_HIGH; @@ -2967,7 +2967,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, refclk / 1000); } else if (IS_I9XX(dev)) { refclk = 96000; - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) refclk = 120000; /* 120Mhz refclk */ } else { refclk = 48000; @@ -3025,7 +3025,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, } /* FDI link */ - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { int lane, link_bw, bpp; /* eDP doesn't require FDI link, so just set DP M/N according to current link config */ @@ -3102,7 +3102,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, * PCH B stepping, previous chipset stepping should be * ignoring this setting. 
*/ - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { temp = I915_READ(PCH_DREF_CONTROL); /* Always enable nonspread source */ temp &= ~DREF_NONSPREAD_SOURCE_MASK; @@ -3149,7 +3149,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, reduced_clock.m2; } - if (!IS_IRONLAKE(dev)) + if (!HAS_PCH_SPLIT(dev)) dpll = DPLL_VGA_MODE_DIS; if (IS_I9XX(dev)) { @@ -3162,7 +3162,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; - else if (IS_IRONLAKE(dev)) + else if (HAS_PCH_SPLIT(dev)) dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; } if (is_dp) @@ -3174,7 +3174,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, else { dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; /* also FPA1 */ - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; if (IS_G4X(dev) && has_reduced_clock) dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; @@ -3193,7 +3193,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; break; } - if (IS_I965G(dev) && !IS_IRONLAKE(dev)) + if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); } else { if (is_lvds) { @@ -3227,7 +3227,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, /* Ironlake's plane is forced to pipe, bit 24 is to enable color space conversion */ - if (!IS_IRONLAKE(dev)) { + if (!HAS_PCH_SPLIT(dev)) { if (pipe == 0) dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; else @@ -3254,14 +3254,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, /* Disable the panel fitter if it was on our pipe */ - if (!IS_IRONLAKE(dev) && intel_panel_fitter_pipe(dev) == pipe) + if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe) I915_WRITE(PFIT_CONTROL, 0); DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); drm_mode_debug_printmodeline(mode); /* assign to Ironlake registers */ - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { fp_reg = pch_fp_reg; dpll_reg = pch_dpll_reg; } @@ -3282,7 +3282,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, if (is_lvds) { u32 lvds; - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) lvds_reg = PCH_LVDS; lvds = I915_READ(lvds_reg); @@ -3328,7 +3328,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, /* Wait for the clocks to stabilize. */ udelay(150); - if (IS_I965G(dev) && !IS_IRONLAKE(dev)) { + if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { if (is_sdvo) { sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | @@ -3375,14 +3375,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, /* pipesrc and dspsize control the size that is scaled from, which should * always be the user's requested size. 
*/ - if (!IS_IRONLAKE(dev)) { + if (!HAS_PCH_SPLIT(dev)) { I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1)); I915_WRITE(dsppos_reg, 0); } I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m); I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n); I915_WRITE(link_m1_reg, m_n.link_m); @@ -3403,7 +3403,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, intel_wait_for_vblank(dev); - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { /* enable address swizzle for tiling buffer */ temp = I915_READ(DISP_ARB_CTL); I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING); @@ -3438,7 +3438,7 @@ void intel_crtc_load_lut(struct drm_crtc *crtc) return; /* use legacy palette for Ironlake */ - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A : LGC_PALETTE_B; @@ -3922,7 +3922,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule) int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; int dpll = I915_READ(dpll_reg); - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) return; if (!dev_priv->lvds_downclock_avail) @@ -3961,7 +3961,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc) int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; int dpll = I915_READ(dpll_reg); - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) return; if (!dev_priv->lvds_downclock_avail) @@ -4382,7 +4382,7 @@ static void intel_setup_outputs(struct drm_device *dev) if (IS_MOBILE(dev) && !IS_I830(dev)) intel_lvds_init(dev); - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { int found; if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED)) @@ -4451,7 +4451,7 @@ static void intel_setup_outputs(struct drm_device *dev) DRM_DEBUG_KMS("probing DP_D\n"); intel_dp_init(dev, DP_D); } - } else if (IS_I8XX(dev)) + } else if (IS_GEN2(dev)) intel_dvo_init(dev); if (SUPPORTS_TV(dev)) @@ -4599,7 +4599,7 @@ void intel_init_clock_gating(struct drm_device *dev) * Disable clock gating reported to work incorrectly according to the * specs, but enable as much else as we can. */ - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { return; } else if (IS_G4X(dev)) { uint32_t dspclk_gate; @@ -4672,7 +4672,7 @@ static void intel_init_display(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; /* We always want a DPMS function */ - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) dev_priv->display.dpms = ironlake_crtc_dpms; else dev_priv->display.dpms = i9xx_crtc_dpms; @@ -4715,7 +4715,7 @@ static void intel_init_display(struct drm_device *dev) i830_get_display_clock_speed; /* For FIFO watermark updates */ - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) dev_priv->display.update_wm = NULL; else if (IS_G4X(dev)) dev_priv->display.update_wm = g4x_update_wm; diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 1238bc9..66df0c3 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -661,7 +661,7 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect /* ACPI lid methods were generally unreliable in this generation, so * don't even bother. 
*/ - if (IS_I8XX(dev)) + if (IS_GEN2(dev)) return connector_status_connected; if (!dmi_check_system(bad_lid_status) && !acpi_lid_open()) diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 63f569b..6b89042 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -172,7 +172,7 @@ struct overlay_registers { #define OFC_UPDATE 0x1 #define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev)) -#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev)) +#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev) && !IS_GEN6(dev)) static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay) diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 10be7b5..855911e 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -1210,12 +1210,23 @@ static int i2c_detect_address(struct i2c_client *temp_client, return 0; /* Make sure there is something at this address */ - if (i2c_smbus_xfer(adapter, addr, 0, 0, 0, I2C_SMBUS_QUICK, NULL) < 0) - return 0; + if (addr == 0x73 && (adapter->class & I2C_CLASS_HWMON)) { + /* Special probe for FSC hwmon chips */ + union i2c_smbus_data dummy; - /* Prevent 24RF08 corruption */ - if ((addr & ~0x0f) == 0x50) - i2c_smbus_xfer(adapter, addr, 0, 0, 0, I2C_SMBUS_QUICK, NULL); + if (i2c_smbus_xfer(adapter, addr, 0, I2C_SMBUS_READ, 0, + I2C_SMBUS_BYTE_DATA, &dummy) < 0) + return 0; + } else { + if (i2c_smbus_xfer(adapter, addr, 0, I2C_SMBUS_WRITE, 0, + I2C_SMBUS_QUICK, NULL) < 0) + return 0; + + /* Prevent 24RF08 corruption */ + if ((addr & ~0x0f) == 0x50) + i2c_smbus_xfer(adapter, addr, 0, I2C_SMBUS_WRITE, 0, + I2C_SMBUS_QUICK, NULL); + } /* Finally call the custom detection function */ memset(&info, 0, sizeof(struct i2c_board_info)); diff --git a/drivers/md/md.c b/drivers/md/md.c index a20a71e..2ecd1d5 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -2108,12 +2108,18 @@ repeat: if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */ /* .. if the array isn't clean, an 'even' event must also go * to spares. 
*/ - if ((mddev->events&1)==0) + if ((mddev->events&1)==0) { nospares = 0; + sync_req = 2; /* force a second update to get the + * even/odd in sync */ + } } else { /* otherwise an 'odd' event must go to spares */ - if ((mddev->events&1)) + if ((mddev->events&1)) { nospares = 0; + sync_req = 2; /* force a second update to get the + * even/odd in sync */ + } } } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index ceb24af..0468f5b 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -1526,7 +1526,7 @@ static void raid5_end_read_request(struct bio * bi, int error) clear_bit(R5_UPTODATE, &sh->dev[i].flags); atomic_inc(&rdev->read_errors); - if (conf->mddev->degraded) + if (conf->mddev->degraded >= conf->max_degraded) printk_rl(KERN_WARNING "raid5:%s: read error not correctable " "(sector %llu on %s).\n", @@ -1649,8 +1649,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, int previous, int *dd_idx, struct stripe_head *sh) { - long stripe; - unsigned long chunk_number; + sector_t stripe, stripe2; + sector_t chunk_number; unsigned int chunk_offset; int pd_idx, qd_idx; int ddf_layout = 0; @@ -1670,18 +1670,13 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, */ chunk_offset = sector_div(r_sector, sectors_per_chunk); chunk_number = r_sector; - BUG_ON(r_sector != chunk_number); /* * Compute the stripe number */ - stripe = chunk_number / data_disks; - - /* - * Compute the data disk and parity disk indexes inside the stripe - */ - *dd_idx = chunk_number % data_disks; - + stripe = chunk_number; + *dd_idx = sector_div(stripe, data_disks); + stripe2 = stripe; /* * Select the parity disk based on the user selected algorithm. */ @@ -1693,21 +1688,21 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, case 5: switch (algorithm) { case ALGORITHM_LEFT_ASYMMETRIC: - pd_idx = data_disks - stripe % raid_disks; + pd_idx = data_disks - sector_div(stripe2, raid_disks); if (*dd_idx >= pd_idx) (*dd_idx)++; break; case ALGORITHM_RIGHT_ASYMMETRIC: - pd_idx = stripe % raid_disks; + pd_idx = sector_div(stripe2, raid_disks); if (*dd_idx >= pd_idx) (*dd_idx)++; break; case ALGORITHM_LEFT_SYMMETRIC: - pd_idx = data_disks - stripe % raid_disks; + pd_idx = data_disks - sector_div(stripe2, raid_disks); *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; break; case ALGORITHM_RIGHT_SYMMETRIC: - pd_idx = stripe % raid_disks; + pd_idx = sector_div(stripe2, raid_disks); *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; break; case ALGORITHM_PARITY_0: @@ -1727,7 +1722,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, switch (algorithm) { case ALGORITHM_LEFT_ASYMMETRIC: - pd_idx = raid_disks - 1 - (stripe % raid_disks); + pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); qd_idx = pd_idx + 1; if (pd_idx == raid_disks-1) { (*dd_idx)++; /* Q D D D P */ @@ -1736,7 +1731,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, (*dd_idx) += 2; /* D D P Q D */ break; case ALGORITHM_RIGHT_ASYMMETRIC: - pd_idx = stripe % raid_disks; + pd_idx = sector_div(stripe2, raid_disks); qd_idx = pd_idx + 1; if (pd_idx == raid_disks-1) { (*dd_idx)++; /* Q D D D P */ @@ -1745,12 +1740,12 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, (*dd_idx) += 2; /* D D P Q D */ break; case ALGORITHM_LEFT_SYMMETRIC: - pd_idx = raid_disks - 1 - (stripe % raid_disks); + pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); qd_idx = (pd_idx + 1) % raid_disks; *dd_idx = (pd_idx + 2 + 
*dd_idx) % raid_disks; break; case ALGORITHM_RIGHT_SYMMETRIC: - pd_idx = stripe % raid_disks; + pd_idx = sector_div(stripe2, raid_disks); qd_idx = (pd_idx + 1) % raid_disks; *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; break; @@ -1769,7 +1764,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, /* Exactly the same as RIGHT_ASYMMETRIC, but or * of blocks for computing Q is different. */ - pd_idx = stripe % raid_disks; + pd_idx = sector_div(stripe2, raid_disks); qd_idx = pd_idx + 1; if (pd_idx == raid_disks-1) { (*dd_idx)++; /* Q D D D P */ @@ -1784,7 +1779,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, * D D D P Q rather than * Q D D D P */ - pd_idx = raid_disks - 1 - ((stripe + 1) % raid_disks); + stripe2 += 1; + pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); qd_idx = pd_idx + 1; if (pd_idx == raid_disks-1) { (*dd_idx)++; /* Q D D D P */ @@ -1796,7 +1792,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, case ALGORITHM_ROTATING_N_CONTINUE: /* Same as left_symmetric but Q is before P */ - pd_idx = raid_disks - 1 - (stripe % raid_disks); + pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); qd_idx = (pd_idx + raid_disks - 1) % raid_disks; *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; ddf_layout = 1; @@ -1804,27 +1800,27 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, case ALGORITHM_LEFT_ASYMMETRIC_6: /* RAID5 left_asymmetric, with Q on last device */ - pd_idx = data_disks - stripe % (raid_disks-1); + pd_idx = data_disks - sector_div(stripe2, raid_disks-1); if (*dd_idx >= pd_idx) (*dd_idx)++; qd_idx = raid_disks - 1; break; case ALGORITHM_RIGHT_ASYMMETRIC_6: - pd_idx = stripe % (raid_disks-1); + pd_idx = sector_div(stripe2, raid_disks-1); if (*dd_idx >= pd_idx) (*dd_idx)++; qd_idx = raid_disks - 1; break; case ALGORITHM_LEFT_SYMMETRIC_6: - pd_idx = data_disks - stripe % (raid_disks-1); + pd_idx = data_disks - sector_div(stripe2, raid_disks-1); *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); qd_idx = raid_disks - 1; break; case ALGORITHM_RIGHT_SYMMETRIC_6: - pd_idx = stripe % (raid_disks-1); + pd_idx = sector_div(stripe2, raid_disks-1); *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); qd_idx = raid_disks - 1; break; @@ -1869,14 +1865,14 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) : conf->algorithm; sector_t stripe; int chunk_offset; - int chunk_number, dummy1, dd_idx = i; + sector_t chunk_number; + int dummy1, dd_idx = i; sector_t r_sector; struct stripe_head sh2; chunk_offset = sector_div(new_sector, sectors_per_chunk); stripe = new_sector; - BUG_ON(new_sector != stripe); if (i == sh->pd_idx) return 0; @@ -1969,7 +1965,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) } chunk_number = stripe * data_disks + i; - r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset; + r_sector = chunk_number * sectors_per_chunk + chunk_offset; check = raid5_compute_sector(conf, r_sector, previous, &dummy1, &sh2); diff --git a/drivers/media/dvb/ttpci/budget.c b/drivers/media/dvb/ttpci/budget.c index e48380c..95a463c 100644 --- a/drivers/media/dvb/ttpci/budget.c +++ b/drivers/media/dvb/ttpci/budget.c @@ -643,9 +643,6 @@ static void frontend_init(struct budget *budget) &budget->i2c_adap, &tt1600_isl6423_config); - } else { - dvb_frontend_detach(budget->dvb_frontend); - budget->dvb_frontend = NULL; } } break; diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index 65df1de..a555c90 
100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c @@ -4772,8 +4772,12 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) rc = bnx2_alloc_bad_rbuf(bp); } - if (bp->flags & BNX2_FLAG_USING_MSIX) + if (bp->flags & BNX2_FLAG_USING_MSIX) { bnx2_setup_msix_tbl(bp); + /* Prevent MSIX table reads and write from timing out */ + REG_WR(bp, BNX2_MISC_ECO_HW_CTL, + BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN); + } return rc; } diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 3db85da..787befc 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -2832,8 +2832,13 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) spin_lock_irq(&tp->lock); RTL_W8(Cfg9346, Cfg9346_Unlock); - RTL_W32(MAC0, low); + RTL_W32(MAC4, high); + RTL_R32(MAC4); + + RTL_W32(MAC0, low); + RTL_R32(MAC0); + RTL_W8(Cfg9346, Cfg9346_Lock); spin_unlock_irq(&tp->lock); @@ -4316,7 +4321,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, tp->cur_tx += frags + 1; - smp_wmb(); + wmb(); RTL_W8(TxPoll, NPQ); /* set polling bit */ @@ -4675,7 +4680,7 @@ static int rtl8169_poll(struct napi_struct *napi, int budget) * until it does. */ tp->intr_mask = 0xffff; - smp_wmb(); + wmb(); RTL_W16(IntrMask, tp->intr_event); } @@ -4813,8 +4818,8 @@ static void rtl_set_rx_mode(struct net_device *dev) mc_filter[1] = swab32(data); } - RTL_W32(MAR0 + 0, mc_filter[0]); RTL_W32(MAR0 + 4, mc_filter[1]); + RTL_W32(MAR0 + 0, mc_filter[0]); RTL_W32(RxConfig, tmp); diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index 46997e1..fb52e47 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c @@ -1862,6 +1862,7 @@ out: } if (disabled) { + dev_close(efx->net_dev); EFX_ERR(efx, "has been disabled\n"); efx->state = STATE_DISABLED; } else { @@ -1885,8 +1886,7 @@ static void efx_reset_work(struct work_struct *data) } rtnl_lock(); - if (efx_reset(efx, efx->reset_pending)) - dev_close(efx->net_dev); + (void)efx_reset(efx, efx->reset_pending); rtnl_unlock(); } diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c index 9d009c4..e20a824 100644 --- a/drivers/net/sfc/falcon.c +++ b/drivers/net/sfc/falcon.c @@ -1317,7 +1317,9 @@ static int falcon_probe_nvconfig(struct efx_nic *efx) EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad); - falcon_probe_board(efx, board_rev); + rc = falcon_probe_board(efx, board_rev); + if (rc) + goto fail2; kfree(nvconfig); return 0; diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c index 5712fdd..c7a933a 100644 --- a/drivers/net/sfc/falcon_boards.c +++ b/drivers/net/sfc/falcon_boards.c @@ -728,15 +728,7 @@ static const struct falcon_board_type board_types[] = { }, }; -static const struct falcon_board_type falcon_dummy_board = { - .init = efx_port_dummy_op_int, - .init_phy = efx_port_dummy_op_void, - .fini = efx_port_dummy_op_void, - .set_id_led = efx_port_dummy_op_set_id_led, - .monitor = efx_port_dummy_op_int, -}; - -void falcon_probe_board(struct efx_nic *efx, u16 revision_info) +int falcon_probe_board(struct efx_nic *efx, u16 revision_info) { struct falcon_board *board = falcon_board(efx); u8 type_id = FALCON_BOARD_TYPE(revision_info); @@ -754,8 +746,9 @@ void falcon_probe_board(struct efx_nic *efx, u16 revision_info) (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC) ? 
board->type->ref_model : board->type->gen_type, 'A' + board->major, board->minor); + return 0; } else { EFX_ERR(efx, "unknown board type %d\n", type_id); - board->type = &falcon_dummy_board; + return -ENODEV; } } diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h index 9351c03..3166baf 100644 --- a/drivers/net/sfc/nic.h +++ b/drivers/net/sfc/nic.h @@ -156,7 +156,7 @@ extern struct efx_nic_type siena_a0_nic_type; ************************************************************************** */ -extern void falcon_probe_board(struct efx_nic *efx, u16 revision_info); +extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info); /* TX data path */ extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue); diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c index f8c6771..afbac2d 100644 --- a/drivers/net/sfc/siena.c +++ b/drivers/net/sfc/siena.c @@ -454,8 +454,17 @@ static int siena_try_update_nic_stats(struct efx_nic *efx) static void siena_update_nic_stats(struct efx_nic *efx) { - while (siena_try_update_nic_stats(efx) == -EAGAIN) - cpu_relax(); + int retry; + + /* If we're unlucky enough to read statistics during the DMA, wait + * up to 10ms for it to finish (typically takes <500us) */ + for (retry = 0; retry < 100; ++retry) { + if (siena_try_update_nic_stats(efx) == 0) + return; + udelay(100); + } + + /* Use the old values instead */ } static void siena_start_nic_stats(struct efx_nic *efx) diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 17d1493..8405fb8 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -8572,6 +8572,7 @@ static int tg3_test_msi(struct tg3 *tp) pci_disable_msi(tp->pdev); tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; + tp->napi[0].irq_vec = tp->pdev->irq; err = tg3_request_irq(tp, 0); if (err) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 2834a01..909b73d 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -380,6 +380,10 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) } } + /* Orphan the skb - required as we might hang on to it + * for indefinite time. */ + skb_orphan(skb); + /* Enqueue packet */ skb_queue_tail(&tun->socket.sk->sk_receive_queue, skb); dev->trans_start = jiffies; diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 5f3b9ea..8a6e027 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -433,6 +433,7 @@ static const struct driver_info mbm_info = { .bind = cdc_bind, .unbind = usbnet_cdc_unbind, .status = cdc_status, + .manage_power = cdc_manage_power, }; /*-------------------------------------------------------------------------*/ diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index 3d406f9..c60625b 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c @@ -238,7 +238,7 @@ static int dm_write_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 valu goto out; dm_write_reg(dev, DM_SHARED_ADDR, phy ? (reg | 0x40) : reg); - dm_write_reg(dev, DM_SHARED_CTRL, phy ? 
0x1a : 0x12); for (i = 0; i < DM_TIMEOUT; i++) { u8 tmp; diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index b9b9d6b..941f053 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c @@ -628,9 +628,15 @@ static void ppp_stop(struct net_device *dev) ppp_cp_event(dev, PID_LCP, STOP, 0, 0, 0, NULL); } +static void ppp_close(struct net_device *dev) +{ + ppp_tx_flush(); +} + static struct hdlc_proto proto = { .start = ppp_start, .stop = ppp_stop, + .close = ppp_close, .type_trans = ppp_type_trans, .ioctl = ppp_ioctl, .netif_rx = ppp_rx, diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 7b1eab4..e55f718 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -1358,9 +1358,9 @@ void ath_cleanup(struct ath_softc *sc) free_irq(sc->irq, sc); ath_bus_cleanup(common); kfree(sc->sec_wiphy); - ieee80211_free_hw(sc->hw); ath9k_uninit_hw(sc); + ieee80211_free_hw(sc->hw); } static int ath9k_reg_notifier(struct wiphy *wiphy, diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c index 4bf4c21..41d33cd 100644 --- a/drivers/net/wireless/p54/p54pci.c +++ b/drivers/net/wireless/p54/p54pci.c @@ -245,7 +245,7 @@ static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index, u32 idx, i; i = (*index) % ring_limit; - (*index) = idx = le32_to_cpu(ring_control->device_idx[1]); + (*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]); idx %= ring_limit; while (i != idx) { diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c index 8742640..b3c4fbd 100644 --- a/drivers/net/wireless/p54/p54usb.c +++ b/drivers/net/wireless/p54/p54usb.c @@ -36,6 +36,7 @@ static struct usb_device_id p54u_table[] __devinitdata = { /* Version 1 devices (pci chip + net2280) */ {USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */ {USB_DEVICE(0x0707, 0xee06)}, /* SMC 2862W-G */ + {USB_DEVICE(0x07aa, 0x001c)}, /* Corega CG-WLUSB2GT */ {USB_DEVICE(0x083a, 0x4501)}, /* Accton 802.11g WN4501 USB */ {USB_DEVICE(0x083a, 0x4502)}, /* Siemens Gigaset USB Adapter */ {USB_DEVICE(0x083a, 0x5501)}, /* Phillips CPWUA054 */ diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c index b6dda2b..9d147de 100644 --- a/drivers/net/wireless/p54/txrx.c +++ b/drivers/net/wireless/p54/txrx.c @@ -186,7 +186,7 @@ static int p54_tx_qos_accounting_alloc(struct p54_common *priv, struct ieee80211_tx_queue_stats *queue; unsigned long flags; - if (WARN_ON(p54_queue > P54_QUEUE_NUM)) + if (WARN_ON(p54_queue >= P54_QUEUE_NUM)) return -EINVAL; queue = &priv->tx_stats[p54_queue]; diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index c4fead1..b8eb5e7 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -624,7 +624,7 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state) */ int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state) { - return state > PCI_D0 ? + return state >= PCI_D0 ? 
pci_platform_power_transition(dev, state) : -EINVAL; } EXPORT_SYMBOL_GPL(__pci_complete_power_transition); @@ -661,10 +661,6 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state) */ return 0; - /* Check if we're already there */ - if (dev->current_state == state) - return 0; - __pci_start_power_transition(dev, state); /* This device is quirked not to be put into D3, so diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index e6b67f2..741672f 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -470,12 +470,12 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) WARN_ON(hdrlength >= 256); hdr->hlength = hdrlength & 0xFF; + hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn); if (session->tt->init_task && session->tt->init_task(task)) return -EIO; task->state = ISCSI_TASK_RUNNING; - hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn); session->cmdsn++; conn->scsicmd_pdus_cnt++; diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index e155011..816ab97 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -394,11 +394,15 @@ int sas_ata_init_host_and_port(struct domain_device *found_dev, void sas_ata_task_abort(struct sas_task *task) { struct ata_queued_cmd *qc = task->uldd_task; + struct request_queue *q = qc->scsicmd->device->request_queue; struct completion *waiting; + unsigned long flags; /* Bounce SCSI-initiated commands to the SCSI EH */ if (qc->scsicmd) { + spin_lock_irqsave(q->queue_lock, flags); blk_abort_request(qc->scsicmd->request); + spin_unlock_irqrestore(q->queue_lock, flags); scsi_schedule_eh(qc->scsicmd->device->host); return; } diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index 14b1319..b672d10 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -1029,6 +1029,8 @@ int __sas_task_abort(struct sas_task *task) void sas_task_abort(struct sas_task *task) { struct scsi_cmnd *sc = task->uldd_task; + struct request_queue *q = sc->device->request_queue; + unsigned long flags; /* Escape for libsas internal commands */ if (!sc) { @@ -1043,7 +1045,9 @@ void sas_task_abort(struct sas_task *task) return; } + spin_lock_irqsave(q->queue_lock, flags); blk_abort_request(sc->request); + spin_unlock_irqrestore(q->queue_lock, flags); scsi_schedule_eh(sc->device->host); } diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 0b575c8..aa2a2dc 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -956,7 +956,8 @@ static int resp_start_stop(struct scsi_cmnd * scp, static sector_t get_sdebug_capacity(void) { if (scsi_debug_virtual_gb > 0) - return 2048 * 1024 * (sector_t)scsi_debug_virtual_gb; + return (sector_t)scsi_debug_virtual_gb * + (1073741824 / scsi_debug_sector_size); else return sdebug_store_sectors; } diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 08ed506..e46155b 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -301,7 +301,20 @@ static int scsi_check_sense(struct scsi_cmnd *scmd) if (scmd->device->allow_restart && (sshdr.asc == 0x04) && (sshdr.ascq == 0x02)) return FAILED; - return SUCCESS; + + if (blk_barrier_rq(scmd->request)) + /* + * barrier requests should always retry on UA + * otherwise block will get a spurious error + */ + return NEEDS_RETRY; + else + /* + * for normal (non barrier) commands, pass the + * UA upwards for a determination in the + * completion functions + */ + return SUCCESS; /* these three 
are not supported */ case COPY_ABORTED: diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index c664242..5697709 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -773,8 +773,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) * we already took a copy of the original into rq->errors which * is what gets returned to the user */ - if (sense_valid && sshdr.sense_key == RECOVERED_ERROR) { - if (!(req->cmd_flags & REQ_QUIET)) + if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) { + /* if ATA PASS-THROUGH INFORMATION AVAILABLE skip + * print since caller wants ATA registers. Only occurs on + * SCSI ATA PASS_THROUGH commands when CK_COND=1 + */ + if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d)) + ; + else if (!(req->cmd_flags & REQ_QUIET)) scsi_print_sense("", cmd); result = 0; /* BLOCK_PC may have set error */ diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 255da53..bf15920 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1039,6 +1039,7 @@ static void sd_prepare_flush(struct request_queue *q, struct request *rq) { rq->cmd_type = REQ_TYPE_BLOCK_PC; rq->timeout = SD_TIMEOUT; + rq->retries = SD_MAX_RETRIES; rq->cmd[0] = SYNCHRONIZE_CACHE; rq->cmd_len = 10; } diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c index 24485cc..4822cb5 100644 --- a/drivers/serial/8250_pnp.c +++ b/drivers/serial/8250_pnp.c @@ -348,6 +348,8 @@ static const struct pnp_device_id pnp_dev_table[] = { { "FUJ02E6", 0 }, /* Fujitsu Wacom 2FGT Tablet PC device */ { "FUJ02E7", 0 }, + /* Fujitsu Wacom 1FGT Tablet PC device */ + { "FUJ02E9", 0 }, /* * LG C1 EXPRESS DUAL (C1-PB11A3) touch screen (actually a FUJ02E6 in * disguise) diff --git a/drivers/staging/hv/Hv.c b/drivers/staging/hv/Hv.c index c2809f2..b12237f 100644 --- a/drivers/staging/hv/Hv.c +++ b/drivers/staging/hv/Hv.c @@ -306,9 +306,9 @@ void HvCleanup(void) DPRINT_ENTER(VMBUS); if (gHvContext.SignalEventBuffer) { + kfree(gHvContext.SignalEventBuffer); gHvContext.SignalEventBuffer = NULL; gHvContext.SignalEventParam = NULL; - kfree(gHvContext.SignalEventBuffer); } if (gHvContext.GuestId == HV_LINUX_GUEST_ID) { diff --git a/drivers/staging/hv/RndisFilter.c b/drivers/staging/hv/RndisFilter.c index 26d7997..f05f4e1 100644 --- a/drivers/staging/hv/RndisFilter.c +++ b/drivers/staging/hv/RndisFilter.c @@ -756,6 +756,7 @@ static int RndisFilterOpenDevice(struct rndis_device *Device) ret = RndisFilterSetPacketFilter(Device, NDIS_PACKET_TYPE_BROADCAST | + NDIS_PACKET_TYPE_ALL_MULTICAST | NDIS_PACKET_TYPE_DIRECTED); if (ret == 0) Device->State = RNDIS_DEV_DATAINITIALIZED; diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c index 0d7459e..4c3c8bc 100644 --- a/drivers/staging/hv/netvsc_drv.c +++ b/drivers/staging/hv/netvsc_drv.c @@ -413,8 +413,7 @@ static int netvsc_probe(struct device *device) if (!net_drv_obj->Base.OnDeviceAdd) return -1; - net = alloc_netdev(sizeof(struct net_device_context), "seth%d", - ether_setup); + net = alloc_etherdev(sizeof(struct net_device_context)); if (!net) return -1; diff --git a/drivers/staging/usbip/usbip_event.c b/drivers/staging/usbip/usbip_event.c index 6da1021..a2566f1 100644 --- a/drivers/staging/usbip/usbip_event.c +++ b/drivers/staging/usbip/usbip_event.c @@ -117,6 +117,9 @@ void usbip_stop_eh(struct usbip_device *ud) { struct usbip_task *eh = &ud->eh; + if (eh->thread == current) + return; /* do not wait for myself */ + wait_for_completion(&eh->thread_done); usbip_dbg_eh("usbip_eh has finished\n"); } diff --git 
a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index ca479a7..d9d0bf5 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c @@ -1255,9 +1255,8 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg) udev->state == USB_STATE_SUSPENDED) goto done; - udev->do_remote_wakeup = device_may_wakeup(&udev->dev); - if (msg.event & PM_EVENT_AUTO) { + udev->do_remote_wakeup = device_may_wakeup(&udev->dev); status = autosuspend_check(udev, 0); if (status < 0) goto done; @@ -1789,6 +1788,34 @@ int usb_external_resume_device(struct usb_device *udev, pm_message_t msg) return status; } +static void choose_wakeup(struct usb_device *udev, pm_message_t msg) +{ + int w, i; + struct usb_interface *intf; + + /* Remote wakeup is needed only when we actually go to sleep. + * For things like FREEZE and QUIESCE, if the device is already + * autosuspended then its current wakeup setting is okay. + */ + if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_QUIESCE) { + udev->do_remote_wakeup = 0; + return; + } + + /* If remote wakeup is permitted, see whether any interface drivers + * actually want it. + */ + w = 0; + if (device_may_wakeup(&udev->dev) && udev->actconfig) { + for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) { + intf = udev->actconfig->interface[i]; + w |= intf->needs_remote_wakeup; + } + } + + udev->do_remote_wakeup = w; +} + int usb_suspend(struct device *dev, pm_message_t msg) { struct usb_device *udev; @@ -1808,6 +1835,7 @@ int usb_suspend(struct device *dev, pm_message_t msg) } udev->skip_sys_resume = 0; + choose_wakeup(udev, msg); return usb_external_suspend_device(udev, msg); } diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c index bdf87a8..2c95153 100644 --- a/drivers/usb/core/generic.c +++ b/drivers/usb/core/generic.c @@ -120,7 +120,7 @@ int usb_choose_configuration(struct usb_device *udev) * than a vendor-specific driver. 
*/ else if (udev->descriptor.bDeviceClass != USB_CLASS_VENDOR_SPEC && - (!desc || desc->bInterfaceClass != + (desc && desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC)) { best = c; break; diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c index 97b40ce..4a6366a 100644 --- a/drivers/usb/core/inode.c +++ b/drivers/usb/core/inode.c @@ -515,13 +515,13 @@ static int fs_create_by_name (const char *name, mode_t mode, *dentry = NULL; mutex_lock(&parent->d_inode->i_mutex); *dentry = lookup_one_len(name, parent, strlen(name)); - if (!IS_ERR(dentry)) { + if (!IS_ERR(*dentry)) { if ((mode & S_IFMT) == S_IFDIR) error = usbfs_mkdir (parent->d_inode, *dentry, mode); else error = usbfs_create (parent->d_inode, *dentry, mode); } else - error = PTR_ERR(dentry); + error = PTR_ERR(*dentry); mutex_unlock(&parent->d_inode->i_mutex); return error; diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index 9bc95fe..1a48aac 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -1471,7 +1471,7 @@ int usb_reset_configuration(struct usb_device *dev) /* If not, reinstate the old alternate settings */ if (retval < 0) { reset_old_alts: - for (; i >= 0; i--) { + for (i--; i >= 0; i--) { struct usb_interface *intf = config->interface[i]; struct usb_host_interface *alt; diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 9c90b67..efa0372 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -543,6 +543,7 @@ static int ehci_init(struct usb_hcd *hcd) */ ehci->periodic_size = DEFAULT_I_TDPS; INIT_LIST_HEAD(&ehci->cached_itd_list); + INIT_LIST_HEAD(&ehci->cached_sitd_list); if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0) return retval; diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c index aeda96e..1f3f01e 100644 --- a/drivers/usb/host/ehci-mem.c +++ b/drivers/usb/host/ehci-mem.c @@ -136,7 +136,7 @@ static inline void qh_put (struct ehci_qh *qh) static void ehci_mem_cleanup (struct ehci_hcd *ehci) { - free_cached_itd_list(ehci); + free_cached_lists(ehci); if (ehci->async) qh_put (ehci->async); ehci->async = NULL; diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index df533ce..2064045 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -2137,13 +2137,27 @@ sitd_complete ( (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out"); } iso_stream_put (ehci, stream); - /* OK to recycle this SITD now that its completion callback ran. */ + done: sitd->urb = NULL; - sitd->stream = NULL; - list_move(&sitd->sitd_list, &stream->free_list); - iso_stream_put(ehci, stream); - + if (ehci->clock_frame != sitd->frame) { + /* OK to recycle this SITD now. */ + sitd->stream = NULL; + list_move(&sitd->sitd_list, &stream->free_list); + iso_stream_put(ehci, stream); + } else { + /* HW might remember this SITD, so we can't recycle it yet. + * Move it to a safe place until a new frame starts. + */ + list_move(&sitd->sitd_list, &ehci->cached_sitd_list); + if (stream->refcount == 2) { + /* If iso_stream_put() were called here, stream + * would be freed. Instead, just prevent reuse. 
+ */ + stream->ep->hcpriv = NULL; + stream->ep = NULL; + } + } return retval; } @@ -2209,9 +2223,10 @@ done: /*-------------------------------------------------------------------------*/ -static void free_cached_itd_list(struct ehci_hcd *ehci) +static void free_cached_lists(struct ehci_hcd *ehci) { struct ehci_itd *itd, *n; + struct ehci_sitd *sitd, *sn; list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) { struct ehci_iso_stream *stream = itd->stream; @@ -2219,6 +2234,13 @@ static void free_cached_itd_list(struct ehci_hcd *ehci) list_move(&itd->itd_list, &stream->free_list); iso_stream_put(ehci, stream); } + + list_for_each_entry_safe(sitd, sn, &ehci->cached_sitd_list, sitd_list) { + struct ehci_iso_stream *stream = sitd->stream; + sitd->stream = NULL; + list_move(&sitd->sitd_list, &stream->free_list); + iso_stream_put(ehci, stream); + } } /*-------------------------------------------------------------------------*/ @@ -2245,7 +2267,7 @@ scan_periodic (struct ehci_hcd *ehci) clock_frame = -1; } if (ehci->clock_frame != clock_frame) { - free_cached_itd_list(ehci); + free_cached_lists(ehci); ehci->clock_frame = clock_frame; } clock %= mod; @@ -2408,7 +2430,7 @@ restart: clock = now; clock_frame = clock >> 3; if (ehci->clock_frame != clock_frame) { - free_cached_itd_list(ehci); + free_cached_lists(ehci); ehci->clock_frame = clock_frame; } } else { diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h index b1dce96..556c0b4 100644 --- a/drivers/usb/host/ehci.h +++ b/drivers/usb/host/ehci.h @@ -87,8 +87,9 @@ struct ehci_hcd { /* one per controller */ int next_uframe; /* scan periodic, start here */ unsigned periodic_sched; /* periodic activity count */ - /* list of itds completed while clock_frame was still active */ + /* list of itds & sitds completed while clock_frame was still active */ struct list_head cached_itd_list; + struct list_head cached_sitd_list; unsigned clock_frame; /* per root hub port */ @@ -195,7 +196,7 @@ timer_action_done (struct ehci_hcd *ehci, enum ehci_timer_action action) clear_bit (action, &ehci->actions); } -static void free_cached_itd_list(struct ehci_hcd *ehci); +static void free_cached_lists(struct ehci_hcd *ehci); /*-------------------------------------------------------------------------*/ diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c index 32bbce9..65cac8c 100644 --- a/drivers/usb/host/ohci-hub.c +++ b/drivers/usb/host/ohci-hub.c @@ -697,7 +697,7 @@ static int ohci_hub_control ( u16 wLength ) { struct ohci_hcd *ohci = hcd_to_ohci (hcd); - int ports = hcd_to_bus (hcd)->root_hub->maxchild; + int ports = ohci->num_ports; u32 temp; int retval = 0; diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index bffcef7..6c1f673 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -549,6 +549,19 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev, return EP_INTERVAL(interval); } +/* The "Mult" field in the endpoint context is only set for SuperSpeed devices. + * High speed endpoint descriptors can define "the number of additional + * transaction opportunities per microframe", but that goes in the Max Burst + * endpoint context field. 
+ */ +static inline u32 xhci_get_endpoint_mult(struct usb_device *udev, + struct usb_host_endpoint *ep) +{ + if (udev->speed != USB_SPEED_SUPER || !ep->ss_ep_comp) + return 0; + return ep->ss_ep_comp->desc.bmAttributes; +} + static inline u32 xhci_get_endpoint_type(struct usb_device *udev, struct usb_host_endpoint *ep) { @@ -579,6 +592,36 @@ static inline u32 xhci_get_endpoint_type(struct usb_device *udev, return type; } +/* Return the maximum endpoint service interval time (ESIT) payload. + * Basically, this is the maxpacket size, multiplied by the burst size + * and mult size. + */ +static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci, + struct usb_device *udev, + struct usb_host_endpoint *ep) +{ + int max_burst; + int max_packet; + + /* Only applies for interrupt or isochronous endpoints */ + if (usb_endpoint_xfer_control(&ep->desc) || + usb_endpoint_xfer_bulk(&ep->desc)) + return 0; + + if (udev->speed == USB_SPEED_SUPER) { + if (ep->ss_ep_comp) + return ep->ss_ep_comp->desc.wBytesPerInterval; + xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n"); + /* Assume no bursts, no multiple opportunities to send. */ + return ep->desc.wMaxPacketSize; + } + + max_packet = ep->desc.wMaxPacketSize & 0x3ff; + max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11; + /* A 0 in max burst means 1 transfer per ESIT */ + return max_packet * (max_burst + 1); +} + int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_device *udev, @@ -590,6 +633,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_ring *ep_ring; unsigned int max_packet; unsigned int max_burst; + u32 max_esit_payload; ep_index = xhci_get_endpoint_index(&ep->desc); ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); @@ -611,6 +655,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state; ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep); + ep_ctx->ep_info |= EP_MULT(xhci_get_endpoint_mult(udev, ep)); /* FIXME dig Mult and streams info out of ep companion desc */ @@ -656,6 +701,26 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, default: BUG(); } + max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep); + ep_ctx->tx_info = MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload); + + /* + * XXX no idea how to calculate the average TRB buffer length for bulk + * endpoints, as the driver gives us no clue how big each scatter gather + * list entry (or buffer) is going to be. + * + * For isochronous and interrupt endpoints, we set it to the max + * available, until we have new API in the USB core to allow drivers to + * declare how much bandwidth they actually need. + * + * Normally, it would be calculated by taking the total of the buffer + * lengths in the TD and then dividing by the number of TRBs in a TD, + * including link TRBs, No-op TRBs, and Event data TRBs. Since we don't + * use Event Data TRBs, and we don't chain in a link TRB on short + * transfers, we're basically dividing by 1. 
+ */ + ep_ctx->tx_info |= AVG_TRB_LENGTH_FOR_EP(max_esit_payload); + /* FIXME Debug endpoint context */ return 0; } diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 8778135..9e904a6 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -609,6 +609,10 @@ struct xhci_ep_ctx { #define MAX_PACKET_MASK (0xffff << 16) #define MAX_PACKET_DECODED(p) (((p) >> 16) & 0xffff) +/* tx_info bitmasks */ +#define AVG_TRB_LENGTH_FOR_EP(p) ((p) & 0xffff) +#define MAX_ESIT_PAYLOAD_FOR_EP(p) (((p) & 0xffff) << 16) + /** * struct xhci_input_control_context diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index 0cfd621..a442989 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c @@ -229,6 +229,7 @@ static const struct sierra_iface_info direct_ip_interface_blacklist = { static struct usb_device_id id_table [] = { { USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */ { USB_DEVICE(0x03F0, 0x1B1D) }, /* HP ev2200 a.k.a MC5720 */ + { USB_DEVICE(0x03F0, 0x211D) }, /* HP ev2210 a.k.a MC5725 */ { USB_DEVICE(0x03F0, 0x1E1D) }, /* HP hs2300 a.k.a MC8775 */ { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */ diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c index 1ed3d55..17726a0 100644 --- a/drivers/w1/slaves/w1_therm.c +++ b/drivers/w1/slaves/w1_therm.c @@ -115,9 +115,8 @@ static struct w1_therm_family_converter w1_therm_families[] = { static inline int w1_DS18B20_convert_temp(u8 rom[9]) { - int t = ((s16)rom[1] << 8) | rom[0]; - t = t*1000/16; - return t; + s16 t = le16_to_cpup((__le16 *)rom); + return t*1000/16; } static inline int w1_DS18S20_convert_temp(u8 rom[9]) diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index c568779..cae75c1 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -3767,7 +3767,6 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, __u64 start, __u64 len) { ext4_lblk_t start_blk; - ext4_lblk_t len_blks; int error = 0; /* fallback to generic here if not in extents fmt */ @@ -3781,8 +3780,14 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { error = ext4_xattr_fiemap(inode, fieinfo); } else { + ext4_lblk_t len_blks; + __u64 last_blk; + start_blk = start >> inode->i_sb->s_blocksize_bits; - len_blks = len >> inode->i_sb->s_blocksize_bits; + last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits; + if (last_blk >= EXT_MAX_BLOCK) + last_blk = EXT_MAX_BLOCK-1; + len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1; /* * Walk the extent tree gathering extent information. diff --git a/fs/jfs/resize.c b/fs/jfs/resize.c index 7f24a0b..1aba003 100644 --- a/fs/jfs/resize.c +++ b/fs/jfs/resize.c @@ -81,6 +81,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize) struct inode *iplist[1]; struct jfs_superblock *j_sb, *j_sb2; uint old_agsize; + int agsizechanged = 0; struct buffer_head *bh, *bh2; /* If the volume hasn't grown, get out now */ @@ -333,6 +334,9 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize) */ if ((rc = dbExtendFS(ipbmap, XAddress, nblocks))) goto error_out; + + agsizechanged |= (bmp->db_agsize != old_agsize); + /* * the map now has extended to cover additional nblocks: * dn_mapsize = oldMapsize + nblocks; @@ -432,7 +436,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize) * will correctly identify the new ag); */ /* if new AG size the same as old AG size, done! 
*/ - if (bmp->db_agsize != old_agsize) { + if (agsizechanged) { if ((rc = diExtendFS(ipimap, ipbmap))) goto error_out; diff --git a/fs/nfs/client.c b/fs/nfs/client.c index bd39abc..37d555c 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -965,6 +965,8 @@ out_error: static void nfs_server_copy_userdata(struct nfs_server *target, struct nfs_server *source) { target->flags = source->flags; + target->rsize = source->rsize; + target->wsize = source->wsize; target->acregmin = source->acregmin; target->acregmax = source->acregmax; target->acdirmin = source->acdirmin; diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index af6948d..b5d55d3 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -837,6 +837,8 @@ out_zap_parent: /* If we have submounts, don't unhash ! */ if (have_submounts(dentry)) goto out_valid; + if (dentry->d_flags & DCACHE_DISCONNECTED) + goto out_valid; shrink_dcache_parent(dentry); } d_drop(dentry); diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index bbf72d8..718f3fb 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -160,10 +160,10 @@ static __be32 *read_buf(struct nfsd4_compoundargs *argp, u32 nbytes) argp->p = page_address(argp->pagelist[0]); argp->pagelist++; if (argp->pagelen < PAGE_SIZE) { - argp->end = p + (argp->pagelen>>2); + argp->end = argp->p + (argp->pagelen>>2); argp->pagelen = 0; } else { - argp->end = p + (PAGE_SIZE>>2); + argp->end = argp->p + (PAGE_SIZE>>2); argp->pagelen -= PAGE_SIZE; } memcpy(((char*)p)+avail, argp->p, (nbytes - avail)); @@ -1425,10 +1425,10 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp) argp->p = page_address(argp->pagelist[0]); argp->pagelist++; if (argp->pagelen < PAGE_SIZE) { - argp->end = p + (argp->pagelen>>2); + argp->end = argp->p + (argp->pagelen>>2); argp->pagelen = 0; } else { - argp->end = p + (PAGE_SIZE>>2); + argp->end = argp->p + (PAGE_SIZE>>2); argp->pagelen -= PAGE_SIZE; } } diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c index 21c808f..b18c6d6 100644 --- a/fs/ocfs2/buffer_head_io.c +++ b/fs/ocfs2/buffer_head_io.c @@ -407,6 +407,7 @@ int ocfs2_write_super_or_backup(struct ocfs2_super *osb, struct buffer_head *bh) { int ret = 0; + struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data; mlog_entry_void(); @@ -426,6 +427,7 @@ int ocfs2_write_super_or_backup(struct ocfs2_super *osb, get_bh(bh); /* for end_buffer_write_sync() */ bh->b_end_io = end_buffer_write_sync; + ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &di->i_check); submit_bh(WRITE, bh); wait_on_buffer(bh); diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlm/dlmfs.c index 02bf178..18bc101 100644 --- a/fs/ocfs2/dlm/dlmfs.c +++ b/fs/ocfs2/dlm/dlmfs.c @@ -205,7 +205,7 @@ static ssize_t dlmfs_file_read(struct file *filp, if ((count + *ppos) > i_size_read(inode)) readlen = i_size_read(inode) - *ppos; else - readlen = count - *ppos; + readlen = count; lvb_buf = kmalloc(readlen, GFP_NOFS); if (!lvb_buf) diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index 88459bd..ec4d97f 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c @@ -559,6 +559,7 @@ static int ocfs2_truncate_for_delete(struct ocfs2_super *osb, handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); if (IS_ERR(handle)) { status = PTR_ERR(handle); + handle = NULL; mlog_errno(status); goto out; } diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index 8ae65c9..a8e8572 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c @@ -4083,6 +4083,9 @@ static int ocfs2_complete_reflink(struct inode *s_inode, di->i_attr = s_di->i_attr; if (preserve) { + 
t_inode->i_uid = s_inode->i_uid; + t_inode->i_gid = s_inode->i_gid; + t_inode->i_mode = s_inode->i_mode; di->i_uid = s_di->i_uid; di->i_gid = s_di->i_gid; di->i_mode = s_di->i_mode; diff --git a/fs/proc/base.c b/fs/proc/base.c index 3cd449d..8dce96c 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -2910,7 +2910,7 @@ out_no_task: */ static const struct pid_entry tid_base_stuff[] = { DIR("fd", S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations), - DIR("fdinfo", S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fd_operations), + DIR("fdinfo", S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations), REG("environ", S_IRUSR, proc_environ_operations), INF("auxv", S_IRUSR, proc_pid_auxv), ONE("status", S_IRUGO, proc_pid_status), diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c index c094f58..1e686ee 100644 --- a/fs/reiserfs/dir.c +++ b/fs/reiserfs/dir.c @@ -45,8 +45,6 @@ static inline bool is_privroot_deh(struct dentry *dir, struct reiserfs_de_head *deh) { struct dentry *privroot = REISERFS_SB(dir->d_sb)->priv_root; - if (reiserfs_expose_privroot(dir->d_sb)) - return 0; return (dir == dir->d_parent && privroot->d_inode && deh->deh_objectid == INODE_PKEY(privroot->d_inode)->k_objectid); } diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index 81f09fa..0392e82 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c @@ -557,7 +557,7 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th, if (!err && new_size < i_size_read(dentry->d_inode)) { struct iattr newattrs = { .ia_ctime = current_fs_time(inode->i_sb), - .ia_size = buffer_size, + .ia_size = new_size, .ia_valid = ATTR_SIZE | ATTR_CTIME, }; @@ -976,21 +976,13 @@ int reiserfs_permission(struct inode *inode, int mask) return generic_permission(inode, mask, NULL); } -/* This will catch lookups from the fs root to .reiserfs_priv */ -static int -xattr_lookup_poison(struct dentry *dentry, struct qstr *q1, struct qstr *name) +static int xattr_hide_revalidate(struct dentry *dentry, struct nameidata *nd) { - struct dentry *priv_root = REISERFS_SB(dentry->d_sb)->priv_root; - if (container_of(q1, struct dentry, d_name) == priv_root) - return -ENOENT; - if (q1->len == name->len && - !memcmp(q1->name, name->name, name->len)) - return 0; - return 1; + return -EPERM; } static const struct dentry_operations xattr_lookup_poison_ops = { - .d_compare = xattr_lookup_poison, + .d_revalidate = xattr_hide_revalidate, }; int reiserfs_lookup_privroot(struct super_block *s) @@ -1004,8 +996,7 @@ int reiserfs_lookup_privroot(struct super_block *s) strlen(PRIVROOT_NAME)); if (!IS_ERR(dentry)) { REISERFS_SB(s)->priv_root = dentry; - if (!reiserfs_expose_privroot(s)) - s->s_root->d_op = &xattr_lookup_poison_ops; + dentry->d_op = &xattr_lookup_poison_ops; if (dentry->d_inode) dentry->d_inode->i_flags |= S_PRIVATE; } else diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 77414db..146d491 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -1160,6 +1160,7 @@ xfs_fs_put_super( xfs_unmountfs(mp); xfs_freesb(mp); + xfs_inode_shrinker_unregister(mp); xfs_icsb_destroy_counters(mp); xfs_close_devices(mp); xfs_dmops_put(mp); @@ -1523,6 +1524,8 @@ xfs_fs_fill_super( if (error) goto fail_vnrele; + xfs_inode_shrinker_register(mp); + kfree(mtpt); return 0; @@ -1767,6 +1770,7 @@ init_xfs_fs(void) goto out_cleanup_procfs; vfs_initquota(); + xfs_inode_shrinker_init(); error = register_filesystem(&xfs_fs_type); if (error) @@ -1794,6 +1798,7 @@ exit_xfs_fs(void) { vfs_exitquota(); 
unregister_filesystem(&xfs_fs_type); + xfs_inode_shrinker_destroy(); xfs_sysctl_unregister(); xfs_cleanup_procfs(); xfs_buf_terminate(); diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index 6b6b394..57adf2d 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -95,7 +95,8 @@ xfs_inode_ag_walk( struct xfs_perag *pag, int flags), int flags, int tag, - int exclusive) + int exclusive, + int *nr_to_scan) { struct xfs_perag *pag = &mp->m_perag[ag]; uint32_t first_index; @@ -135,7 +136,7 @@ restart: if (error == EFSCORRUPTED) break; - } while (1); + } while ((*nr_to_scan)--); if (skipped) { delay(1); @@ -153,23 +154,30 @@ xfs_inode_ag_iterator( struct xfs_perag *pag, int flags), int flags, int tag, - int exclusive) + int exclusive, + int *nr_to_scan) { int error = 0; int last_error = 0; xfs_agnumber_t ag; + int nr; + nr = nr_to_scan ? *nr_to_scan : INT_MAX; for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) { if (!mp->m_perag[ag].pag_ici_init) continue; error = xfs_inode_ag_walk(mp, ag, execute, flags, tag, - exclusive); + exclusive, &nr); if (error) { last_error = error; if (error == EFSCORRUPTED) break; } + if (nr <= 0) + break; } + if (nr_to_scan) + *nr_to_scan = nr; return XFS_ERROR(last_error); } @@ -289,7 +297,7 @@ xfs_sync_data( ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0); error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags, - XFS_ICI_NO_TAG, 0); + XFS_ICI_NO_TAG, 0, NULL); if (error) return XFS_ERROR(error); @@ -311,7 +319,7 @@ xfs_sync_attr( ASSERT((flags & ~SYNC_WAIT) == 0); return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags, - XFS_ICI_NO_TAG, 0); + XFS_ICI_NO_TAG, 0, NULL); } STATIC int @@ -679,6 +687,7 @@ __xfs_inode_set_reclaim_tag( radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), XFS_ICI_RECLAIM_TAG); + pag->pag_ici_reclaimable++; } /* @@ -710,6 +719,7 @@ __xfs_inode_clear_reclaim_tag( { radix_tree_tag_clear(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG); + pag->pag_ici_reclaimable--; } STATIC int @@ -770,5 +780,88 @@ xfs_reclaim_inodes( int mode) { return xfs_inode_ag_iterator(mp, xfs_reclaim_inode, mode, - XFS_ICI_RECLAIM_TAG, 1); + XFS_ICI_RECLAIM_TAG, 1, NULL); +} + +/* + * Shrinker infrastructure. + * + * This is all far more complex than it needs to be. It adds a global list of + * mounts because the shrinkers can only call a global context. We need to make + * the shrinkers pass a context to avoid the need for global state. 
+ */ +static LIST_HEAD(xfs_mount_list); +static struct rw_semaphore xfs_mount_list_lock; + +static int +xfs_reclaim_inode_shrink( + int nr_to_scan, + gfp_t gfp_mask) +{ + struct xfs_mount *mp; + xfs_agnumber_t ag; + int reclaimable = 0; + + if (nr_to_scan) { + if (!(gfp_mask & __GFP_FS)) + return -1; + + down_read(&xfs_mount_list_lock); + list_for_each_entry(mp, &xfs_mount_list, m_mplist) { + xfs_inode_ag_iterator(mp, xfs_reclaim_inode, 0, + XFS_ICI_RECLAIM_TAG, 1, &nr_to_scan); + if (nr_to_scan <= 0) + break; + } + up_read(&xfs_mount_list_lock); + } + + down_read(&xfs_mount_list_lock); + list_for_each_entry(mp, &xfs_mount_list, m_mplist) { + for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) { + + if (!mp->m_perag[ag].pag_ici_init) + continue; + reclaimable += mp->m_perag[ag].pag_ici_reclaimable; + } + } + up_read(&xfs_mount_list_lock); + return reclaimable; +} + +static struct shrinker xfs_inode_shrinker = { + .shrink = xfs_reclaim_inode_shrink, + .seeks = DEFAULT_SEEKS, +}; + +void __init +xfs_inode_shrinker_init(void) +{ + init_rwsem(&xfs_mount_list_lock); + register_shrinker(&xfs_inode_shrinker); +} + +void +xfs_inode_shrinker_destroy(void) +{ + ASSERT(list_empty(&xfs_mount_list)); + unregister_shrinker(&xfs_inode_shrinker); +} + +void +xfs_inode_shrinker_register( + struct xfs_mount *mp) +{ + down_write(&xfs_mount_list_lock); + list_add_tail(&mp->m_mplist, &xfs_mount_list); + up_write(&xfs_mount_list_lock); +} + +void +xfs_inode_shrinker_unregister( + struct xfs_mount *mp) +{ + down_write(&xfs_mount_list_lock); + list_del(&mp->m_mplist); + up_write(&xfs_mount_list_lock); } diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h index ea932b4..0b28c13 100644 --- a/fs/xfs/linux-2.6/xfs_sync.h +++ b/fs/xfs/linux-2.6/xfs_sync.h @@ -54,6 +54,11 @@ void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp, struct xfs_perag *pag, int xfs_sync_inode_valid(struct xfs_inode *ip, struct xfs_perag *pag); int xfs_inode_ag_iterator(struct xfs_mount *mp, int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags), - int flags, int tag, int write_lock); + int flags, int tag, int write_lock, int *nr_to_scan); + +void xfs_inode_shrinker_init(void); +void xfs_inode_shrinker_destroy(void); +void xfs_inode_shrinker_register(struct xfs_mount *mp); +void xfs_inode_shrinker_unregister(struct xfs_mount *mp); #endif diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c index 873e07e..145f596 100644 --- a/fs/xfs/quota/xfs_qm_syscalls.c +++ b/fs/xfs/quota/xfs_qm_syscalls.c @@ -891,7 +891,8 @@ xfs_qm_dqrele_all_inodes( uint flags) { ASSERT(mp->m_quotainfo); - xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, XFS_ICI_NO_TAG, 0); + xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, + XFS_ICI_NO_TAG, 0, NULL); } /*------------------------------------------------------------------------*/ diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h index 6702bd8..1182604 100644 --- a/fs/xfs/xfs_ag.h +++ b/fs/xfs/xfs_ag.h @@ -229,6 +229,7 @@ typedef struct xfs_perag int pag_ici_init; /* incore inode cache initialised */ rwlock_t pag_ici_lock; /* incore inode lock */ struct radix_tree_root pag_ici_root; /* incore inode cache root */ + int pag_ici_reclaimable; /* reclaimable inodes */ #endif } xfs_perag_t; diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index 1df7e45..c95f81a 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -257,6 +257,7 @@ typedef struct xfs_mount { wait_queue_head_t m_wait_single_sync_task; __int64_t m_update_flags; /* sb flags we need to update on the next 
remount,rw */ + struct list_head m_mplist; /* inode shrinker mount list */ } xfs_mount_t; /* diff --git a/include/linux/ata.h b/include/linux/ata.h index 20f3156..f8bd0f9 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -1024,8 +1024,8 @@ static inline int ata_ok(u8 status) static inline int lba_28_ok(u64 block, u32 n_block) { - /* check the ending block number */ - return ((block + n_block) < ((u64)1 << 28)) && (n_block <= 256); + /* check the ending block number: must be LESS THAN 0x0fffffff */ + return ((block + n_block) < ((1 << 28) - 1)) && (n_block <= 256); } static inline int lba_48_ok(u64 block, u32 n_block) diff --git a/include/linux/poison.h b/include/linux/poison.h index 2110a81..34066ff 100644 --- a/include/linux/poison.h +++ b/include/linux/poison.h @@ -48,6 +48,15 @@ #define POISON_FREE 0x6b /* for use-after-free poisoning */ #define POISON_END 0xa5 /* end-byte of poisoning */ +/********** mm/hugetlb.c **********/ +/* + * Private mappings of hugetlb pages use this poisoned value for + * page->mapping. The core VM should not be doing anything with this mapping + * but futex requires the existence of some page->mapping value even though it + * is unused if PAGE_MAPPING_ANON is set. + */ +#define HUGETLB_POISON ((void *)(0x00300300 + POISON_POINTER_DELTA + PAGE_MAPPING_ANON)) + /********** arch/$ARCH/mm/init.c **********/ #define POISON_FREE_INITMEM 0xcc diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h index 8be5135..2c55a7e 100644 --- a/include/net/sctp/command.h +++ b/include/net/sctp/command.h @@ -107,6 +107,7 @@ typedef enum { SCTP_CMD_T1_RETRAN, /* Mark for retransmission after T1 timeout */ SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */ SCTP_CMD_SEND_MSG, /* Send the whole use message */ + SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */ SCTP_CMD_LAST } sctp_verb_t; diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 78740ec..fa6cde5 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -128,6 +128,7 @@ extern int sctp_register_pf(struct sctp_pf *, sa_family_t); int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb); int sctp_inet_listen(struct socket *sock, int backlog); void sctp_write_space(struct sock *sk); +void sctp_data_ready(struct sock *sk, int len); unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait); void sctp_sock_rfree(struct sk_buff *skb); diff --git a/init/initramfs.c b/init/initramfs.c index b37d34b..b27d045 100644 --- a/init/initramfs.c +++ b/init/initramfs.c @@ -457,7 +457,8 @@ static char * __init unpack_to_rootfs(char *buf, unsigned len) compress_name); message = msg_buf; } - } + } else + error("junk in compressed archive"); if (state != Reset) error("junk in compressed archive"); this_header = saved_offset + my_inptr; diff --git a/kernel/cred.c b/kernel/cred.c index 1ed8ca1..099f5e6 100644 --- a/kernel/cred.c +++ b/kernel/cred.c @@ -786,8 +786,6 @@ bool creds_are_invalid(const struct cred *cred) { if (cred->magic != CRED_MAGIC) return true; - if (atomic_read(&cred->usage) < atomic_read(&cred->subscribers)) - return true; #ifdef CONFIG_SECURITY_SELINUX if (selinux_is_enabled()) { if ((unsigned long) cred->security < PAGE_SIZE) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 32d0ae2..e928e1a 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -4811,7 +4811,7 @@ err_fput_free_put_context: err_free_put_context: if (err < 0) - kfree(event); + free_event(event); err_put_context: if (err < 0) diff --git 
a/lib/flex_array.c b/lib/flex_array.c index 66eef2e..41b1804 100644 --- a/lib/flex_array.c +++ b/lib/flex_array.c @@ -99,7 +99,7 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total, ret->element_size = element_size; ret->total_nr_elements = total; if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO)) - memset(ret->parts[0], FLEX_ARRAY_FREE, + memset(&ret->parts[0], FLEX_ARRAY_FREE, FLEX_ARRAY_BASE_BYTES_LEFT); return ret; } diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 2d16fa6..fd9ba95 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -546,6 +546,7 @@ static void free_huge_page(struct page *page) mapping = (struct address_space *) page_private(page); set_page_private(page, 0); + page->mapping = NULL; BUG_ON(page_count(page)); INIT_LIST_HEAD(&page->lru); @@ -2447,8 +2448,10 @@ retry: spin_lock(&inode->i_lock); inode->i_blocks += blocks_per_huge_page(h); spin_unlock(&inode->i_lock); - } else + } else { lock_page(page); + page->mapping = HUGETLB_POISON; + } } /* diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 954032b..dff3379 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2215,12 +2215,12 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr) } unlock_page_cgroup(pc); + *ptr = mem; if (mem) { - ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false, + ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false, page); css_put(&mem->css); } - *ptr = mem; return ret; } diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c index bad1c49..72340dd 100644 --- a/net/ieee802154/af_ieee802154.c +++ b/net/ieee802154/af_ieee802154.c @@ -147,6 +147,9 @@ static int ieee802154_dev_ioctl(struct sock *sk, struct ifreq __user *arg, dev_load(sock_net(sk), ifr.ifr_name); dev = dev_get_by_name(sock_net(sk), ifr.ifr_name); + if (!dev) + return -ENODEV; + if (dev->type == ARPHRD_IEEE802154 && dev->netdev_ops->ndo_do_ioctl) ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, cmd); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 564a0f8..03c55ac 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1368,6 +1368,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, sk_eat_skb(sk, skb, 0); if (!desc->count) break; + tp->copied_seq = seq; } tp->copied_seq = seq; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 112c611..16190ca 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -471,8 +471,8 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, if (hslot->count < hslot2->count) goto begin; - result = udp4_lib_lookup2(net, INADDR_ANY, sport, - daddr, hnum, dif, + result = udp4_lib_lookup2(net, saddr, sport, + INADDR_ANY, hnum, dif, hslot2, slot2); } rcu_read_unlock(); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 548a06e..d2ef3a3 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1006,7 +1006,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len); t1 = (struct tcphdr *) skb_push(buff, tot_len); - skb_reset_transport_header(skb); + skb_reset_transport_header(buff); /* Swap the send and the receive. 
*/ memset(t1, 0, sizeof(*t1)); diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index d9714d2..4f57cd2 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -258,8 +258,8 @@ static struct sock *__udp6_lib_lookup(struct net *net, if (hslot->count < hslot2->count) goto begin; - result = udp6_lib_lookup2(net, &in6addr_any, sport, - daddr, hnum, dif, + result = udp6_lib_lookup2(net, saddr, sport, + &in6addr_any, hnum, dif, hslot2, slot2); } rcu_read_unlock(); diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 304b0b6..dfdc138 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -183,7 +183,6 @@ static void sta_addba_resp_timer_expired(unsigned long data) HT_AGG_STATE_REQ_STOP_BA_MSK)) != HT_ADDBA_REQUESTED_MSK) { spin_unlock_bh(&sta->lock); - *state = HT_AGG_STATE_IDLE; #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "timer expired on tid %d but we are not " "(or no longer) expecting addBA response there", diff --git a/net/sctp/associola.c b/net/sctp/associola.c index df5abbf..99c93ee 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -1194,8 +1194,10 @@ void sctp_assoc_update(struct sctp_association *asoc, /* Remove any peer addresses not present in the new association. */ list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { trans = list_entry(pos, struct sctp_transport, transports); - if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) - sctp_assoc_del_peer(asoc, &trans->ipaddr); + if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) { + sctp_assoc_rm_peer(asoc, trans); + continue; + } if (asoc->state >= SCTP_STATE_ESTABLISHED) sctp_transport_reset(trans); diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index 905fda5..7ec09ba 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c @@ -144,6 +144,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, /* Use SCTP specific send buffer space queues. 
*/ ep->sndbuf_policy = sctp_sndbuf_policy; + sk->sk_data_ready = sctp_data_ready; sk->sk_write_space = sctp_write_space; sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 9e73291..224db01 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -207,7 +207,8 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, sp = sctp_sk(asoc->base.sk); num_types = sp->pf->supported_addrs(sp, types); - chunksize = sizeof(init) + addrs_len + SCTP_SAT_LEN(num_types); + chunksize = sizeof(init) + addrs_len; + chunksize += WORD_ROUND(SCTP_SAT_LEN(num_types)); chunksize += sizeof(ecap_param); if (sctp_prsctp_enable) @@ -237,14 +238,14 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, /* Add HMACS parameter length if any were defined */ auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs; if (auth_hmacs->length) - chunksize += ntohs(auth_hmacs->length); + chunksize += WORD_ROUND(ntohs(auth_hmacs->length)); else auth_hmacs = NULL; /* Add CHUNKS parameter length */ auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks; if (auth_chunks->length) - chunksize += ntohs(auth_chunks->length); + chunksize += WORD_ROUND(ntohs(auth_chunks->length)); else auth_chunks = NULL; @@ -254,7 +255,8 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, /* If we have any extensions to report, account for that */ if (num_ext) - chunksize += sizeof(sctp_supported_ext_param_t) + num_ext; + chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) + + num_ext); /* RFC 2960 3.3.2 Initiation (INIT) (1) * @@ -396,13 +398,13 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs; if (auth_hmacs->length) - chunksize += ntohs(auth_hmacs->length); + chunksize += WORD_ROUND(ntohs(auth_hmacs->length)); else auth_hmacs = NULL; auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks; if (auth_chunks->length) - chunksize += ntohs(auth_chunks->length); + chunksize += WORD_ROUND(ntohs(auth_chunks->length)); else auth_chunks = NULL; @@ -411,7 +413,8 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, } if (num_ext) - chunksize += sizeof(sctp_supported_ext_param_t) + num_ext; + chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) + + num_ext); /* Now allocate and fill out the chunk. */ retval = sctp_make_chunk(asoc, SCTP_CID_INIT_ACK, 0, chunksize); @@ -3314,21 +3317,6 @@ int sctp_process_asconf_ack(struct sctp_association *asoc, sctp_chunk_free(asconf); asoc->addip_last_asconf = NULL; - /* Send the next asconf chunk from the addip chunk queue. */ - if (!list_empty(&asoc->addip_chunk_list)) { - struct list_head *entry = asoc->addip_chunk_list.next; - asconf = list_entry(entry, struct sctp_chunk, list); - - list_del_init(entry); - - /* Hold the chunk until an ASCONF_ACK is received. */ - sctp_chunk_hold(asconf); - if (sctp_primitive_ASCONF(asoc, asconf)) - sctp_chunk_free(asconf); - else - asoc->addip_last_asconf = asconf; - } - return retval; } diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 4e4ca65..42bbb24 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -961,6 +961,29 @@ static int sctp_cmd_send_msg(struct sctp_association *asoc, } +/* Send the next ASCONF packet currently stored in the association. + * This happens after the ASCONF_ACK was successfully processed. 
+ */ +static void sctp_cmd_send_asconf(struct sctp_association *asoc) +{ + /* Send the next asconf chunk from the addip chunk + * queue. + */ + if (!list_empty(&asoc->addip_chunk_list)) { + struct list_head *entry = asoc->addip_chunk_list.next; + struct sctp_chunk *asconf = list_entry(entry, + struct sctp_chunk, list); + list_del_init(entry); + + /* Hold the chunk until an ASCONF_ACK is received. */ + sctp_chunk_hold(asconf); + if (sctp_primitive_ASCONF(asoc, asconf)) + sctp_chunk_free(asconf); + else + asoc->addip_last_asconf = asconf; + } +} + /* These three macros allow us to pull the debugging code out of the * main flow of sctp_do_sm() to keep attention focused on the real @@ -1616,6 +1639,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, } error = sctp_cmd_send_msg(asoc, cmd->obj.msg); break; + case SCTP_CMD_SEND_NEXT_ASCONF: + sctp_cmd_send_asconf(asoc); + break; default: printk(KERN_WARNING "Impossible command: %u, %p\n", cmd->verb, cmd->obj.ptr); diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 47bc20d..c3f75e7 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -3675,8 +3675,14 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep, SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); if (!sctp_process_asconf_ack((struct sctp_association *)asoc, - asconf_ack)) + asconf_ack)) { + /* Successfully processed ASCONF_ACK. We can + * release the next asconf if we have one. + */ + sctp_add_cmd_sf(commands, SCTP_CMD_SEND_NEXT_ASCONF, + SCTP_NULL()); return SCTP_DISPOSITION_CONSUME; + } abort = sctp_make_abort(asoc, asconf_ack, sizeof(sctp_errhdr_t)); diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 9bd9d82..aa3ba60 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3718,12 +3718,12 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk) sp->hmac = NULL; SCTP_DBG_OBJCNT_INC(sock); - percpu_counter_inc(&sctp_sockets_allocated); /* Set socket backlog limit. */ sk->sk_backlog.limit = sysctl_sctp_rmem[1]; local_bh_disable(); + percpu_counter_inc(&sctp_sockets_allocated); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); local_bh_enable(); @@ -3740,8 +3740,8 @@ SCTP_STATIC void sctp_destroy_sock(struct sock *sk) /* Release our hold on the endpoint. */ ep = sctp_sk(sk)->ep; sctp_endpoint_free(ep); - percpu_counter_dec(&sctp_sockets_allocated); local_bh_disable(); + percpu_counter_dec(&sctp_sockets_allocated); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); local_bh_enable(); } @@ -6188,6 +6188,16 @@ do_nonblock: goto out; } +void sctp_data_ready(struct sock *sk, int len) +{ + read_lock_bh(&sk->sk_callback_lock); + if (sk_has_sleeper(sk)) + wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN | + POLLRDNORM | POLLRDBAND); + sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); + read_unlock_bh(&sk->sk_callback_lock); +} + /* If socket sndbuf has changed, wake up all per association waiters. 
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 9bd9d82..aa3ba60 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3718,12 +3718,12 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
         sp->hmac = NULL;
 
         SCTP_DBG_OBJCNT_INC(sock);
-        percpu_counter_inc(&sctp_sockets_allocated);
 
         /* Set socket backlog limit. */
         sk->sk_backlog.limit = sysctl_sctp_rmem[1];
 
         local_bh_disable();
+        percpu_counter_inc(&sctp_sockets_allocated);
         sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
         local_bh_enable();
 
@@ -3740,8 +3740,8 @@ SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
         /* Release our hold on the endpoint. */
         ep = sctp_sk(sk)->ep;
         sctp_endpoint_free(ep);
-        percpu_counter_dec(&sctp_sockets_allocated);
         local_bh_disable();
+        percpu_counter_dec(&sctp_sockets_allocated);
         sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
         local_bh_enable();
 }
@@ -6188,6 +6188,16 @@ do_nonblock:
         goto out;
 }
 
+void sctp_data_ready(struct sock *sk, int len)
+{
+        read_lock_bh(&sk->sk_callback_lock);
+        if (sk_has_sleeper(sk))
+                wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN |
+                                                POLLRDNORM | POLLRDBAND);
+        sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
+        read_unlock_bh(&sk->sk_callback_lock);
+}
+
 /* If socket sndbuf has changed, wake up all per association waiters.  */
 void sctp_write_space(struct sock *sk)
 {
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 327011f..7809137 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -45,10 +45,10 @@
 
 #define MAX_ADDR_STR 32
 
-static struct media *media_list = NULL;
+static struct media media_list[MAX_MEDIA];
 static u32 media_count = 0;
 
-struct bearer *tipc_bearers = NULL;
+struct bearer tipc_bearers[MAX_BEARERS];
 
 /**
  * media_name_valid - validate media name
@@ -108,9 +108,11 @@ int tipc_register_media(u32 media_type,
         int res = -EINVAL;
 
         write_lock_bh(&tipc_net_lock);
-        if (!media_list)
-                goto exit;
 
+        if (tipc_mode != TIPC_NET_MODE) {
+                warn("Media <%s> rejected, not in networked mode yet\n", name);
+                goto exit;
+        }
         if (!media_name_valid(name)) {
                 warn("Media <%s> rejected, illegal name\n", name);
                 goto exit;
@@ -660,33 +662,10 @@ int tipc_disable_bearer(const char *name)
 
 
 
-int tipc_bearer_init(void)
-{
-        int res;
-
-        write_lock_bh(&tipc_net_lock);
-        tipc_bearers = kcalloc(MAX_BEARERS, sizeof(struct bearer), GFP_ATOMIC);
-        media_list = kcalloc(MAX_MEDIA, sizeof(struct media), GFP_ATOMIC);
-        if (tipc_bearers && media_list) {
-                res = 0;
-        } else {
-                kfree(tipc_bearers);
-                kfree(media_list);
-                tipc_bearers = NULL;
-                media_list = NULL;
-                res = -ENOMEM;
-        }
-        write_unlock_bh(&tipc_net_lock);
-        return res;
-}
-
 void tipc_bearer_stop(void)
 {
         u32 i;
 
-        if (!tipc_bearers)
-                return;
-
         for (i = 0; i < MAX_BEARERS; i++) {
                 if (tipc_bearers[i].active)
                         tipc_bearers[i].publ.blocked = 1;
@@ -695,10 +674,6 @@ void tipc_bearer_stop(void)
                 if (tipc_bearers[i].active)
                         bearer_disable(tipc_bearers[i].publ.name);
         }
-        kfree(tipc_bearers);
-        kfree(media_list);
-        tipc_bearers = NULL;
-        media_list = NULL;
         media_count = 0;
 }
 
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index ca57348..000228e 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -114,7 +114,7 @@ struct bearer_name {
 
 struct link;
 
-extern struct bearer *tipc_bearers;
+extern struct bearer tipc_bearers[];
 
 void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a);
 struct sk_buff *tipc_media_get_names(void);
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 7906608..f25b1cd 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -116,7 +116,8 @@
 */
 
 DEFINE_RWLOCK(tipc_net_lock);
-struct network tipc_net = { NULL };
+struct _zone *tipc_zones[256] = { NULL, };
+struct network tipc_net = { tipc_zones };
 
 struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref)
 {
@@ -158,28 +159,12 @@ void tipc_net_send_external_routes(u32 dest)
         }
 }
 
-static int net_init(void)
-{
-        memset(&tipc_net, 0, sizeof(tipc_net));
-        tipc_net.zones = kcalloc(tipc_max_zones + 1, sizeof(struct _zone *), GFP_ATOMIC);
-        if (!tipc_net.zones) {
-                return -ENOMEM;
-        }
-        return 0;
-}
-
 static void net_stop(void)
 {
         u32 z_num;
 
-        if (!tipc_net.zones)
-                return;
-
-        for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
+        for (z_num = 1; z_num <= tipc_max_zones; z_num++)
                 tipc_zone_delete(tipc_net.zones[z_num]);
-        }
-        kfree(tipc_net.zones);
-        tipc_net.zones = NULL;
 }
 
 static void net_route_named_msg(struct sk_buff *buf)
@@ -282,9 +267,7 @@ int tipc_net_start(u32 addr)
         tipc_named_reinit();
         tipc_port_reinit();
 
-        if ((res = tipc_bearer_init()) ||
-            (res = net_init()) ||
-            (res = tipc_cltr_init()) ||
+        if ((res = tipc_cltr_init()) ||
             (res = tipc_bclink_init())) {
                 return res;
         }
diff --git a/security/inode.c b/security/inode.c
index c3a7938..1c812e8 100644
--- a/security/inode.c
+++ b/security/inode.c
@@ -161,13 +161,13 @@ static int create_by_name(const char *name, mode_t mode,
 
         mutex_lock(&parent->d_inode->i_mutex);
         *dentry = lookup_one_len(name, parent, strlen(name));
-        if (!IS_ERR(dentry)) {
+        if (!IS_ERR(*dentry)) {
                 if ((mode & S_IFMT) == S_IFDIR)
                         error = mkdir(parent->d_inode, *dentry, mode);
                 else
                         error = create(parent->d_inode, *dentry, mode);
         } else
-                error = PTR_ERR(dentry);
+                error = PTR_ERR(*dentry);
         mutex_unlock(&parent->d_inode->i_mutex);
 
         return error;
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 03fe63e..9ac7bfd 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -336,8 +336,10 @@ static int construct_alloc_key(struct key_type *type,
 
 key_already_present:
         mutex_unlock(&key_construction_mutex);
-        if (dest_keyring)
+        if (dest_keyring) {
+                __key_link(dest_keyring, key_ref_to_ptr(key_ref));
                 up_write(&dest_keyring->sem);
+        }
         mutex_unlock(&user->cons_lock);
         key_put(key);
         *_key = key = key_ref_to_ptr(key_ref);
@@ -428,6 +430,11 @@ struct key *request_key_and_link(struct key_type *type,
 
         if (!IS_ERR(key_ref)) {
                 key = key_ref_to_ptr(key_ref);
+                if (dest_keyring) {
+                        construct_get_dest_keyring(&dest_keyring);
+                        key_link(dest_keyring, key);
+                        key_put(dest_keyring);
+                }
         } else if (PTR_ERR(key_ref) != -EAGAIN) {
                 key = ERR_CAST(key_ref);
         } else {
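The security/inode.c hunk above fixes a common slip with the kernel's error-pointer convention: lookup_one_len() stores its result through the dentry out-parameter, so failure has to be tested with IS_ERR(*dentry); IS_ERR(dentry) only inspects the address of the caller's variable and can never fire. A small self-contained sketch of the pattern follows; the ERR_PTR/IS_ERR/PTR_ERR macros are simplified stand-ins for the real helpers in <linux/err.h>, and toy_lookup()/toy_create() are invented for illustration.

#include <errno.h>
#include <stdio.h>

/* Simplified versions of the <linux/err.h> helpers, enough for the demo. */
#define MAX_ERRNO       4095
#define ERR_PTR(err)    ((void *)(long)(err))
#define PTR_ERR(ptr)    ((long)(ptr))
#define IS_ERR(ptr)     ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

/* Hypothetical lookup that reports failure through an encoded pointer. */
static void *toy_lookup(const char *name)
{
        if (!name || !name[0])
                return ERR_PTR(-ENOENT);
        return (void *)name;            /* any real object would do */
}

/*
 * Mirrors the shape of create_by_name(): the result comes back through
 * 'out', so the error check must look at *out, not at out itself.
 */
static long toy_create(const char *name, void **out)
{
        *out = toy_lookup(name);
        if (IS_ERR(*out))               /* IS_ERR(out) would never trigger */
                return PTR_ERR(*out);
        return 0;
}

int main(void)
{
        void *obj;

        printf("good name: %ld\n", toy_create("file", &obj));
        printf("bad name:  %ld\n", toy_create("", &obj));
        return 0;
}

It prints 0 for the valid name and -2 (that is, -ENOENT) for the empty one.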
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 062a8b0..fd831bd 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2273,6 +2273,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
         SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
         SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB),
         SND_PCI_QUIRK(0x1565, 0x8218, "Biostar Microtech", POS_FIX_LPIB),
+        SND_PCI_QUIRK(0x8086, 0x2503, "DG965OT AAD63733-203", POS_FIX_LPIB),
         SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB),
         {}
 };
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 71b7a96..1a97c81 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -1174,9 +1174,10 @@ static int patch_cxt5045(struct hda_codec *codec)
 
         switch (codec->subsystem_id >> 16) {
         case 0x103c:
+        case 0x1631:
         case 0x1734:
-                /* HP & Fujitsu-Siemens laptops have really bad sound over 0dB
-                 * on NID 0x17. Fix max PCM level to 0 dB
+                /* HP, Packard Bell, & Fujitsu-Siemens laptops have really bad
+                 * sound over 0dB on NID 0x17.  Fix max PCM level to 0 dB
                  * (originally it has 0x2b steps with 0dB offset 0x14)
                  */
                 snd_hda_override_amp_caps(codec, 0x17, HDA_INPUT,
@@ -2471,6 +2472,8 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
                       CXT5066_DELL_LAPTOP),
         SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT5066_OLPC_XO_1_5),
         SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTO),
+        SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5),
+        SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5),
         {}
 };
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index bd8a567..b486daa 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4033,7 +4033,7 @@ static struct snd_pci_quirk alc880_cfg_tbl[] = {
         SND_PCI_QUIRK(0x1695, 0x4012, "EPox EP-5LDA", ALC880_5ST_DIG),
         SND_PCI_QUIRK(0x1734, 0x107c, "FSC F1734", ALC880_F1734),
         SND_PCI_QUIRK(0x1734, 0x1094, "FSC Amilo M1451G", ALC880_FUJITSU),
-        SND_PCI_QUIRK(0x1734, 0x10ac, "FSC", ALC880_UNIWILL),
+        SND_PCI_QUIRK(0x1734, 0x10ac, "FSC AMILO Xi 1526", ALC880_F1734),
         SND_PCI_QUIRK(0x1734, 0x10b0, "Fujitsu", ALC880_FUJITSU),
         SND_PCI_QUIRK(0x1854, 0x0018, "LG LW20", ALC880_LG_LW),
         SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_LG),
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 799ba25..ac2d528 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -1602,6 +1602,10 @@ static struct snd_pci_quirk stac92hd73xx_cfg_tbl[] = {
                                 "Dell Studio 1555", STAC_DELL_M6_DMIC),
         SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02bd,
                                 "Dell Studio 1557", STAC_DELL_M6_DMIC),
+        SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02fe,
+                                "Dell Studio XPS 1645", STAC_DELL_M6_BOTH),
+        SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0413,
+                                "Dell Studio 1558", STAC_DELL_M6_BOTH),
         {} /* terminator */
 };
 
@@ -1725,6 +1729,8 @@ static struct snd_pci_quirk stac92hd71bxx_cfg_tbl[] = {
                       "HP HDX", STAC_HP_HDX),  /* HDX16 */
         SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x3620,
                       "HP dv6", STAC_HP_DV5),
+        SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3061,
+                      "HP dv6", STAC_HP_DV5), /* HP dv6-1110ax */
         SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x7010,
                       "HP", STAC_HP_DV5),
         SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0233,
diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c
index 75283fb..c2311f8 100644
--- a/sound/pci/maestro3.c
+++ b/sound/pci/maestro3.c
@@ -849,6 +849,7 @@ struct snd_m3 {
         struct snd_kcontrol *master_switch;
         struct snd_kcontrol *master_volume;
         struct tasklet_struct hwvol_tq;
+        unsigned int in_suspend;
 
 #ifdef CONFIG_PM
         u16 *suspend_mem;
@@ -884,6 +885,7 @@ static struct pci_device_id snd_m3_ids[] = {
 MODULE_DEVICE_TABLE(pci, snd_m3_ids);
 
 static struct snd_pci_quirk m3_amp_quirk_list[] __devinitdata = {
+        SND_PCI_QUIRK(0x0E11, 0x0094, "Compaq Evo N600c", 0x0c),
         SND_PCI_QUIRK(0x10f7, 0x833e, "Panasonic CF-28", 0x0d),
         SND_PCI_QUIRK(0x10f7, 0x833d, "Panasonic CF-72", 0x0d),
         SND_PCI_QUIRK(0x1033, 0x80f1, "NEC LM800J/7", 0x03),
@@ -1613,6 +1615,11 @@ static void snd_m3_update_hw_volume(unsigned long private_data)
         outb(0x88, chip->iobase + SHADOW_MIX_REG_MASTER);
         outb(0x88, chip->iobase + HW_VOL_COUNTER_MASTER);
 
+        /* Ignore spurious HV interrupts during suspend / resume, this avoids
+           mistaking them for a mute button press. */
+        if (chip->in_suspend)
+                return;
+
         if (!chip->master_switch || !chip->master_volume)
                 return;
 
@@ -2424,6 +2431,7 @@ static int m3_suspend(struct pci_dev *pci, pm_message_t state)
         if (chip->suspend_mem == NULL)
                 return 0;
 
+        chip->in_suspend = 1;
         snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
         snd_pcm_suspend_all(chip->pcm);
         snd_ac97_suspend(chip->ac97);
@@ -2497,6 +2505,7 @@ static int m3_resume(struct pci_dev *pci)
         snd_m3_hv_init(chip);
 
         snd_power_change_state(card, SNDRV_CTL_POWER_D0);
+        chip->in_suspend = 0;
         return 0;
 }
 #endif /* CONFIG_PM */
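The maestro3.c hunks above add an in_suspend flag that m3_suspend() raises before saving state and m3_resume() clears at the end, letting snd_m3_update_hw_volume() discard the spurious hardware-volume interrupts that fire around suspend and resume instead of treating them as mute-button presses. Below is a small, self-contained toy of that flag-guard pattern; struct toy_chip and the toy_* functions are invented, and the real driver of course does this from its interrupt tasklet rather than from ordinary function calls.

#include <stdio.h>

/* Toy stand-in for the driver state: a guard flag plus a volume value. */
struct toy_chip {
        unsigned int in_suspend;
        int volume;
};

/* Models the hardware-volume handler: drop events while suspending. */
static void toy_hwvol_event(struct toy_chip *chip, int delta)
{
        if (chip->in_suspend)
                return;
        chip->volume += delta;
}

static void toy_suspend(struct toy_chip *chip)
{
        chip->in_suspend = 1;
        /* ... save hardware state ... */
}

static void toy_resume(struct toy_chip *chip)
{
        /* ... restore hardware state ... */
        chip->in_suspend = 0;
}

int main(void)
{
        struct toy_chip chip = { .in_suspend = 0, .volume = 50 };

        toy_suspend(&chip);
        toy_hwvol_event(&chip, -50);    /* spurious event: ignored */
        toy_resume(&chip);
        toy_hwvol_event(&chip, +2);     /* real event: applied */

        printf("volume = %d\n", chip.volume);   /* prints 52 */
        return 0;
}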