From: www.kernel.org
Subject: Linux 2.6.18
Patch-mainline: 2.6.18

Automatically created from "patches.kernel.org/patch-2.6.18" by xen-port-patches.py

Acked-by: jbeulich@novell.com

---
 arch/x86/Kconfig                            |    1 
 arch/x86/kernel/Makefile                    |    2 
 arch/x86/kernel/entry_32-xen.S              |    2 
 arch/x86/kernel/time_32-xen.c               |  104 ++++++++++++++++++++++++++--
 include/asm-x86/mach-xen/asm/processor_32.h |    2 
 include/asm-x86/thread_info_32.h            |    4 +
 6 files changed, 106 insertions(+), 9 deletions(-)

--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -42,7 +42,6 @@
 
 config GENERIC_TIME
 	def_bool y
-	depends on !X86_XEN
 
 config GENERIC_CMOS_UPDATE
 	def_bool y
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -104,5 +104,5 @@
 pci-dma_64-$(CONFIG_XEN) += pci-dma_32.o
 endif
 
-disabled-obj-$(CONFIG_XEN) := i8259_$(BITS).o reboot.o smpboot_$(BITS).o
+disabled-obj-$(CONFIG_XEN) := i8253.o i8259_$(BITS).o reboot.o smpboot_$(BITS).o tsc_$(BITS).o
 %/head_$(BITS).o %/head_$(BITS).s: $(if $(CONFIG_XEN),EXTRA_AFLAGS,dummy) :=
--- a/arch/x86/kernel/entry_32-xen.S
+++ b/arch/x86/kernel/entry_32-xen.S
@@ -388,8 +388,10 @@
 	movl %ebp,12(%esp)
 	movl $__USER_CS,4(%esp)
 	addl $4,%esp
+	CFI_ADJUST_CFA_OFFSET -4
 	/* +5*4 is SS:ESP,EFLAGS,CS:EIP. +8 is esp0 setting. */
 	pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
+	CFI_ADJUST_CFA_OFFSET 4
 /*
  * Load the potential sixth argument from user stack.
  * Careful about security.
--- a/arch/x86/kernel/time_32-xen.c
+++ b/arch/x86/kernel/time_32-xen.c
@@ -76,8 +76,13 @@
 
 #if defined (__i386__)
 #include <asm/i8259.h>
+#include <asm/i8253.h>
+DEFINE_SPINLOCK(i8253_lock);
+EXPORT_SYMBOL(i8253_lock);
 #endif
 
+#define XEN_SHIFT 22
+
 int pit_latch_buggy;		/* extern */
 
 #if defined(__x86_64__)
@@ -97,10 +102,6 @@
 DEFINE_SPINLOCK(rtc_lock);
 EXPORT_SYMBOL(rtc_lock);
 
-extern struct init_timer_opts timer_tsc_init;
-extern struct timer_opts timer_tsc;
-#define timer_none timer_tsc
-
 /* These are peridically updated in shared_info, and then copied here. */
 struct shadow_time_info {
 	u64 tsc_timestamp;     /* TSC at last update of time vals. */
@@ -248,6 +249,7 @@
 	return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift);
 }
 
+#ifdef CONFIG_X86_64
 static unsigned long get_usec_offset(struct shadow_time_info *shadow)
 {
 	u64 now, delta;
@@ -255,6 +257,7 @@
 	delta = now - shadow->tsc_timestamp;
 	return scale_delta(delta, shadow->tsc_to_usec_mul, shadow->tsc_shift);
 }
+#endif
 
 static void __update_wallclock(time_t sec, long nsec)
 {
@@ -364,6 +367,8 @@
 }
 EXPORT_SYMBOL(rtc_cmos_write);
 
+#ifdef CONFIG_X86_64
+
 /*
  * This version of gettimeofday has microsecond resolution
  * and better than microsecond precision on fast x86 machines with TSC.
@@ -498,6 +503,8 @@
 
 EXPORT_SYMBOL(do_settimeofday);
 
+#endif
+
 static void sync_xen_wallclock(unsigned long dummy);
 static DEFINE_TIMER(sync_xen_wallclock_timer, sync_xen_wallclock, 0, 0);
 static void sync_xen_wallclock(unsigned long dummy)
@@ -549,11 +556,15 @@
 	return retval;
 }
 
+#ifdef CONFIG_X86_64
 /* monotonic_clock(): returns # of nanoseconds passed since time_init()
  * Note: This function is required to return accurate
  * time even in the absence of multiple timer ticks.
  */
 unsigned long long monotonic_clock(void)
+#else
+unsigned long long sched_clock(void)
+#endif
 {
 	unsigned int cpu = get_cpu();
 	struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
@@ -573,9 +584,9 @@
 	}
 	return time;
 }
+#ifdef CONFIG_X86_64
 EXPORT_SYMBOL(monotonic_clock);
 
-#ifdef __x86_64__
 unsigned long long sched_clock(void)
 {
 	return monotonic_clock();
@@ -745,6 +756,87 @@
 	return IRQ_HANDLED;
 }
 
+#ifndef CONFIG_X86_64
+
+void tsc_init(void)
+{
+	init_cpu_khz();
+	printk(KERN_INFO "Xen reported: %u.%03u MHz processor.\n",
+	       cpu_khz / 1000, cpu_khz % 1000);
+
+	use_tsc_delay();
+}
+
+#include <linux/clocksource.h>
+
+void mark_tsc_unstable(void)
+{
+#ifndef CONFIG_XEN /* XXX Should tell the hypervisor about this fact. */
+	tsc_unstable = 1;
+#endif
+}
+EXPORT_SYMBOL_GPL(mark_tsc_unstable);
+
+static cycle_t xen_clocksource_read(void)
+{
+	cycle_t ret = sched_clock();
+
+#ifdef CONFIG_SMP
+	for (;;) {
+		static cycle_t last_ret;
+#ifndef CONFIG_64BIT
+		cycle_t last = cmpxchg64(&last_ret, 0, 0);
+#else
+		cycle_t last = last_ret;
+#define cmpxchg64 cmpxchg
+#endif
+
+		if ((s64)(ret - last) < 0) {
+			if (last - ret > permitted_clock_jitter
+			    && printk_ratelimit()) {
+				unsigned int cpu = get_cpu();
+				struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
+
+				printk(KERN_WARNING "clocksource/%u: "
+				       "Time went backwards: "
+				       "ret=%Lx delta=%Ld shadow=%Lx offset=%Lx\n",
+				       cpu, ret, ret - last,
+				       shadow->system_timestamp,
+				       get_nsec_offset(shadow));
+				put_cpu();
+			}
+			ret = last;
+		}
+		if (cmpxchg64(&last_ret, last, ret) == last)
+			break;
+	}
+#endif
+
+	return ret;
+}
+
+static struct clocksource clocksource_xen = {
+	.name			= "xen",
+	.rating			= 400,
+	.read			= xen_clocksource_read,
+	.mask			= CLOCKSOURCE_MASK(64),
+	.mult			= 1 << XEN_SHIFT,	/* time directly in nanoseconds */
+	.shift			= XEN_SHIFT,
+	.is_continuous		= 1,
+};
+
+static int __init init_xen_clocksource(void)
+{
+	clocksource_xen.mult = clocksource_khz2mult(cpu_khz,
+						    clocksource_xen.shift);
+
+	return clocksource_register(&clocksource_xen);
+}
+
+module_init(init_xen_clocksource);
+
+#endif
+
 static void init_missing_ticks_accounting(unsigned int cpu)
 {
 	struct vcpu_register_runstate_memory_area area;
@@ -933,11 +1025,11 @@
 
 	update_wallclock();
 
+#ifdef CONFIG_X86_64
 	init_cpu_khz();
 	printk(KERN_INFO "Xen reported: %u.%03u MHz processor.\n",
 	       cpu_khz / 1000, cpu_khz % 1000);
 
-#if defined(__x86_64__)
 	vxtime.mode = VXTIME_TSC;
 	vxtime.quot = (1000000L << 32) / vxtime_hz;
 	vxtime.tsc_quot = (1000L << 32) / cpu_khz;
--- a/include/asm-x86/mach-xen/asm/processor_32.h
+++ b/include/asm-x86/mach-xen/asm/processor_32.h
@@ -23,7 +23,7 @@
 #include
 
 /* flag for disabling the tsc */
-extern int tsc_disable;
+#define tsc_disable 0
 
 struct desc_struct {
 	unsigned long a,b;
--- a/include/asm-x86/thread_info_32.h
+++ b/include/asm-x86/thread_info_32.h
@@ -170,11 +170,15 @@
 #define _TIF_ALLWORK_MASK	(0x0000FFFF & ~_TIF_SECCOMP)
 
 /* flags to check in __switch_to() */
+#ifndef CONFIG_XEN
 #define _TIF_WORK_CTXSW \
     (_TIF_IO_BITMAP | _TIF_NOTSC | _TIF_DEBUGCTLMSR | \
      _TIF_DS_AREA_MSR | _TIF_BTS_TRACE_TS)
 #define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW
 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW | _TIF_DEBUG)
+#else
+#define _TIF_WORK_CTXSW _TIF_DEBUG
+#endif
 
 /*