Subject: Fix xen build.
From: jbeulich@novell.com
Patch-mainline: obsolete

---
 drivers/acpi/hardware/hwsleep.c   |    2 ++
 drivers/ide/ide-lib.c             |   11 +++++++++++
 drivers/oprofile/buffer_sync.c    |   35 ++++++++++++++++++++++++-----------
 drivers/oprofile/cpu_buffer.c     |    6 ++++++
 drivers/oprofile/oprof.c          |    2 ++
 drivers/oprofile/oprofile_files.c |    6 ++++++
 drivers/pci/bus.c                 |    2 ++
 drivers/pci/quirks.c              |    2 ++
 include/linux/mm.h                |    2 ++
 include/linux/oprofile.h          |    8 +++++---
 include/linux/page-flags.h        |    3 +--
 kernel/timer.c                    |   14 ++++++++++----
 mm/memory.c                       |    4 ++++
 13 files changed, 77 insertions(+), 20 deletions(-)

--- a/drivers/acpi/hardware/hwsleep.c
+++ b/drivers/acpi/hardware/hwsleep.c
@@ -441,6 +441,7 @@
  *              THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
  *
  ******************************************************************************/
+#ifndef CONFIG_XEN
 acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
 {
 	u32 in_value;
@@ -490,6 +491,7 @@
 }
 
 ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios)
+#endif
 
 /*******************************************************************************
  *
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -336,6 +336,16 @@
 {
 	u64 addr = BLK_BOUNCE_HIGH;	/* dma64_addr_t */
 
+#ifndef CONFIG_XEN
+	if (!PCI_DMA_BUS_IS_PHYS) {
+		addr = BLK_BOUNCE_ANY;
+	} else if (on && drive->media == ide_disk) {
+		struct device *dev = drive->hwif->dev;
+
+		if (dev && dev->dma_mask)
+			addr = *dev->dma_mask;
+	}
+#else
 	if (on && drive->media == ide_disk) {
 		struct device *dev = drive->hwif->dev;
 
@@ -344,6 +354,7 @@
 		else if (dev && dev->dma_mask)
 			addr = *dev->dma_mask;
 	}
+#endif
 
 	if (drive->queue)
 		blk_queue_bounce_limit(drive->queue, addr);
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -44,7 +44,9 @@
 static DEFINE_SPINLOCK(task_mortuary);
 static void process_task_mortuary(void);
 
+#ifdef CONFIG_XEN
 static int cpu_current_domain[NR_CPUS];
+#endif
 
 /* Take ownership of the task struct and place it on the
  * list for processing. Only after two full buffer syncs
@@ -153,11 +155,13 @@
 int sync_start(void)
 {
 	int err;
+#ifdef CONFIG_XEN
 	int i;
 
 	for (i = 0; i < NR_CPUS; i++) {
 		cpu_current_domain[i] = COORDINATOR_DOMAIN;
 	}
+#endif
 
 	start_cpu_work();
 
@@ -302,12 +306,14 @@
 	}
 }
 
+#ifdef CONFIG_XEN
 static void add_domain_switch(unsigned long domain_id)
 {
 	add_event_entry(ESCAPE_CODE);
 	add_event_entry(DOMAIN_SWITCH_CODE);
 	add_event_entry(domain_id);
 }
+#endif
 
 static void add_user_ctx_switch(struct task_struct const * task,
 				unsigned long cookie)
@@ -531,11 +537,14 @@
 
 	add_cpu_switch(cpu);
 
+#ifdef CONFIG_XEN
 	/* We need to assign the first samples in this CPU buffer to the
 	   same domain that we were processing at the last sync_buffer */
 	if (cpu_current_domain[cpu] != COORDINATOR_DOMAIN) {
 		add_domain_switch(cpu_current_domain[cpu]);
 	}
+#endif
+
 	/* Remember, only we can modify tail_pos */
 
 	available = get_slots(cpu_buf);
@@ -553,8 +562,10 @@
 			} else if (s->event == CPU_TRACE_BEGIN) {
 				state = sb_bt_start;
 				add_trace_begin();
+#ifdef CONFIG_XEN
 			} else if (s->event == CPU_DOMAIN_SWITCH) {
-				domain_switch = 1;
+				domain_switch = 1;
+#endif
 			} else {
 				struct mm_struct * oldmm = mm;
 
@@ -568,21 +579,21 @@
 				add_user_ctx_switch(new, cookie);
 			}
 		} else {
+#ifdef CONFIG_XEN
 			if (domain_switch) {
 				cpu_current_domain[cpu] = s->eip;
 				add_domain_switch(s->eip);
 				domain_switch = 0;
-			} else {
-				if (cpu_current_domain[cpu] !=
+			} else if (cpu_current_domain[cpu] !=
 				    COORDINATOR_DOMAIN) {
-					add_sample_entry(s->eip, s->event);
-				}
-				else if (state >= sb_bt_start &&
-				    !add_sample(mm, s, cpu_mode)) {
-					if (state == sb_bt_start) {
-						state = sb_bt_ignore;
-						atomic_inc(&oprofile_stats.bt_lost_no_mapping);
-					}
+				add_sample_entry(s->eip, s->event);
+			} else
+#endif
+			if (state >= sb_bt_start &&
+			    !add_sample(mm, s, cpu_mode)) {
+				if (state == sb_bt_start) {
+					state = sb_bt_ignore;
+					atomic_inc(&oprofile_stats.bt_lost_no_mapping);
 				}
 			}
 		}
@@ -591,10 +602,12 @@
 	}
 	release_mm(mm);
 
+#ifdef CONFIG_XEN
 	/* We reset domain to COORDINATOR at each CPU switch */
 	if (cpu_current_domain[cpu] != COORDINATOR_DOMAIN) {
 		add_domain_switch(COORDINATOR_DOMAIN);
 	}
+#endif
 
 	mark_done(cpu);
 
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -38,7 +38,11 @@
 #define DEFAULT_TIMER_EXPIRE (HZ / 10)
 static int work_enabled;
 
+#ifndef CONFIG_XEN
+#define current_domain COORDINATOR_DOMAIN
+#else
 static int32_t current_domain = COORDINATOR_DOMAIN;
+#endif
 
 void free_cpu_buffers(void)
 {
@@ -288,6 +292,7 @@
 	add_sample(cpu_buf, pc, 0);
 }
 
+#ifdef CONFIG_XEN
 int oprofile_add_domain_switch(int32_t domain_id)
 {
 	struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
@@ -306,6 +311,7 @@
 
 	return 1;
 }
+#endif
 
 /*
  * This serves to avoid cpu buffer overflow, and makes sure
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -37,6 +37,7 @@
  */
 static int timer = 0;
 
+#ifdef CONFIG_XEN
 int oprofile_set_active(int active_domains[], unsigned int adomains)
 {
 	int err;
@@ -62,6 +63,7 @@
 	mutex_unlock(&start_mutex);
 	return err;
 }
+#endif
 
 int oprofile_setup(void)
 {
--- a/drivers/oprofile/oprofile_files.c
+++ b/drivers/oprofile/oprofile_files.c
@@ -124,6 +124,8 @@
 	.write		= dump_write,
 };
 
+#ifdef CONFIG_XEN
+
 #define TMPBUFSIZE 512
 
 static unsigned int adomains = 0;
@@ -313,12 +315,16 @@
 	.write		= pdomain_write,
 };
 
+#endif /* CONFIG_XEN */
+
 void oprofile_create_files(struct super_block * sb, struct dentry * root)
 {
 	oprofilefs_create_file(sb, root, "enable", &enable_fops);
 	oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
+#ifdef CONFIG_XEN
 	oprofilefs_create_file(sb, root, "active_domains", &active_domain_ops);
 	oprofilefs_create_file(sb, root, "passive_domains", &passive_domain_ops);
+#endif
 	oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
 	oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size);
 	oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed);
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -46,10 +46,12 @@
 
 	type_mask |= IORESOURCE_IO | IORESOURCE_MEM;
 
+#ifdef CONFIG_XEN
 	/* If the boot parameter 'pci-mem-align' was specified then we need to
 	   align the memory addresses, at page size alignment. */
 	if (pci_mem_align && (align < (PAGE_SIZE-1)))
 		align = PAGE_SIZE - 1;
+#endif
 
 	for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
 		struct resource *r = bus->resource[i];
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -24,6 +24,7 @@
 #include
 #include "pci.h"
 
+#ifdef CONFIG_XEN
 /* A global flag which signals if we should page-align PCI mem windows. */
 int pci_mem_align = 0;
 
@@ -57,6 +58,7 @@
 	}
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, quirk_align_mem_resources);
+#endif
 
 /* The Mellanox Tavor device gives false positive parity errors
 * Mark this device with a broken_parity_status, to allow
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -175,10 +175,12 @@
 	/* notification that a previously read-only page is about to become
 	 * writable, if an error is returned it will cause a SIGBUS */
 	int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
+#ifdef CONFIG_XEN
 	/* Area-specific function for clearing the PTE at @ptep. Returns the
 	 * original value of @ptep. */
 	pte_t (*zap_pte)(struct vm_area_struct *vma, unsigned long addr,
 			 pte_t *ptep, int is_fullmm);
+#endif
 #ifdef CONFIG_NUMA
 	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
 	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
--- a/include/linux/oprofile.h
+++ b/include/linux/oprofile.h
@@ -16,9 +16,10 @@
 #include <linux/types.h>
 #include <linux/spinlock.h>
 #include <asm/atomic.h>
-
+#ifdef CONFIG_XEN
 #include <xen/interface/xenoprof.h>
-
+#endif
+
 /* Each escaped entry is prefixed by ESCAPE_CODE
  * then one of the following codes, then the
  * relevant data.
@@ -50,11 +51,12 @@
 	/* create any necessary configuration files in the oprofile fs.
 	 * Optional. */
 	int (*create_files)(struct super_block * sb, struct dentry * root);
+#ifdef CONFIG_XEN
 	/* setup active domains with Xen */
 	int (*set_active)(int *active_domains, unsigned int adomains);
 	/* setup passive domains with Xen */
 	int (*set_passive)(int *passive_domains, unsigned int pdomains);
-
+#endif
 	/* Do any necessary interrupt setup. Optional. */
 	int (*setup)(void);
 	/* Do any necessary interrupt shutdown. Optional. */
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -88,6 +88,7 @@
 
 #define PG_mappedtodisk		16	/* Has blocks allocated on-disk */
 #define PG_reclaim		17	/* To be reclaimed asap */
+#define PG_foreign		18	/* Page is owned by foreign allocator. */
 #define PG_buddy		19	/* Page is free, on buddy lists */
 
 /* PG_readahead is only used for file reads; PG_reclaim is only for writes */
@@ -97,8 +98,6 @@
 #define PG_checked		PG_owner_priv_1 /* Used by some filesystems */
 #define PG_pinned		PG_owner_priv_1	/* Xen pinned pagetable */
 
-#define PG_foreign		20	/* Page is owned by foreign allocator. */
-
 #if (BITS_PER_LONG > 32)
 /*
  * 64-bit-only flags build down from bit 31
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -802,7 +802,7 @@
 unsigned long get_next_timer_interrupt(unsigned long now)
 {
 	struct tvec_base *base = __get_cpu_var(tvec_bases);
-	unsigned long expires, sl_next;
+	unsigned long expires;
 
 	spin_lock(&base->lock);
 	expires = __next_timer_interrupt(base);
@@ -811,11 +811,17 @@
 	if (time_before_eq(expires, now))
 		return now;
 
+#ifndef CONFIG_XEN
+	return cmp_next_hrtimer_event(now, expires);
+#else
 	expires = cmp_next_hrtimer_event(now, expires);
-	sl_next = softlockup_get_next_event();
+	{
+		unsigned long sl_next = softlockup_get_next_event();
 
-	return expires <= now || expires - now < sl_next
-	       ? expires : now + sl_next;
+		return expires <= now || expires - now < sl_next
+		       ? expires : now + sl_next;
+	}
+#endif
 }
 
 #ifdef CONFIG_NO_IDLE_HZ
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -416,7 +416,9 @@
 	 * and that the resulting page looks ok.
 	 */
 	if (unlikely(!pfn_valid(pfn))) {
+#ifdef CONFIG_XEN
 		if (!(vma->vm_flags & VM_RESERVED))
+#endif
 			print_bad_pte(vma, pte, addr);
 		return NULL;
 	}
@@ -675,10 +677,12 @@
 				    page->index > details->last_index))
 					continue;
 			}
+#ifdef CONFIG_XEN
 			if (unlikely(vma->vm_ops && vma->vm_ops->zap_pte))
 				ptent = vma->vm_ops->zap_pte(vma, addr, pte,
 							     tlb->fullmm);
 			else
+#endif
 			ptent = ptep_get_and_clear_full(mm, addr, pte,
 							tlb->fullmm);
 			tlb_remove_tlb_entry(tlb, pte, addr);