author     kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>   2004-06-10 16:59:06 +0000
committer  kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>   2004-06-10 16:59:06 +0000
commit     7f68576b77ef3c9fde1009100690ff996d4490e0
tree       6a87f66c7a03042baa556676add65252816fbd94   /xen/include
parent     822a0b35350d8006b808000eef3665ee4df7b297
bitkeeper revision 1.952 (40c8935a3XSRdQfnx5RoO7XgaggvOQ)
Towards x86_64 support. Merged a bunch of the existing x86_64 stuff back into a generic 'x86' architecture. Aim is to share as much as possible between 32- and 64-bit worlds.
Diffstat (limited to 'xen/include')
-rw-r--r--  xen/include/asm-i386/bitops.h  368
-rw-r--r--  xen/include/asm-i386/dma.h  298
-rw-r--r--  xen/include/asm-i386/flushtlb.h  49
-rw-r--r--  xen/include/asm-i386/hdreg.h  13
-rw-r--r--  xen/include/asm-i386/ide.h  128
-rw-r--r--  xen/include/asm-i386/io.h  284
-rw-r--r--  xen/include/asm-i386/msr.h  121
-rw-r--r--  xen/include/asm-i386/pci.h  292
-rw-r--r--  xen/include/asm-i386/pgalloc.h  79
-rw-r--r--  xen/include/asm-i386/scatterlist.h  16
-rw-r--r--  xen/include/asm-x86/acpi.h (renamed from xen/include/asm-i386/acpi.h)  0
-rw-r--r--  xen/include/asm-x86/apic.h (renamed from xen/include/asm-i386/apic.h)  12
-rw-r--r--  xen/include/asm-x86/apicdef.h (renamed from xen/include/asm-i386/apicdef.h)  0
-rw-r--r--  xen/include/asm-x86/atomic.h (renamed from xen/include/asm-i386/atomic.h)  6
-rw-r--r--  xen/include/asm-x86/bitops.h (renamed from xen/include/asm-x86_64/bitops.h)  82
-rw-r--r--  xen/include/asm-x86/cache.h (renamed from xen/include/asm-i386/cache.h)  6
-rw-r--r--  xen/include/asm-x86/config.h (renamed from xen/include/asm-i386/config.h)  4
-rw-r--r--  xen/include/asm-x86/cpufeature.h (renamed from xen/include/asm-i386/cpufeature.h)  13
-rw-r--r--  xen/include/asm-x86/current.h (renamed from xen/include/asm-i386/current.h)  6
-rw-r--r--  xen/include/asm-x86/debugreg.h (renamed from xen/include/asm-i386/debugreg.h)  8
-rw-r--r--  xen/include/asm-x86/delay.h (renamed from xen/include/asm-i386/delay.h)  6
-rw-r--r--  xen/include/asm-x86/desc.h (renamed from xen/include/asm-i386/desc.h)  0
-rw-r--r--  xen/include/asm-x86/div64.h (renamed from xen/include/asm-i386/div64.h)  0
-rw-r--r--  xen/include/asm-x86/domain_page.h (renamed from xen/include/asm-i386/domain_page.h)  0
-rw-r--r--  xen/include/asm-x86/fixmap.h (renamed from xen/include/asm-i386/fixmap.h)  0
-rw-r--r--  xen/include/asm-x86/flushtlb.h (renamed from xen/include/asm-x86_64/flushtlb.h)  37
-rw-r--r--  xen/include/asm-x86/hardirq.h (renamed from xen/include/asm-i386/hardirq.h)  0
-rw-r--r--  xen/include/asm-x86/i387.h (renamed from xen/include/asm-i386/i387.h)  0
-rw-r--r--  xen/include/asm-x86/io.h  99
-rw-r--r--  xen/include/asm-x86/io_apic.h (renamed from xen/include/asm-i386/io_apic.h)  0
-rw-r--r--  xen/include/asm-x86/irq.h (renamed from xen/include/asm-i386/irq.h)  0
-rw-r--r--  xen/include/asm-x86/ldt.h (renamed from xen/include/asm-i386/ldt.h)  0
-rw-r--r--  xen/include/asm-x86/mc146818rtc.h (renamed from xen/include/asm-i386/mc146818rtc.h)  0
-rw-r--r--  xen/include/asm-x86/mpspec.h (renamed from xen/include/asm-i386/mpspec.h)  16
-rw-r--r--  xen/include/asm-x86/msr.h (renamed from xen/include/asm-x86_64/msr.h)  78
-rw-r--r--  xen/include/asm-x86/page.h (renamed from xen/include/asm-i386/page.h)  0
-rw-r--r--  xen/include/asm-x86/param.h (renamed from xen/include/asm-i386/param.h)  4
-rw-r--r--  xen/include/asm-x86/pci.h  35
-rw-r--r--  xen/include/asm-x86/pdb.h (renamed from xen/include/asm-i386/pdb.h)  0
-rw-r--r--  xen/include/asm-x86/processor.h (renamed from xen/include/asm-i386/processor.h)  0
-rw-r--r--  xen/include/asm-x86/ptrace.h (renamed from xen/include/asm-i386/ptrace.h)  0
-rw-r--r--  xen/include/asm-x86/rwlock.h (renamed from xen/include/asm-i386/rwlock.h)  18
-rw-r--r--  xen/include/asm-x86/smp.h (renamed from xen/include/asm-i386/smp.h)  9
-rw-r--r--  xen/include/asm-x86/smpboot.h (renamed from xen/include/asm-i386/smpboot.h)  0
-rw-r--r--  xen/include/asm-x86/softirq.h (renamed from xen/include/asm-i386/softirq.h)  0
-rw-r--r--  xen/include/asm-x86/spinlock.h (renamed from xen/include/asm-i386/spinlock.h)  2
-rw-r--r--  xen/include/asm-x86/string.h (renamed from xen/include/asm-i386/string.h)  0
-rw-r--r--  xen/include/asm-x86/system.h (renamed from xen/include/asm-i386/system.h)  42
-rw-r--r--  xen/include/asm-x86/time.h (renamed from xen/include/asm-i386/time.h)  0
-rw-r--r--  xen/include/asm-x86/timex.h (renamed from xen/include/asm-i386/timex.h)  0
-rw-r--r--  xen/include/asm-x86/types.h (renamed from xen/include/asm-i386/types.h)  22
-rw-r--r--  xen/include/asm-x86/uaccess.h (renamed from xen/include/asm-i386/uaccess.h)  0
-rw-r--r--  xen/include/asm-x86/unaligned.h (renamed from xen/include/asm-i386/unaligned.h)  6
-rw-r--r--  xen/include/asm-x86/x86_64/config.h (renamed from xen/include/asm-x86_64/config.h)  0
-rw-r--r--  xen/include/asm-x86/x86_64/current.h (renamed from xen/include/asm-x86_64/current.h)  0
-rw-r--r--  xen/include/asm-x86/x86_64/desc.h (renamed from xen/include/asm-x86_64/desc.h)  0
-rw-r--r--  xen/include/asm-x86/x86_64/ldt.h (renamed from xen/include/asm-x86_64/ldt.h)  0
-rw-r--r--  xen/include/asm-x86/x86_64/page.h (renamed from xen/include/asm-x86_64/page.h)  0
-rw-r--r--  xen/include/asm-x86/x86_64/pda.h (renamed from xen/include/asm-x86_64/pda.h)  0
-rw-r--r--  xen/include/asm-x86/x86_64/processor.h (renamed from xen/include/asm-x86_64/processor.h)  0
-rw-r--r--  xen/include/asm-x86/x86_64/ptrace.h (renamed from xen/include/asm-x86_64/ptrace.h)  0
-rw-r--r--  xen/include/asm-x86/x86_64/uaccess.h (renamed from xen/include/asm-x86_64/uaccess.h)  0
-rw-r--r--  xen/include/asm-x86_64/apic.h  102
-rw-r--r--  xen/include/asm-x86_64/apicdef.h  363
-rw-r--r--  xen/include/asm-x86_64/atomic.h  205
-rw-r--r--  xen/include/asm-x86_64/cache.h  13
-rw-r--r--  xen/include/asm-x86_64/cpufeature.h  73
-rw-r--r--  xen/include/asm-x86_64/debugreg.h  65
-rw-r--r--  xen/include/asm-x86_64/delay.h  14
-rw-r--r--  xen/include/asm-x86_64/dma.h  301
-rw-r--r--  xen/include/asm-x86_64/domain_page.h  27
-rw-r--r--  xen/include/asm-x86_64/fixmap.h  107
-rw-r--r--  xen/include/asm-x86_64/hardirq.h  90
-rw-r--r--  xen/include/asm-x86_64/hdreg.h  13
-rw-r--r--  xen/include/asm-x86_64/i387.h  38
-rw-r--r--  xen/include/asm-x86_64/ide.h  127
-rw-r--r--  xen/include/asm-x86_64/io.h  273
-rw-r--r--  xen/include/asm-x86_64/io_apic.h  148
-rw-r--r--  xen/include/asm-x86_64/irq.h  136
-rw-r--r--  xen/include/asm-x86_64/mc146818rtc.h  113
-rw-r--r--  xen/include/asm-x86_64/mpspec.h  212
-rw-r--r--  xen/include/asm-x86_64/param.h  24
-rw-r--r--  xen/include/asm-x86_64/pci.h  336
-rw-r--r--  xen/include/asm-x86_64/pdb.h  51
-rw-r--r--  xen/include/asm-x86_64/pgalloc.h  43
-rw-r--r--  xen/include/asm-x86_64/rwlock.h  84
-rw-r--r--  xen/include/asm-x86_64/scatterlist.h  16
-rw-r--r--  xen/include/asm-x86_64/smp.h  103
-rw-r--r--  xen/include/asm-x86_64/smpboot.h  130
-rw-r--r--  xen/include/asm-x86_64/softirq.h  17
-rw-r--r--  xen/include/asm-x86_64/spinlock.h  174
-rw-r--r--  xen/include/asm-x86_64/string.h  50
-rw-r--r--  xen/include/asm-x86_64/system.h  220
-rw-r--r--  xen/include/asm-x86_64/time.h  23
-rw-r--r--  xen/include/asm-x86_64/timex.h  67
-rw-r--r--  xen/include/asm-x86_64/types.h  44
-rw-r--r--  xen/include/asm-x86_64/unaligned.h  37
-rw-r--r--  xen/include/hypervisor-ifs/arch-x86/hypervisor-if.h (renamed from xen/include/hypervisor-ifs/arch-i386/hypervisor-if.h)  0
-rw-r--r--  xen/include/xen/mm.h  1
99 files changed, 366 insertions, 5633 deletions
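
The commit message above describes folding the 64-bit headers back into a shared asm-x86 tree, and the asm-x86/bitops.h hunks at the end of this diff show the central trick: hard-coded btsq/btrq/btcq mnemonics become "bts"__OS"-style string pasting so one source file can emit either operand size. A minimal sketch of that pattern follows; the __OS definition and its exact home in the new asm-x86 headers are assumptions, not taken from this diff.

/* Sketch only: __OS is assumed to be provided by the subarch headers
 * (e.g. asm-x86/config.h); the exact spelling in the tree may differ. */
#if defined(__x86_64__)
#define __OS "q"                /* 64-bit operand-size suffix */
#else
#define __OS "l"                /* 32-bit operand-size suffix */
#endif

static __inline__ void example_set_bit(long nr, volatile void *addr)
{
    /* Adjacent string literals concatenate, so this assembles as
     * "lock; btsl %1,%0" on i386 and "lock; btsq %1,%0" on x86_64. */
    __asm__ __volatile__( "lock; bts"__OS" %1,%0"
                          : "=m" (*(volatile long *)addr)
                          : "Ir" (nr)
                          : "memory" );
}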
diff --git a/xen/include/asm-i386/bitops.h b/xen/include/asm-i386/bitops.h
deleted file mode 100644
index e98f6b356f..0000000000
--- a/xen/include/asm-i386/bitops.h
+++ /dev/null
@@ -1,368 +0,0 @@
-#ifndef _I386_BITOPS_H
-#define _I386_BITOPS_H
-
-/*
- * Copyright 1992, Linus Torvalds.
- */
-
-#include <xen/config.h>
-
-/*
- * These have to be done with inline assembly: that way the bit-setting
- * is guaranteed to be atomic. All bit operations return 0 if the bit
- * was cleared before the operation and != 0 if it was not.
- *
- * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
- */
-
-#ifdef CONFIG_SMP
-#define LOCK_PREFIX "lock ; "
-#else
-#define LOCK_PREFIX ""
-#endif
-
-#define ADDR (*(volatile long *) addr)
-
-/**
- * set_bit - Atomically set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * This function is atomic and may not be reordered. See __set_bit()
- * if you do not require the atomic guarantees.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static __inline__ void set_bit(int nr, volatile void * addr)
-{
- __asm__ __volatile__( LOCK_PREFIX
- "btsl %1,%0"
- :"=m" (ADDR)
- :"Ir" (nr));
-}
-
-/**
- * __set_bit - Set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * Unlike set_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static __inline__ void __set_bit(int nr, volatile void * addr)
-{
- __asm__(
- "btsl %1,%0"
- :"=m" (ADDR)
- :"Ir" (nr));
-}
-
-/**
- * clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and may not be reordered. However, it does
- * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
- * in order to ensure changes are visible on other processors.
- */
-static __inline__ void clear_bit(int nr, volatile void * addr)
-{
- __asm__ __volatile__( LOCK_PREFIX
- "btrl %1,%0"
- :"=m" (ADDR)
- :"Ir" (nr));
-}
-#define smp_mb__before_clear_bit() barrier()
-#define smp_mb__after_clear_bit() barrier()
-
-/**
- * __change_bit - Toggle a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * Unlike change_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static __inline__ void __change_bit(int nr, volatile void * addr)
-{
- __asm__ __volatile__(
- "btcl %1,%0"
- :"=m" (ADDR)
- :"Ir" (nr));
-}
-
-/**
- * change_bit - Toggle a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * change_bit() is atomic and may not be reordered.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static __inline__ void change_bit(int nr, volatile void * addr)
-{
- __asm__ __volatile__( LOCK_PREFIX
- "btcl %1,%0"
- :"=m" (ADDR)
- :"Ir" (nr));
-}
-
-/**
- * test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static __inline__ int test_and_set_bit(int nr, volatile void * addr)
-{
- int oldbit;
-
- __asm__ __volatile__( LOCK_PREFIX
- "btsl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"=m" (ADDR)
- :"Ir" (nr) : "memory");
- return oldbit;
-}
-
-/**
- * __test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
-{
- int oldbit;
-
- __asm__(
- "btsl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"=m" (ADDR)
- :"Ir" (nr));
- return oldbit;
-}
-
-/**
- * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
-{
- int oldbit;
-
- __asm__ __volatile__( LOCK_PREFIX
- "btrl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"=m" (ADDR)
- :"Ir" (nr) : "memory");
- return oldbit;
-}
-
-/**
- * __test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
-{
- int oldbit;
-
- __asm__(
- "btrl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"=m" (ADDR)
- :"Ir" (nr));
- return oldbit;
-}
-
-/* WARNING: non atomic and it can be reordered! */
-static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
-{
- int oldbit;
-
- __asm__ __volatile__(
- "btcl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"=m" (ADDR)
- :"Ir" (nr) : "memory");
- return oldbit;
-}
-
-/**
- * test_and_change_bit - Change a bit and return its new value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static __inline__ int test_and_change_bit(int nr, volatile void * addr)
-{
- int oldbit;
-
- __asm__ __volatile__( LOCK_PREFIX
- "btcl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"=m" (ADDR)
- :"Ir" (nr) : "memory");
- return oldbit;
-}
-
-
-static __inline__ int constant_test_bit(int nr, const volatile void * addr)
-{
- return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
-}
-
-static __inline__ int variable_test_bit(int nr, volatile void * addr)
-{
- int oldbit;
-
- __asm__ __volatile__(
- "btl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit)
- :"m" (ADDR),"Ir" (nr));
- return oldbit;
-}
-
-#define test_bit(nr,addr) \
-(__builtin_constant_p(nr) ? \
- constant_test_bit((nr),(addr)) : \
- variable_test_bit((nr),(addr)))
-
-/**
- * find_first_zero_bit - find the first zero bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
- *
- * Returns the bit-number of the first zero bit, not the number of the byte
- * containing a bit.
- */
-static __inline__ int find_first_zero_bit(void * addr, unsigned size)
-{
- int d0, d1, d2;
- int res;
-
- if (!size)
- return 0;
- /* This looks at memory. Mark it volatile to tell gcc not to move it around */
- __asm__ __volatile__(
- "movl $-1,%%eax\n\t"
- "xorl %%edx,%%edx\n\t"
- "repe; scasl\n\t"
- "je 1f\n\t"
- "xorl -4(%%edi),%%eax\n\t"
- "subl $4,%%edi\n\t"
- "bsfl %%eax,%%edx\n"
- "1:\tsubl %%ebx,%%edi\n\t"
- "shll $3,%%edi\n\t"
- "addl %%edi,%%edx"
- :"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
- :"1" ((size + 31) >> 5), "2" (addr), "b" (addr));
- return res;
-}
-
-/**
- * find_next_zero_bit - find the first zero bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
-{
- unsigned long * p = ((unsigned long *) addr) + (offset >> 5);
- int set = 0, bit = offset & 31, res;
-
- if (bit) {
- /*
- * Look for zero in first byte
- */
- __asm__("bsfl %1,%0\n\t"
- "jne 1f\n\t"
- "movl $32, %0\n"
- "1:"
- : "=r" (set)
- : "r" (~(*p >> bit)));
- if (set < (32 - bit))
- return set + offset;
- set = 32 - bit;
- p++;
- }
- /*
- * No zero yet, search remaining full bytes for a zero
- */
- res = find_first_zero_bit (p, size - 32 * (p - (unsigned long *) addr));
- return (offset + set + res);
-}
-
-/**
- * ffz - find first zero in word.
- * @word: The word to search
- *
- * Undefined if no zero exists, so code should check against ~0UL first.
- */
-static __inline__ unsigned long ffz(unsigned long word)
-{
- __asm__("bsfl %1,%0"
- :"=r" (word)
- :"r" (~word));
- return word;
-}
-
-/**
- * ffs - find first bit set
- * @x: the word to search
- *
- * This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
- */
-static __inline__ int ffs(int x)
-{
- int r;
-
- __asm__("bsfl %1,%0\n\t"
- "jnz 1f\n\t"
- "movl $-1,%0\n"
- "1:" : "=r" (r) : "g" (x));
- return r+1;
-}
-
-/**
- * hweightN - returns the hamming weight of a N-bit word
- * @x: the word to weigh
- *
- * The Hamming Weight of a number is the total number of bits set in it.
- */
-
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
-
-#define ext2_set_bit __test_and_set_bit
-#define ext2_clear_bit __test_and_clear_bit
-#define ext2_test_bit test_bit
-#define ext2_find_first_zero_bit find_first_zero_bit
-#define ext2_find_next_zero_bit find_next_zero_bit
-
-/* Bitmap functions for the minix filesystem. */
-#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr)
-#define minix_set_bit(nr,addr) __set_bit(nr,addr)
-#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr)
-#define minix_test_bit(nr,addr) test_bit(nr,addr)
-#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
-
-#endif /* _I386_BITOPS_H */
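
The header just deleted defined the classic locked bit operations; its replacement under asm-x86 (further down) keeps the same interface, so callers are unchanged. A brief usage sketch of that interface, with the bitmap and function names invented here for illustration:

/* Illustrative only: my_flags, NR_FLAGS and flag_example are not in the tree. */
#define NR_FLAGS 64
static unsigned long my_flags[NR_FLAGS / (8 * sizeof(unsigned long))];

static void flag_example(void)
{
    int idx;

    set_bit(3, my_flags);                 /* atomic: lock; bts */

    if (test_and_clear_bit(3, my_flags))  /* atomic read-and-clear */
        idx = 3;                          /* we claimed whatever bit 3 guarded */

    __set_bit(5, my_flags);               /* non-atomic variant: caller must serialise */

    idx = find_first_zero_bit(my_flags, NR_FLAGS); /* index of first clear bit */
    (void)idx;
}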
diff --git a/xen/include/asm-i386/dma.h b/xen/include/asm-i386/dma.h
deleted file mode 100644
index 2d0af85306..0000000000
--- a/xen/include/asm-i386/dma.h
+++ /dev/null
@@ -1,298 +0,0 @@
-/* $Id: dma.h,v 1.7 1992/12/14 00:29:34 root Exp root $
- * linux/include/asm/dma.h: Defines for using and allocating dma channels.
- * Written by Hennus Bergman, 1992.
- * High DMA channel support & info by Hannu Savolainen
- * and John Boyd, Nov. 1992.
- */
-
-#ifndef _ASM_DMA_H
-#define _ASM_DMA_H
-
-#include <xen/config.h>
-#include <xen/spinlock.h> /* And spinlocks */
-#include <asm/io.h> /* need byte IO */
-#include <xen/delay.h>
-
-
-#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
-#define dma_outb outb_p
-#else
-#define dma_outb outb
-#endif
-
-#define dma_inb inb
-
-/*
- * NOTES about DMA transfers:
- *
- * controller 1: channels 0-3, byte operations, ports 00-1F
- * controller 2: channels 4-7, word operations, ports C0-DF
- *
- * - ALL registers are 8 bits only, regardless of transfer size
- * - channel 4 is not used - cascades 1 into 2.
- * - channels 0-3 are byte - addresses/counts are for physical bytes
- * - channels 5-7 are word - addresses/counts are for physical words
- * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
- * - transfer count loaded to registers is 1 less than actual count
- * - controller 2 offsets are all even (2x offsets for controller 1)
- * - page registers for 5-7 don't use data bit 0, represent 128K pages
- * - page registers for 0-3 use bit 0, represent 64K pages
- *
- * DMA transfers are limited to the lower 16MB of _physical_ memory.
- * Note that addresses loaded into registers must be _physical_ addresses,
- * not logical addresses (which may differ if paging is active).
- *
- * Address mapping for channels 0-3:
- *
- * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses)
- * | ... | | ... | | ... |
- * | ... | | ... | | ... |
- * | ... | | ... | | ... |
- * P7 ... P0 A7 ... A0 A7 ... A0
- * | Page | Addr MSB | Addr LSB | (DMA registers)
- *
- * Address mapping for channels 5-7:
- *
- * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses)
- * | ... | \ \ ... \ \ \ ... \ \
- * | ... | \ \ ... \ \ \ ... \ (not used)
- * | ... | \ \ ... \ \ \ ... \
- * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0
- * | Page | Addr MSB | Addr LSB | (DMA registers)
- *
- * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
- * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
- * the hardware level, so odd-byte transfers aren't possible).
- *
- * Transfer count (_not # bytes_) is limited to 64K, represented as actual
- * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more,
- * and up to 128K bytes may be transferred on channels 5-7 in one operation.
- *
- */
-
-#define MAX_DMA_CHANNELS 8
-
-/* The maximum address that we can perform a DMA transfer to on this platform */
-/*#define MAX_DMA_ADDRESS (PAGE_OFFSET+0x1000000)*/
-
-/* 8237 DMA controllers */
-#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
-#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */
-
-/* DMA controller registers */
-#define DMA1_CMD_REG 0x08 /* command register (w) */
-#define DMA1_STAT_REG 0x08 /* status register (r) */
-#define DMA1_REQ_REG 0x09 /* request register (w) */
-#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */
-#define DMA1_MODE_REG 0x0B /* mode register (w) */
-#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */
-#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */
-#define DMA1_RESET_REG 0x0D /* Master Clear (w) */
-#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */
-#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */
-
-#define DMA2_CMD_REG 0xD0 /* command register (w) */
-#define DMA2_STAT_REG 0xD0 /* status register (r) */
-#define DMA2_REQ_REG 0xD2 /* request register (w) */
-#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */
-#define DMA2_MODE_REG 0xD6 /* mode register (w) */
-#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */
-#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */
-#define DMA2_RESET_REG 0xDA /* Master Clear (w) */
-#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */
-#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */
-
-#define DMA_ADDR_0 0x00 /* DMA address registers */
-#define DMA_ADDR_1 0x02
-#define DMA_ADDR_2 0x04
-#define DMA_ADDR_3 0x06
-#define DMA_ADDR_4 0xC0
-#define DMA_ADDR_5 0xC4
-#define DMA_ADDR_6 0xC8
-#define DMA_ADDR_7 0xCC
-
-#define DMA_CNT_0 0x01 /* DMA count registers */
-#define DMA_CNT_1 0x03
-#define DMA_CNT_2 0x05
-#define DMA_CNT_3 0x07
-#define DMA_CNT_4 0xC2
-#define DMA_CNT_5 0xC6
-#define DMA_CNT_6 0xCA
-#define DMA_CNT_7 0xCE
-
-#define DMA_PAGE_0 0x87 /* DMA page registers */
-#define DMA_PAGE_1 0x83
-#define DMA_PAGE_2 0x81
-#define DMA_PAGE_3 0x82
-#define DMA_PAGE_5 0x8B
-#define DMA_PAGE_6 0x89
-#define DMA_PAGE_7 0x8A
-
-#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */
-#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
-#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
-
-#define DMA_AUTOINIT 0x10
-
-
-extern spinlock_t dma_spin_lock;
-
-static __inline__ unsigned long claim_dma_lock(void)
-{
- unsigned long flags;
- spin_lock_irqsave(&dma_spin_lock, flags);
- return flags;
-}
-
-static __inline__ void release_dma_lock(unsigned long flags)
-{
- spin_unlock_irqrestore(&dma_spin_lock, flags);
-}
-
-/* enable/disable a specific DMA channel */
-static __inline__ void enable_dma(unsigned int dmanr)
-{
- if (dmanr<=3)
- dma_outb(dmanr, DMA1_MASK_REG);
- else
- dma_outb(dmanr & 3, DMA2_MASK_REG);
-}
-
-static __inline__ void disable_dma(unsigned int dmanr)
-{
- if (dmanr<=3)
- dma_outb(dmanr | 4, DMA1_MASK_REG);
- else
- dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
-}
-
-/* Clear the 'DMA Pointer Flip Flop'.
- * Write 0 for LSB/MSB, 1 for MSB/LSB access.
- * Use this once to initialize the FF to a known state.
- * After that, keep track of it. :-)
- * --- In order to do that, the DMA routines below should ---
- * --- only be used while holding the DMA lock ! ---
- */
-static __inline__ void clear_dma_ff(unsigned int dmanr)
-{
- if (dmanr<=3)
- dma_outb(0, DMA1_CLEAR_FF_REG);
- else
- dma_outb(0, DMA2_CLEAR_FF_REG);
-}
-
-/* set mode (above) for a specific DMA channel */
-static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
-{
- if (dmanr<=3)
- dma_outb(mode | dmanr, DMA1_MODE_REG);
- else
- dma_outb(mode | (dmanr&3), DMA2_MODE_REG);
-}
-
-/* Set only the page register bits of the transfer address.
- * This is used for successive transfers when we know the contents of
- * the lower 16 bits of the DMA current address register, but a 64k boundary
- * may have been crossed.
- */
-static __inline__ void set_dma_page(unsigned int dmanr, char pagenr)
-{
- switch(dmanr) {
- case 0:
- dma_outb(pagenr, DMA_PAGE_0);
- break;
- case 1:
- dma_outb(pagenr, DMA_PAGE_1);
- break;
- case 2:
- dma_outb(pagenr, DMA_PAGE_2);
- break;
- case 3:
- dma_outb(pagenr, DMA_PAGE_3);
- break;
- case 5:
- dma_outb(pagenr & 0xfe, DMA_PAGE_5);
- break;
- case 6:
- dma_outb(pagenr & 0xfe, DMA_PAGE_6);
- break;
- case 7:
- dma_outb(pagenr & 0xfe, DMA_PAGE_7);
- break;
- }
-}
-
-
-/* Set transfer address & page bits for specific DMA channel.
- * Assumes dma flipflop is clear.
- */
-static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
-{
- set_dma_page(dmanr, a>>16);
- if (dmanr <= 3) {
- dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
- dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
- } else {
- dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
- dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
- }
-}
-
-
-/* Set transfer size (max 64k for DMA0..3, 128k for DMA5..7) for
- * a specific DMA channel.
- * You must ensure the parameters are valid.
- * NOTE: from a manual: "the number of transfers is one more
- * than the initial word count"! This is taken into account.
- * Assumes dma flip-flop is clear.
- * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
- */
-static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
-{
- count--;
- if (dmanr <= 3) {
- dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
- dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
- } else {
- dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
- dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
- }
-}
-
-
-/* Get DMA residue count. After a DMA transfer, this
- * should return zero. Reading this while a DMA transfer is
- * still in progress will return unpredictable results.
- * If called before the channel has been used, it may return 1.
- * Otherwise, it returns the number of _bytes_ left to transfer.
- *
- * Assumes DMA flip-flop is clear.
- */
-static __inline__ int get_dma_residue(unsigned int dmanr)
-{
- unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
- : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;
-
- /* using short to get 16-bit wrap around */
- unsigned short count;
-
- count = 1 + dma_inb(io_port);
- count += dma_inb(io_port) << 8;
-
- return (dmanr<=3)? count : (count<<1);
-}
-
-
-/* These are in kernel/dma.c: */
-extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */
-extern void free_dma(unsigned int dmanr); /* release it again */
-
-/* From PCI */
-
-#ifdef CONFIG_PCI
-extern int isa_dma_bridge_buggy;
-#else
-#define isa_dma_bridge_buggy (0)
-#endif
-
-#endif /* _ASM_DMA_H */
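
The comment block in the deleted dma.h spells out the 8237 register layout but leaves the programming order to the caller. A hedged sketch of the conventional sequence for arming a legacy ISA DMA channel with the helpers above (the buffer is assumed to sit below 16MB and not to cross a 64K/128K boundary):

/* Sketch of the usual legacy-DMA setup order using the helpers above. */
static void arm_isa_dma(unsigned int chan, unsigned int buf_phys, unsigned int len)
{
    unsigned long flags = claim_dma_lock(); /* serialise access to the 8237s */

    disable_dma(chan);                 /* mask the channel while reprogramming */
    clear_dma_ff(chan);                /* put the LSB/MSB flip-flop in a known state */
    set_dma_mode(chan, DMA_MODE_READ); /* I/O -> memory, single transfer, no autoinit */
    set_dma_addr(chan, buf_phys);      /* writes page register plus 16-bit address */
    set_dma_count(chan, len);          /* helper applies the count-minus-one rule */
    enable_dma(chan);                  /* unmask: transfer starts on the next DREQ */

    release_dma_lock(flags);
}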
diff --git a/xen/include/asm-i386/flushtlb.h b/xen/include/asm-i386/flushtlb.h
deleted file mode 100644
index f0d4bb946c..0000000000
--- a/xen/include/asm-i386/flushtlb.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/******************************************************************************
- * flushtlb.h
- *
- * TLB flushes are timestamped using a global virtual 'clock' which ticks
- * on any TLB flush on any processor.
- *
- * Copyright (c) 2003, K A Fraser
- */
-
-#ifndef __FLUSHTLB_H__
-#define __FLUSHTLB_H__
-
-#include <xen/smp.h>
-
-/*
- * Every time the TLB clock passes an "epoch", every CPU's TLB is flushed.
- * Therefore, if the current TLB time and a previously-read timestamp differ
- * in their significant bits (i.e., ~TLBCLOCK_EPOCH_MASK), then the TLB clock
- * has wrapped at least once and every CPU's TLB is guaranteed to have been
- * flushed meanwhile.
- * This allows us to deal gracefully with a bounded (a.k.a. wrapping) clock.
- */
-#define TLBCLOCK_EPOCH_MASK ((1U<<16)-1)
-
-/*
- * 'cpu_stamp' is the current timestamp for the CPU we are testing.
- * 'lastuse_stamp' is a timestamp taken when the PFN we are testing was last
- * used for a purpose that may have caused the CPU's TLB to become tainted.
- */
-static inline int NEED_FLUSH(u32 cpu_stamp, u32 lastuse_stamp)
-{
- /*
- * Why does this work?
- * 1. XOR sets high-order bits if the stamps are from differing epochs.
- * 2. Subtraction sets high-order bits if 'cpu_stamp > lastuse_stamp'.
- * In either case a flush is unnecessary: we therefore OR the results from
- * (1) and (2), mask the high-order bits, and return the inverse.
- */
- return !(((lastuse_stamp^cpu_stamp)|(lastuse_stamp-cpu_stamp)) &
- ~TLBCLOCK_EPOCH_MASK);
-}
-
-extern u32 tlbflush_clock;
-extern u32 tlbflush_time[NR_CPUS];
-
-extern void tlb_clocktick(void);
-extern void new_tlbflush_clock_period(void);
-
-#endif /* __FLUSHTLB_H__ */
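
The two-term test in NEED_FLUSH() is dense; the worked values below (chosen purely for illustration, with TLBCLOCK_EPOCH_MASK = 0xffff) may help when reading it:

/*
 * lastuse = 0x0001fffe, cpu = 0x0001ffff (same epoch, CPU flushed after last use):
 *   XOR = 0x00000001, SUB = 0xffffffff -> high bits set -> returns 0 (no flush).
 * lastuse = 0x0001ffff, cpu = 0x0001fffe (same epoch, used after the CPU's last flush):
 *   XOR = 0x00000001, SUB = 0x00000001 -> no high bits -> returns 1 (flush needed).
 * lastuse = 0x0000ffff, cpu = 0x00010001 (differing epochs):
 *   XOR = 0x0001fffe -> high bits set -> returns 0 (every TLB already flushed).
 */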
diff --git a/xen/include/asm-i386/hdreg.h b/xen/include/asm-i386/hdreg.h
deleted file mode 100644
index 2c4ca680f6..0000000000
--- a/xen/include/asm-i386/hdreg.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * linux/include/asm-i386/hdreg.h
- *
- * Copyright (C) 1994-1996 Linus Torvalds & authors
- */
-
-#ifndef __ASMi386_HDREG_H
-#define __ASMi386_HDREG_H
-
-//typedef unsigned short ide_ioreg_t;
-typedef unsigned long ide_ioreg_t;
-
-#endif /* __ASMi386_HDREG_H */
diff --git a/xen/include/asm-i386/ide.h b/xen/include/asm-i386/ide.h
deleted file mode 100644
index e1b20f0758..0000000000
--- a/xen/include/asm-i386/ide.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * linux/include/asm-i386/ide.h
- *
- * Copyright (C) 1994-1996 Linus Torvalds & authors
- */
-
-/*
- * This file contains the i386 architecture specific IDE code.
- */
-
-#ifndef __ASMi386_IDE_H
-#define __ASMi386_IDE_H
-
-#ifdef __KERNEL__
-
-#include <xen/config.h>
-
-#ifndef MAX_HWIFS
-# ifdef CONFIG_BLK_DEV_IDEPCI
-#define MAX_HWIFS 10
-# else
-#define MAX_HWIFS 6
-# endif
-#endif
-
-#define ide__sti() __sti()
-
-static __inline__ int ide_default_irq(ide_ioreg_t base)
-{
- switch (base) {
- case 0x1f0: return 14;
- case 0x170: return 15;
- case 0x1e8: return 11;
- case 0x168: return 10;
- case 0x1e0: return 8;
- case 0x160: return 12;
- default:
- return 0;
- }
-}
-
-static __inline__ ide_ioreg_t ide_default_io_base(int index)
-{
- switch (index) {
- case 0: return 0x1f0;
- case 1: return 0x170;
- case 2: return 0x1e8;
- case 3: return 0x168;
- case 4: return 0x1e0;
- case 5: return 0x160;
- default:
- return 0;
- }
-}
-
-static __inline__ void ide_init_hwif_ports(hw_regs_t *hw, ide_ioreg_t data_port, ide_ioreg_t ctrl_port, int *irq)
-{
- ide_ioreg_t reg = data_port;
- int i;
-
- for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
- hw->io_ports[i] = reg;
- reg += 1;
- }
- if (ctrl_port) {
- hw->io_ports[IDE_CONTROL_OFFSET] = ctrl_port;
- } else {
- hw->io_ports[IDE_CONTROL_OFFSET] = hw->io_ports[IDE_DATA_OFFSET] + 0x206;
- }
- if (irq != NULL)
- *irq = 0;
- hw->io_ports[IDE_IRQ_OFFSET] = 0;
-}
-
-static __inline__ void ide_init_default_hwifs(void)
-{
-#ifndef CONFIG_BLK_DEV_IDEPCI
- hw_regs_t hw;
- int index;
-
- for(index = 0; index < MAX_HWIFS; index++) {
- ide_init_hwif_ports(&hw, ide_default_io_base(index), 0, NULL);
- hw.irq = ide_default_irq(ide_default_io_base(index));
- ide_register_hw(&hw, NULL);
- }
-#endif /* CONFIG_BLK_DEV_IDEPCI */
-}
-
-typedef union {
- unsigned all : 8; /* all of the bits together */
- struct {
- unsigned head : 4; /* always zeros here */
- unsigned unit : 1; /* drive select number, 0 or 1 */
- unsigned bit5 : 1; /* always 1 */
- unsigned lba : 1; /* using LBA instead of CHS */
- unsigned bit7 : 1; /* always 1 */
- } b;
-} select_t;
-
-typedef union {
- unsigned all : 8; /* all of the bits together */
- struct {
- unsigned bit0 : 1;
- unsigned nIEN : 1; /* device INTRQ to host */
- unsigned SRST : 1; /* host soft reset bit */
- unsigned bit3 : 1; /* ATA-2 thingy */
- unsigned reserved456 : 3;
- unsigned HOB : 1; /* 48-bit address ordering */
- } b;
-} control_t;
-
-#define ide_request_irq(irq,hand,flg,dev,id) request_irq((irq),(hand),(flg),(dev),(id))
-#define ide_free_irq(irq,dev_id) free_irq((irq), (dev_id))
-#define ide_check_region(from,extent) check_region((from), (extent))
-#define ide_request_region(from,extent,name) request_region((from), (extent), (name))
-#define ide_release_region(from,extent) release_region((from), (extent))
-
-/*
- * The following are not needed for the non-m68k ports
- */
-#define ide_ack_intr(hwif) (1)
-#define ide_fix_driveid(id) do {} while (0)
-#define ide_release_lock(lock) do {} while (0)
-#define ide_get_lock(lock, hdlr, data) do {} while (0)
-
-#endif /* __KERNEL__ */
-
-#endif /* __ASMi386_IDE_H */
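
The deleted ide.h encodes the legacy port-to-IRQ table in ide_default_io_base()/ide_default_irq(). A small illustrative walk over that table (the function name and printk formatting are this note's, not the tree's):

static void dump_default_ide_ports(void)
{
    int i;

    for (i = 0; i < MAX_HWIFS; i++) {
        ide_ioreg_t base = ide_default_io_base(i);
        if (!base)
            continue;                      /* slots beyond the six legacy bases */
        printk("hwif %d: I/O base 0x%lx, IRQ %d\n",
               i, (unsigned long)base, ide_default_irq(base));
    }
}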
diff --git a/xen/include/asm-i386/io.h b/xen/include/asm-i386/io.h
deleted file mode 100644
index f9e8cc936a..0000000000
--- a/xen/include/asm-i386/io.h
+++ /dev/null
@@ -1,284 +0,0 @@
-#ifndef _ASM_IO_H
-#define _ASM_IO_H
-
-#include <xen/config.h>
-#include <asm/page.h>
-
-#define IO_SPACE_LIMIT 0xffff
-
-/*#include <xen/vmalloc.h>*/
-
-/*
- * Temporary debugging check to catch old code using
- * unmapped ISA addresses. Will be removed in 2.4.
- */
-#if CONFIG_DEBUG_IOVIRT
- extern void *__io_virt_debug(unsigned long x, const char *file, int line);
- extern unsigned long __io_phys_debug(unsigned long x, const char *file, int line);
- #define __io_virt(x) __io_virt_debug((unsigned long)(x), __FILE__, __LINE__)
-//#define __io_phys(x) __io_phys_debug((unsigned long)(x), __FILE__, __LINE__)
-#else
- #define __io_virt(x) ((void *)(x))
-//#define __io_phys(x) __pa(x)
-#endif
-
-
-/**
- * virt_to_phys - map virtual addresses to physical
- * @address: address to remap
- *
- * The returned physical address is the physical (CPU) mapping for
- * the memory address given. It is only valid to use this function on
- * addresses directly mapped or allocated via kmalloc.
- *
- * This function does not give bus mappings for DMA transfers. In
- * almost all conceivable cases a device driver should not be using
- * this function
- */
-
-static inline unsigned long virt_to_phys(volatile void * address)
-{
- return __pa(address);
-}
-
-/**
- * phys_to_virt - map physical address to virtual
- * @address: address to remap
- *
- * The returned virtual address is a current CPU mapping for
- * the memory address given. It is only valid to use this function on
- * addresses that have a kernel mapping
- *
- * This function does not handle bus mappings for DMA transfers. In
- * almost all conceivable cases a device driver should not be using
- * this function
- */
-
-static inline void * phys_to_virt(unsigned long address)
-{
- return __va(address);
-}
-
-/*
- * Change "struct pfn_info" to physical address.
- */
-#ifdef CONFIG_HIGHMEM64G
-#define page_to_phys(page) ((u64)(page - frame_table) << PAGE_SHIFT)
-#else
-#define page_to_phys(page) ((page - frame_table) << PAGE_SHIFT)
-#endif
-
-#define page_to_pfn(_page) ((unsigned long)((_page) - frame_table))
-#define page_to_virt(_page) phys_to_virt(page_to_phys(_page))
-
-
-extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
-
-static inline void * ioremap (unsigned long offset, unsigned long size)
-{
- return __ioremap(offset, size, 0);
-}
-
-/*
- * This one maps high address device memory and turns off caching for that area.
- * it's useful if some control registers are in such an area and write combining
- * or read caching is not desirable:
- */
-static inline void * ioremap_nocache (unsigned long offset, unsigned long size)
-{
- return __ioremap(offset, size, _PAGE_PCD);
-}
-
-extern void iounmap(void *addr);
-
-/*
- * IO bus memory addresses are also 1:1 with the physical address
- */
-#define virt_to_bus virt_to_phys
-#define bus_to_virt phys_to_virt
-#define page_to_bus page_to_phys
-
-/*
- * readX/writeX() are used to access memory mapped devices. On some
- * architectures the memory mapped IO stuff needs to be accessed
- * differently. On the x86 architecture, we just read/write the
- * memory location directly.
- */
-
-#define readb(addr) (*(volatile unsigned char *) __io_virt(addr))
-#define readw(addr) (*(volatile unsigned short *) __io_virt(addr))
-#define readl(addr) (*(volatile unsigned int *) __io_virt(addr))
-#define __raw_readb readb
-#define __raw_readw readw
-#define __raw_readl readl
-
-#define writeb(b,addr) (*(volatile unsigned char *) __io_virt(addr) = (b))
-#define writew(b,addr) (*(volatile unsigned short *) __io_virt(addr) = (b))
-#define writel(b,addr) (*(volatile unsigned int *) __io_virt(addr) = (b))
-#define __raw_writeb writeb
-#define __raw_writew writew
-#define __raw_writel writel
-
-#define memset_io(a,b,c) memset(__io_virt(a),(b),(c))
-#define memcpy_fromio(a,b,c) memcpy((a),__io_virt(b),(c))
-#define memcpy_toio(a,b,c) memcpy(__io_virt(a),(b),(c))
-
-/*
- * ISA space is 'always mapped' on a typical x86 system, no need to
- * explicitly ioremap() it. The fact that the ISA IO space is mapped
- * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
- * are physical addresses. The following constant pointer can be
- * used as the IO-area pointer (it can be iounmapped as well, so the
- * analogy with PCI is quite large):
- */
-#define __ISA_IO_base ((char *)(PAGE_OFFSET))
-
-#define isa_readb(a) readb(__ISA_IO_base + (a))
-#define isa_readw(a) readw(__ISA_IO_base + (a))
-#define isa_readl(a) readl(__ISA_IO_base + (a))
-#define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a))
-#define isa_writew(w,a) writew(w,__ISA_IO_base + (a))
-#define isa_writel(l,a) writel(l,__ISA_IO_base + (a))
-#define isa_memset_io(a,b,c) memset_io(__ISA_IO_base + (a),(b),(c))
-#define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),__ISA_IO_base + (b),(c))
-#define isa_memcpy_toio(a,b,c) memcpy_toio(__ISA_IO_base + (a),(b),(c))
-
-
-/*
- * Again, i386 does not require mem IO specific function.
- */
-
-#define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),__io_virt(b),(c),(d))
-#define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),__io_virt(__ISA_IO_base + (b)),(c),(d))
-
-static inline int check_signature(unsigned long io_addr,
- const unsigned char *signature, int length)
-{
- int retval = 0;
- do {
- if (readb(io_addr) != *signature)
- goto out;
- io_addr++;
- signature++;
- length--;
- } while (length);
- retval = 1;
-out:
- return retval;
-}
-
-static inline int isa_check_signature(unsigned long io_addr,
- const unsigned char *signature, int length)
-{
- int retval = 0;
- do {
- if (isa_readb(io_addr) != *signature)
- goto out;
- io_addr++;
- signature++;
- length--;
- } while (length);
- retval = 1;
-out:
- return retval;
-}
-
-/*
- * Cache management
- *
- * This needed for two cases
- * 1. Out of order aware processors
- * 2. Accidentally out of order processors (PPro errata #51)
- */
-
-#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
-
-static inline void flush_write_buffers(void)
-{
- __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory");
-}
-
-#define dma_cache_inv(_start,_size) flush_write_buffers()
-#define dma_cache_wback(_start,_size) flush_write_buffers()
-#define dma_cache_wback_inv(_start,_size) flush_write_buffers()
-
-#else
-
-/* Nothing to do */
-
-#define dma_cache_inv(_start,_size) do { } while (0)
-#define dma_cache_wback(_start,_size) do { } while (0)
-#define dma_cache_wback_inv(_start,_size) do { } while (0)
-#define flush_write_buffers()
-
-#endif
-
-#ifdef SLOW_IO_BY_JUMPING
-#define __SLOW_DOWN_IO "\njmp 1f\n1:\tjmp 1f\n1:"
-#else
-#define __SLOW_DOWN_IO "\noutb %%al,$0x80"
-#endif
-
-#ifdef REALLY_SLOW_IO
-#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
-#else
-#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO
-#endif
-
-
-/*
- * Talk about misusing macros..
- */
-#define __OUT1(s,x) \
-static inline void out##s(unsigned x value, unsigned short port) {
-
-#define __OUT2(s,s1,s2) \
-__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
-
-#define __OUT(s,s1,x) \
-__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
-__OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));}
-
-#define __IN1(s) \
-static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;
-
-#define __IN2(s,s1,s2) \
-__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
-
-#define __IN(s,s1,i...) \
-__IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
-__IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; }
-
-#define __INS(s) \
-static inline void ins##s(unsigned short port, void * addr, unsigned long count) \
-{ __asm__ __volatile__ ("rep ; ins" #s \
-: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
-
-#define __OUTS(s) \
-static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
-{ __asm__ __volatile__ ("rep ; outs" #s \
-: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
-
-#define RETURN_TYPE unsigned char
-__IN(b,"")
-#undef RETURN_TYPE
-#define RETURN_TYPE unsigned short
-__IN(w,"")
-#undef RETURN_TYPE
-#define RETURN_TYPE unsigned int
-__IN(l,"")
-#undef RETURN_TYPE
-
-__OUT(b,"b",char)
-__OUT(w,"w",short)
-__OUT(l,,int)
-
-__INS(b)
-__INS(w)
-__INS(l)
-
-__OUTS(b)
-__OUTS(w)
-__OUTS(l)
-
-#endif
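
The __IN/__OUT macro machinery at the bottom of the deleted io.h expands into the whole inb/outb family, including the _p variants that append the slow-down I/O. A tiny usage sketch (the CMOS index/data ports 0x70/0x71 are chosen purely as an example; note the value-then-port argument order):

static unsigned char cmos_read_seconds(void)
{
    outb_p(0x00, 0x70);   /* select CMOS register 0 (seconds); _p adds recovery delay */
    return inb_p(0x71);   /* read it back through the data port */
}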
diff --git a/xen/include/asm-i386/msr.h b/xen/include/asm-i386/msr.h
deleted file mode 100644
index 45ec765e6e..0000000000
--- a/xen/include/asm-i386/msr.h
+++ /dev/null
@@ -1,121 +0,0 @@
-#ifndef __ASM_MSR_H
-#define __ASM_MSR_H
-
-/*
- * Access to machine-specific registers (available on 586 and better only)
- * Note: the rd* operations modify the parameters directly (without using
- * pointer indirection), this allows gcc to optimize better
- */
-
-#define rdmsr(msr,val1,val2) \
- __asm__ __volatile__("rdmsr" \
- : "=a" (val1), "=d" (val2) \
- : "c" (msr))
-
-#define wrmsr(msr,val1,val2) \
- __asm__ __volatile__("wrmsr" \
- : /* no outputs */ \
- : "c" (msr), "a" (val1), "d" (val2))
-
-#define rdtsc(low,high) \
- __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
-
-#define rdtscl(low) \
- __asm__ __volatile__("rdtsc" : "=a" (low) : : "edx")
-
-#define rdtscll(val) \
- __asm__ __volatile__("rdtsc" : "=A" (val))
-
-#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
-
-#define rdpmc(counter,low,high) \
- __asm__ __volatile__("rdpmc" \
- : "=a" (low), "=d" (high) \
- : "c" (counter))
-
-/* symbolic names for some interesting MSRs */
-/* Intel defined MSRs. */
-#define MSR_IA32_P5_MC_ADDR 0
-#define MSR_IA32_P5_MC_TYPE 1
-#define MSR_IA32_PLATFORM_ID 0x17
-#define MSR_IA32_EBL_CR_POWERON 0x2a
-
-#define MSR_IA32_APICBASE 0x1b
-#define MSR_IA32_APICBASE_BSP (1<<8)
-#define MSR_IA32_APICBASE_ENABLE (1<<11)
-#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
-
-#define MSR_IA32_UCODE_WRITE 0x79
-#define MSR_IA32_UCODE_REV 0x8b
-
-#define MSR_IA32_BBL_CR_CTL 0x119
-
-#define MSR_IA32_MCG_CAP 0x179
-#define MSR_IA32_MCG_STATUS 0x17a
-#define MSR_IA32_MCG_CTL 0x17b
-
-#define MSR_IA32_THERM_CONTROL 0x19a
-#define MSR_IA32_THERM_INTERRUPT 0x19b
-#define MSR_IA32_THERM_STATUS 0x19c
-#define MSR_IA32_MISC_ENABLE 0x1a0
-
-#define MSR_IA32_DEBUGCTLMSR 0x1d9
-#define MSR_IA32_LASTBRANCHFROMIP 0x1db
-#define MSR_IA32_LASTBRANCHTOIP 0x1dc
-#define MSR_IA32_LASTINTFROMIP 0x1dd
-#define MSR_IA32_LASTINTTOIP 0x1de
-
-#define MSR_IA32_MC0_CTL 0x400
-#define MSR_IA32_MC0_STATUS 0x401
-#define MSR_IA32_MC0_ADDR 0x402
-#define MSR_IA32_MC0_MISC 0x403
-
-#define MSR_P6_PERFCTR0 0xc1
-#define MSR_P6_PERFCTR1 0xc2
-#define MSR_P6_EVNTSEL0 0x186
-#define MSR_P6_EVNTSEL1 0x187
-
-/* AMD Defined MSRs */
-#define MSR_K6_EFER 0xC0000080
-#define MSR_K6_STAR 0xC0000081
-#define MSR_K6_WHCR 0xC0000082
-#define MSR_K6_UWCCR 0xC0000085
-#define MSR_K6_EPMR 0xC0000086
-#define MSR_K6_PSOR 0xC0000087
-#define MSR_K6_PFIR 0xC0000088
-
-#define MSR_K7_EVNTSEL0 0xC0010000
-#define MSR_K7_PERFCTR0 0xC0010004
-#define MSR_K7_HWCR 0xC0010015
-#define MSR_K7_CLK_CTL 0xC001001b
-#define MSR_K7_FID_VID_CTL 0xC0010041
-#define MSR_K7_VID_STATUS 0xC0010042
-
-/* Centaur-Hauls/IDT defined MSRs. */
-#define MSR_IDT_FCR1 0x107
-#define MSR_IDT_FCR2 0x108
-#define MSR_IDT_FCR3 0x109
-#define MSR_IDT_FCR4 0x10a
-
-#define MSR_IDT_MCR0 0x110
-#define MSR_IDT_MCR1 0x111
-#define MSR_IDT_MCR2 0x112
-#define MSR_IDT_MCR3 0x113
-#define MSR_IDT_MCR4 0x114
-#define MSR_IDT_MCR5 0x115
-#define MSR_IDT_MCR6 0x116
-#define MSR_IDT_MCR7 0x117
-#define MSR_IDT_MCR_CTRL 0x120
-
-/* VIA Cyrix defined MSRs*/
-#define MSR_VIA_FCR 0x1107
-#define MSR_VIA_LONGHAUL 0x110a
-#define MSR_VIA_BCR2 0x1147
-
-/* Transmeta defined MSRs */
-#define MSR_TMTA_LONGRUN_CTRL 0x80868010
-#define MSR_TMTA_LONGRUN_FLAGS 0x80868011
-#define MSR_TMTA_LRTI_READOUT 0x80868018
-#define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a
-
-#endif /* __ASM_MSR_H */
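
The rdmsr/rdtscll macros above write straight into their arguments rather than taking pointers, which is easy to misread. An illustrative caller (the printk is this note's addition, and the "=A" form of rdtscll is specific to the 32-bit build being deleted here):

static void msr_example(void)
{
    unsigned int lo, hi;
    unsigned long long tsc;

    rdmsr(MSR_IA32_APICBASE, lo, hi);   /* lo/hi receive EAX/EDX directly */
    if (lo & MSR_IA32_APICBASE_ENABLE)
        printk("APIC enabled at %08x\n", lo & MSR_IA32_APICBASE_BASE);

    rdtscll(tsc);                       /* full 64-bit timestamp counter */
    (void)tsc; (void)hi;
}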
diff --git a/xen/include/asm-i386/pci.h b/xen/include/asm-i386/pci.h
deleted file mode 100644
index 34c69c7aa4..0000000000
--- a/xen/include/asm-i386/pci.h
+++ /dev/null
@@ -1,292 +0,0 @@
-#ifndef __i386_PCI_H
-#define __i386_PCI_H
-
-#include <xen/config.h>
-
-#ifdef __KERNEL__
-
-/* Can be used to override the logic in pci_scan_bus for skipping
- already-configured bus numbers - to be used for buggy BIOSes
- or architectures with incomplete PCI setup by the loader */
-
-#ifdef CONFIG_PCI
-extern unsigned int pcibios_assign_all_busses(void);
-#else
-#define pcibios_assign_all_busses() 0
-#endif
-#define pcibios_scan_all_fns() 0
-
-extern unsigned long pci_mem_start;
-#define PCIBIOS_MIN_IO 0x1000
-#define PCIBIOS_MIN_MEM (pci_mem_start)
-
-void pcibios_config_init(void);
-struct pci_bus * pcibios_scan_root(int bus);
-extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value);
-extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value);
-
-void pcibios_set_master(struct pci_dev *dev);
-void pcibios_penalize_isa_irq(int irq);
-struct irq_routing_table *pcibios_get_irq_routing_table(void);
-int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
-
-/* Dynamic DMA mapping stuff.
- * i386 has everything mapped statically.
- */
-
-#include <xen/types.h>
-#include <xen/slab.h>
-#include <asm/scatterlist.h>
-/*#include <xen/string.h>*/
-#include <asm/io.h>
-
-struct pci_dev;
-
-/* The PCI address space does equal the physical memory
- * address space. The networking and block device layers use
- * this boolean for bounce buffer decisions.
- */
-#define PCI_DMA_BUS_IS_PHYS (1)
-
-/* Allocate and map kernel buffer using consistent mode DMA for a device.
- * hwdev should be valid struct pci_dev pointer for PCI devices,
- * NULL for PCI-like buses (ISA, EISA).
- * Returns non-NULL cpu-view pointer to the buffer if successful and
- * sets *dma_addrp to the pci side dma address as well, else *dma_addrp
- * is undefined.
- */
-extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
- dma_addr_t *dma_handle);
-
-/* Free and unmap a consistent DMA buffer.
- * cpu_addr is what was returned from pci_alloc_consistent,
- * size must be the same as what as passed into pci_alloc_consistent,
- * and likewise dma_addr must be the same as what *dma_addrp was set to.
- *
- * References to the memory and mappings associated with cpu_addr/dma_addr
- * past this call are illegal.
- */
-extern void pci_free_consistent(struct pci_dev *hwdev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
-
-/* Map a single buffer of the indicated size for DMA in streaming mode.
- * The 32-bit bus address to use is returned.
- *
- * Once the device is given the dma address, the device owns this memory
- * until either pci_unmap_single or pci_dma_sync_single is performed.
- */
-static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
- size_t size, int direction)
-{
- if (direction == PCI_DMA_NONE)
- out_of_line_bug();
- flush_write_buffers();
- return virt_to_bus(ptr);
-}
-
-/* Unmap a single streaming mode DMA translation. The dma_addr and size
- * must match what was provided for in a previous pci_map_single call. All
- * other usages are undefined.
- *
- * After this call, reads by the cpu to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
- size_t size, int direction)
-{
- if (direction == PCI_DMA_NONE)
- out_of_line_bug();
- /* Nothing to do */
-}
-
-/*
- * pci_{map,unmap}_single_page maps a kernel page to a dma_addr_t. identical
- * to pci_map_single, but takes a struct pfn_info instead of a virtual address
- */
-static inline dma_addr_t pci_map_page(struct pci_dev *hwdev, struct pfn_info *page,
- unsigned long offset, size_t size, int direction)
-{
- if (direction == PCI_DMA_NONE)
- out_of_line_bug();
-
- return (dma_addr_t)(page - frame_table) * PAGE_SIZE + offset;
-}
-
-static inline void pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
- size_t size, int direction)
-{
- if (direction == PCI_DMA_NONE)
- out_of_line_bug();
- /* Nothing to do */
-}
-
-/* pci_unmap_{page,single} is a nop so... */
-#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
-#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
-#define pci_unmap_addr(PTR, ADDR_NAME) (0)
-#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
-#define pci_unmap_len(PTR, LEN_NAME) (0)
-#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
-
-/* Map a set of buffers described by scatterlist in streaming
- * mode for DMA. This is the scatter-gather version of the
- * above pci_map_single interface. Here the scatter gather list
- * elements are each tagged with the appropriate dma address
- * and length. They are obtained via sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- * DMA address/length pairs than there are SG table elements.
- * (for example via virtual mapping capabilities)
- * The routine returns the number of addr/length pairs actually
- * used, at most nents.
- *
- * Device ownership issues as mentioned above for pci_map_single are
- * the same here.
- */
-static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
- int nents, int direction)
-{
- int i;
-
- if (direction == PCI_DMA_NONE)
- out_of_line_bug();
-
- /*
- * temporary 2.4 hack
- */
- for (i = 0; i < nents; i++ ) {
- if (sg[i].address && sg[i].page)
- out_of_line_bug();
-#if 0
- /* Invalid check, since address==0 is valid. */
- else if (!sg[i].address && !sg[i].page)
- out_of_line_bug();
-#endif
-
- /* XXX Switched round, since address==0 is valid. */
- if (sg[i].page)
- sg[i].dma_address = page_to_bus(sg[i].page) + sg[i].offset;
- else
- sg[i].dma_address = virt_to_bus(sg[i].address);
- }
-
- flush_write_buffers();
- return nents;
-}
-
-/* Unmap a set of streaming mode DMA translations.
- * Again, cpu read rules concerning calls here are the same as for
- * pci_unmap_single() above.
- */
-static inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
- int nents, int direction)
-{
- if (direction == PCI_DMA_NONE)
- out_of_line_bug();
- /* Nothing to do */
-}
-
-/* Make physical memory consistent for a single
- * streaming mode DMA translation after a transfer.
- *
- * If you perform a pci_map_single() but wish to interrogate the
- * buffer using the cpu, yet do not wish to teardown the PCI dma
- * mapping, you must call this function before doing so. At the
- * next point you give the PCI dma address back to the card, the
- * device again owns the buffer.
- */
-static inline void pci_dma_sync_single(struct pci_dev *hwdev,
- dma_addr_t dma_handle,
- size_t size, int direction)
-{
- if (direction == PCI_DMA_NONE)
- out_of_line_bug();
- flush_write_buffers();
-}
-
-/* Make physical memory consistent for a set of streaming
- * mode DMA translations after a transfer.
- *
- * The same as pci_dma_sync_single but for a scatter-gather list,
- * same rules and usage.
- */
-static inline void pci_dma_sync_sg(struct pci_dev *hwdev,
- struct scatterlist *sg,
- int nelems, int direction)
-{
- if (direction == PCI_DMA_NONE)
- out_of_line_bug();
- flush_write_buffers();
-}
-
-/* Return whether the given PCI device DMA address mask can
- * be supported properly. For example, if your device can
- * only drive the low 24-bits during PCI bus mastering, then
- * you would pass 0x00ffffff as the mask to this function.
- */
-static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
-{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA..
- */
- if(mask < 0x00ffffff)
- return 0;
-
- return 1;
-}
-
-/* This is always fine. */
-#define pci_dac_dma_supported(pci_dev, mask) (1)
-
-static __inline__ dma64_addr_t
-pci_dac_page_to_dma(struct pci_dev *pdev, struct pfn_info *page, unsigned long offset, int direction)
-{
- return ((dma64_addr_t) page_to_bus(page) +
- (dma64_addr_t) offset);
-}
-
-static __inline__ struct pfn_info *
-pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
-{
- unsigned long poff = (dma_addr >> PAGE_SHIFT);
-
- return frame_table + poff;
-}
-
-static __inline__ unsigned long
-pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
-{
- return (dma_addr & ~PAGE_MASK);
-}
-
-static __inline__ void
-pci_dac_dma_sync_single(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
-{
- flush_write_buffers();
-}
-
-/* These macros should be used after a pci_map_sg call has been done
- * to get bus addresses of each of the SG entries and their lengths.
- * You should only work with the number of sg entries pci_map_sg
- * returns.
- */
-#define sg_dma_address(sg) ((sg)->dma_address)
-#define sg_dma_len(sg) ((sg)->length)
-
-/* Return the index of the PCI controller for device. */
-static inline int pci_controller_num(struct pci_dev *dev)
-{
- return 0;
-}
-
-#if 0 /* XXX Not in land of Xen XXX */
-#define HAVE_PCI_MMAP
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
-#endif
-
-#endif /* __KERNEL__ */
-
-#endif /* __i386_PCI_H */
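
The deleted pci.h implements the streaming-DMA API as near no-ops because i386 bus addresses equal physical addresses, but drivers are still expected to follow the map/sync/unmap discipline it documents. A hedged sketch of that pattern (PCI_DMA_FROMDEVICE comes from the generic PCI header and is assumed available; dev, buf and len are invented driver state):

static void streaming_dma_example(struct pci_dev *dev, void *buf, size_t len)
{
    dma_addr_t bus;

    bus = pci_map_single(dev, buf, len, PCI_DMA_FROMDEVICE); /* hand buffer to device */

    /* ... program 'bus' into the device and wait for the transfer ... */

    pci_dma_sync_single(dev, bus, len, PCI_DMA_FROMDEVICE);  /* CPU may read the data again */
    pci_unmap_single(dev, bus, len, PCI_DMA_FROMDEVICE);     /* always pair with the map */
}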
diff --git a/xen/include/asm-i386/pgalloc.h b/xen/include/asm-i386/pgalloc.h
deleted file mode 100644
index db1d6b948e..0000000000
--- a/xen/include/asm-i386/pgalloc.h
+++ /dev/null
@@ -1,79 +0,0 @@
-#ifndef _I386_PGALLOC_H
-#define _I386_PGALLOC_H
-
-#include <xen/config.h>
-#include <xen/sched.h>
-#include <asm/processor.h>
-#include <asm/fixmap.h>
-
-#define pgd_quicklist (current_cpu_data.pgd_quick)
-#define pmd_quicklist (current_cpu_data.pmd_quick)
-#define pte_quicklist (current_cpu_data.pte_quick)
-#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)
-
-
-/*
- * Allocate and free page tables.
- */
-
-
-#define pte_free(pte) pte_free_fast(pte)
-#define pgd_alloc(mm) get_pgd_fast()
-#define pgd_free(pgd) free_pgd_fast(pgd)
-
-/*
- * allocating and freeing a pmd is trivial: the 1-entry pmd is
- * inside the pgd, so has no extra memory associated with it.
- * (In the PAE case we free the pmds as part of the pgd.)
- */
-
-#define pmd_alloc_one_fast(mm, addr) ({ BUG(); ((pmd_t *)1); })
-#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
-#define pmd_free_slow(x) do { } while (0)
-#define pmd_free_fast(x) do { } while (0)
-#define pmd_free(x) do { } while (0)
-#define pgd_populate(mm, pmd, pte) BUG()
-
-/*
- * TLB flushing:
- *
- * - flush_tlb() flushes the current mm struct TLBs
- * - flush_tlb_all() flushes all processes TLBs
- * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
- *
- * ..but the i386 has somewhat limited tlb flushing capabilities,
- * and page-granular flushes are available only on i486 and up.
- */
-
-#ifndef CONFIG_SMP
-
-#define flush_tlb() __flush_tlb()
-#define flush_tlb_all() __flush_tlb()
-#define flush_tlb_all_pge() __flush_tlb_pge()
-#define local_flush_tlb() __flush_tlb()
-#define flush_tlb_cpu(_cpu) __flush_tlb()
-#define flush_tlb_mask(_mask) __flush_tlb()
-#define try_flush_tlb_mask(_mask) __flush_tlb()
-
-#else
-
-#include <xen/smp.h>
-
-extern int try_flush_tlb_mask(unsigned long mask);
-extern void flush_tlb_mask(unsigned long mask);
-extern void flush_tlb_all_pge(void);
-
-#define flush_tlb() __flush_tlb()
-#define flush_tlb_all() flush_tlb_mask((1 << smp_num_cpus) - 1)
-#define local_flush_tlb() __flush_tlb()
-#define flush_tlb_cpu(_cpu) flush_tlb_mask(1 << (_cpu))
-
-#endif
-
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- /* i386 does not keep any page table caches in TLB */
-}
-
-#endif /* _I386_PGALLOC_H */
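
The flush macros above hide a real UP/SMP split: on SMP builds flush_tlb_mask() becomes an IPI-based cross-CPU flush, on UP builds a plain __flush_tlb(). A short illustrative caller:

static void tlb_flush_example(unsigned long cpu_mask)
{
    local_flush_tlb();        /* this CPU only */
    flush_tlb_mask(cpu_mask); /* the CPUs named in cpu_mask (IPIs on SMP) */
    flush_tlb_all();          /* every online CPU */
}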
diff --git a/xen/include/asm-i386/scatterlist.h b/xen/include/asm-i386/scatterlist.h
deleted file mode 100644
index 9d858415db..0000000000
--- a/xen/include/asm-i386/scatterlist.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef _I386_SCATTERLIST_H
-#define _I386_SCATTERLIST_H
-
-struct scatterlist {
- char * address; /* Location data is to be transferred to, NULL for
- * highmem page */
- struct pfn_info * page; /* Location for highmem page, if any */
- unsigned int offset;/* for highmem, page offset */
-
- dma_addr_t dma_address;
- unsigned int length;
-};
-
-#define ISA_DMA_THRESHOLD (0x00ffffff)
-
-#endif /* !(_I386_SCATTERLIST_H) */
diff --git a/xen/include/asm-i386/acpi.h b/xen/include/asm-x86/acpi.h
index 4d750d486f..4d750d486f 100644
--- a/xen/include/asm-i386/acpi.h
+++ b/xen/include/asm-x86/acpi.h
diff --git a/xen/include/asm-i386/apic.h b/xen/include/asm-x86/apic.h
index f97e0b32d8..589692a2fd 100644
--- a/xen/include/asm-i386/apic.h
+++ b/xen/include/asm-x86/apic.h
@@ -20,19 +20,19 @@
* Basic functions accessing APICs.
*/
-static __inline void apic_write(unsigned long reg, unsigned long v)
+static __inline void apic_write(unsigned long reg, u32 v)
{
- *((volatile unsigned long *)(APIC_BASE+reg)) = v;
+ *((volatile u32 *)(APIC_BASE+reg)) = v;
}
-static __inline void apic_write_atomic(unsigned long reg, unsigned long v)
+static __inline void apic_write_atomic(unsigned long reg, u32 v)
{
- xchg((volatile unsigned long *)(APIC_BASE+reg), v);
+ xchg((volatile u32 *)(APIC_BASE+reg), v);
}
-static __inline unsigned long apic_read(unsigned long reg)
+static __inline u32 apic_read(unsigned long reg)
{
- return *((volatile unsigned long *)(APIC_BASE+reg));
+ return *((volatile u32 *)(APIC_BASE+reg));
}
static __inline__ void apic_wait_icr_idle(void)
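The accessor changes above narrow the value type from unsigned long to u32 because the local APIC's memory-mapped registers are fixed 32-bit words, while unsigned long widens to 64 bits once this header is shared with x86_64. A minimal read-modify-write sketch on top of the new accessors (the helper name is invented; APIC_LVT0 and APIC_LVT_MASKED come from apicdef.h):

    static __inline void demo_mask_lvt0(void)
    {
        /* Read the LVT0 entry, set its mask bit, and write it back. */
        u32 v = apic_read(APIC_LVT0);
        apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
    }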
diff --git a/xen/include/asm-i386/apicdef.h b/xen/include/asm-x86/apicdef.h
index 9f07409b3f..9f07409b3f 100644
--- a/xen/include/asm-i386/apicdef.h
+++ b/xen/include/asm-x86/apicdef.h
diff --git a/xen/include/asm-i386/atomic.h b/xen/include/asm-x86/atomic.h
index c9f2e32763..b64adaedba 100644
--- a/xen/include/asm-i386/atomic.h
+++ b/xen/include/asm-x86/atomic.h
@@ -1,5 +1,5 @@
-#ifndef __ARCH_I386_ATOMIC__
-#define __ARCH_I386_ATOMIC__
+#ifndef __ARCH_X86_ATOMIC__
+#define __ARCH_X86_ATOMIC__
#include <xen/config.h>
@@ -192,4 +192,4 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v)
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
-#endif
+#endif /* __ARCH_X86_ATOMIC__ */
diff --git a/xen/include/asm-x86_64/bitops.h b/xen/include/asm-x86/bitops.h
index 45209b11bd..58ae424e54 100644
--- a/xen/include/asm-x86_64/bitops.h
+++ b/xen/include/asm-x86/bitops.h
@@ -1,5 +1,5 @@
-#ifndef _X86_64_BITOPS_H
-#define _X86_64_BITOPS_H
+#ifndef _X86_BITOPS_H
+#define _X86_BITOPS_H
/*
* Copyright 1992, Linus Torvalds.
@@ -36,7 +36,7 @@
static __inline__ void set_bit(long nr, volatile void * addr)
{
__asm__ __volatile__( LOCK_PREFIX
- "btsq %1,%0"
+ "bts"__OS" %1,%0"
:"=m" (ADDR)
:"dIr" (nr));
}
@@ -53,7 +53,7 @@ static __inline__ void set_bit(long nr, volatile void * addr)
static __inline__ void __set_bit(long nr, volatile void * addr)
{
__asm__(
- "btsq %1,%0"
+ "bts"__OS" %1,%0"
:"=m" (ADDR)
:"dIr" (nr));
}
@@ -71,7 +71,7 @@ static __inline__ void __set_bit(long nr, volatile void * addr)
static __inline__ void clear_bit(long nr, volatile void * addr)
{
__asm__ __volatile__( LOCK_PREFIX
- "btrq %1,%0"
+ "btr"__OS" %1,%0"
:"=m" (ADDR)
:"dIr" (nr));
}
@@ -90,7 +90,7 @@ static __inline__ void clear_bit(long nr, volatile void * addr)
static __inline__ void __change_bit(long nr, volatile void * addr)
{
__asm__ __volatile__(
- "btcq %1,%0"
+ "btc"__OS" %1,%0"
:"=m" (ADDR)
:"dIr" (nr));
}
@@ -107,7 +107,7 @@ static __inline__ void __change_bit(long nr, volatile void * addr)
static __inline__ void change_bit(long nr, volatile void * addr)
{
__asm__ __volatile__( LOCK_PREFIX
- "btcq %1,%0"
+ "btc"__OS" %1,%0"
:"=m" (ADDR)
:"dIr" (nr));
}
@@ -122,10 +122,10 @@ static __inline__ void change_bit(long nr, volatile void * addr)
*/
static __inline__ int test_and_set_bit(long nr, volatile void * addr)
{
- long oldbit;
+ long oldbit;
__asm__ __volatile__( LOCK_PREFIX
- "btsq %2,%1\n\tsbbq %0,%0"
+ "bts"__OS" %2,%1\n\tsbb"__OS" %0,%0"
:"=r" (oldbit),"=m" (ADDR)
:"dIr" (nr) : "memory");
return oldbit;
@@ -145,7 +145,7 @@ static __inline__ int __test_and_set_bit(long nr, volatile void * addr)
long oldbit;
__asm__(
- "btsq %2,%1\n\tsbbq %0,%0"
+ "bts"__OS" %2,%1\n\tsbb"__OS" %0,%0"
:"=r" (oldbit),"=m" (ADDR)
:"dIr" (nr));
return oldbit;
@@ -164,7 +164,7 @@ static __inline__ int test_and_clear_bit(long nr, volatile void * addr)
long oldbit;
__asm__ __volatile__( LOCK_PREFIX
- "btrq %2,%1\n\tsbbq %0,%0"
+ "btr"__OS" %2,%1\n\tsbb"__OS" %0,%0"
:"=r" (oldbit),"=m" (ADDR)
:"dIr" (nr) : "memory");
return oldbit;
@@ -184,7 +184,7 @@ static __inline__ int __test_and_clear_bit(long nr, volatile void * addr)
long oldbit;
__asm__(
- "btrq %2,%1\n\tsbbq %0,%0"
+ "btr"__OS" %2,%1\n\tsbb"__OS" %0,%0"
:"=r" (oldbit),"=m" (ADDR)
:"dIr" (nr));
return oldbit;
@@ -196,7 +196,7 @@ static __inline__ int __test_and_change_bit(long nr, volatile void * addr)
long oldbit;
__asm__ __volatile__(
- "btcq %2,%1\n\tsbbq %0,%0"
+ "btc"__OS" %2,%1\n\tsbb"__OS" %0,%0"
:"=r" (oldbit),"=m" (ADDR)
:"dIr" (nr) : "memory");
return oldbit;
@@ -215,20 +215,12 @@ static __inline__ int test_and_change_bit(long nr, volatile void * addr)
long oldbit;
__asm__ __volatile__( LOCK_PREFIX
- "btcq %2,%1\n\tsbbq %0,%0"
+ "btc"__OS" %2,%1\n\tsbb"__OS" %0,%0"
:"=r" (oldbit),"=m" (ADDR)
:"dIr" (nr) : "memory");
return oldbit;
}
-#if 0 /* Fool kernel-doc since it doesn't do macros yet */
-/**
- * test_bit - Determine whether a bit is set
- * @nr: bit number to test
- * @addr: Address to start counting from
- */
-static int test_bit(int nr, const volatile void * addr);
-#endif
static __inline__ int constant_test_bit(long nr, const volatile void * addr)
{
@@ -240,7 +232,7 @@ static __inline__ int variable_test_bit(long nr, volatile void * addr)
long oldbit;
__asm__ __volatile__(
- "btq %2,%1\n\tsbbq %0,%0"
+ "bt"__OS" %2,%1\n\tsbb"__OS" %0,%0"
:"=r" (oldbit)
:"m" (ADDR),"dIr" (nr));
return oldbit;
@@ -271,12 +263,12 @@ static __inline__ int find_first_zero_bit(void * addr, unsigned size)
"xorl %%edx,%%edx\n\t"
"repe; scasl\n\t"
"je 1f\n\t"
- "xorl -4(%%rdi),%%eax\n\t"
- "subq $4,%%rdi\n\t"
+ "xorl -4(%%"__OP"di),%%eax\n\t"
+ "sub"__OS" $4,%%"__OP"di\n\t"
"bsfl %%eax,%%edx\n"
- "1:\tsubq %%rbx,%%rdi\n\t"
- "shlq $3,%%rdi\n\t"
- "addq %%rdi,%%rdx"
+ "1:\tsub"__OS" %%"__OP"bx,%%"__OP"di\n\t"
+ "shl"__OS" $3,%%"__OP"di\n\t"
+ "add"__OS" %%"__OP"di,%%"__OP"dx"
:"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
:"1" ((size + 31) >> 5), "2" (addr), "b" (addr) : "memory");
return res;
@@ -315,32 +307,6 @@ static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
return (offset + set + res);
}
-/*
- * Find string of zero bits in a bitmap. -1 when not found.
- */
-extern unsigned long
-find_next_zero_string(unsigned long *bitmap, long start, long nbits, int len);
-
-static inline void set_bit_string(unsigned long *bitmap, unsigned long i,
- int len)
-{
- unsigned long end = i + len;
- while (i < end) {
- __set_bit(i, bitmap);
- i++;
- }
-}
-
-static inline void clear_bit_string(unsigned long *bitmap, unsigned long i,
- int len)
-{
- unsigned long end = i + len;
- while (i < end) {
- clear_bit(i, bitmap);
- i++;
- }
-}
-
/**
* ffz - find first zero in word.
* @word: The word to search
@@ -349,13 +315,12 @@ static inline void clear_bit_string(unsigned long *bitmap, unsigned long i,
*/
static __inline__ unsigned long ffz(unsigned long word)
{
- __asm__("bsfq %1,%0"
+ __asm__("bsf"__OS" %1,%0"
:"=r" (word)
:"r" (~word));
return word;
}
-
/**
* ffs - find first bit set
* @x: the word to search
@@ -386,8 +351,6 @@ static __inline__ int ffs(int x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
-
-
#define ext2_set_bit __test_and_set_bit
#define ext2_clear_bit __test_and_clear_bit
#define ext2_test_bit test_bit
@@ -401,5 +364,4 @@ static __inline__ int ffs(int x)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
-
-#endif /* _X86_64_BITOPS_H */
+#endif /* _X86_BITOPS_H */
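One non-obvious idiom in this file is the bts/sbb pairing in the test_and_* helpers: bts (or btr/btc) copies the previous bit value into the carry flag, and "sbb %0,%0" then subtracts the register from itself with borrow, leaving 0 or all-ones, which C reads back as the old bit. A portable, deliberately non-atomic C sketch of the same logic, for comparison only:

    static __inline__ int demo_test_and_set_bit(long nr, volatile unsigned long *addr)
    {
        unsigned long mask = 1UL << (nr % (8 * sizeof(unsigned long)));
        volatile unsigned long *word = addr + nr / (8 * sizeof(unsigned long));
        unsigned long old = *word;      /* not atomic, unlike the LOCK'd asm */
        *word = old | mask;
        return (old & mask) != 0;
    }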
diff --git a/xen/include/asm-i386/cache.h b/xen/include/asm-x86/cache.h
index db954a06ed..2539a6f240 100644
--- a/xen/include/asm-i386/cache.h
+++ b/xen/include/asm-x86/cache.h
@@ -1,8 +1,8 @@
/*
- * include/asm-i386/cache.h
+ * include/asm-x86/cache.h
*/
-#ifndef __ARCH_I386_CACHE_H
-#define __ARCH_I386_CACHE_H
+#ifndef __ARCH_X86_CACHE_H
+#define __ARCH_X86_CACHE_H
#include <xen/config.h>
diff --git a/xen/include/asm-i386/config.h b/xen/include/asm-x86/config.h
index bd3532e773..309cfed5e0 100644
--- a/xen/include/asm-i386/config.h
+++ b/xen/include/asm-x86/config.h
@@ -143,4 +143,8 @@ extern void __out_of_line_bug(int line) __attribute__((noreturn));
#define out_of_line_bug() __out_of_line_bug(__LINE__)
#endif /* __ASSEMBLY__ */
+/* For generic assembly code: use macros to define operation/operand sizes. */
+#define __OS "l" /* Operation Suffix */
+#define __OP "e" /* Operand Prefix */
+
#endif /* __XEN_I386_CONFIG_H__ */
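These two macros are the key to the whole merge: they are pasted into shared inline-assembly strings so a single source line emits the right operand size on each build. Only the 32-bit values are visible in this hunk; the 64-bit configuration is assumed to define the "q"/"r" pair. A sketch of the expansion:

    /* Assumed 64-bit counterparts (not shown in this hunk):
     *   #define __OS "q"    operation suffix: 64-bit
     *   #define __OP "r"    operand prefix:  %rax, %rdi, ...
     * With the 32-bit pair above, string pasting turns
     *   "bts"__OS" %1,%0"        into  "btsl %1,%0"
     *   "push"__OS" %%"__OP"ax"  into  "pushl %%eax"
     * and the same source yields "btsq %1,%0" / "pushq %%rax" on x86_64. */
    static inline void demo_set_bit(long nr, volatile void *addr)
    {
        __asm__ __volatile__("bts"__OS" %1,%0"
                             : "=m" (*(volatile long *)addr)
                             : "Ir" (nr));
    }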
diff --git a/xen/include/asm-i386/cpufeature.h b/xen/include/asm-x86/cpufeature.h
index f78a86891e..8b2e913bff 100644
--- a/xen/include/asm-i386/cpufeature.h
+++ b/xen/include/asm-x86/cpufeature.h
@@ -4,8 +4,8 @@
* Defines x86 CPU feature bits
*/
-#ifndef __ASM_I386_CPUFEATURE_H
-#define __ASM_I386_CPUFEATURE_H
+#ifndef __ASM_X86_CPUFEATURE_H
+#define __ASM_X86_CPUFEATURE_H
/* Sample usage: CPU_FEATURE_P(cpu.x86_capability, FPU) */
#define CPU_FEATURE_P(CAP, FEATURE) test_bit(CAP, X86_FEATURE_##FEATURE ##_BIT)
@@ -101,11 +101,4 @@
#define cpu_has_centaur_mcr boot_cpu_has(X86_FEATURE_CENTAUR_MCR)
#define cpu_has_xstore boot_cpu_has(X86_FEATURE_XSTORE)
-#endif /* __ASM_I386_CPUFEATURE_H */
-
-/*
- * Local Variables:
- * mode:c
- * comment-column:42
- * End:
- */
+#endif /* __ASM_X86_CPUFEATURE_H */
diff --git a/xen/include/asm-i386/current.h b/xen/include/asm-x86/current.h
index ee5b4b8516..fdbc373b92 100644
--- a/xen/include/asm-i386/current.h
+++ b/xen/include/asm-x86/current.h
@@ -1,5 +1,5 @@
-#ifndef _I386_CURRENT_H
-#define _I386_CURRENT_H
+#ifndef _X86_CURRENT_H
+#define _X86_CURRENT_H
struct task_struct;
@@ -49,4 +49,4 @@ static inline unsigned long get_stack_top(void)
"i" (STACK_SIZE-STACK_RESERVED) )
-#endif /* !(_I386_CURRENT_H) */
+#endif /* _X86_CURRENT_H */
diff --git a/xen/include/asm-i386/debugreg.h b/xen/include/asm-x86/debugreg.h
index f0b2b06ae0..7ca7deced7 100644
--- a/xen/include/asm-i386/debugreg.h
+++ b/xen/include/asm-x86/debugreg.h
@@ -1,5 +1,5 @@
-#ifndef _I386_DEBUGREG_H
-#define _I386_DEBUGREG_H
+#ifndef _X86_DEBUGREG_H
+#define _X86_DEBUGREG_H
/* Indicate the register numbers for a number of the specific
@@ -57,8 +57,8 @@
We can slow the instruction pipeline for instructions coming via the
gdt or the ldt if we want to. I am not sure why this is an advantage */
-#define DR_CONTROL_RESERVED (0xFC00) /* Reserved by Intel */
+#define DR_CONTROL_RESERVED (~0xFFFF03FFUL) /* Reserved by Intel */
#define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */
#define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */
-#endif
+#endif /* _X86_DEBUGREG_H */
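The new DR_CONTROL_RESERVED value is worth a second look: written as ~0xFFFF03FFUL it evaluates to 0xFC00 when unsigned long is 32 bits wide and to 0xFFFFFFFF0000FC00 when it is 64 bits wide, so one expression reproduces both the old i386 mask and the old x86_64 mask. A hypothetical compile-time sanity check in the same spirit:

    /* The low reserved bits (0xFC00) must be present in the mask on both
     * builds; the extra upper bits only appear once long is 64 bits. */
    typedef char dr7_reserved_mask_check[
        ((DR_CONTROL_RESERVED & 0xFC00UL) == 0xFC00UL) ? 1 : -1];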
diff --git a/xen/include/asm-i386/delay.h b/xen/include/asm-x86/delay.h
index 9e0adb4a27..b294c41918 100644
--- a/xen/include/asm-i386/delay.h
+++ b/xen/include/asm-x86/delay.h
@@ -1,5 +1,5 @@
-#ifndef _I386_DELAY_H
-#define _I386_DELAY_H
+#ifndef _X86_DELAY_H
+#define _X86_DELAY_H
/*
* Copyright (C) 1993 Linus Torvalds
@@ -11,4 +11,4 @@ extern unsigned long ticks_per_usec;
extern void __udelay(unsigned long usecs);
#define udelay(n) __udelay(n)
-#endif /* defined(_I386_DELAY_H) */
+#endif /* defined(_X86_DELAY_H) */
diff --git a/xen/include/asm-i386/desc.h b/xen/include/asm-x86/desc.h
index 780f9c8728..780f9c8728 100644
--- a/xen/include/asm-i386/desc.h
+++ b/xen/include/asm-x86/desc.h
diff --git a/xen/include/asm-i386/div64.h b/xen/include/asm-x86/div64.h
index ef915df700..ef915df700 100644
--- a/xen/include/asm-i386/div64.h
+++ b/xen/include/asm-x86/div64.h
diff --git a/xen/include/asm-i386/domain_page.h b/xen/include/asm-x86/domain_page.h
index d8cdf0b74e..d8cdf0b74e 100644
--- a/xen/include/asm-i386/domain_page.h
+++ b/xen/include/asm-x86/domain_page.h
diff --git a/xen/include/asm-i386/fixmap.h b/xen/include/asm-x86/fixmap.h
index fcfa97aee9..fcfa97aee9 100644
--- a/xen/include/asm-i386/fixmap.h
+++ b/xen/include/asm-x86/fixmap.h
diff --git a/xen/include/asm-x86_64/flushtlb.h b/xen/include/asm-x86/flushtlb.h
index f0d4bb946c..5cc60f4368 100644
--- a/xen/include/asm-x86_64/flushtlb.h
+++ b/xen/include/asm-x86/flushtlb.h
@@ -10,6 +10,7 @@
#ifndef __FLUSHTLB_H__
#define __FLUSHTLB_H__
+#include <xen/config.h>
#include <xen/smp.h>
/*
@@ -46,4 +47,40 @@ extern u32 tlbflush_time[NR_CPUS];
extern void tlb_clocktick(void);
extern void new_tlbflush_clock_period(void);
+/*
+ * TLB flushing:
+ *
+ * - flush_tlb() flushes the current mm struct TLBs
+ * - flush_tlb_all() flushes all processes TLBs
+ * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
+ *
+ * ...but the i386 has somewhat limited TLB flushing capabilities,
+ * and page-granular flushes are available only on i486 and up.
+ */
+
+#ifndef CONFIG_SMP
+
+#define flush_tlb() __flush_tlb()
+#define flush_tlb_all() __flush_tlb()
+#define flush_tlb_all_pge() __flush_tlb_pge()
+#define local_flush_tlb() __flush_tlb()
+#define flush_tlb_cpu(_cpu) __flush_tlb()
+#define flush_tlb_mask(_mask) __flush_tlb()
+#define try_flush_tlb_mask(_mask) __flush_tlb()
+
+#else
+
+#include <xen/smp.h>
+
+extern int try_flush_tlb_mask(unsigned long mask);
+extern void flush_tlb_mask(unsigned long mask);
+extern void flush_tlb_all_pge(void);
+
+#define flush_tlb() __flush_tlb()
+#define flush_tlb_all() flush_tlb_mask((1 << smp_num_cpus) - 1)
+#define local_flush_tlb() __flush_tlb()
+#define flush_tlb_cpu(_cpu) flush_tlb_mask(1 << (_cpu))
+
+#endif
+
#endif /* __FLUSHTLB_H__ */
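The comment block above documents the interface; as the flush_tlb_all() and flush_tlb_cpu() definitions show, the masks passed to flush_tlb_mask()/try_flush_tlb_mask() are plain CPU bitmaps. A minimal caller sketch, assuming only the declarations in this header (the helper name is invented):

    static inline void demo_flush_two_cpus(unsigned int cpu_a, unsigned int cpu_b)
    {
        /* Bit n of the mask selects CPU n; on UP builds the macros above
         * collapse this to a local __flush_tlb(). */
        flush_tlb_mask((1UL << cpu_a) | (1UL << cpu_b));
    }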
diff --git a/xen/include/asm-i386/hardirq.h b/xen/include/asm-x86/hardirq.h
index 5b3cb77c91..5b3cb77c91 100644
--- a/xen/include/asm-i386/hardirq.h
+++ b/xen/include/asm-x86/hardirq.h
diff --git a/xen/include/asm-i386/i387.h b/xen/include/asm-x86/i387.h
index 95a6bb6cde..95a6bb6cde 100644
--- a/xen/include/asm-i386/i387.h
+++ b/xen/include/asm-x86/i387.h
diff --git a/xen/include/asm-x86/io.h b/xen/include/asm-x86/io.h
new file mode 100644
index 0000000000..c88648aa76
--- /dev/null
+++ b/xen/include/asm-x86/io.h
@@ -0,0 +1,99 @@
+#ifndef _ASM_IO_H
+#define _ASM_IO_H
+
+#include <xen/config.h>
+#include <asm/page.h>
+
+#define IO_SPACE_LIMIT 0xffff
+
+/**
+ * virt_to_phys - map virtual addresses to physical
+ * @address: address to remap
+ *
+ * The returned physical address is the physical (CPU) mapping for
+ * the memory address given. It is only valid to use this function on
+ * addresses directly mapped or allocated via kmalloc.
+ *
+ * This function does not give bus mappings for DMA transfers. In
+ * almost all conceivable cases a device driver should not be using
+ * this function
+ */
+
+static inline unsigned long virt_to_phys(volatile void * address)
+{
+ return __pa(address);
+}
+
+/**
+ * phys_to_virt - map physical address to virtual
+ * @address: address to remap
+ *
+ * The returned virtual address is a current CPU mapping for
+ * the memory address given. It is only valid to use this function on
+ * addresses that have a kernel mapping.
+ *
+ * This function does not handle bus mappings for DMA transfers. In
+ * almost all conceivable cases a device driver should not be using
+ * this function
+ */
+
+static inline void * phys_to_virt(unsigned long address)
+{
+ return __va(address);
+}
+
+/*
+ * Change "struct pfn_info" to physical address.
+ */
+#ifdef CONFIG_HIGHMEM64G
+#define page_to_phys(page) ((u64)(page - frame_table) << PAGE_SHIFT)
+#else
+#define page_to_phys(page) ((page - frame_table) << PAGE_SHIFT)
+#endif
+
+#define page_to_pfn(_page) ((unsigned long)((_page) - frame_table))
+#define page_to_virt(_page) phys_to_virt(page_to_phys(_page))
+
+
+/*
+ * IO bus memory addresses are also 1:1 with the physical address
+ */
+#define virt_to_bus virt_to_phys
+#define bus_to_virt phys_to_virt
+#define page_to_bus page_to_phys
+
+#define __OUT1(s,x) \
+static inline void out##s(unsigned x value, unsigned short port) {
+
+#define __OUT2(s,s1,s2) \
+__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
+
+#define __OUT(s,s1,x) \
+__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
+__OUT1(s##_p,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port));}
+
+#define __IN1(s) \
+static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;
+
+#define __IN2(s,s1,s2) \
+__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
+
+#define __IN(s,s1,i...) \
+__IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
+__IN1(s##_p) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; }
+
+#define RETURN_TYPE unsigned char
+__IN(b,"")
+#undef RETURN_TYPE
+#define RETURN_TYPE unsigned short
+__IN(w,"")
+#undef RETURN_TYPE
+#define RETURN_TYPE unsigned int
+__IN(l,"")
+#undef RETURN_TYPE
+
+__OUT(b,"b",char)
+__OUT(w,"w",short)
+__OUT(l,,int)
+
+#endif
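The __IN/__OUT token-pasting machinery at the end of this new file is dense, so here is roughly what one instantiation boils down to once the preprocessor has run (the demo_ names are invented; the real macros also emit inb_p/outb_p "pausing" variants with the same bodies):

    /* __IN(b,"") effectively produces: */
    static inline unsigned char demo_inb(unsigned short port)
    {
        unsigned char _v;
        __asm__ __volatile__("inb %w1,%0" : "=a" (_v) : "Nd" (port));
        return _v;
    }

    /* __OUT(b,"b",char) effectively produces: */
    static inline void demo_outb(unsigned char value, unsigned short port)
    {
        __asm__ __volatile__("outb %b0,%w1" : : "a" (value), "Nd" (port));
    }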
diff --git a/xen/include/asm-i386/io_apic.h b/xen/include/asm-x86/io_apic.h
index 8b94875891..8b94875891 100644
--- a/xen/include/asm-i386/io_apic.h
+++ b/xen/include/asm-x86/io_apic.h
diff --git a/xen/include/asm-i386/irq.h b/xen/include/asm-x86/irq.h
index 2c7c67a0da..2c7c67a0da 100644
--- a/xen/include/asm-i386/irq.h
+++ b/xen/include/asm-x86/irq.h
diff --git a/xen/include/asm-i386/ldt.h b/xen/include/asm-x86/ldt.h
index 4da2a15afc..4da2a15afc 100644
--- a/xen/include/asm-i386/ldt.h
+++ b/xen/include/asm-x86/ldt.h
diff --git a/xen/include/asm-i386/mc146818rtc.h b/xen/include/asm-x86/mc146818rtc.h
index 8758528f7c..8758528f7c 100644
--- a/xen/include/asm-i386/mc146818rtc.h
+++ b/xen/include/asm-x86/mc146818rtc.h
diff --git a/xen/include/asm-i386/mpspec.h b/xen/include/asm-x86/mpspec.h
index 7a20710c2a..1e73671c25 100644
--- a/xen/include/asm-i386/mpspec.h
+++ b/xen/include/asm-x86/mpspec.h
@@ -31,7 +31,7 @@
struct intel_mp_floating
{
char mpf_signature[4]; /* "_MP_" */
- unsigned long mpf_physptr; /* Configuration table address */
+ unsigned int mpf_physptr; /* Configuration table address */
unsigned char mpf_length; /* Our length (paragraphs) */
unsigned char mpf_specification;/* Specification version */
unsigned char mpf_checksum; /* Checksum (makes sum 0) */
@@ -51,11 +51,11 @@ struct mp_config_table
char mpc_checksum;
char mpc_oem[8];
char mpc_productid[12];
- unsigned long mpc_oemptr; /* 0 if not present */
+ unsigned int mpc_oemptr; /* 0 if not present */
unsigned short mpc_oemsize; /* 0 if not present */
unsigned short mpc_oemcount;
- unsigned long mpc_lapic; /* APIC address */
- unsigned long reserved;
+ unsigned int mpc_lapic; /* APIC address */
+ unsigned int reserved;
};
/* Followed by entries */
@@ -75,12 +75,12 @@ struct mpc_config_processor
unsigned char mpc_cpuflag;
#define CPU_ENABLED 1 /* Processor is available */
#define CPU_BOOTPROCESSOR 2 /* Processor is the BP */
- unsigned long mpc_cpufeature;
+ unsigned int mpc_cpufeature;
#define CPU_STEPPING_MASK 0x0F
#define CPU_MODEL_MASK 0xF0
#define CPU_FAMILY_MASK 0xF00
- unsigned long mpc_featureflag; /* CPUID feature value */
- unsigned long mpc_reserved[2];
+ unsigned int mpc_featureflag; /* CPUID feature value */
+ unsigned int mpc_reserved[2];
};
struct mpc_config_bus
@@ -117,7 +117,7 @@ struct mpc_config_ioapic
unsigned char mpc_apicver;
unsigned char mpc_flags;
#define MPC_APIC_USABLE 0x01
- unsigned long mpc_apicaddr;
+ unsigned int mpc_apicaddr;
};
struct mpc_config_intsrc
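The type changes in this file are all of one kind: the MP-specification tables live in firmware memory with fixed 32-bit fields, so unsigned long (which becomes 64 bits under the shared x86 build) is replaced by unsigned int. A hypothetical compile-time check that would catch a regression:

    /* Fails to compile if mpf_physptr ever grows past the 4 bytes the
     * MP floating-pointer layout allows for it. */
    typedef char mpf_physptr_is_32bit[
        (sizeof(((struct intel_mp_floating *)0)->mpf_physptr) == 4) ? 1 : -1];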
diff --git a/xen/include/asm-x86_64/msr.h b/xen/include/asm-x86/msr.h
index f630034630..2a938479b9 100644
--- a/xen/include/asm-x86_64/msr.h
+++ b/xen/include/asm-x86/msr.h
@@ -1,7 +1,6 @@
-#ifndef X86_64_MSR_H
-#define X86_64_MSR_H 1
+#ifndef __ASM_MSR_H
+#define __ASM_MSR_H
-#ifndef __ASSEMBLY__
/*
* Access to machine-specific registers (available on 586 and better only)
* Note: the rd* operations modify the parameters directly (without using
@@ -9,10 +8,9 @@
*/
#define rdmsr(msr,val1,val2) \
- __asm__ __volatile__("rdmsr" \
- : "=a" (val1), "=d" (val2) \
- : "c" (msr))
-
+ __asm__ __volatile__("rdmsr" \
+ : "=a" (val1), "=d" (val2) \
+ : "c" (msr))
#define rdmsrl(msr,val) do { unsigned long a__,b__; \
__asm__ __volatile__("rdmsr" \
@@ -32,11 +30,16 @@
#define rdtscl(low) \
__asm__ __volatile__("rdtsc" : "=a" (low) : : "edx")
+#ifdef x86_32
+#define rdtscll(val) \
+ __asm__ __volatile__("rdtsc" : "=A" (val))
+#else
#define rdtscll(val) do { \
unsigned int a,d; \
asm volatile("rdtsc" : "=a" (a), "=d" (d)); \
(val) = ((unsigned long)a) | (((unsigned long)d)<<32); \
} while(0)
+#endif
#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
@@ -45,7 +48,12 @@
: "=a" (low), "=d" (high) \
: "c" (counter))
-#endif
+/* symbolic names for some interesting MSRs */
+/* Intel defined MSRs. */
+#define MSR_IA32_P5_MC_ADDR 0
+#define MSR_IA32_P5_MC_TYPE 1
+#define MSR_IA32_PLATFORM_ID 0x17
+#define MSR_IA32_EBL_CR_POWERON 0x2a
/* AMD/K8 specific MSRs */
#define MSR_EFER 0xc0000080 /* extended feature register */
@@ -107,6 +115,36 @@
#define MSR_IA32_MC0_ADDR 0x402
#define MSR_IA32_MC0_MISC 0x403
+#define MSR_IA32_APICBASE 0x1b
+#define MSR_IA32_APICBASE_BSP (1<<8)
+#define MSR_IA32_APICBASE_ENABLE (1<<11)
+#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
+
+#define MSR_IA32_UCODE_WRITE 0x79
+#define MSR_IA32_UCODE_REV 0x8b
+
+#define MSR_IA32_BBL_CR_CTL 0x119
+
+#define MSR_IA32_MCG_CAP 0x179
+#define MSR_IA32_MCG_STATUS 0x17a
+#define MSR_IA32_MCG_CTL 0x17b
+
+#define MSR_IA32_THERM_CONTROL 0x19a
+#define MSR_IA32_THERM_INTERRUPT 0x19b
+#define MSR_IA32_THERM_STATUS 0x19c
+#define MSR_IA32_MISC_ENABLE 0x1a0
+
+#define MSR_IA32_DEBUGCTLMSR 0x1d9
+#define MSR_IA32_LASTBRANCHFROMIP 0x1db
+#define MSR_IA32_LASTBRANCHTOIP 0x1dc
+#define MSR_IA32_LASTINTFROMIP 0x1dd
+#define MSR_IA32_LASTINTTOIP 0x1de
+
+#define MSR_IA32_MC0_CTL 0x400
+#define MSR_IA32_MC0_STATUS 0x401
+#define MSR_IA32_MC0_ADDR 0x402
+#define MSR_IA32_MC0_MISC 0x403
+
#define MSR_P6_PERFCTR0 0xc1
#define MSR_P6_PERFCTR1 0xc2
#define MSR_P6_EVNTSEL0 0x186
@@ -124,12 +162,17 @@
#define MSR_K8_TOP_MEM1 0xC001001A
#define MSR_K8_TOP_MEM2 0xC001001D
#define MSR_K8_SYSCFG 0xC0000010
+#define MSR_K7_HWCR 0xC0010015
+#define MSR_K7_CLK_CTL 0xC001001b
+#define MSR_K7_FID_VID_CTL 0xC0010041
+#define MSR_K7_VID_STATUS 0xC0010042
/* K6 MSRs */
#define MSR_K6_EFER 0xC0000080
#define MSR_K6_STAR 0xC0000081
#define MSR_K6_WHCR 0xC0000082
#define MSR_K6_UWCCR 0xC0000085
+#define MSR_K6_EPMR 0xC0000086
#define MSR_K6_PSOR 0xC0000087
#define MSR_K6_PFIR 0xC0000088
@@ -151,16 +194,13 @@
/* VIA Cyrix defined MSRs*/
#define MSR_VIA_FCR 0x1107
+#define MSR_VIA_LONGHAUL 0x110a
+#define MSR_VIA_BCR2 0x1147
-/* Intel defined MSRs. */
-#define MSR_IA32_P5_MC_ADDR 0
-#define MSR_IA32_P5_MC_TYPE 1
-#define MSR_IA32_PLATFORM_ID 0x17
-#define MSR_IA32_EBL_CR_POWERON 0x2a
+/* Transmeta defined MSRs */
+#define MSR_TMTA_LONGRUN_CTRL 0x80868010
+#define MSR_TMTA_LONGRUN_FLAGS 0x80868011
+#define MSR_TMTA_LRTI_READOUT 0x80868018
+#define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a
-#define MSR_IA32_APICBASE 0x1b
-#define MSR_IA32_APICBASE_BSP (1<<8)
-#define MSR_IA32_APICBASE_ENABLE (1<<11)
-#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
-
-#endif
+#endif /* __ASM_MSR_H */
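As the note at the top of the merged header says, the rd* macros write straight into the named variables rather than taking pointers. A minimal usage sketch against the accessors and constants above (the helpers are hypothetical; the rest of the header is assumed unchanged):

    static inline unsigned long demo_apic_base(void)
    {
        unsigned int lo, hi;
        rdmsr(MSR_IA32_APICBASE, lo, hi);        /* lo/hi filled in place */
        return lo & MSR_IA32_APICBASE_BASE;      /* physical base address */
    }

    static inline unsigned long long demo_tsc(void)
    {
        unsigned long long t;
        rdtscll(t);                              /* valid for either #ifdef arm */
        return t;
    }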
diff --git a/xen/include/asm-i386/page.h b/xen/include/asm-x86/page.h
index 61996d4ccc..61996d4ccc 100644
--- a/xen/include/asm-i386/page.h
+++ b/xen/include/asm-x86/page.h
diff --git a/xen/include/asm-i386/param.h b/xen/include/asm-x86/param.h
index 1b10bf49fe..efc29a8da2 100644
--- a/xen/include/asm-i386/param.h
+++ b/xen/include/asm-x86/param.h
@@ -1,5 +1,5 @@
-#ifndef _ASMi386_PARAM_H
-#define _ASMi386_PARAM_H
+#ifndef _ASM_PARAM_H
+#define _ASM_PARAM_H
#ifndef HZ
#define HZ 100
diff --git a/xen/include/asm-x86/pci.h b/xen/include/asm-x86/pci.h
new file mode 100644
index 0000000000..1a217315a4
--- /dev/null
+++ b/xen/include/asm-x86/pci.h
@@ -0,0 +1,35 @@
+#ifndef __X86_PCI_H
+#define __X86_PCI_H
+
+#include <xen/config.h>
+
+/* Can be used to override the logic in pci_scan_bus for skipping
+ already-configured bus numbers - to be used for buggy BIOSes
+ or architectures with incomplete PCI setup by the loader */
+
+#ifdef CONFIG_PCI
+extern unsigned int pcibios_assign_all_busses(void);
+#else
+#define pcibios_assign_all_busses() 0
+#endif
+#define pcibios_scan_all_fns() 0
+
+extern unsigned long pci_mem_start;
+#define PCIBIOS_MIN_IO 0x1000
+#define PCIBIOS_MIN_MEM (pci_mem_start)
+
+void pcibios_config_init(void);
+struct pci_bus * pcibios_scan_root(int bus);
+extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value);
+extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value);
+
+void pcibios_set_master(struct pci_dev *dev);
+void pcibios_penalize_isa_irq(int irq);
+struct irq_routing_table *pcibios_get_irq_routing_table(void);
+int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
+
+#include <xen/types.h>
+#include <xen/slab.h>
+#include <asm/io.h>
+
+#endif /* __X86_PCI_H */
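Config-space access in this stripped-down header goes through the pci_config_read/pci_config_write function pointers rather than a fixed backend. A hypothetical caller, shown only to illustrate the argument order declared above (seg, bus, dev, fn, reg, len, value):

    static inline u32 demo_read_vendor_device(int bus, int dev, int fn)
    {
        u32 id = 0xffffffff;                     /* "no device" pattern */
        if ( pci_config_read != NULL )
            pci_config_read(0, bus, dev, fn, 0x00, 4, &id);
        return id;                               /* low 16: vendor, high 16: device */
    }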
diff --git a/xen/include/asm-i386/pdb.h b/xen/include/asm-x86/pdb.h
index 2ed6a9a318..2ed6a9a318 100644
--- a/xen/include/asm-i386/pdb.h
+++ b/xen/include/asm-x86/pdb.h
diff --git a/xen/include/asm-i386/processor.h b/xen/include/asm-x86/processor.h
index 823c6ca851..823c6ca851 100644
--- a/xen/include/asm-i386/processor.h
+++ b/xen/include/asm-x86/processor.h
diff --git a/xen/include/asm-i386/ptrace.h b/xen/include/asm-x86/ptrace.h
index 26269afcb0..26269afcb0 100644
--- a/xen/include/asm-i386/ptrace.h
+++ b/xen/include/asm-x86/ptrace.h
diff --git a/xen/include/asm-i386/rwlock.h b/xen/include/asm-x86/rwlock.h
index 9475419f95..7519f32713 100644
--- a/xen/include/asm-i386/rwlock.h
+++ b/xen/include/asm-x86/rwlock.h
@@ -1,4 +1,4 @@
-/* include/asm-i386/rwlock.h
+/* include/asm-x86/rwlock.h
*
* Helpers used by both rw spinlocks and rw semaphores.
*
@@ -14,8 +14,8 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
-#ifndef _ASM_I386_RWLOCK_H
-#define _ASM_I386_RWLOCK_H
+#ifndef _ASM_X86_RWLOCK_H
+#define _ASM_X86_RWLOCK_H
#define RW_LOCK_BIAS 0x01000000
#define RW_LOCK_BIAS_STR "0x01000000"
@@ -35,10 +35,10 @@
"js 2f\n" \
"1:\n" \
".section .text.lock,\"ax\"\n" \
- "2:\tpushl %%eax\n\t" \
- "leal %0,%%eax\n\t" \
+ "2:\tpush"__OS" %%"__OP"ax\n\t" \
+ "lea"__OS" %0,%%"__OP"ax\n\t" \
"call " helper "\n\t" \
- "popl %%eax\n\t" \
+ "pop"__OS" %%"__OP"ax\n\t" \
"jmp 1b\n" \
".previous" \
:"=m" (*(volatile int *)rw) : : "memory")
@@ -65,10 +65,10 @@
"jnz 2f\n" \
"1:\n" \
".section .text.lock,\"ax\"\n" \
- "2:\tpushl %%eax\n\t" \
- "leal %0,%%eax\n\t" \
+ "2:\tpush"__OS" %%"__OP"ax\n\t" \
+ "lea"__OS" %0,%%"__OP"ax\n\t" \
"call " helper "\n\t" \
- "popl %%eax\n\t" \
+ "pop"__OS" %%"__OP"ax\n\t" \
"jmp 1b\n" \
".previous" \
:"=m" (*(volatile int *)rw) : : "memory")
diff --git a/xen/include/asm-i386/smp.h b/xen/include/asm-x86/smp.h
index b48bbae43c..8d8bdcd06f 100644
--- a/xen/include/asm-i386/smp.h
+++ b/xen/include/asm-x86/smp.h
@@ -81,18 +81,23 @@ extern void smp_store_cpu_info(int id); /* Store per CPU info (like the initial
* so this is correct in the x86 case.
*/
+#ifdef x86_32
#define smp_processor_id() (current->processor)
+#else
+#include <asm/pda.h>
+#define smp_processor_id() read_pda(cpunumber)
+#endif
static __inline int hard_smp_processor_id(void)
{
/* we don't want to mark this access volatile - bad code generation */
- return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID));
+ return GET_APIC_ID(*(unsigned *)(APIC_BASE+APIC_ID));
}
static __inline int logical_smp_processor_id(void)
{
/* we don't want to mark this access volatile - bad code generation */
- return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
+ return GET_APIC_LOGICAL_ID(*(unsigned *)(APIC_BASE+APIC_LDR));
}
#endif /* !__ASSEMBLY__ */
diff --git a/xen/include/asm-i386/smpboot.h b/xen/include/asm-x86/smpboot.h
index 7a0b157114..7a0b157114 100644
--- a/xen/include/asm-i386/smpboot.h
+++ b/xen/include/asm-x86/smpboot.h
diff --git a/xen/include/asm-i386/softirq.h b/xen/include/asm-x86/softirq.h
index 292baac6ea..292baac6ea 100644
--- a/xen/include/asm-i386/softirq.h
+++ b/xen/include/asm-x86/softirq.h
diff --git a/xen/include/asm-i386/spinlock.h b/xen/include/asm-x86/spinlock.h
index d632b2139f..5cbb5a413e 100644
--- a/xen/include/asm-i386/spinlock.h
+++ b/xen/include/asm-x86/spinlock.h
@@ -170,7 +170,7 @@ typedef struct {
* Changed to use the same technique as rw semaphores. See
* semaphore.h for details. -ben
*/
-/* the spinlock helpers are in arch/i386/kernel/semaphore.c */
+/* the spinlock helpers are in arch/x86/kernel/semaphore.c */
static inline void read_lock(rwlock_t *rw)
{
diff --git a/xen/include/asm-i386/string.h b/xen/include/asm-x86/string.h
index bef20a71d5..bef20a71d5 100644
--- a/xen/include/asm-i386/string.h
+++ b/xen/include/asm-x86/string.h
diff --git a/xen/include/asm-i386/system.h b/xen/include/asm-x86/system.h
index 15c8b7f811..4b787e6830 100644
--- a/xen/include/asm-i386/system.h
+++ b/xen/include/asm-x86/system.h
@@ -47,12 +47,27 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
:"m" (*__xg(ptr)), "0" (x)
:"memory");
break;
+#ifdef x86_32
case 4:
__asm__ __volatile__("xchgl %0,%1"
:"=r" (x)
:"m" (*__xg(ptr)), "0" (x)
:"memory");
break;
+#else
+ case 4:
+ __asm__ __volatile__("xchgl %k0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 8:
+ __asm__ __volatile__("xchgq %0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+#endif
}
return x;
}
@@ -80,12 +95,27 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
: "q"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
+#ifdef x86_32
case 4:
__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
: "=a"(prev)
: "q"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
+#else
+ case 4:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 8:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+#endif
}
return old;
}
@@ -105,7 +135,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
({ \
int _rc; \
__asm__ __volatile__ ( \
- "1: " LOCK_PREFIX "cmpxchgl %2,%3\n" \
+ "1: " LOCK_PREFIX "cmpxchg"__OS" %2,%3\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: movl $1,%1\n" \
@@ -162,16 +192,26 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
/* interrupt control.. */
+#ifdef x86_64
+#define __save_flags(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0)
+#define __restore_flags(x) __asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc")
+#else
#define __save_flags(x) __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */)
#define __restore_flags(x) __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc")
+#endif
#define __cli() __asm__ __volatile__("cli": : :"memory")
#define __sti() __asm__ __volatile__("sti": : :"memory")
/* used in the idle loop; sti takes one instruction cycle to complete */
#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
/* For spinlocks etc */
+#ifdef x86_64
+#define local_irq_save(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0)
+#define local_irq_restore(x) __asm__ __volatile__("# local_irq_restore \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory")
+#else
#define local_irq_save(x) __asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
#define local_irq_restore(x) __restore_flags(x)
+#endif
#define local_irq_disable() __cli()
#define local_irq_enable() __sti()
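Two details in these hunks are easy to miss: the 64-bit 4-byte cases use the %k operand modifier so that xchgl/cmpxchgl still name 32-bit sub-registers (%eax rather than %rax), and __cmpxchg returns the value it found so callers can detect a lost race. A hypothetical retry loop built on it, assuming the usual (ptr, old, new, size) argument order from the unchanged part of the header:

    static inline unsigned long demo_atomic_increment(volatile unsigned long *p)
    {
        unsigned long old, new;
        do {
            old = *p;
            new = old + 1;
        } while ( __cmpxchg(p, old, new, sizeof(*p)) != old );
        return new;
    }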
diff --git a/xen/include/asm-i386/time.h b/xen/include/asm-x86/time.h
index ed3a15bfb2..ed3a15bfb2 100644
--- a/xen/include/asm-i386/time.h
+++ b/xen/include/asm-x86/time.h
diff --git a/xen/include/asm-i386/timex.h b/xen/include/asm-x86/timex.h
index 4b0a93fc87..4b0a93fc87 100644
--- a/xen/include/asm-i386/timex.h
+++ b/xen/include/asm-x86/timex.h
diff --git a/xen/include/asm-i386/types.h b/xen/include/asm-x86/types.h
index ebfaf85f68..adb63db4d1 100644
--- a/xen/include/asm-i386/types.h
+++ b/xen/include/asm-x86/types.h
@@ -1,5 +1,5 @@
-#ifndef _I386_TYPES_H
-#define _I386_TYPES_H
+#ifndef _X86_TYPES_H
+#define _X86_TYPES_H
typedef unsigned short umode_t;
@@ -20,8 +20,13 @@ typedef __signed__ int __s32;
typedef unsigned int __u32;
#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+#ifdef x86_32
typedef __signed__ long long __s64;
typedef unsigned long long __u64;
+#else
+typedef __signed__ long __s64;
+typedef unsigned long __u64;
+#endif
#endif
#include <xen/config.h>
@@ -35,18 +40,19 @@ typedef unsigned short u16;
typedef signed int s32;
typedef unsigned int u32;
+#ifdef x86_32
typedef signed long long s64;
typedef unsigned long long u64;
-
#define BITS_PER_LONG 32
+#else
+typedef signed long s64;
+typedef unsigned long u64;
+#define BITS_PER_LONG 64
+#endif
/* DMA addresses come in generic and 64-bit flavours. */
-#ifdef CONFIG_HIGHMEM
-typedef u64 dma_addr_t;
-#else
-typedef u32 dma_addr_t;
-#endif
+typedef unsigned long dma_addr_t;
typedef u64 dma64_addr_t;
#endif
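The effect of the #ifdef x86_32 blocks is that u64/__u64 stay 64 bits wide on both builds, but on the 64-bit side they become plain unsigned long, matching the native word and BITS_PER_LONG. A few hypothetical compile-time checks that capture the invariant:

    typedef char u64_is_8_bytes[(sizeof(u64) == 8) ? 1 : -1];
    typedef char bits_per_long_matches[(8 * sizeof(long) == BITS_PER_LONG) ? 1 : -1];
    /* dma_addr_t is now simply the machine word on either build. */
    typedef char dma_addr_is_word[(sizeof(dma_addr_t) == sizeof(long)) ? 1 : -1];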
diff --git a/xen/include/asm-i386/uaccess.h b/xen/include/asm-x86/uaccess.h
index bb2616336d..bb2616336d 100644
--- a/xen/include/asm-i386/uaccess.h
+++ b/xen/include/asm-x86/uaccess.h
diff --git a/xen/include/asm-i386/unaligned.h b/xen/include/asm-x86/unaligned.h
index 7acd795762..08f8ca2d9b 100644
--- a/xen/include/asm-i386/unaligned.h
+++ b/xen/include/asm-x86/unaligned.h
@@ -1,8 +1,8 @@
-#ifndef __I386_UNALIGNED_H
-#define __I386_UNALIGNED_H
+#ifndef __X86_UNALIGNED_H
+#define __X86_UNALIGNED_H
/*
- * The i386 can do unaligned accesses itself.
+ * x86 can do unaligned accesses itself.
*
* The strange macros are there to make sure these can't
* be misused in a way that makes them not work on other
diff --git a/xen/include/asm-x86_64/config.h b/xen/include/asm-x86/x86_64/config.h
index 5a0acabf2a..5a0acabf2a 100644
--- a/xen/include/asm-x86_64/config.h
+++ b/xen/include/asm-x86/x86_64/config.h
diff --git a/xen/include/asm-x86_64/current.h b/xen/include/asm-x86/x86_64/current.h
index d5ffb0720a..d5ffb0720a 100644
--- a/xen/include/asm-x86_64/current.h
+++ b/xen/include/asm-x86/x86_64/current.h
diff --git a/xen/include/asm-x86_64/desc.h b/xen/include/asm-x86/x86_64/desc.h
index e8556e976e..e8556e976e 100644
--- a/xen/include/asm-x86_64/desc.h
+++ b/xen/include/asm-x86/x86_64/desc.h
diff --git a/xen/include/asm-x86_64/ldt.h b/xen/include/asm-x86/x86_64/ldt.h
index e0f139829e..e0f139829e 100644
--- a/xen/include/asm-x86_64/ldt.h
+++ b/xen/include/asm-x86/x86_64/ldt.h
diff --git a/xen/include/asm-x86_64/page.h b/xen/include/asm-x86/x86_64/page.h
index cb8651ec8a..cb8651ec8a 100644
--- a/xen/include/asm-x86_64/page.h
+++ b/xen/include/asm-x86/x86_64/page.h
diff --git a/xen/include/asm-x86_64/pda.h b/xen/include/asm-x86/x86_64/pda.h
index b9ca345ee4..b9ca345ee4 100644
--- a/xen/include/asm-x86_64/pda.h
+++ b/xen/include/asm-x86/x86_64/pda.h
diff --git a/xen/include/asm-x86_64/processor.h b/xen/include/asm-x86/x86_64/processor.h
index ad3344cf3c..ad3344cf3c 100644
--- a/xen/include/asm-x86_64/processor.h
+++ b/xen/include/asm-x86/x86_64/processor.h
diff --git a/xen/include/asm-x86_64/ptrace.h b/xen/include/asm-x86/x86_64/ptrace.h
index da0419f429..da0419f429 100644
--- a/xen/include/asm-x86_64/ptrace.h
+++ b/xen/include/asm-x86/x86_64/ptrace.h
diff --git a/xen/include/asm-x86_64/uaccess.h b/xen/include/asm-x86/x86_64/uaccess.h
index 952e1b2f0a..952e1b2f0a 100644
--- a/xen/include/asm-x86_64/uaccess.h
+++ b/xen/include/asm-x86/x86_64/uaccess.h
diff --git a/xen/include/asm-x86_64/apic.h b/xen/include/asm-x86_64/apic.h
deleted file mode 100644
index 9044692e3b..0000000000
--- a/xen/include/asm-x86_64/apic.h
+++ /dev/null
@@ -1,102 +0,0 @@
-#ifndef __ASM_APIC_H
-#define __ASM_APIC_H
-
-#include <xen/config.h>
-#include <asm/ptrace.h>
-#include <asm/apicdef.h>
-#include <asm/system.h>
-
-#ifdef CONFIG_X86_LOCAL_APIC
-
-#define APIC_DEBUG 0
-
-#if APIC_DEBUG
-#define Dprintk(x...) printk(x)
-#else
-#define Dprintk(x...)
-#endif
-
-/*
- * Basic functions accessing APICs.
- */
-
-static __inline void apic_write(unsigned long reg, unsigned int v)
-{
- *((volatile unsigned int *)(APIC_BASE+reg)) = v;
- barrier();
-}
-
-static __inline void apic_write_atomic(unsigned long reg, unsigned int v)
-{
- xchg((volatile unsigned int *)(APIC_BASE+reg), v);
-}
-
-static __inline unsigned int apic_read(unsigned long reg)
-{
- return *((volatile unsigned int *)(APIC_BASE+reg));
-}
-
-static __inline__ void apic_wait_icr_idle(void)
-{
- do { } while ( apic_read( APIC_ICR ) & APIC_ICR_BUSY );
-}
-
-#ifdef CONFIG_X86_GOOD_APIC
-# define FORCE_READ_AROUND_WRITE 0
-# define apic_read_around(x)
-# define apic_write_around(x,y) apic_write((x),(y))
-#else
-# define FORCE_READ_AROUND_WRITE 1
-# define apic_read_around(x) apic_read(x)
-# define apic_write_around(x,y) apic_write_atomic((x),(y))
-#endif
-
-static inline void ack_APIC_irq(void)
-{
- /*
- * ack_APIC_irq() actually gets compiled as a single instruction:
- * - a single rmw on Pentium/82489DX
- * - a single write on P6+ cores (CONFIG_X86_GOOD_APIC)
- * ... yummie.
- */
-
- /* Docs say use 0 for future compatibility */
- apic_write_around(APIC_EOI, 0);
-}
-
-extern int get_maxlvt(void);
-extern void connect_bsp_APIC (void);
-extern void disconnect_bsp_APIC (void);
-extern void disable_local_APIC (void);
-extern int verify_local_APIC (void);
-extern void sync_Arb_IDs (void);
-extern void init_bsp_APIC (void);
-extern void setup_local_APIC (void);
-extern void init_apic_mappings (void);
-extern void setup_APIC_clocks (void);
-extern void setup_apic_nmi_watchdog (void);
-extern inline void nmi_watchdog_tick (struct pt_regs * regs);
-extern int APIC_init_uniprocessor (void);
-extern void disable_APIC_timer(void);
-extern void enable_APIC_timer(void);
-
-//extern struct pm_dev *apic_pm_register(pm_dev_t, unsigned long, pm_callback);
-//extern void apic_pm_unregister(struct pm_dev*);
-
-extern unsigned int watchdog_on;
-
-extern unsigned int apic_timer_irqs [NR_CPUS];
-extern int check_nmi_watchdog (void);
-
-extern unsigned int nmi_watchdog;
-#define NMI_NONE 0
-#define NMI_IO_APIC 1
-#define NMI_LOCAL_APIC 2
-#define NMI_INVALID 3
-
-#endif /* CONFIG_X86_LOCAL_APIC */
-
-#define clustered_apic_mode 0
-#define esr_disable 0
-
-#endif /* __ASM_APIC_H */
diff --git a/xen/include/asm-x86_64/apicdef.h b/xen/include/asm-x86_64/apicdef.h
deleted file mode 100644
index 8a787c3122..0000000000
--- a/xen/include/asm-x86_64/apicdef.h
+++ /dev/null
@@ -1,363 +0,0 @@
-#ifndef __ASM_APICDEF_H
-#define __ASM_APICDEF_H
-
-/*
- * Constants for various Intel APICs. (local APIC, IOAPIC, etc.)
- *
- * Alan Cox <Alan.Cox@linux.org>, 1995.
- * Ingo Molnar <mingo@redhat.com>, 1999, 2000
- */
-
-#define APIC_DEFAULT_PHYS_BASE 0xfee00000
-
-#define APIC_ID 0x20
-#define APIC_ID_MASK (0x0F<<24)
-#define GET_APIC_ID(x) (((x)>>24)&0x0F)
-#define APIC_LVR 0x30
-#define APIC_LVR_MASK 0xFF00FF
-#define GET_APIC_VERSION(x) ((x)&0xFF)
-#define GET_APIC_MAXLVT(x) (((x)>>16)&0xFF)
-#define APIC_INTEGRATED(x) ((x)&0xF0)
-#define APIC_TASKPRI 0x80
-#define APIC_TPRI_MASK 0xFF
-#define APIC_ARBPRI 0x90
-#define APIC_ARBPRI_MASK 0xFF
-#define APIC_PROCPRI 0xA0
-#define APIC_EOI 0xB0
-#define APIC_EIO_ACK 0x0 /* Write this to the EOI register */
-#define APIC_RRR 0xC0
-#define APIC_LDR 0xD0
-#define APIC_LDR_MASK (0xFF<<24)
-#define GET_APIC_LOGICAL_ID(x) (((x)>>24)&0xFF)
-#define SET_APIC_LOGICAL_ID(x) (((x)<<24))
-#define APIC_ALL_CPUS 0xFF
-#define APIC_DFR 0xE0
-#define APIC_SPIV 0xF0
-#define APIC_SPIV_FOCUS_DISABLED (1<<9)
-#define APIC_SPIV_APIC_ENABLED (1<<8)
-#define APIC_ISR 0x100
-#define APIC_TMR 0x180
-#define APIC_IRR 0x200
-#define APIC_ESR 0x280
-#define APIC_ESR_SEND_CS 0x00001
-#define APIC_ESR_RECV_CS 0x00002
-#define APIC_ESR_SEND_ACC 0x00004
-#define APIC_ESR_RECV_ACC 0x00008
-#define APIC_ESR_SENDILL 0x00020
-#define APIC_ESR_RECVILL 0x00040
-#define APIC_ESR_ILLREGA 0x00080
-#define APIC_ICR 0x300
-#define APIC_DEST_SELF 0x40000
-#define APIC_DEST_ALLINC 0x80000
-#define APIC_DEST_ALLBUT 0xC0000
-#define APIC_ICR_RR_MASK 0x30000
-#define APIC_ICR_RR_INVALID 0x00000
-#define APIC_ICR_RR_INPROG 0x10000
-#define APIC_ICR_RR_VALID 0x20000
-#define APIC_INT_LEVELTRIG 0x08000
-#define APIC_INT_ASSERT 0x04000
-#define APIC_ICR_BUSY 0x01000
-#define APIC_DEST_LOGICAL 0x00800
-#define APIC_DM_FIXED 0x00000
-#define APIC_DM_LOWEST 0x00100
-#define APIC_DM_SMI 0x00200
-#define APIC_DM_REMRD 0x00300
-#define APIC_DM_NMI 0x00400
-#define APIC_DM_INIT 0x00500
-#define APIC_DM_STARTUP 0x00600
-#define APIC_DM_EXTINT 0x00700
-#define APIC_VECTOR_MASK 0x000FF
-#define APIC_ICR2 0x310
-#define GET_APIC_DEST_FIELD(x) (((x)>>24)&0xFF)
-#define SET_APIC_DEST_FIELD(x) ((x)<<24)
-#define APIC_LVTT 0x320
-#define APIC_LVTPC 0x340
-#define APIC_LVT0 0x350
-#define APIC_LVT_TIMER_BASE_MASK (0x3<<18)
-#define GET_APIC_TIMER_BASE(x) (((x)>>18)&0x3)
-#define SET_APIC_TIMER_BASE(x) (((x)<<18))
-#define APIC_TIMER_BASE_CLKIN 0x0
-#define APIC_TIMER_BASE_TMBASE 0x1
-#define APIC_TIMER_BASE_DIV 0x2
-#define APIC_LVT_TIMER_PERIODIC (1<<17)
-#define APIC_LVT_MASKED (1<<16)
-#define APIC_LVT_LEVEL_TRIGGER (1<<15)
-#define APIC_LVT_REMOTE_IRR (1<<14)
-#define APIC_INPUT_POLARITY (1<<13)
-#define APIC_SEND_PENDING (1<<12)
-#define GET_APIC_DELIVERY_MODE(x) (((x)>>8)&0x7)
-#define SET_APIC_DELIVERY_MODE(x,y) (((x)&~0x700)|((y)<<8))
-#define APIC_MODE_FIXED 0x0
-#define APIC_MODE_NMI 0x4
-#define APIC_MODE_EXINT 0x7
-#define APIC_LVT1 0x360
-#define APIC_LVTERR 0x370
-#define APIC_TMICT 0x380
-#define APIC_TMCCT 0x390
-#define APIC_TDCR 0x3E0
-#define APIC_TDR_DIV_TMBASE (1<<2)
-#define APIC_TDR_DIV_1 0xB
-#define APIC_TDR_DIV_2 0x0
-#define APIC_TDR_DIV_4 0x1
-#define APIC_TDR_DIV_8 0x2
-#define APIC_TDR_DIV_16 0x3
-#define APIC_TDR_DIV_32 0x8
-#define APIC_TDR_DIV_64 0x9
-#define APIC_TDR_DIV_128 0xA
-
-#define APIC_BASE (fix_to_virt(FIX_APIC_BASE))
-
-#define MAX_IO_APICS 16
-
-/*
- * the local APIC register structure, memory mapped. Not terribly well
- * tested, but we might eventually use this one in the future - the
- * problem why we cannot use it right now is the P5 APIC, it has an
- * errata which cannot take 8-bit reads and writes, only 32-bit ones ...
- */
-#define u32 unsigned int
-
-#define lapic ((volatile struct local_apic *)APIC_BASE)
-
-struct local_apic {
-
-/*000*/ struct { u32 __reserved[4]; } __reserved_01;
-
-/*010*/ struct { u32 __reserved[4]; } __reserved_02;
-
-/*020*/ struct { /* APIC ID Register */
- u32 __reserved_1 : 24,
- phys_apic_id : 4,
- __reserved_2 : 4;
- u32 __reserved[3];
- } id;
-
-/*030*/ const
- struct { /* APIC Version Register */
- u32 version : 8,
- __reserved_1 : 8,
- max_lvt : 8,
- __reserved_2 : 8;
- u32 __reserved[3];
- } version;
-
-/*040*/ struct { u32 __reserved[4]; } __reserved_03;
-
-/*050*/ struct { u32 __reserved[4]; } __reserved_04;
-
-/*060*/ struct { u32 __reserved[4]; } __reserved_05;
-
-/*070*/ struct { u32 __reserved[4]; } __reserved_06;
-
-/*080*/ struct { /* Task Priority Register */
- u32 priority : 8,
- __reserved_1 : 24;
- u32 __reserved_2[3];
- } tpr;
-
-/*090*/ const
- struct { /* Arbitration Priority Register */
- u32 priority : 8,
- __reserved_1 : 24;
- u32 __reserved_2[3];
- } apr;
-
-/*0A0*/ const
- struct { /* Processor Priority Register */
- u32 priority : 8,
- __reserved_1 : 24;
- u32 __reserved_2[3];
- } ppr;
-
-/*0B0*/ struct { /* End Of Interrupt Register */
- u32 eoi;
- u32 __reserved[3];
- } eoi;
-
-/*0C0*/ struct { u32 __reserved[4]; } __reserved_07;
-
-/*0D0*/ struct { /* Logical Destination Register */
- u32 __reserved_1 : 24,
- logical_dest : 8;
- u32 __reserved_2[3];
- } ldr;
-
-/*0E0*/ struct { /* Destination Format Register */
- u32 __reserved_1 : 28,
- model : 4;
- u32 __reserved_2[3];
- } dfr;
-
-/*0F0*/ struct { /* Spurious Interrupt Vector Register */
- u32 spurious_vector : 8,
- apic_enabled : 1,
- focus_cpu : 1,
- __reserved_2 : 22;
- u32 __reserved_3[3];
- } svr;
-
-/*100*/ struct { /* In Service Register */
-/*170*/ u32 bitfield;
- u32 __reserved[3];
- } isr [8];
-
-/*180*/ struct { /* Trigger Mode Register */
-/*1F0*/ u32 bitfield;
- u32 __reserved[3];
- } tmr [8];
-
-/*200*/ struct { /* Interrupt Request Register */
-/*270*/ u32 bitfield;
- u32 __reserved[3];
- } irr [8];
-
-/*280*/ union { /* Error Status Register */
- struct {
- u32 send_cs_error : 1,
- receive_cs_error : 1,
- send_accept_error : 1,
- receive_accept_error : 1,
- __reserved_1 : 1,
- send_illegal_vector : 1,
- receive_illegal_vector : 1,
- illegal_register_address : 1,
- __reserved_2 : 24;
- u32 __reserved_3[3];
- } error_bits;
- struct {
- u32 errors;
- u32 __reserved_3[3];
- } all_errors;
- } esr;
-
-/*290*/ struct { u32 __reserved[4]; } __reserved_08;
-
-/*2A0*/ struct { u32 __reserved[4]; } __reserved_09;
-
-/*2B0*/ struct { u32 __reserved[4]; } __reserved_10;
-
-/*2C0*/ struct { u32 __reserved[4]; } __reserved_11;
-
-/*2D0*/ struct { u32 __reserved[4]; } __reserved_12;
-
-/*2E0*/ struct { u32 __reserved[4]; } __reserved_13;
-
-/*2F0*/ struct { u32 __reserved[4]; } __reserved_14;
-
-/*300*/ struct { /* Interrupt Command Register 1 */
- u32 vector : 8,
- delivery_mode : 3,
- destination_mode : 1,
- delivery_status : 1,
- __reserved_1 : 1,
- level : 1,
- trigger : 1,
- __reserved_2 : 2,
- shorthand : 2,
- __reserved_3 : 12;
- u32 __reserved_4[3];
- } icr1;
-
-/*310*/ struct { /* Interrupt Command Register 2 */
- union {
- u32 __reserved_1 : 24,
- phys_dest : 4,
- __reserved_2 : 4;
- u32 __reserved_3 : 24,
- logical_dest : 8;
- } dest;
- u32 __reserved_4[3];
- } icr2;
-
-/*320*/ struct { /* LVT - Timer */
- u32 vector : 8,
- __reserved_1 : 4,
- delivery_status : 1,
- __reserved_2 : 3,
- mask : 1,
- timer_mode : 1,
- __reserved_3 : 14;
- u32 __reserved_4[3];
- } lvt_timer;
-
-/*330*/ struct { u32 __reserved[4]; } __reserved_15;
-
-/*340*/ struct { /* LVT - Performance Counter */
- u32 vector : 8,
- delivery_mode : 3,
- __reserved_1 : 1,
- delivery_status : 1,
- __reserved_2 : 3,
- mask : 1,
- __reserved_3 : 15;
- u32 __reserved_4[3];
- } lvt_pc;
-
-/*350*/ struct { /* LVT - LINT0 */
- u32 vector : 8,
- delivery_mode : 3,
- __reserved_1 : 1,
- delivery_status : 1,
- polarity : 1,
- remote_irr : 1,
- trigger : 1,
- mask : 1,
- __reserved_2 : 15;
- u32 __reserved_3[3];
- } lvt_lint0;
-
-/*360*/ struct { /* LVT - LINT1 */
- u32 vector : 8,
- delivery_mode : 3,
- __reserved_1 : 1,
- delivery_status : 1,
- polarity : 1,
- remote_irr : 1,
- trigger : 1,
- mask : 1,
- __reserved_2 : 15;
- u32 __reserved_3[3];
- } lvt_lint1;
-
-/*370*/ struct { /* LVT - Error */
- u32 vector : 8,
- __reserved_1 : 4,
- delivery_status : 1,
- __reserved_2 : 3,
- mask : 1,
- __reserved_3 : 15;
- u32 __reserved_4[3];
- } lvt_error;
-
-/*380*/ struct { /* Timer Initial Count Register */
- u32 initial_count;
- u32 __reserved_2[3];
- } timer_icr;
-
-/*390*/ const
- struct { /* Timer Current Count Register */
- u32 curr_count;
- u32 __reserved_2[3];
- } timer_ccr;
-
-/*3A0*/ struct { u32 __reserved[4]; } __reserved_16;
-
-/*3B0*/ struct { u32 __reserved[4]; } __reserved_17;
-
-/*3C0*/ struct { u32 __reserved[4]; } __reserved_18;
-
-/*3D0*/ struct { u32 __reserved[4]; } __reserved_19;
-
-/*3E0*/ struct { /* Timer Divide Configuration Register */
- u32 divisor : 4,
- __reserved_1 : 28;
- u32 __reserved_2[3];
- } timer_dcr;
-
-/*3F0*/ struct { u32 __reserved[4]; } __reserved_20;
-
-} __attribute__ ((packed));
-
-#undef u32
-
-#endif
diff --git a/xen/include/asm-x86_64/atomic.h b/xen/include/asm-x86_64/atomic.h
deleted file mode 100644
index 1f5dc5085d..0000000000
--- a/xen/include/asm-x86_64/atomic.h
+++ /dev/null
@@ -1,205 +0,0 @@
-#ifndef __ARCH_X86_64_ATOMIC__
-#define __ARCH_X86_64_ATOMIC__
-
-#include <xen/config.h>
-
-/*
- * Atomic operations that C can't guarantee us. Useful for
- * resource counting etc..
- */
-
-#ifdef CONFIG_SMP
-#define LOCK "lock ; "
-#else
-#define LOCK ""
-#endif
-
-/*
- * Make sure gcc doesn't try to be clever and move things around
- * on us. We need to use _exactly_ the address the user gave us,
- * not some alias that contains the same information.
- */
-typedef struct { volatile int counter; } atomic_t;
-
-#define ATOMIC_INIT(i) { (i) }
-
-/**
- * atomic_read - read atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically reads the value of @v. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
- */
-#define atomic_read(v) ((v)->counter)
-
-/**
- * atomic_set - set atomic variable
- * @v: pointer of type atomic_t
- * @i: required value
- *
- * Atomically sets the value of @v to @i. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
- */
-#define atomic_set(v,i) (((v)->counter) = (i))
-
-/**
- * atomic_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v. Note that the guaranteed useful range
- * of an atomic_t is only 24 bits.
- */
-static __inline__ void atomic_add(int i, atomic_t *v)
-{
- __asm__ __volatile__(
- LOCK "addl %1,%0"
- :"=m" (v->counter)
- :"ir" (i), "m" (v->counter));
-}
-
-/**
- * atomic_sub - subtract the atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
- */
-static __inline__ void atomic_sub(int i, atomic_t *v)
-{
- __asm__ __volatile__(
- LOCK "subl %1,%0"
- :"=m" (v->counter)
- :"ir" (i), "m" (v->counter));
-}
-
-/**
- * atomic_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
- */
-static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
-{
- unsigned char c;
-
- __asm__ __volatile__(
- LOCK "subl %2,%0; sete %1"
- :"=m" (v->counter), "=qm" (c)
- :"ir" (i), "m" (v->counter) : "memory");
- return c;
-}
-
-/**
- * atomic_inc - increment atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
- */
-static __inline__ void atomic_inc(atomic_t *v)
-{
- __asm__ __volatile__(
- LOCK "incl %0"
- :"=m" (v->counter)
- :"m" (v->counter));
-}
-
-/**
- * atomic_dec - decrement atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
- */
-static __inline__ void atomic_dec(atomic_t *v)
-{
- __asm__ __volatile__(
- LOCK "decl %0"
- :"=m" (v->counter)
- :"m" (v->counter));
-}
-
-/**
- * atomic_dec_and_test - decrement and test
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
- */
-static __inline__ int atomic_dec_and_test(atomic_t *v)
-{
- unsigned char c;
-
- __asm__ __volatile__(
- LOCK "decl %0; sete %1"
- :"=m" (v->counter), "=qm" (c)
- :"m" (v->counter) : "memory");
- return c != 0;
-}
-
-/**
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
- */
-static __inline__ int atomic_inc_and_test(atomic_t *v)
-{
- unsigned char c;
-
- __asm__ __volatile__(
- LOCK "incl %0; sete %1"
- :"=m" (v->counter), "=qm" (c)
- :"m" (v->counter) : "memory");
- return c != 0;
-}
-
-/**
- * atomic_add_negative - add and test if negative
- * @v: pointer of type atomic_t
- * @i: integer value to add
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
- */
-static __inline__ int atomic_add_negative(int i, atomic_t *v)
-{
- unsigned char c;
-
- __asm__ __volatile__(
- LOCK "addl %2,%0; sets %1"
- :"=m" (v->counter), "=qm" (c)
- :"ir" (i), "m" (v->counter) : "memory");
- return c;
-}
-
-
-/* These are x86-specific, used by some header files */
-#define atomic_clear_mask(mask, addr) \
-__asm__ __volatile__(LOCK "andl %0,%1" \
-: : "r" (~(mask)),"m" (*addr) : "memory")
-
-#define atomic_set_mask(mask, addr) \
-__asm__ __volatile__(LOCK "orl %0,%1" \
-: : "r" ((unsigned)mask),"m" (*addr) : "memory")
-
-/* Atomic operations are already serializing on x86 */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
-#endif /* __ARCH_X86_64_ATOMIC__ */
diff --git a/xen/include/asm-x86_64/cache.h b/xen/include/asm-x86_64/cache.h
deleted file mode 100644
index a1d7349a81..0000000000
--- a/xen/include/asm-x86_64/cache.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * include/asm-x8664/cache.h
- */
-#ifndef __ARCH_X8664_CACHE_H
-#define __ARCH_X8664_CACHE_H
-
-#include <xen/config.h>
-
-/* L1 cache line size */
-#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-
-#endif
diff --git a/xen/include/asm-x86_64/cpufeature.h b/xen/include/asm-x86_64/cpufeature.h
deleted file mode 100644
index 7d9f90e813..0000000000
--- a/xen/include/asm-x86_64/cpufeature.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * cpufeature.h
- *
- * Defines x86 CPU feature bits
- */
-
-#ifndef __ASM_X8664_CPUFEATURE_H
-#define __ASM_X8664_CPUFEATURE_H
-
-/* Sample usage: CPU_FEATURE_P(cpu.x86_capability, FPU) */
-#define CPU_FEATURE_P(CAP, FEATURE) test_bit(CAP, X86_FEATURE_##FEATURE ##_BIT)
-
-#define NCAPINTS 4 /* Currently we have 4 32-bit words worth of info */
-
-/* Intel-defined CPU features, CPUID level 0x00000001, word 0 */
-#define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */
-#define X86_FEATURE_VME (0*32+ 1) /* Virtual Mode Extensions */
-#define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */
-#define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */
-#define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */
-#define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers, RDMSR, WRMSR */
-#define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */
-#define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Architecture */
-#define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */
-#define X86_FEATURE_APIC (0*32+ 9) /* Onboard APIC */
-#define X86_FEATURE_SEP (0*32+11) /* SYSENTER/SYSEXIT */
-#define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */
-#define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */
-#define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */
-#define X86_FEATURE_CMOV (0*32+15) /* CMOV instruction (FCMOVCC and FCOMI too if FPU present) */
-#define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */
-#define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */
-#define X86_FEATURE_PN (0*32+18) /* Processor serial number */
-#define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */
-#define X86_FEATURE_DTES (0*32+21) /* Debug Trace Store */
-#define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */
-#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */
-#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */
- /* of FPU context), and CR4.OSFXSR available */
-#define X86_FEATURE_XMM (0*32+25) /* Streaming SIMD Extensions */
-#define X86_FEATURE_XMM2 (0*32+26) /* Streaming SIMD Extensions-2 */
-#define X86_FEATURE_SELFSNOOP (0*32+27) /* CPU self snoop */
-#define X86_FEATURE_ACC (0*32+29) /* Automatic clock control */
-#define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */
-
-/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
-/* Don't duplicate feature flags which are redundant with Intel! */
-#define X86_FEATURE_SYSCALL (1*32+11) /* SYSCALL/SYSRET */
-#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */
-#define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */
-#define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */
-#define X86_FEATURE_3DNOW (1*32+31) /* 3DNow! */
-
-/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
-#define X86_FEATURE_RECOVERY (2*32+ 0) /* CPU in recovery mode */
-#define X86_FEATURE_LONGRUN (2*32+ 1) /* Longrun power control */
-#define X86_FEATURE_LRTI (2*32+ 3) /* LongRun table interface */
-
-/* Other features, Linux-defined mapping, word 3 */
-/* This range is used for feature bits which conflict or are synthesized */
-#define X86_FEATURE_CXMMX (3*32+ 0) /* Cyrix MMX extensions */
-#define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */
-#define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */
-#define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */
-
-#endif /* __ASM_X8664_CPUFEATURE_H */
-
-/*
- * Local Variables:
- * mode:c
- * comment-column:42
- * End:
- */
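The feature words above encode each capability as word*32 + bit, so the full set fits in an NCAPINTS-sized array of 32-bit words. A minimal sketch of testing one flag against such an array (the helper name cpu_has_feature and the caps parameter are illustrative, not part of the header just removed):

    static inline int cpu_has_feature(const unsigned int caps[NCAPINTS], int feature)
    {
        /* 'feature' encodes word*32 + bit, e.g. X86_FEATURE_PAE is word 0, bit 6 */
        return (caps[feature / 32] >> (feature % 32)) & 1;
    }

    /* e.g. cpu_has_feature(c->x86_capability, X86_FEATURE_PAE) */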
diff --git a/xen/include/asm-x86_64/debugreg.h b/xen/include/asm-x86_64/debugreg.h
deleted file mode 100644
index bd1aab1d8c..0000000000
--- a/xen/include/asm-x86_64/debugreg.h
+++ /dev/null
@@ -1,65 +0,0 @@
-#ifndef _X86_64_DEBUGREG_H
-#define _X86_64_DEBUGREG_H
-
-
-/* Register numbers for the specific debug registers. Registers 0-3
-   contain the addresses we wish to trap on */
-#define DR_FIRSTADDR 0 /* u_debugreg[DR_FIRSTADDR] */
-#define DR_LASTADDR 3 /* u_debugreg[DR_LASTADDR] */
-
-#define DR_STATUS 6 /* u_debugreg[DR_STATUS] */
-#define DR_CONTROL 7 /* u_debugreg[DR_CONTROL] */
-
-/* Define a few things for the status register. We can use this to determine
- which debugging register was responsible for the trap. The other bits
- are either reserved or not of interest to us. */
-
-#define DR_TRAP0 (0x1) /* db0 */
-#define DR_TRAP1 (0x2) /* db1 */
-#define DR_TRAP2 (0x4) /* db2 */
-#define DR_TRAP3 (0x8) /* db3 */
-
-#define DR_STEP (0x4000) /* single-step */
-#define DR_SWITCH (0x8000) /* task switch */
-
-/* Now define a bunch of things for manipulating the control register.
- The top two bytes of the control register consist of 4 fields of 4
- bits - each field corresponds to one of the four debug registers,
- and indicates what types of access we trap on, and how large the data
- field is that we are looking at */
-
-#define DR_CONTROL_SHIFT 16 /* Skip this many bits in ctl register */
-#define DR_CONTROL_SIZE 4 /* 4 control bits per register */
-
-#define DR_RW_EXECUTE (0x0) /* Settings for the access types to trap on */
-#define DR_RW_WRITE (0x1)
-#define DR_RW_READ (0x3)
-
-#define DR_LEN_1 (0x0) /* Settings for data length to trap on */
-#define DR_LEN_2 (0x4)
-#define DR_LEN_4 (0xC)
-#define DR_LEN_8 (0x8)
-
-/* The low byte of the control register determines which registers are
- enabled. There are 4 fields of two bits. One bit is "local", meaning
- that the processor will reset the bit after a task switch and the other
- is global, meaning that we have to explicitly reset the bit. With Linux,
- you can use either one, since we explicitly zero the register when we enter
- kernel mode. */
-
-#define DR_LOCAL_ENABLE_SHIFT 0 /* Extra shift to the local enable bit */
-#define DR_GLOBAL_ENABLE_SHIFT 1 /* Extra shift to the global enable bit */
-#define DR_ENABLE_SIZE 2 /* 2 enable bits per register */
-
-#define DR_LOCAL_ENABLE_MASK (0x55) /* Set local bits for all 4 regs */
-#define DR_GLOBAL_ENABLE_MASK (0xAA) /* Set global bits for all 4 regs */
-
-/* The second byte of the control register has a few special things.
- We can slow the instruction pipeline for instructions coming via the
- gdt or the ldt if we want to. I am not sure why this is an advantage */
-
-#define DR_CONTROL_RESERVED (0xFFFFFFFF0000FC00UL) /* Reserved */
-#define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */
-#define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */
-
-#endif
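The layout described above (two enable bits per register in the low byte, a 4-bit type/length field per register starting at bit 16) composes into a %dr7 value as follows. A sketch using only the constants from the removed header; dr7_write_breakpoint is an illustrative name:

    static inline unsigned long dr7_write_breakpoint(int regnum)
    {
        unsigned long ctl = 0;

        /* local enable bit for this register (2 enable bits per register) */
        ctl |= 1UL << (regnum * DR_ENABLE_SIZE + DR_LOCAL_ENABLE_SHIFT);
        /* trap on 4-byte writes: type/length field for this register */
        ctl |= (unsigned long)(DR_RW_WRITE | DR_LEN_4)
                   << (DR_CONTROL_SHIFT + regnum * DR_CONTROL_SIZE);
        return ctl;    /* regnum 0 yields 0x000d0001 */
    }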
diff --git a/xen/include/asm-x86_64/delay.h b/xen/include/asm-x86_64/delay.h
deleted file mode 100644
index a04cdb4346..0000000000
--- a/xen/include/asm-x86_64/delay.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef _X86_64_DELAY_H
-#define _X86_64_DELAY_H
-
-/*
- * Copyright (C) 1993 Linus Torvalds
- *
- * Delay routines calling functions in arch/i386/lib/delay.c
- */
-
-extern unsigned long ticks_per_usec;
-extern void __udelay(unsigned long usecs);
-#define udelay(n) __udelay(n)
-
-#endif /* defined(_X86_64_DELAY_H) */
diff --git a/xen/include/asm-x86_64/dma.h b/xen/include/asm-x86_64/dma.h
deleted file mode 100644
index e0854628d3..0000000000
--- a/xen/include/asm-x86_64/dma.h
+++ /dev/null
@@ -1,301 +0,0 @@
-/* $Id: dma.h,v 1.7 1992/12/14 00:29:34 root Exp root $
- * linux/include/asm/dma.h: Defines for using and allocating dma channels.
- * Written by Hennus Bergman, 1992.
- * High DMA channel support & info by Hannu Savolainen
- * and John Boyd, Nov. 1992.
- */
-
-#ifndef _ASM_DMA_H
-#define _ASM_DMA_H
-
-#include <xen/config.h>
-#include <xen/spinlock.h> /* And spinlocks */
-#include <asm/io.h> /* need byte IO */
-#include <xen/delay.h>
-
-
-#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
-#define dma_outb outb_p
-#else
-#define dma_outb outb
-#endif
-
-#define dma_inb inb
-
-/*
- * NOTES about DMA transfers:
- *
- * controller 1: channels 0-3, byte operations, ports 00-1F
- * controller 2: channels 4-7, word operations, ports C0-DF
- *
- * - ALL registers are 8 bits only, regardless of transfer size
- * - channel 4 is not used - cascades 1 into 2.
- * - channels 0-3 are byte - addresses/counts are for physical bytes
- * - channels 5-7 are word - addresses/counts are for physical words
- * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
- * - transfer count loaded to registers is 1 less than actual count
- * - controller 2 offsets are all even (2x offsets for controller 1)
- * - page registers for 5-7 don't use data bit 0, represent 128K pages
- * - page registers for 0-3 use bit 0, represent 64K pages
- *
- * DMA transfers are limited to the lower 16MB of _physical_ memory.
- * Note that addresses loaded into registers must be _physical_ addresses,
- * not logical addresses (which may differ if paging is active).
- *
- * Address mapping for channels 0-3:
- *
- * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses)
- * | ... | | ... | | ... |
- * | ... | | ... | | ... |
- * | ... | | ... | | ... |
- * P7 ... P0 A7 ... A0 A7 ... A0
- * | Page | Addr MSB | Addr LSB | (DMA registers)
- *
- * Address mapping for channels 5-7:
- *
- * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses)
- * | ... | \ \ ... \ \ \ ... \ \
- * | ... | \ \ ... \ \ \ ... \ (not used)
- * | ... | \ \ ... \ \ \ ... \
- * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0
- * | Page | Addr MSB | Addr LSB | (DMA registers)
- *
- * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
- * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
- * the hardware level, so odd-byte transfers aren't possible).
- *
- * Transfer count (_not # bytes_) is limited to 64K, represented as actual
- * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more,
- * and up to 128K bytes may be transferred on channels 5-7 in one operation.
- *
- */
-
-#define MAX_DMA_CHANNELS 8
-
-#if 0
-/* The maximum address that we can perform a DMA transfer to on this platform */
-#define MAX_DMA_ADDRESS (PAGE_OFFSET+0x1000000)
-#endif
-
-
-/* 8237 DMA controllers */
-#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
-#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */
-
-/* DMA controller registers */
-#define DMA1_CMD_REG 0x08 /* command register (w) */
-#define DMA1_STAT_REG 0x08 /* status register (r) */
-#define DMA1_REQ_REG 0x09 /* request register (w) */
-#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */
-#define DMA1_MODE_REG 0x0B /* mode register (w) */
-#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */
-#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */
-#define DMA1_RESET_REG 0x0D /* Master Clear (w) */
-#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */
-#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */
-
-#define DMA2_CMD_REG 0xD0 /* command register (w) */
-#define DMA2_STAT_REG 0xD0 /* status register (r) */
-#define DMA2_REQ_REG 0xD2 /* request register (w) */
-#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */
-#define DMA2_MODE_REG 0xD6 /* mode register (w) */
-#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */
-#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */
-#define DMA2_RESET_REG 0xDA /* Master Clear (w) */
-#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */
-#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */
-
-#define DMA_ADDR_0 0x00 /* DMA address registers */
-#define DMA_ADDR_1 0x02
-#define DMA_ADDR_2 0x04
-#define DMA_ADDR_3 0x06
-#define DMA_ADDR_4 0xC0
-#define DMA_ADDR_5 0xC4
-#define DMA_ADDR_6 0xC8
-#define DMA_ADDR_7 0xCC
-
-#define DMA_CNT_0 0x01 /* DMA count registers */
-#define DMA_CNT_1 0x03
-#define DMA_CNT_2 0x05
-#define DMA_CNT_3 0x07
-#define DMA_CNT_4 0xC2
-#define DMA_CNT_5 0xC6
-#define DMA_CNT_6 0xCA
-#define DMA_CNT_7 0xCE
-
-#define DMA_PAGE_0 0x87 /* DMA page registers */
-#define DMA_PAGE_1 0x83
-#define DMA_PAGE_2 0x81
-#define DMA_PAGE_3 0x82
-#define DMA_PAGE_5 0x8B
-#define DMA_PAGE_6 0x89
-#define DMA_PAGE_7 0x8A
-
-#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */
-#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
-#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
-
-#define DMA_AUTOINIT 0x10
-
-
-extern spinlock_t dma_spin_lock;
-
-static __inline__ unsigned long claim_dma_lock(void)
-{
- unsigned long flags;
- spin_lock_irqsave(&dma_spin_lock, flags);
- return flags;
-}
-
-static __inline__ void release_dma_lock(unsigned long flags)
-{
- spin_unlock_irqrestore(&dma_spin_lock, flags);
-}
-
-/* enable/disable a specific DMA channel */
-static __inline__ void enable_dma(unsigned int dmanr)
-{
- if (dmanr<=3)
- dma_outb(dmanr, DMA1_MASK_REG);
- else
- dma_outb(dmanr & 3, DMA2_MASK_REG);
-}
-
-static __inline__ void disable_dma(unsigned int dmanr)
-{
- if (dmanr<=3)
- dma_outb(dmanr | 4, DMA1_MASK_REG);
- else
- dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
-}
-
-/* Clear the 'DMA Pointer Flip Flop'.
- * Write 0 for LSB/MSB, 1 for MSB/LSB access.
- * Use this once to initialize the FF to a known state.
- * After that, keep track of it. :-)
- * --- In order to do that, the DMA routines below should ---
- * --- only be used while holding the DMA lock ! ---
- */
-static __inline__ void clear_dma_ff(unsigned int dmanr)
-{
- if (dmanr<=3)
- dma_outb(0, DMA1_CLEAR_FF_REG);
- else
- dma_outb(0, DMA2_CLEAR_FF_REG);
-}
-
-/* set mode (above) for a specific DMA channel */
-static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
-{
- if (dmanr<=3)
- dma_outb(mode | dmanr, DMA1_MODE_REG);
- else
- dma_outb(mode | (dmanr&3), DMA2_MODE_REG);
-}
-
-/* Set only the page register bits of the transfer address.
- * This is used for successive transfers when we know the contents of
- * the lower 16 bits of the DMA current address register, but a 64k boundary
- * may have been crossed.
- */
-static __inline__ void set_dma_page(unsigned int dmanr, char pagenr)
-{
- switch(dmanr) {
- case 0:
- dma_outb(pagenr, DMA_PAGE_0);
- break;
- case 1:
- dma_outb(pagenr, DMA_PAGE_1);
- break;
- case 2:
- dma_outb(pagenr, DMA_PAGE_2);
- break;
- case 3:
- dma_outb(pagenr, DMA_PAGE_3);
- break;
- case 5:
- dma_outb(pagenr & 0xfe, DMA_PAGE_5);
- break;
- case 6:
- dma_outb(pagenr & 0xfe, DMA_PAGE_6);
- break;
- case 7:
- dma_outb(pagenr & 0xfe, DMA_PAGE_7);
- break;
- }
-}
-
-
-/* Set transfer address & page bits for specific DMA channel.
- * Assumes dma flipflop is clear.
- */
-static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
-{
- set_dma_page(dmanr, a>>16);
- if (dmanr <= 3) {
- dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
- dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
- } else {
- dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
- dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
- }
-}
-
-
-/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
- * a specific DMA channel.
- * You must ensure the parameters are valid.
- * NOTE: from a manual: "the number of transfers is one more
- * than the initial word count"! This is taken into account.
- * Assumes dma flip-flop is clear.
- * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
- */
-static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
-{
- count--;
- if (dmanr <= 3) {
- dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
- dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
- } else {
- dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
- dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
- }
-}
-
-
-/* Get DMA residue count. After a DMA transfer, this
- * should return zero. Reading this while a DMA transfer is
- * still in progress will return unpredictable results.
- * If called before the channel has been used, it may return 1.
- * Otherwise, it returns the number of _bytes_ left to transfer.
- *
- * Assumes DMA flip-flop is clear.
- */
-static __inline__ int get_dma_residue(unsigned int dmanr)
-{
- unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
- : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;
-
- /* using short to get 16-bit wrap around */
- unsigned short count;
-
- count = 1 + dma_inb(io_port);
- count += dma_inb(io_port) << 8;
-
- return (dmanr<=3)? count : (count<<1);
-}
-
-
-/* These are in kernel/dma.c: */
-extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */
-extern void free_dma(unsigned int dmanr); /* release it again */
-
-/* From PCI */
-
-#ifdef CONFIG_PCI
-extern int isa_dma_bridge_buggy;
-#else
-#define isa_dma_bridge_buggy (0)
-#endif
-
-#endif /* _ASM_DMA_H */
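Putting the helpers above together, a single-cycle device-to-memory transfer on an 8-bit channel is normally programmed in the order the comments call for. A sketch; chan, phys and len are caller-supplied, and phys must lie below 16MB and must not cross a 64K boundary:

    static void start_isa_dma_read(unsigned int chan, unsigned int phys, unsigned int len)
    {
        unsigned long flags = claim_dma_lock();

        disable_dma(chan);
        clear_dma_ff(chan);                 /* reset the LSB/MSB flip-flop */
        set_dma_mode(chan, DMA_MODE_READ);  /* I/O -> memory, single mode */
        set_dma_addr(chan, phys);           /* page register + 16-bit address */
        set_dma_count(chan, len);           /* helper writes len-1 to the chip */
        enable_dma(chan);
        release_dma_lock(flags);
    }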
diff --git a/xen/include/asm-x86_64/domain_page.h b/xen/include/asm-x86_64/domain_page.h
deleted file mode 100644
index f093ee2d96..0000000000
--- a/xen/include/asm-x86_64/domain_page.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/******************************************************************************
- * domain_page.h
- *
- * Allow temporary mapping of domain page frames into Xen space.
- */
-
-#ifndef __ASM_DOMAIN_PAGE_H__
-#define __ASM_DOMAIN_PAGE_H__
-
-#include <xen/config.h>
-#include <xen/sched.h>
-#include <asm/page.h>
-
-/*
- * Maps a given physical address, returning corresponding virtual address.
- * The entire page containing that VA is now accessible until a
- * corresponding call to unmap_domain_mem().
- */
-#define map_domain_mem(pa) __va(pa)
-
-/*
- * Pass a VA within a page previously mapped with map_domain_mem().
- * That page will then be removed from the mapping lists.
- */
-#define unmap_domain_mem(va) {}
-
-#endif /* __ASM_DOMAIN_PAGE_H__ */
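The intended usage is a paired map/unmap around each access; on x86_64 every frame already sits in the direct map, hence the trivial __va()/no-op definitions above. A sketch, where zero_domain_frame is an illustrative caller rather than part of the interface:

    static inline void zero_domain_frame(unsigned long pa)
    {
        void *va = map_domain_mem(pa);   /* trivially __va(pa) here */
        memset(va, 0, PAGE_SIZE);
        unmap_domain_mem(va);            /* no-op on this architecture */
    }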
diff --git a/xen/include/asm-x86_64/fixmap.h b/xen/include/asm-x86_64/fixmap.h
deleted file mode 100644
index d3f9803af4..0000000000
--- a/xen/include/asm-x86_64/fixmap.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * fixmap.h: compile-time virtual memory allocation
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1998 Ingo Molnar
- *
- * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
- */
-
-#ifndef _ASM_FIXMAP_H
-#define _ASM_FIXMAP_H
-
-#include <xen/config.h>
-#include <asm/apicdef.h>
-#include <asm/page.h>
-
-/*
- * Here we define all the compile-time 'special' virtual
- * addresses. The point is to have a constant address at
- * compile time, but to set the physical address only
- * in the boot process. We allocate these special addresses
- * from the end of virtual memory (0xffffffffffffe000) backwards.
- * Also this lets us do fail-safe vmalloc(), since we
- * can guarantee that these special addresses and
- * vmalloc()-ed addresses never overlap.
- *
- * these 'compile-time allocated' memory buffers are
- * fixed-size 4k pages (or larger if used with an increment
- * higher than 1). Use set_fixmap(idx,phys) to associate
- * physical memory with fixmap indices.
- *
- * TLB entries of such buffers will not be flushed across
- * task switches.
- */
-
-/*
- * on UP currently we will have no trace of the fixmap mechanism,
- * no page table allocations, etc. This might change in the
- * future, say framebuffers for the console driver(s) could be
- * fix-mapped?
- */
-enum fixed_addresses {
-#ifdef CONFIG_X86_LOCAL_APIC
- FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
-#endif
-#ifdef CONFIG_X86_IO_APIC
- FIX_IO_APIC_BASE_0,
- FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
-#endif
-#ifdef CONFIG_HIGHMEM
- FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
- FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
-#endif
- __end_of_fixed_addresses
-};
-
-extern void __set_fixmap (enum fixed_addresses idx,
- l1_pgentry_t entry);
-
-#define set_fixmap(idx, phys) \
- __set_fixmap(idx, mk_l1_pgentry(phys|PAGE_HYPERVISOR))
-/*
- * Some hardware wants to get fixmapped without caching.
- */
-#define set_fixmap_nocache(idx, phys) \
- __set_fixmap(idx, mk_l1_pgentry(phys|PAGE_HYPERVISOR_NOCACHE))
-/*
- * used by vmalloc.c.
- *
- * Leave one empty page between vmalloc'ed areas and
- * the start of the fixmap, and leave one page empty
- * at the top of mem..
- */
-#define FIXADDR_TOP (0xffffffffffffe000UL)
-#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
-#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
-
-#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
-
-extern void __this_fixmap_does_not_exist(void);
-
-/*
- * 'index to address' translation. If anyone tries to use the idx
- * directly without translation, we catch the bug with a NULL-dereference
- * kernel oops. Illegal ranges of incoming indices are caught too.
- */
-static inline unsigned long fix_to_virt(const unsigned int idx)
-{
- /*
- * this branch gets completely eliminated after inlining,
- * except when someone tries to use fixaddr indices in an
- * illegal way. (such as mixing up address types or using
- * out-of-range indices).
- *
- * If it doesn't get removed, the linker will complain
- * loudly with a reasonably clear error message..
- */
- if (idx >= __end_of_fixed_addresses)
- __this_fixmap_does_not_exist();
-
- return __fix_to_virt(idx);
-}
-
-#endif
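A fixmap slot is bound to a physical address once and is then reached through the constant virtual address computed by fix_to_virt(). A sketch, assuming CONFIG_X86_LOCAL_APIC so that FIX_APIC_BASE exists, with apic_phys supplied by the caller:

    static inline unsigned long map_local_apic(unsigned long apic_phys)
    {
        set_fixmap_nocache(FIX_APIC_BASE, apic_phys);  /* uncached mapping */
        return fix_to_virt(FIX_APIC_BASE);             /* FIXADDR_TOP - (idx << PAGE_SHIFT) */
    }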
diff --git a/xen/include/asm-x86_64/hardirq.h b/xen/include/asm-x86_64/hardirq.h
deleted file mode 100644
index c59f6e3e0f..0000000000
--- a/xen/include/asm-x86_64/hardirq.h
+++ /dev/null
@@ -1,90 +0,0 @@
-#ifndef __ASM_HARDIRQ_H
-#define __ASM_HARDIRQ_H
-
-#include <xen/config.h>
-#include <xen/irq.h>
-
-/* assembly code in softirq.h is sensitive to the offsets of these fields */
-typedef struct {
- unsigned int __softirq_pending;
- unsigned int __local_irq_count;
- unsigned int __local_bh_count;
- unsigned int __syscall_count;
- unsigned int __nmi_count;
- unsigned long idle_timestamp;
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <xen/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
-
-/*
- * Are we in an interrupt context? Either doing bottom half
- * or hardware interrupt processing?
- */
-#define in_interrupt() ({ int __cpu = smp_processor_id(); \
- (local_irq_count(__cpu) + local_bh_count(__cpu) != 0); })
-
-#define in_irq() (local_irq_count(smp_processor_id()) != 0)
-
-#ifndef CONFIG_SMP
-
-#define hardirq_trylock(cpu) (local_irq_count(cpu) == 0)
-#define hardirq_endlock(cpu) do { } while (0)
-
-#define irq_enter(cpu, irq) (local_irq_count(cpu)++)
-#define irq_exit(cpu, irq) (local_irq_count(cpu)--)
-
-#define synchronize_irq() barrier()
-
-#else
-
-#include <asm/atomic.h>
-#include <asm/smp.h>
-
-extern unsigned char global_irq_holder;
-extern unsigned volatile long global_irq_lock; /* long for set_bit -RR */
-
-static inline int irqs_running (void)
-{
- int i;
-
- for (i = 0; i < smp_num_cpus; i++)
- if (local_irq_count(i))
- return 1;
- return 0;
-}
-
-static inline void release_irqlock(int cpu)
-{
- /* if we didn't own the irq lock, just ignore.. */
- if (global_irq_holder == (unsigned char) cpu) {
- global_irq_holder = 0xff;
- clear_bit(0,&global_irq_lock);
- }
-}
-
-static inline void irq_enter(int cpu, int irq)
-{
- ++local_irq_count(cpu);
-
- while (test_bit(0,&global_irq_lock)) {
- cpu_relax();
- }
-}
-
-static inline void irq_exit(int cpu, int irq)
-{
- --local_irq_count(cpu);
-}
-
-static inline int hardirq_trylock(int cpu)
-{
- return !local_irq_count(cpu) && !test_bit(0,&global_irq_lock);
-}
-
-#define hardirq_endlock(cpu) do { } while (0)
-
-extern void synchronize_irq(void);
-
-#endif /* CONFIG_SMP */
-
-#endif /* __ASM_HARDIRQ_H */
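A low-level dispatcher is expected to bracket every handler with this accounting so that in_interrupt()/in_irq() report correctly; on SMP, irq_enter() additionally spins while the global irq lock is held. A sketch (dispatch_one_irq and the handler call are illustrative):

    static void dispatch_one_irq(int cpu, int irq)
    {
        irq_enter(cpu, irq);       /* bumps local_irq_count(cpu) */
        /* ... invoke the handler registered for 'irq' ...
           in_interrupt() and in_irq() are true inside this window */
        irq_exit(cpu, irq);        /* drops local_irq_count(cpu) */
    }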
diff --git a/xen/include/asm-x86_64/hdreg.h b/xen/include/asm-x86_64/hdreg.h
deleted file mode 100644
index 18561aaed3..0000000000
--- a/xen/include/asm-x86_64/hdreg.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * linux/include/asm-x86_64/hdreg.h
- *
- * Copyright (C) 1994-1996 Linus Torvalds & authors
- */
-
-#ifndef __ASMx86_64_HDREG_H
-#define __ASMx86_64_HDREG_H
-
-//typedef unsigned short ide_ioreg_t;
-typedef unsigned long ide_ioreg_t;
-
-#endif /* __ASMx86_64_HDREG_H */
diff --git a/xen/include/asm-x86_64/i387.h b/xen/include/asm-x86_64/i387.h
deleted file mode 100644
index 95a6bb6cde..0000000000
--- a/xen/include/asm-x86_64/i387.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * include/asm-i386/i387.h
- *
- * Copyright (C) 1994 Linus Torvalds
- *
- * Pentium III FXSR, SSE support
- * General FPU state handling cleanups
- * Gareth Hughes <gareth@valinux.com>, May 2000
- */
-
-#ifndef __ASM_I386_I387_H
-#define __ASM_I386_I387_H
-
-#include <xen/sched.h>
-#include <asm/processor.h>
-
-extern void init_fpu(void);
-extern void save_init_fpu( struct task_struct *tsk );
-extern void restore_fpu( struct task_struct *tsk );
-
-#define unlazy_fpu( tsk ) do { \
- if ( test_bit(PF_USEDFPU, &tsk->flags) ) \
- save_init_fpu( tsk ); \
-} while (0)
-
-#define clear_fpu( tsk ) do { \
- if ( test_and_clear_bit(PF_USEDFPU, &tsk->flags) ) { \
- asm volatile("fwait"); \
- stts(); \
- } \
-} while (0)
-
-#define load_mxcsr( val ) do { \
- unsigned long __mxcsr = ((unsigned long)(val) & 0xffbf); \
- asm volatile( "ldmxcsr %0" : : "m" (__mxcsr) ); \
-} while (0)
-
-#endif /* __ASM_I386_I387_H */
diff --git a/xen/include/asm-x86_64/ide.h b/xen/include/asm-x86_64/ide.h
deleted file mode 100644
index 05de458761..0000000000
--- a/xen/include/asm-x86_64/ide.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * linux/include/asm-x86_64/ide.h
- *
- * Copyright (C) 1994-1996 Linus Torvalds & authors
- */
-
-/*
- * This file contains the x86_64 architecture specific IDE code.
- */
-
-#ifndef __ASMx86_64_IDE_H
-#define __ASMx86_64_IDE_H
-
-#ifdef __KERNEL__
-
-#include <xen/config.h>
-
-#ifndef MAX_HWIFS
-# ifdef CONFIG_BLK_DEV_IDEPCI
-#define MAX_HWIFS 10
-# else
-#define MAX_HWIFS 6
-# endif
-#endif
-
-static __inline__ int ide_default_irq(ide_ioreg_t base)
-{
- switch (base) {
- case 0x1f0: return 14;
- case 0x170: return 15;
- case 0x1e8: return 11;
- case 0x168: return 10;
- case 0x1e0: return 8;
- case 0x160: return 12;
- default:
- return 0;
- }
-}
-
-static __inline__ ide_ioreg_t ide_default_io_base(int index)
-{
- switch (index) {
- case 0: return 0x1f0;
- case 1: return 0x170;
- case 2: return 0x1e8;
- case 3: return 0x168;
- case 4: return 0x1e0;
- case 5: return 0x160;
- default:
- return 0;
- }
-}
-
-static __inline__ void ide_init_hwif_ports(hw_regs_t *hw, ide_ioreg_t data_port, ide_ioreg_t ctrl_port, int *irq)
-{
- ide_ioreg_t reg = data_port;
- int i;
-
- for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
- hw->io_ports[i] = reg;
- reg += 1;
- }
- if (ctrl_port) {
- hw->io_ports[IDE_CONTROL_OFFSET] = ctrl_port;
- } else {
- hw->io_ports[IDE_CONTROL_OFFSET] = hw->io_ports[IDE_DATA_OFFSET] + 0x206;
- }
- if (irq != NULL)
- *irq = 0;
- hw->io_ports[IDE_IRQ_OFFSET] = 0;
-}
-
-static __inline__ void ide_init_default_hwifs(void)
-{
-#ifndef CONFIG_BLK_DEV_IDEPCI
- hw_regs_t hw;
- int index;
-
- for(index = 0; index < MAX_HWIFS; index++) {
- memset(&hw, 0, sizeof hw);
- ide_init_hwif_ports(&hw, ide_default_io_base(index), 0, NULL);
- hw.irq = ide_default_irq(ide_default_io_base(index));
- ide_register_hw(&hw, NULL);
- }
-#endif /* CONFIG_BLK_DEV_IDEPCI */
-}
-
-typedef union {
- unsigned all : 8; /* all of the bits together */
- struct {
- unsigned head : 4; /* always zeros here */
- unsigned unit : 1; /* drive select number, 0 or 1 */
- unsigned bit5 : 1; /* always 1 */
- unsigned lba : 1; /* using LBA instead of CHS */
- unsigned bit7 : 1; /* always 1 */
- } b;
-} select_t;
-
-typedef union {
- unsigned all : 8; /* all of the bits together */
- struct {
- unsigned bit0 : 1;
- unsigned nIEN : 1; /* device INTRQ to host */
- unsigned SRST : 1; /* host soft reset bit */
- unsigned bit3 : 1; /* ATA-2 thingy */
- unsigned reserved456 : 3;
- unsigned HOB : 1; /* 48-bit address ordering */
- } b;
-} control_t;
-
-#define ide_request_irq(irq,hand,flg,dev,id) request_irq((irq),(hand),(flg),(dev),(id))
-#define ide_free_irq(irq,dev_id) free_irq((irq), (dev_id))
-#define ide_check_region(from,extent) check_region((from), (extent))
-#define ide_request_region(from,extent,name) request_region((from), (extent), (name))
-#define ide_release_region(from,extent) release_region((from), (extent))
-
-/*
- * The following are not needed for the non-m68k ports
- */
-#define ide_ack_intr(hwif) (1)
-#define ide_fix_driveid(id) do {} while (0)
-#define ide_release_lock(lock) do {} while (0)
-#define ide_get_lock(lock, hdlr, data) do {} while (0)
-
-#endif /* __KERNEL__ */
-
-#endif /* __ASMx86_64_IDE_H */
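The select_t union above maps directly onto the ATA drive/head select byte. A sketch of composing one; ata_select_byte is an illustrative helper, and bit5/bit7 are kept set as legacy devices expect:

    static inline unsigned char ata_select_byte(int unit, int use_lba)
    {
        select_t s;

        s.all    = 0;
        s.b.bit5 = 1;                    /* always 1 */
        s.b.bit7 = 1;                    /* always 1 */
        s.b.unit = unit ? 1 : 0;         /* drive 0 or 1 */
        s.b.lba  = use_lba ? 1 : 0;      /* LBA rather than CHS */
        return s.all;
    }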
diff --git a/xen/include/asm-x86_64/io.h b/xen/include/asm-x86_64/io.h
deleted file mode 100644
index 35ec64235e..0000000000
--- a/xen/include/asm-x86_64/io.h
+++ /dev/null
@@ -1,273 +0,0 @@
-#ifndef _ASM_IO_H
-#define _ASM_IO_H
-
-#include <xen/config.h>
-#include <asm/page.h>
-
-/*
- * This file contains the definitions for the x86 IO instructions
- * inb/inw/inl/outb/outw/outl and the "string versions" of the same
- * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
- * versions of the single-IO instructions (inb_p/inw_p/..).
- *
- * This file is not meant to be obfuscating: it's just complicated
- * to (a) handle it all in a way that makes gcc able to optimize it
- * as well as possible and (b) trying to avoid writing the same thing
- * over and over again with slight variations and possibly making a
- * mistake somewhere.
- */
-
-/*
- * Thanks to James van Artsdalen for a better timing-fix than
- * the two short jumps: using outb's to a nonexistent port seems
- * to guarantee better timings even on fast machines.
- *
- * On the other hand, I'd like to be sure of a non-existent port:
- * I feel a bit unsafe about using 0x80 (should be safe, though)
- *
- * Linus
- */
-
- /*
- * Bit simplified and optimized by Jan Hubicka
- * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
- *
- * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
- * isa_read[wl] and isa_write[wl] fixed
- * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
- */
-
-#ifdef SLOW_IO_BY_JUMPING
-#define __SLOW_DOWN_IO "\njmp 1f\n1:\tjmp 1f\n1:"
-#else
-#define __SLOW_DOWN_IO "\noutb %%al,$0x80"
-#endif
-
-#ifdef REALLY_SLOW_IO
-#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
-#else
-#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO
-#endif
-
-/*
- * Talk about misusing macros..
- */
-#define __OUT1(s,x) \
-extern inline void out##s(unsigned x value, unsigned short port) {
-
-#define __OUT2(s,s1,s2) \
-__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
-
-#define __OUT(s,s1,x) \
-__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
-__OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));} \
-
-#define __IN1(s) \
-extern inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;
-
-#define __IN2(s,s1,s2) \
-__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
-
-#define __IN(s,s1,i...) \
-__IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
-__IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
-
-#define __INS(s) \
-extern inline void ins##s(unsigned short port, void * addr, unsigned long count) \
-{ __asm__ __volatile__ ("rep ; ins" #s \
-: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
-
-#define __OUTS(s) \
-extern inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
-{ __asm__ __volatile__ ("rep ; outs" #s \
-: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
-
-#define RETURN_TYPE unsigned char
-__IN(b,"")
-#undef RETURN_TYPE
-#define RETURN_TYPE unsigned short
-__IN(w,"")
-#undef RETURN_TYPE
-#define RETURN_TYPE unsigned int
-__IN(l,"")
-#undef RETURN_TYPE
-
-__OUT(b,"b",char)
-__OUT(w,"w",short)
-__OUT(l,,int)
-
-__INS(b)
-__INS(w)
-__INS(l)
-
-__OUTS(b)
-__OUTS(w)
-__OUTS(l)
-
-#define IO_SPACE_LIMIT 0xffff
-
-/*
- * Temporary debugging check to catch old code using
- * unmapped ISA addresses. Will be removed in 2.4.
- */
-#ifdef CONFIG_IO_DEBUG
- extern void *__io_virt_debug(unsigned long x, const char *file, int line);
- extern unsigned long __io_phys_debug(unsigned long x, const char *file, int line);
- #define __io_virt(x) __io_virt_debug((unsigned long)(x), __FILE__, __LINE__)
-//#define __io_phys(x) __io_phys_debug((unsigned long)(x), __FILE__, __LINE__)
-#else
- #define __io_virt(x) ((void *)(x))
-//#define __io_phys(x) __pa(x)
-#endif
-
-/*
- * Change virtual addresses to physical addresses and vv.
- * These are pretty trivial
- */
-extern inline unsigned long virt_to_phys(volatile void * address)
-{
- return __pa(address);
-}
-
-extern inline void * phys_to_virt(unsigned long address)
-{
- return __va(address);
-}
-
-/*
- * Change "struct page" to physical address.
- */
-#ifdef CONFIG_DISCONTIGMEM
-#include <asm/mmzone.h>
-#else
-#define page_to_phys(page) (((page) - frame_table) << PAGE_SHIFT)
-#endif
-
-#define page_to_pfn(page)	((unsigned long)((page) - frame_table))
-#define page_to_virt(page)	(phys_to_virt(page_to_phys(page)))
-
-extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
-
-extern inline void * ioremap (unsigned long offset, unsigned long size)
-{
- return __ioremap(offset, size, 0);
-}
-
-/*
- * This one maps high address device memory and turns off caching for that area.
- * it's useful if some control registers are in such an area and write combining
- * or read caching is not desirable:
- */
-extern inline void * ioremap_nocache (unsigned long offset, unsigned long size)
-{
- return __ioremap(offset, size, _PAGE_PCD);
-}
-
-extern void iounmap(void *addr);
-
-/*
- * IO bus memory addresses are also 1:1 with the physical address
- */
-#define virt_to_bus virt_to_phys
-#define bus_to_virt phys_to_virt
-#define page_to_bus page_to_phys
-
-/*
- * readX/writeX() are used to access memory mapped devices. On some
- * architectures the memory mapped IO stuff needs to be accessed
- * differently. On the x86 architecture, we just read/write the
- * memory location directly.
- */
-
-#define readb(addr) (*(volatile unsigned char *) __io_virt(addr))
-#define readw(addr) (*(volatile unsigned short *) __io_virt(addr))
-#define readl(addr) (*(volatile unsigned int *) __io_virt(addr))
-#define readq(addr) (*(volatile unsigned long *) __io_virt(addr))
-#define __raw_readb readb
-#define __raw_readw readw
-#define __raw_readl readl
-#define __raw_readq readq
-
-#define writeb(b,addr) (*(volatile unsigned char *) __io_virt(addr) = (b))
-#define writew(b,addr) (*(volatile unsigned short *) __io_virt(addr) = (b))
-#define writel(b,addr) (*(volatile unsigned int *) __io_virt(addr) = (b))
-#define writeq(b,addr) (*(volatile unsigned long *) __io_virt(addr) = (b))
-#define __raw_writeb writeb
-#define __raw_writew writew
-#define __raw_writel writel
-#define __raw_writeq writeq
-
-void *memcpy_fromio(void*,const void*,unsigned);
-void *memcpy_toio(void*,const void*,unsigned);
-
-#define memset_io(a,b,c) memset(__io_virt(a),(b),(c))
-
-/*
- * ISA space is 'always mapped' on a typical x86 system, no need to
- * explicitly ioremap() it. The fact that the ISA IO space is mapped
- * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
- * are physical addresses. The following constant pointer can be
- * used as the IO-area pointer (it can be iounmapped as well, so the
- * analogy with PCI is quite large):
- */
-#define __ISA_IO_base ((char *)(PAGE_OFFSET))
-
-#define isa_readb(a) readb(__ISA_IO_base + (a))
-#define isa_readw(a) readw(__ISA_IO_base + (a))
-#define isa_readl(a) readl(__ISA_IO_base + (a))
-#define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a))
-#define isa_writew(w,a) writew(w,__ISA_IO_base + (a))
-#define isa_writel(l,a) writel(l,__ISA_IO_base + (a))
-#define isa_memset_io(a,b,c) memset_io(__ISA_IO_base + (a),(b),(c))
-#define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),__ISA_IO_base + (b),(c))
-#define isa_memcpy_toio(a,b,c) memcpy_toio(__ISA_IO_base + (a),(b),(c))
-
-
-/*
- * Again, x86-64 does not require memory-mapped I/O specific functions.
- */
-
-#define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),__io_virt(b),(c),(d))
-#define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),__io_virt(__ISA_IO_base + (b)),(c),(d))
-
-static inline int check_signature(unsigned long io_addr,
- const unsigned char *signature, int length)
-{
- int retval = 0;
- do {
- if (readb(io_addr) != *signature)
- goto out;
- io_addr++;
- signature++;
- length--;
- } while (length);
- retval = 1;
-out:
- return retval;
-}
-
-static inline int isa_check_signature(unsigned long io_addr,
- const unsigned char *signature, int length)
-{
- int retval = 0;
- do {
- if (isa_readb(io_addr) != *signature)
- goto out;
- io_addr++;
- signature++;
- length--;
- } while (length);
- retval = 1;
-out:
- return retval;
-}
-
-/* Nothing to do */
-
-#define dma_cache_inv(_start,_size) do { } while (0)
-#define dma_cache_wback(_start,_size) do { } while (0)
-#define dma_cache_wback_inv(_start,_size) do { } while (0)
-
-#define flush_write_buffers()
-
-#endif
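The generated inb/outb families cover port I/O, while ioremap()/readl()/writel() cover memory-mapped registers. A sketch of both; the port 0x3f8, the physical address and the size are illustrative values chosen by the caller:

    static void io_access_examples(unsigned long phys, unsigned long size)
    {
        void *regs;

        outb(0x41, 0x3f8);                  /* byte write to an I/O port */
        (void)inb_p(0x3f8 + 5);             /* "pausing" variant adds the slow-down I/O */

        regs = ioremap_nocache(phys, size); /* uncached MMIO window */
        if (!regs)
            return;
        writel(0xdeadbeef, regs);
        (void)readl(regs);
        iounmap(regs);
    }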
diff --git a/xen/include/asm-x86_64/io_apic.h b/xen/include/asm-x86_64/io_apic.h
deleted file mode 100644
index d5d2e4c439..0000000000
--- a/xen/include/asm-x86_64/io_apic.h
+++ /dev/null
@@ -1,148 +0,0 @@
-#ifndef __ASM_IO_APIC_H
-#define __ASM_IO_APIC_H
-
-#include <xen/config.h>
-#include <xen/types.h>
-
-/*
- * Intel IO-APIC support for SMP and UP systems.
- *
- * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar
- */
-
-#ifdef CONFIG_X86_IO_APIC
-
-#define APIC_MISMATCH_DEBUG
-
-#define IO_APIC_BASE(idx) \
- ((volatile int *)(__fix_to_virt(FIX_IO_APIC_BASE_0 + idx) \
- + (mp_ioapics[idx].mpc_apicaddr & ~PAGE_MASK)))
-
-/*
- * The structure of the IO-APIC:
- */
-struct IO_APIC_reg_00 {
- __u32 __reserved_2 : 24,
- ID : 4,
- __reserved_1 : 4;
-} __attribute__ ((packed));
-
-struct IO_APIC_reg_01 {
- __u32 version : 8,
- __reserved_2 : 7,
- PRQ : 1,
- entries : 8,
- __reserved_1 : 8;
-} __attribute__ ((packed));
-
-struct IO_APIC_reg_02 {
- __u32 __reserved_2 : 24,
- arbitration : 4,
- __reserved_1 : 4;
-} __attribute__ ((packed));
-
-/*
- * # of IO-APICs and # of IRQ routing registers
- */
-extern int nr_ioapics;
-extern int nr_ioapic_registers[MAX_IO_APICS];
-
-enum ioapic_irq_destination_types {
- dest_Fixed = 0,
- dest_LowestPrio = 1,
- dest_SMI = 2,
- dest__reserved_1 = 3,
- dest_NMI = 4,
- dest_INIT = 5,
- dest__reserved_2 = 6,
- dest_ExtINT = 7
-};
-
-struct IO_APIC_route_entry {
- __u32 vector : 8,
- delivery_mode : 3, /* 000: FIXED
- * 001: lowest prio
- * 111: ExtINT
- */
- dest_mode : 1, /* 0: physical, 1: logical */
- delivery_status : 1,
- polarity : 1,
- irr : 1,
- trigger : 1, /* 0: edge, 1: level */
- mask : 1, /* 0: enabled, 1: disabled */
- __reserved_2 : 15;
-
- union { struct { __u32
- __reserved_1 : 24,
- physical_dest : 4,
- __reserved_2 : 4;
- } physical;
-
- struct { __u32
- __reserved_1 : 24,
- logical_dest : 8;
- } logical;
- } dest;
-
-} __attribute__ ((packed));
-
-/*
- * MP-BIOS irq configuration table structures:
- */
-
-/* I/O APIC entries */
-extern struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
-
-/* # of MP IRQ source entries */
-extern int mp_irq_entries;
-
-/* MP IRQ source entries */
-extern struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
-
-/* non-0 if default (table-less) MP configuration */
-extern int mpc_default_type;
-
-static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
-{
- *IO_APIC_BASE(apic) = reg;
- return *(IO_APIC_BASE(apic)+4);
-}
-
-static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
-{
- *IO_APIC_BASE(apic) = reg;
- *(IO_APIC_BASE(apic)+4) = value;
-}
-
-/*
- * Re-write a value: to be used for read-modify-write
- * cycles where the read already set up the index register.
- */
-static inline void io_apic_modify(unsigned int apic, unsigned int value)
-{
- *(IO_APIC_BASE(apic)+4) = value;
-}
-
-/*
- * Synchronize the IO-APIC and the CPU by doing
- * a dummy read from the IO-APIC
- */
-static inline void io_apic_sync(unsigned int apic)
-{
- (void) *(IO_APIC_BASE(apic)+4);
-}
-
-/* 1 if "noapic" boot option passed */
-extern int skip_ioapic_setup;
-
-/*
- * If we use the IO-APIC for IRQ routing, disable automatic
- * assignment of PCI IRQ's.
- */
-#define io_apic_assign_pci_irqs (mp_irq_entries && !skip_ioapic_setup)
-
-#else /* !CONFIG_X86_IO_APIC */
-#define io_apic_assign_pci_irqs 0
-#endif
-
-#endif
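Register 0x01 (struct IO_APIC_reg_01 above) advertises how many redirection entries the chip has. A sketch of reading it with the accessors just defined; ioapic_nr_entries is an illustrative name:

    static inline int ioapic_nr_entries(unsigned int apic)
    {
        struct IO_APIC_reg_01 reg_01;

        *(int *)&reg_01 = io_apic_read(apic, 1);   /* select reg 0x01, read data window */
        return reg_01.entries + 1;                 /* field holds (entries - 1) */
    }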
diff --git a/xen/include/asm-x86_64/irq.h b/xen/include/asm-x86_64/irq.h
deleted file mode 100644
index bbb83c2d95..0000000000
--- a/xen/include/asm-x86_64/irq.h
+++ /dev/null
@@ -1,136 +0,0 @@
-#ifndef _ASM_HW_IRQ_H
-#define _ASM_HW_IRQ_H
-
-/* (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar */
-
-#include <xen/config.h>
-#include <asm/atomic.h>
-
-#define SA_INTERRUPT 0x20000000
-#define SA_SHIRQ 0x04000000
-#define SA_NOPROFILE 0x02000000
-
-#define SA_SAMPLE_RANDOM 0 /* Linux driver compatibility */
-
-#define TIMER_IRQ 0
-
-extern void disable_irq(unsigned int);
-extern void disable_irq_nosync(unsigned int);
-extern void enable_irq(unsigned int);
-
-/*
- * IDT vectors usable for external interrupt sources start
- * at 0x20:
- */
-#define NR_VECTORS 256
-#define FIRST_EXTERNAL_VECTOR 0x30
-
-#ifdef CONFIG_X86_IO_APIC
-#define NR_IRQS 224
-#else
-#define NR_IRQS 16
-#endif
-
-#define HYPERVISOR_CALL_VECTOR 0x82
-
-/*
- * Vectors 0x30-0x3f are used for ISA interrupts.
- */
-
-/*
- * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
- *
- * some of the following vectors are 'rare', they are merged
- * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
- * TLB, reschedule and local APIC vectors are performance-critical.
- *
- * Vectors 0xf0-0xfa are free (reserved for future Linux use).
- */
-#define SPURIOUS_APIC_VECTOR 0xff
-#define ERROR_APIC_VECTOR 0xfe
-#define INVALIDATE_TLB_VECTOR 0xfd
-#define EVENT_CHECK_VECTOR 0xfc
-#define CALL_FUNCTION_VECTOR 0xfb
-#define KDB_VECTOR 0xfa
-#define TASK_MIGRATION_VECTOR 0xf9
-
-/*
- * Local APIC timer IRQ vector is on a different priority level,
- * to work around the 'lost local interrupt if more than 2 IRQ
- * sources per level' errata.
- */
-#define LOCAL_TIMER_VECTOR 0xef
-
-/*
- * First APIC vector available to drivers: (vectors 0x40-0xee)
- * we start at 0x41 to spread out vectors evenly between priority
- * levels. (0x82 is the syscall vector)
- */
-#define FIRST_DEVICE_VECTOR 0x41
-#define FIRST_SYSTEM_VECTOR 0xef
-
-extern int irq_vector[NR_IRQS];
-#define IO_APIC_VECTOR(irq) irq_vector[irq]
-
-/*
- * Various low-level irq details needed by irq.c, process.c,
- * time.c, io_apic.c and smp.c
- *
- * Interrupt entry/exit code at both C and assembly level
- */
-
-extern void mask_irq(unsigned int irq);
-extern void unmask_irq(unsigned int irq);
-extern void disable_8259A_irq(unsigned int irq);
-extern void enable_8259A_irq(unsigned int irq);
-extern int i8259A_irq_pending(unsigned int irq);
-extern void make_8259A_irq(unsigned int irq);
-extern void init_8259A(int aeoi);
-extern void FASTCALL(send_IPI_self(int vector));
-extern void init_VISWS_APIC_irqs(void);
-extern void setup_IO_APIC(void);
-extern void disable_IO_APIC(void);
-extern void print_IO_APIC(void);
-extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
-extern void send_IPI(int dest, int vector);
-
-extern unsigned long io_apic_irqs;
-
-extern atomic_t irq_err_count;
-extern atomic_t irq_mis_count;
-
-extern char _stext, _etext;
-
-#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
-
-#define __STR(x) #x
-#define STR(x) __STR(x)
-
-#define IRQ_NAME2(nr) nr##_interrupt(void)
-#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
-
-#define BUILD_IRQ(nr) \
-asmlinkage void IRQ_NAME(nr); \
-__asm__( \
-"\n.p2align\n" \
-SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \
- "push $"#nr"-256\n\t" \
- "jmp common_interrupt");
-
-extern unsigned long prof_cpu_mask;
-extern unsigned int * prof_buffer;
-extern unsigned long prof_len;
-extern unsigned long prof_shift;
-
-#include <xen/irq.h>
-
-#if defined(CONFIG_X86_IO_APIC)
-static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {
- if (IO_APIC_IRQ(i))
- send_IPI_self(IO_APIC_VECTOR(i));
-}
-#else
-static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {}
-#endif
-
-#endif /* _ASM_HW_IRQ_H */
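A common pattern with the declarations above is to mask a line while reprogramming the device behind it; disable_irq(), as opposed to disable_irq_nosync(), is the synchronous variant. A sketch, with reprogram_line and the device poke being illustrative:

    static void reprogram_line(unsigned int irq)
    {
        disable_irq(irq);
        /* ... rewrite the device's interrupt configuration ... */
        if (IO_APIC_IRQ(irq))
            printk("irq %u routed via vector 0x%x\n", irq, IO_APIC_VECTOR(irq));
        enable_irq(irq);
    }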
diff --git a/xen/include/asm-x86_64/mc146818rtc.h b/xen/include/asm-x86_64/mc146818rtc.h
deleted file mode 100644
index 8758528f7c..0000000000
--- a/xen/include/asm-x86_64/mc146818rtc.h
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Machine dependent access functions for RTC registers.
- */
-#ifndef _ASM_MC146818RTC_H
-#define _ASM_MC146818RTC_H
-
-#include <asm/io.h>
-#include <xen/spinlock.h>
-
-extern spinlock_t rtc_lock; /* serialize CMOS RAM access */
-
-/**********************************************************************
- * register summary
- **********************************************************************/
-#define RTC_SECONDS 0
-#define RTC_SECONDS_ALARM 1
-#define RTC_MINUTES 2
-#define RTC_MINUTES_ALARM 3
-#define RTC_HOURS 4
-#define RTC_HOURS_ALARM 5
-/* RTC_*_alarm is always true if 2 MSBs are set */
-# define RTC_ALARM_DONT_CARE 0xC0
-
-#define RTC_DAY_OF_WEEK 6
-#define RTC_DAY_OF_MONTH 7
-#define RTC_MONTH 8
-#define RTC_YEAR 9
-
-/* control registers - Moto names
- */
-#define RTC_REG_A 10
-#define RTC_REG_B 11
-#define RTC_REG_C 12
-#define RTC_REG_D 13
-
-/**********************************************************************
- * register details
- **********************************************************************/
-#define RTC_FREQ_SELECT RTC_REG_A
-
-/* update-in-progress - set to "1" 244 microsecs before RTC goes off the bus,
- * reset after update (may take 1.984ms @ 32768Hz RefClock) is complete,
- * totalling to a max high interval of 2.228 ms.
- */
-# define RTC_UIP 0x80
-# define RTC_DIV_CTL 0x70
- /* divider control: refclock values 4.194 / 1.049 MHz / 32.768 kHz */
-# define RTC_REF_CLCK_4MHZ 0x00
-# define RTC_REF_CLCK_1MHZ 0x10
-# define RTC_REF_CLCK_32KHZ 0x20
- /* 2 values for divider stage reset, others for "testing purposes only" */
-# define RTC_DIV_RESET1 0x60
-# define RTC_DIV_RESET2 0x70
- /* Periodic intr. / Square wave rate select. 0=none, 1=32.8kHz,... 15=2Hz */
-# define RTC_RATE_SELECT 0x0F
-
-/**********************************************************************/
-#define RTC_CONTROL RTC_REG_B
-# define RTC_SET 0x80 /* disable updates for clock setting */
-# define RTC_PIE 0x40 /* periodic interrupt enable */
-# define RTC_AIE 0x20 /* alarm interrupt enable */
-# define RTC_UIE 0x10 /* update-finished interrupt enable */
-# define RTC_SQWE 0x08 /* enable square-wave output */
-# define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */
-# define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */
-# define RTC_DST_EN 0x01 /* auto switch DST - works f. USA only */
-
-/**********************************************************************/
-#define RTC_INTR_FLAGS RTC_REG_C
-/* caution - cleared by read */
-# define RTC_IRQF 0x80 /* any of the following 3 is active */
-# define RTC_PF 0x40
-# define RTC_AF 0x20
-# define RTC_UF 0x10
-
-/**********************************************************************/
-#define RTC_VALID RTC_REG_D
-# define RTC_VRT 0x80 /* valid RAM and time */
-/**********************************************************************/
-
-/* example: !(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY)
- * determines if the following two #defines are needed
- */
-#ifndef BCD_TO_BIN
-#define BCD_TO_BIN(val) ((val)=((val)&15) + ((val)>>4)*10)
-#endif
-
-#ifndef BIN_TO_BCD
-#define BIN_TO_BCD(val) ((val)=(((val)/10)<<4) + (val)%10)
-#endif
-
-
-#ifndef RTC_PORT
-#define RTC_PORT(x) (0x70 + (x))
-#define RTC_ALWAYS_BCD 1 /* RTC operates in binary mode */
-#endif
-
-/*
- * The machines supported so far all access the RTC index register via
- * an ISA port access, but the way to access the data register differs ...
- */
-#define CMOS_READ(addr) ({ \
-outb_p((addr),RTC_PORT(0)); \
-inb_p(RTC_PORT(1)); \
-})
-#define CMOS_WRITE(val, addr) ({ \
-outb_p((addr),RTC_PORT(0)); \
-outb_p((val),RTC_PORT(1)); \
-})
-
-#define RTC_IRQ 8
-
-#endif /* _ASM_MC146818RTC_H */
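Reads of the clock must hold rtc_lock and convert from BCD unless control register B reports binary mode. A sketch using only the macros above; rtc_read_seconds is an illustrative helper:

    static inline unsigned int rtc_read_seconds(void)
    {
        unsigned long flags;
        unsigned int sec;

        spin_lock_irqsave(&rtc_lock, flags);
        sec = CMOS_READ(RTC_SECONDS);
        if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
            BCD_TO_BIN(sec);                /* macro converts 'sec' in place */
        spin_unlock_irqrestore(&rtc_lock, flags);
        return sec;
    }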
diff --git a/xen/include/asm-x86_64/mpspec.h b/xen/include/asm-x86_64/mpspec.h
deleted file mode 100644
index fa5d7aa2df..0000000000
--- a/xen/include/asm-x86_64/mpspec.h
+++ /dev/null
@@ -1,212 +0,0 @@
-#ifndef __ASM_MPSPEC_H
-#define __ASM_MPSPEC_H
-
-
-/*
- * Structure definitions for SMP machines following the
- * Intel Multiprocessing Specification 1.1 and 1.4.
- */
-
-/*
- * This tag identifies where the SMP configuration
- * information is.
- */
-
-#define SMP_MAGIC_IDENT (('_'<<24)|('P'<<16)|('M'<<8)|'_')
-
-/*
- * a maximum of 16 APICs with the current APIC ID architecture.
- * xAPICs can have up to 256. SAPICs have 16 ID bits.
- */
-#ifdef CONFIG_X86_CLUSTERED_APIC
-#define MAX_APICS 256
-#else
-#define MAX_APICS 16
-#endif
-
-#define MAX_MPC_ENTRY 1024
-
-struct intel_mp_floating
-{
- char mpf_signature[4]; /* "_MP_" */
- unsigned int mpf_physptr; /* Configuration table address */
- unsigned char mpf_length; /* Our length (paragraphs) */
- unsigned char mpf_specification;/* Specification version */
- unsigned char mpf_checksum; /* Checksum (makes sum 0) */
- unsigned char mpf_feature1; /* Standard or configuration ? */
- unsigned char mpf_feature2; /* Bit7 set for IMCR|PIC */
- unsigned char mpf_feature3; /* Unused (0) */
- unsigned char mpf_feature4; /* Unused (0) */
- unsigned char mpf_feature5; /* Unused (0) */
-};
-
-struct mp_config_table
-{
- char mpc_signature[4];
-#define MPC_SIGNATURE "PCMP"
- unsigned short mpc_length; /* Size of table */
- char mpc_spec; /* 0x01 */
- char mpc_checksum;
- char mpc_oem[8];
- char mpc_productid[12];
- unsigned int mpc_oemptr; /* 0 if not present */
- unsigned short mpc_oemsize; /* 0 if not present */
- unsigned short mpc_oemcount;
- unsigned int mpc_lapic; /* APIC address */
- unsigned int reserved;
-};
-
-/* Followed by entries */
-
-#define MP_PROCESSOR 0
-#define MP_BUS 1
-#define MP_IOAPIC 2
-#define MP_INTSRC 3
-#define MP_LINTSRC 4
-#define MP_TRANSLATION 192 /* Used by IBM NUMA-Q to describe node locality */
-
-struct mpc_config_processor
-{
- unsigned char mpc_type;
- unsigned char mpc_apicid; /* Local APIC number */
- unsigned char mpc_apicver; /* Its versions */
- unsigned char mpc_cpuflag;
-#define CPU_ENABLED 1 /* Processor is available */
-#define CPU_BOOTPROCESSOR 2 /* Processor is the BP */
- unsigned int mpc_cpufeature;
-#define CPU_STEPPING_MASK 0x0F
-#define CPU_MODEL_MASK 0xF0
-#define CPU_FAMILY_MASK 0xF00
- unsigned int mpc_featureflag; /* CPUID feature value */
- unsigned int mpc_reserved[2];
-};
-
-struct mpc_config_bus
-{
- unsigned char mpc_type;
- unsigned char mpc_busid;
- unsigned char mpc_bustype[6] __attribute((packed));
-};
-
-/* List of Bus Type string values, Intel MP Spec. */
-#define BUSTYPE_EISA "EISA"
-#define BUSTYPE_ISA "ISA"
-#define BUSTYPE_INTERN "INTERN" /* Internal BUS */
-#define BUSTYPE_MCA "MCA"
-#define BUSTYPE_VL "VL" /* Local bus */
-#define BUSTYPE_PCI "PCI"
-#define BUSTYPE_PCMCIA "PCMCIA"
-#define BUSTYPE_CBUS "CBUS"
-#define BUSTYPE_CBUSII "CBUSII"
-#define BUSTYPE_FUTURE "FUTURE"
-#define BUSTYPE_MBI "MBI"
-#define BUSTYPE_MBII "MBII"
-#define BUSTYPE_MPI "MPI"
-#define BUSTYPE_MPSA "MPSA"
-#define BUSTYPE_NUBUS "NUBUS"
-#define BUSTYPE_TC "TC"
-#define BUSTYPE_VME "VME"
-#define BUSTYPE_XPRESS "XPRESS"
-
-struct mpc_config_ioapic
-{
- unsigned char mpc_type;
- unsigned char mpc_apicid;
- unsigned char mpc_apicver;
- unsigned char mpc_flags;
-#define MPC_APIC_USABLE 0x01
- unsigned int mpc_apicaddr;
-};
-
-struct mpc_config_intsrc
-{
- unsigned char mpc_type;
- unsigned char mpc_irqtype;
- unsigned short mpc_irqflag;
- unsigned char mpc_srcbus;
- unsigned char mpc_srcbusirq;
- unsigned char mpc_dstapic;
- unsigned char mpc_dstirq;
-};
-
-enum mp_irq_source_types {
- mp_INT = 0,
- mp_NMI = 1,
- mp_SMI = 2,
- mp_ExtINT = 3
-};
-
-#define MP_IRQDIR_DEFAULT 0
-#define MP_IRQDIR_HIGH 1
-#define MP_IRQDIR_LOW 3
-
-
-struct mpc_config_lintsrc
-{
- unsigned char mpc_type;
- unsigned char mpc_irqtype;
- unsigned short mpc_irqflag;
- unsigned char mpc_srcbusid;
- unsigned char mpc_srcbusirq;
- unsigned char mpc_destapic;
-#define MP_APIC_ALL 0xFF
- unsigned char mpc_destapiclint;
-};
-
-struct mp_config_oemtable
-{
- char oem_signature[4];
-#define MPC_OEM_SIGNATURE "_OEM"
- unsigned short oem_length; /* Size of table */
- char oem_rev; /* 0x01 */
- char oem_checksum;
- char mpc_oem[8];
-};
-
-struct mpc_config_translation
-{
- unsigned char mpc_type;
- unsigned char trans_len;
- unsigned char trans_type;
- unsigned char trans_quad;
- unsigned char trans_global;
- unsigned char trans_local;
- unsigned short trans_reserved;
-};
-
-/*
- * Default configurations
- *
- * 1 2 CPU ISA 82489DX
- * 2 2 CPU EISA 82489DX neither IRQ 0 timer nor IRQ 13 DMA chaining
- * 3 2 CPU EISA 82489DX
- * 4 2 CPU MCA 82489DX
- * 5 2 CPU ISA+PCI
- * 6 2 CPU EISA+PCI
- * 7 2 CPU MCA+PCI
- */
-
-#define MAX_MP_BUSSES 257
-#define MAX_IRQ_SOURCES (MAX_MP_BUSSES*4)
-enum mp_bustype {
- MP_BUS_ISA = 1,
- MP_BUS_EISA,
- MP_BUS_PCI,
- MP_BUS_MCA
-};
-extern int mp_bus_id_to_type [MAX_MP_BUSSES];
-extern int mp_bus_id_to_node [MAX_MP_BUSSES];
-extern int mp_bus_id_to_local [MAX_MP_BUSSES];
-extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES];
-extern int quad_local_to_mp_bus_id [NR_CPUS/4][4];
-
-extern unsigned int boot_cpu_physical_apicid;
-extern int smp_found_config;
-extern void find_smp_config (void);
-extern void get_smp_config (void);
-extern int apic_version [MAX_APICS];
-extern int mp_current_pci_id;
-extern unsigned long mp_lapic_addr;
-
-#endif
-
diff --git a/xen/include/asm-x86_64/param.h b/xen/include/asm-x86_64/param.h
deleted file mode 100644
index 601733b463..0000000000
--- a/xen/include/asm-x86_64/param.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef _ASMx86_64_PARAM_H
-#define _ASMx86_64_PARAM_H
-
-#ifndef HZ
-#define HZ 100
-#endif
-
-#define EXEC_PAGESIZE 4096
-
-#ifndef NGROUPS
-#define NGROUPS 32
-#endif
-
-#ifndef NOGROUP
-#define NOGROUP (-1)
-#endif
-
-#define MAXHOSTNAMELEN 64 /* max length of hostname */
-
-#ifdef __KERNEL__
-# define CLOCKS_PER_SEC 100 /* frequency at which times() counts */
-#endif
-
-#endif
diff --git a/xen/include/asm-x86_64/pci.h b/xen/include/asm-x86_64/pci.h
deleted file mode 100644
index 988670995c..0000000000
--- a/xen/include/asm-x86_64/pci.h
+++ /dev/null
@@ -1,336 +0,0 @@
-#ifndef __x8664_PCI_H
-#define __x8664_PCI_H
-
-#include <xen/config.h>
-#include <asm/io.h>
-
-
-/* Can be used to override the logic in pci_scan_bus for skipping
- already-configured bus numbers - to be used for buggy BIOSes
- or architectures with incomplete PCI setup by the loader */
-
-#ifdef CONFIG_PCI
-extern unsigned int pcibios_assign_all_busses(void);
-#else
-#define pcibios_assign_all_busses() 0
-#endif
-
-extern unsigned long pci_mem_start;
-#define PCIBIOS_MIN_IO 0x1000
-#define PCIBIOS_MIN_MEM (pci_mem_start)
-
-void pcibios_set_master(struct pci_dev *dev);
-void pcibios_penalize_isa_irq(int irq);
-struct irq_routing_table *pcibios_get_irq_routing_table(void);
-int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
-
-#include <xen/types.h>
-#include <xen/slab.h>
-#include <asm/scatterlist.h>
-#include <asm/io.h>
-#include <asm/page.h>
-
-struct pci_dev;
-extern int force_mmu;
-
-/* Allocate and map kernel buffer using consistent mode DMA for a device.
- * hwdev should be valid struct pci_dev pointer for PCI devices,
- * NULL for PCI-like buses (ISA, EISA).
- * Returns non-NULL cpu-view pointer to the buffer if successful and
- * sets *dma_addrp to the pci side dma address as well, else *dma_addrp
- * is undefined.
- */
-extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
- dma_addr_t *dma_handle);
-
-/* Free and unmap a consistent DMA buffer.
- * cpu_addr is what was returned from pci_alloc_consistent,
- * size must be the same as what was passed into pci_alloc_consistent,
- * and likewise dma_addr must be the same as what *dma_addrp was set to.
- *
- * References to the memory and mappings associated with cpu_addr/dma_addr
- * past this call are illegal.
- */
-extern void pci_free_consistent(struct pci_dev *hwdev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
-
-#ifdef CONFIG_GART_IOMMU
-
-/* Map a single buffer of the indicated size for DMA in streaming mode.
- * The 32-bit bus address to use is returned.
- *
- * Once the device is given the dma address, the device owns this memory
- * until either pci_unmap_single or pci_dma_sync_single is performed.
- */
-extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
- size_t size, int direction);
-
-
-void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t addr,
- size_t size, int direction);
-
-/*
- * pci_{map,unmap}_single_page maps a kernel page to a dma_addr_t. Identical
- * to pci_map_single, but takes a struct pfn_info instead of a virtual address
- */
-
-#define pci_map_page(dev,page,offset,size,dir) \
- pci_map_single((dev), page_address(page)+(offset), (size), (dir))
-
-#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
- dma_addr_t ADDR_NAME;
-#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
- __u32 LEN_NAME;
-#define pci_unmap_addr(PTR, ADDR_NAME) \
- ((PTR)->ADDR_NAME)
-#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
- (((PTR)->ADDR_NAME) = (VAL))
-#define pci_unmap_len(PTR, LEN_NAME) \
- ((PTR)->LEN_NAME)
-#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
- (((PTR)->LEN_NAME) = (VAL))
-
-static inline void pci_dma_sync_single(struct pci_dev *hwdev,
- dma_addr_t dma_handle,
- size_t size, int direction)
-{
- if (direction == PCI_DMA_NONE)
- out_of_line_bug();
-}
-
-static inline void pci_dma_sync_sg(struct pci_dev *hwdev,
- struct scatterlist *sg,
- int nelems, int direction)
-{
- if (direction == PCI_DMA_NONE)
- out_of_line_bug();
-}
-
-/* The PCI address space does equal the physical memory
- * address space. The networking and block device layers use
- * this boolean for bounce buffer decisions.
- */
-#define PCI_DMA_BUS_IS_PHYS (0)
-
-
-#else
-static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
- size_t size, int direction)
-{
- dma_addr_t addr;
-
- if (direction == PCI_DMA_NONE)
- out_of_line_bug();
- addr = virt_to_bus(ptr);
-
- /*
- * This is gross, but what should I do.
- * Unfortunately drivers do not test the return value of this.
- */
- if ((addr+size) & ~hwdev->dma_mask)
- out_of_line_bug();
- return addr;
-}
-
-static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
- size_t size, int direction)
-{
- if (direction == PCI_DMA_NONE)
- out_of_line_bug();
- /* Nothing to do */
-}
-
-static inline dma_addr_t pci_map_page(struct pci_dev *hwdev, struct pfn_info *page,
- unsigned long offset, size_t size, int direction)
-{
- dma_addr_t addr;
- if (direction == PCI_DMA_NONE)
- out_of_line_bug();
- addr = (page - frame_table) * PAGE_SIZE + offset;
- if ((addr+size) & ~hwdev->dma_mask)
- out_of_line_bug();
- return addr;
-}
-
-/* pci_unmap_{page,single} is a nop so... */
-#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
-#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
-#define pci_unmap_addr(PTR, ADDR_NAME) (0)
-#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
-#define pci_unmap_len(PTR, LEN_NAME) (0)
-#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
-
-#define BAD_DMA_ADDRESS (-1UL)
-
-
-/* Unmap a set of streaming mode DMA translations.
- * Again, cpu read rules concerning calls here are the same as for
- * pci_unmap_single() above.
- */
-static inline void pci_unmap_sg(struct pci_dev *dev, struct scatterlist *sg,
- int nents, int dir)
-{
- if (dir == PCI_DMA_NONE)
- out_of_line_bug();
-}
-
-
-/* Map a set of buffers described by scatterlist in streaming
- * mode for DMA. This is the scatter-gather version of the
- * above pci_map_single interface. Here the scatter gather list
- * elements are each tagged with the appropriate dma address
- * and length. They are obtained via sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- * DMA address/length pairs than there are SG table elements.
- * (for example via virtual mapping capabilities)
- * The routine returns the number of addr/length pairs actually
- * used, at most nents.
- *
- * Device ownership issues as mentioned above for pci_map_single are
- * the same here.
- */
-static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
- int nents, int direction)
-{
- int i;
-
- if (direction == PCI_DMA_NONE)
- out_of_line_bug();
-
- /*
- * temporary 2.4 hack
- */
- for (i = 0; i < nents; i++ ) {
- struct scatterlist *s = &sg[i];
- void *addr = s->address;
- if (addr) {
- if (s->page || s->offset)
- out_of_line_bug();
- } else if (s->page)
- addr = page_address(s->page) + s->offset;
-#if 0
- /* Invalid check, since address==0 is valid. */
- else
- BUG();
-#endif
- s->dma_address = pci_map_single(hwdev, addr, s->length, direction);
- if (unlikely(s->dma_address == BAD_DMA_ADDRESS))
- goto error;
- }
- return nents;
-
- error:
- pci_unmap_sg(hwdev, sg, i, direction);
- return 0;
-}
-
-
-/* Make physical memory consistent for a single
- * streaming mode DMA translation after a transfer.
- *
- * If you perform a pci_map_single() but wish to interrogate the
- * buffer using the cpu, yet do not wish to teardown the PCI dma
- * mapping, you must call this function before doing so. At the
- * next point you give the PCI dma address back to the card, the
- * device again owns the buffer.
- */
-static inline void pci_dma_sync_single(struct pci_dev *hwdev,
- dma_addr_t dma_handle,
- size_t size, int direction)
-{
- if (direction == PCI_DMA_NONE)
- out_of_line_bug();
- flush_write_buffers();
-}
-
-/* Make physical memory consistent for a set of streaming
- * mode DMA translations after a transfer.
- *
- * The same as pci_dma_sync_single but for a scatter-gather list,
- * same rules and usage.
- */
-static inline void pci_dma_sync_sg(struct pci_dev *hwdev,
- struct scatterlist *sg,
- int nelems, int direction)
-{
- if (direction == PCI_DMA_NONE)
- out_of_line_bug();
- flush_write_buffers();
-}
-
-#define PCI_DMA_BUS_IS_PHYS 1
-
-#endif
-
-#define pci_unmap_page pci_unmap_single
-
-/* Return whether the given PCI device DMA address mask can
- * be supported properly. For example, if your device can
- * only drive the low 24-bits during PCI bus mastering, then
- * you would pass 0x00ffffff as the mask to this function.
- */
-static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
-{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA..
- */
- if(mask < 0x00ffffff)
- return 0;
-
- return 1;
-}
-
-/* This is always fine. */
-#define pci_dac_dma_supported(pci_dev, mask) (1)
-
-static __inline__ dma64_addr_t
-pci_dac_page_to_dma(struct pci_dev *pdev, struct pfn_info *page, unsigned long offset, int direction)
-{
- return ((dma64_addr_t) page_to_bus(page) +
- (dma64_addr_t) offset);
-}
-
-static __inline__ struct pfn_info *
-pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
-{
- unsigned long poff = (dma_addr >> PAGE_SHIFT);
- return frame_table + poff;
-}
-
-static __inline__ unsigned long
-pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
-{
- return (dma_addr & ~PAGE_MASK);
-}
-
-static __inline__ void
-pci_dac_dma_sync_single(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
-{
- flush_write_buffers();
-}
-
-/* These macros should be used after a pci_map_sg call has been done
- * to get bus addresses of each of the SG entries and their lengths.
- * You should only work with the number of sg entries pci_map_sg
- * returns.
- */
-#define sg_dma_address(sg) ((sg)->dma_address)
-#define sg_dma_len(sg) ((sg)->length)
-
-/* Return the index of the PCI controller for device. */
-static inline int pci_controller_num(struct pci_dev *dev)
-{
- return 0;
-}
-
-#if 0 /* XXX Not in land of Xen XXX */
-#define HAVE_PCI_MMAP
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
-#endif
-
-
-#endif /* __x8664_PCI_H */
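For reference, a minimal sketch of how a driver would typically drive the streaming-DMA interface removed above. The device pointer, buffer, length and the PCI_DMA_TODEVICE direction are assumptions made for the example, and the BAD_DMA_ADDRESS check simply follows the convention pci_map_sg() uses above; this is not code from the tree.

/* Illustrative sketch only -- 'dev', 'buf' and 'len' are hypothetical. */
static int send_buffer(struct pci_dev *dev, void *buf, size_t len)
{
    dma_addr_t bus;

    /* Hand the buffer to the device; it owns the memory until unmap/sync. */
    bus = pci_map_single(dev, buf, len, PCI_DMA_TODEVICE);
    if (bus == BAD_DMA_ADDRESS)
        return -1;

    /* ... program 'bus' into the device and wait for the transfer ... */

    /* The CPU may touch the buffer again only after this (or after a
     * pci_dma_sync_single() if the mapping is kept for reuse). */
    pci_unmap_single(dev, bus, len, PCI_DMA_TODEVICE);
    return 0;
}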
diff --git a/xen/include/asm-x86_64/pdb.h b/xen/include/asm-x86_64/pdb.h
deleted file mode 100644
index 35b926eb17..0000000000
--- a/xen/include/asm-x86_64/pdb.h
+++ /dev/null
@@ -1,51 +0,0 @@
-
-/*
- * pervasive debugger
- *
- * alex ho
- * 2004
- * university of cambridge computer laboratory
- */
-
-
-#ifndef __PDB_H__
-#define __PDB_H__
-
-#include <asm/ptrace.h>
-#include <xen/list.h>
-
-extern int pdb_initialized;
-extern int pdb_com_port;
-extern int pdb_high_bit;
-
-extern void initialize_pdb(void);
-
-/* Get/set values from generic debug interface. */
-extern int pdb_set_values(domid_t domain, u_char *buffer,
- unsigned long addr, int length);
-extern int pdb_get_values(domid_t domain, u_char *buffer,
- unsigned long addr, int length);
-
-/* External entry points. */
-extern int pdb_handle_exception(int exceptionVector,
- struct pt_regs *xen_regs);
-extern int pdb_serial_input(u_char c, struct pt_regs *regs);
-extern void pdb_do_debug(dom0_op_t *op);
-
-/* Breakpoints. */
-struct pdb_breakpoint
-{
- struct list_head list;
- unsigned long address;
-};
-extern void pdb_bkpt_add (unsigned long address);
-extern struct pdb_breakpoint* pdb_bkpt_search (unsigned long address);
-extern int pdb_bkpt_remove (unsigned long address);
-
-/* Conversions. */
-extern int hex (char);
-extern char *mem2hex (char *, char *, int);
-extern char *hex2mem (char *, char *, int);
-extern int hexToInt (char **ptr, int *intValue);
-
-#endif /* __PDB_H__ */
diff --git a/xen/include/asm-x86_64/pgalloc.h b/xen/include/asm-x86_64/pgalloc.h
deleted file mode 100644
index 559e33194f..0000000000
--- a/xen/include/asm-x86_64/pgalloc.h
+++ /dev/null
@@ -1,43 +0,0 @@
-#ifndef _X86_64_PGALLOC_H
-#define _X86_64_PGALLOC_H
-
-#include <xen/config.h>
-#include <xen/sched.h>
-#include <asm/processor.h>
-#include <asm/fixmap.h>
-
-/* XXX probably should be moved to flushtlb.h */
-
-/*
- * TLB flushing:
- *
- * - flush_tlb() flushes the current mm struct TLBs
- * - flush_tlb_all() flushes all processes TLBs
- * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
- */
-
-#ifndef CONFIG_SMP
-
-#define flush_tlb() __flush_tlb()
-#define flush_tlb_all() __flush_tlb()
-#define flush_tlb_all_pge() __flush_tlb_pge()
-#define local_flush_tlb() __flush_tlb()
-#define flush_tlb_cpu(_cpu) __flush_tlb()
-#define flush_tlb_mask(_mask) __flush_tlb()
-#define try_flush_tlb_mask(_mask) __flush_tlb()
-
-#else
-#include <xen/smp.h>
-
-extern int try_flush_tlb_mask(unsigned long mask);
-extern void flush_tlb_mask(unsigned long mask);
-extern void flush_tlb_all_pge(void);
-
-#define flush_tlb() __flush_tlb()
-#define flush_tlb_all() flush_tlb_mask((1 << smp_num_cpus) - 1)
-#define local_flush_tlb() __flush_tlb()
-#define flush_tlb_cpu(_cpu) flush_tlb_mask(1 << (_cpu))
-
-#endif
-
-#endif /* _X86_64_PGALLOC_H */
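The mask argument to the flush helpers above carries one bit per CPU. A small hedged sketch, with the CPU numbers chosen arbitrarily:

/* Illustrative only: invalidate the TLBs of CPUs 0 and 2, then of all CPUs. */
static void example_flush(void)
{
    flush_tlb_mask((1UL << 0) | (1UL << 2));  /* the selected CPUs only */
    flush_tlb_all();                          /* every CPU              */
}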
diff --git a/xen/include/asm-x86_64/rwlock.h b/xen/include/asm-x86_64/rwlock.h
deleted file mode 100644
index 8920e5829f..0000000000
--- a/xen/include/asm-x86_64/rwlock.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/* include/asm-x86_64/rwlock.h
- *
- * Helpers used by both rw spinlocks and rw semaphores.
- *
- * Based in part on code from semaphore.h and
- * spinlock.h Copyright 1996 Linus Torvalds.
- *
- * Copyright 1999 Red Hat, Inc.
- * Copyright 2001,2002 SuSE labs
- *
- * Written by Benjamin LaHaise.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#ifndef _ASM_X86_64_RWLOCK_H
-#define _ASM_X86_64_RWLOCK_H
-
-#define RW_LOCK_BIAS 0x01000000
-#define RW_LOCK_BIAS_STR "0x01000000"
-
-#define __build_read_lock_ptr(rw, helper) \
- asm volatile(LOCK "subl $1,(%0)\n\t" \
- "js 2f\n" \
- "1:\n" \
- ".section .text.lock,\"ax\"\n" \
- "2:\tcall " helper "\n\t" \
- "jmp 1b\n" \
- ".previous" \
- ::"a" (rw) : "memory")
-
-#define __build_read_lock_const(rw, helper) \
- asm volatile(LOCK "subl $1,%0\n\t" \
- "js 2f\n" \
- "1:\n" \
- ".section .text.lock,\"ax\"\n" \
- "2:\tpushq %%rax\n\t" \
- "leaq %0,%%rax\n\t" \
- "call " helper "\n\t" \
- "popq %%rax\n\t" \
- "jmp 1b\n" \
- ".previous" \
- :"=m" (*(volatile int *)rw) : : "memory")
-
-#define __build_read_lock(rw, helper) do { \
- if (__builtin_constant_p(rw)) \
- __build_read_lock_const(rw, helper); \
- else \
- __build_read_lock_ptr(rw, helper); \
- } while (0)
-
-#define __build_write_lock_ptr(rw, helper) \
- asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
- "jnz 2f\n" \
- "1:\n" \
- ".section .text.lock,\"ax\"\n" \
- "2:\tcall " helper "\n\t" \
- "jmp 1b\n" \
- ".previous" \
- ::"a" (rw) : "memory")
-
-#define __build_write_lock_const(rw, helper) \
- asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
- "jnz 2f\n" \
- "1:\n" \
- ".section .text.lock,\"ax\"\n" \
- "2:\tpushq %%rax\n\t" \
- "leaq %0,%%rax\n\t" \
- "call " helper "\n\t" \
- "popq %%rax\n\t" \
- "jmp 1b\n" \
- ".previous" \
- :"=m" (*(volatile int *)rw) : : "memory")
-
-#define __build_write_lock(rw, helper) do { \
- if (__builtin_constant_p(rw)) \
- __build_write_lock_const(rw, helper); \
- else \
- __build_write_lock_ptr(rw, helper); \
- } while (0)
-
-#endif
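The locked subl sequences above implement a biased reader/writer count on the lock word. The following standalone, deliberately non-atomic C sketch only illustrates the arithmetic the assembly performs; it is not a lock implementation.

#include <stdio.h>

#define BIAS 0x01000000  /* mirrors RW_LOCK_BIAS */

/* Readers subtract 1 and fail if the result goes negative (a writer holds
 * the lock); a writer subtracts the whole bias and fails unless the result
 * is exactly zero (no readers and no writer). */
int main(void)
{
    int lock = BIAS;

    lock -= 1;                       /* read_lock: result stays positive, ok */
    printf("after read_lock:  %#x\n", (unsigned)lock);

    lock -= BIAS;                    /* write_lock attempt with a reader in  */
    printf("write_lock %s\n", lock == 0 ? "acquired" : "fails (reader present)");
    lock += BIAS;                    /* the failed attempt restores the bias */

    lock += 1;                       /* read_unlock                          */
    lock -= BIAS;                    /* write_lock now hits exactly zero     */
    printf("write_lock %s\n", lock == 0 ? "acquired" : "fails");
    return 0;
}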
diff --git a/xen/include/asm-x86_64/scatterlist.h b/xen/include/asm-x86_64/scatterlist.h
deleted file mode 100644
index 1597d48eb0..0000000000
--- a/xen/include/asm-x86_64/scatterlist.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef _X8664_SCATTERLIST_H
-#define _X8664_SCATTERLIST_H
-
-struct scatterlist {
- char * address; /* Location data is to be transferred to, NULL for
- * highmem page */
- struct pfn_info * page; /* Location for highmem page, if any */
- unsigned int offset;/* for highmem, page offset */
-
- dma_addr_t dma_address;
- unsigned int length;
-};
-
-#define ISA_DMA_THRESHOLD (0x00ffffff)
-
-#endif /* !(_X8664_SCATTERLIST_H) */
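A hedged sketch of filling one entry by hand and passing it to the pci_map_sg() defined in the (also removed) asm-x86_64 pci.h. 'dev', 'buf', 'len' and the PCI_DMA_TODEVICE direction are assumptions for the example:

/* Illustrative only: describe one lowmem buffer and map it for DMA. */
static int map_one(struct pci_dev *dev, void *buf, unsigned int len)
{
    struct scatterlist sg;

    sg.address = buf;     /* lowmem buffer: use its virtual address */
    sg.page    = NULL;    /* no highmem page                        */
    sg.offset  = 0;
    sg.length  = len;

    if (pci_map_sg(dev, &sg, 1, PCI_DMA_TODEVICE) == 0)
        return -1;

    /* The device may now DMA using sg_dma_address(&sg) / sg_dma_len(&sg). */
    return 0;
}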
diff --git a/xen/include/asm-x86_64/smp.h b/xen/include/asm-x86_64/smp.h
deleted file mode 100644
index bdc1b40e25..0000000000
--- a/xen/include/asm-x86_64/smp.h
+++ /dev/null
@@ -1,103 +0,0 @@
-#ifndef __ASM_SMP_H
-#define __ASM_SMP_H
-
-#include <xen/config.h>
-#include <asm/ptrace.h>
-
-#ifdef CONFIG_SMP
-#ifndef ASSEMBLY
-#include <asm/pda.h>
-
-/*
- * Private routines/data
- */
-
-extern void smp_alloc_memory(void);
-extern unsigned long phys_cpu_present_map;
-extern unsigned long cpu_online_map;
-extern volatile unsigned long smp_invalidate_needed;
-extern int pic_mode;
-extern void smp_flush_tlb(void);
-extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
-extern void smp_invalidate_rcv(void); /* Process an NMI */
-extern void (*mtrr_hook) (void);
-extern void zap_low_mappings (void);
-
-/*
- * On x86 all CPUs are mapped 1:1 to the APIC space.
- * This simplifies scheduling and IPI sending and
- * compresses data structures.
- */
-static inline int cpu_logical_map(int cpu)
-{
- return cpu;
-}
-static inline int cpu_number_map(int cpu)
-{
- return cpu;
-}
-
-/*
- * Some lowlevel functions might want to know about
- * the real APIC ID <-> CPU # mapping.
- */
-#define MAX_APICID 256
-extern volatile int cpu_to_physical_apicid[NR_CPUS];
-extern volatile int physical_apicid_to_cpu[MAX_APICID];
-extern volatile int cpu_to_logical_apicid[NR_CPUS];
-extern volatile int logical_apicid_to_cpu[MAX_APICID];
-
-/*
- * General functions that each host system must provide.
- */
-
-extern void smp_store_cpu_info(int id);	/* Store per-CPU info (like the initial udelay numbers) */
-
-/*
- * This function is needed by all SMP systems. It must _always_ be valid
- * from the initial startup. We map APIC_BASE very early in page_setup(),
- * so this is correct in the x86 case.
- */
-
-#define smp_processor_id() read_pda(cpunumber)
-
-#include <asm/fixmap.h>
-#include <asm/apic.h>
-
-static __inline int hard_smp_processor_id(void)
-{
- /* we don't want to mark this access volatile - bad code generation */
- return GET_APIC_ID(*(unsigned *)(APIC_BASE+APIC_ID));
-}
-
-extern int apic_disabled;
-extern int slow_smp_processor_id(void);
-#define safe_smp_processor_id() \
- (!apic_disabled ? hard_smp_processor_id() : slow_smp_processor_id())
-
-#endif /* !ASSEMBLY */
-
-#define NO_PROC_ID 0xFF /* No processor magic marker */
-
-/*
- * This magic constant controls our willingness to transfer
- * a process across CPUs. Such a transfer incurs misses on the L1
- * cache, and on a P6 or P5 with multiple L2 caches L2 hits. My
- * gut feeling is this will vary by board in value. For a board
- * with separate L2 cache it probably depends also on the RSS, and
- * for a board with shared L2 cache it ought to decay fast as other
- * processes are run.
- */
-
-#define PROC_CHANGE_PENALTY 15 /* Schedule penalty */
-
-
-
-#endif
-#define INT_DELIVERY_MODE 1 /* logical delivery */
-#define TARGET_CPUS 1
-
-#ifndef CONFIG_SMP
-#define safe_smp_processor_id() 0
-#endif
-#endif
diff --git a/xen/include/asm-x86_64/smpboot.h b/xen/include/asm-x86_64/smpboot.h
deleted file mode 100644
index 4017902c69..0000000000
--- a/xen/include/asm-x86_64/smpboot.h
+++ /dev/null
@@ -1,130 +0,0 @@
-#ifndef __ASM_SMPBOOT_H
-#define __ASM_SMPBOOT_H
-
-/* enum for clustered_apic_mode values */
-enum {
- CLUSTERED_APIC_NONE = 0,
- CLUSTERED_APIC_XAPIC,
- CLUSTERED_APIC_NUMAQ
-};
-
-#ifdef CONFIG_X86_CLUSTERED_APIC
-extern unsigned int apic_broadcast_id;
-extern unsigned char clustered_apic_mode;
-extern unsigned char esr_disable;
-extern unsigned char int_delivery_mode;
-extern unsigned int int_dest_addr_mode;
-extern int cyclone_setup(char*);
-
-static inline void detect_clustered_apic(char* oem, char* prod)
-{
- /*
- * Can't recognize Summit xAPICs at present, so use the OEM ID.
- */
- if (!strncmp(oem, "IBM ENSW", 8) && !strncmp(prod, "VIGIL SMP", 9)){
- clustered_apic_mode = CLUSTERED_APIC_XAPIC;
- apic_broadcast_id = APIC_BROADCAST_ID_XAPIC;
- int_dest_addr_mode = APIC_DEST_PHYSICAL;
- int_delivery_mode = dest_Fixed;
- esr_disable = 1;
- /*Start cyclone clock*/
- cyclone_setup(0);
- }
- else if (!strncmp(oem, "IBM ENSW", 8) && !strncmp(prod, "RUTHLESS SMP", 9)){
- clustered_apic_mode = CLUSTERED_APIC_XAPIC;
- apic_broadcast_id = APIC_BROADCAST_ID_XAPIC;
- int_dest_addr_mode = APIC_DEST_PHYSICAL;
- int_delivery_mode = dest_Fixed;
- esr_disable = 1;
- /*Start cyclone clock*/
- cyclone_setup(0);
- }
- else if (!strncmp(oem, "IBM NUMA", 8)){
- clustered_apic_mode = CLUSTERED_APIC_NUMAQ;
- apic_broadcast_id = APIC_BROADCAST_ID_APIC;
- int_dest_addr_mode = APIC_DEST_LOGICAL;
- int_delivery_mode = dest_LowestPrio;
- esr_disable = 1;
- }
-}
-#define INT_DEST_ADDR_MODE (int_dest_addr_mode)
-#define INT_DELIVERY_MODE (int_delivery_mode)
-#else /* CONFIG_X86_CLUSTERED_APIC */
-#define apic_broadcast_id (APIC_BROADCAST_ID_APIC)
-#define clustered_apic_mode (CLUSTERED_APIC_NONE)
-#define esr_disable (0)
-#define detect_clustered_apic(x,y)
-#define INT_DEST_ADDR_MODE (APIC_DEST_LOGICAL) /* logical delivery */
-#define INT_DELIVERY_MODE (dest_LowestPrio)
-#endif /* CONFIG_X86_CLUSTERED_APIC */
-#define BAD_APICID 0xFFu
-
-#define TRAMPOLINE_LOW phys_to_virt((clustered_apic_mode == CLUSTERED_APIC_NUMAQ)?0x8:0x467)
-#define TRAMPOLINE_HIGH phys_to_virt((clustered_apic_mode == CLUSTERED_APIC_NUMAQ)?0xa:0x469)
-
-#define boot_cpu_apicid ((clustered_apic_mode == CLUSTERED_APIC_NUMAQ)?boot_cpu_logical_apicid:boot_cpu_physical_apicid)
-
-extern unsigned char raw_phys_apicid[NR_CPUS];
-
-/*
- * How to map from the cpu_present_map
- */
-static inline int cpu_present_to_apicid(int mps_cpu)
-{
- if (clustered_apic_mode == CLUSTERED_APIC_XAPIC)
- return raw_phys_apicid[mps_cpu];
- if(clustered_apic_mode == CLUSTERED_APIC_NUMAQ)
- return (mps_cpu/4)*16 + (1<<(mps_cpu%4));
- return mps_cpu;
-}
-
-static inline unsigned long apicid_to_phys_cpu_present(int apicid)
-{
- if(clustered_apic_mode)
- return 1UL << (((apicid >> 4) << 2) + (apicid & 0x3));
- return 1UL << apicid;
-}
-
-#define physical_to_logical_apicid(phys_apic) ( (1ul << (phys_apic & 0x3)) | (phys_apic & 0xF0u) )
-
-/*
- * Mappings between logical cpu number and logical / physical apicid
- * The first four macros are trivial, but it keeps the abstraction consistent
- */
-extern volatile int logical_apicid_2_cpu[];
-extern volatile int cpu_2_logical_apicid[];
-extern volatile int physical_apicid_2_cpu[];
-extern volatile int cpu_2_physical_apicid[];
-
-#define logical_apicid_to_cpu(apicid) logical_apicid_2_cpu[apicid]
-#define cpu_to_logical_apicid(cpu) cpu_2_logical_apicid[cpu]
-#define physical_apicid_to_cpu(apicid) physical_apicid_2_cpu[apicid]
-#define cpu_to_physical_apicid(cpu) cpu_2_physical_apicid[cpu]
-#ifdef CONFIG_MULTIQUAD /* use logical IDs to bootstrap */
-#define boot_apicid_to_cpu(apicid) logical_apicid_2_cpu[apicid]
-#define cpu_to_boot_apicid(cpu) cpu_2_logical_apicid[cpu]
-#else /* !CONFIG_MULTIQUAD */ /* use physical IDs to bootstrap */
-#define boot_apicid_to_cpu(apicid) physical_apicid_2_cpu[apicid]
-#define cpu_to_boot_apicid(cpu) cpu_2_physical_apicid[cpu]
-#endif /* CONFIG_MULTIQUAD */
-
-#ifdef CONFIG_X86_CLUSTERED_APIC
-static inline int target_cpus(void)
-{
- static int cpu;
- switch(clustered_apic_mode){
- case CLUSTERED_APIC_NUMAQ:
- /* Broadcast intrs to local quad only. */
- return APIC_BROADCAST_ID_APIC;
- case CLUSTERED_APIC_XAPIC:
- /*round robin the interrupts*/
- cpu = (cpu+1)%smp_num_cpus;
- return cpu_to_physical_apicid(cpu);
-	default:
-		break;
-	}
- return cpu_online_map;
-}
-#else
-#define target_cpus() (0xFF)
-#endif
-#endif
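The NUMAQ branch of cpu_present_to_apicid() above packs the quad number into the high nibble and a one-hot CPU bit into the low nibble. A standalone illustration of that arithmetic (the loop bound is arbitrary):

#include <stdio.h>

/* MPS CPU n sits in quad n/4 and gets bit n%4 as its logical ID in the quad. */
int main(void)
{
    int mps_cpu;

    for (mps_cpu = 0; mps_cpu < 8; mps_cpu++) {
        int logical = (mps_cpu / 4) * 16 + (1 << (mps_cpu % 4));
        printf("MPS cpu %d -> quad %d, logical apicid 0x%02x\n",
               mps_cpu, mps_cpu / 4, logical);
    }
    return 0;   /* e.g. cpu 5 -> quad 1, logical apicid 0x12 */
}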
diff --git a/xen/include/asm-x86_64/softirq.h b/xen/include/asm-x86_64/softirq.h
deleted file mode 100644
index 292baac6ea..0000000000
--- a/xen/include/asm-x86_64/softirq.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef __ASM_SOFTIRQ_H
-#define __ASM_SOFTIRQ_H
-
-#include <asm/atomic.h>
-#include <asm/hardirq.h>
-
-#define cpu_bh_enable(cpu) \
- do { barrier(); local_bh_count(cpu)--; } while (0)
-#define cpu_bh_disable(cpu) \
- do { local_bh_count(cpu)++; barrier(); } while (0)
-
-#define local_bh_disable() cpu_bh_disable(smp_processor_id())
-#define local_bh_enable() cpu_bh_enable(smp_processor_id())
-
-#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
-
-#endif /* __ASM_SOFTIRQ_H */
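The bh count above is per-CPU and nests. A hedged usage sketch; the data being protected is hypothetical:

/* Illustrative only: keep softirqs off this CPU while touching shared state. */
static void touch_shared_state(void)
{
    local_bh_disable();          /* bump the count: softirqs held off here   */
    local_bh_disable();          /* nesting is fine, the count just goes up  */
    /* ... manipulate data that is also used from softirq context ... */
    local_bh_enable();           /* still disabled until the count drops...  */
    local_bh_enable();           /* ...back to its starting value            */
}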
diff --git a/xen/include/asm-x86_64/spinlock.h b/xen/include/asm-x86_64/spinlock.h
deleted file mode 100644
index 50e7ffec43..0000000000
--- a/xen/include/asm-x86_64/spinlock.h
+++ /dev/null
@@ -1,174 +0,0 @@
-#ifndef __ASM_SPINLOCK_H
-#define __ASM_SPINLOCK_H
-
-#include <xen/config.h>
-#include <xen/lib.h>
-#include <asm/atomic.h>
-#include <asm/rwlock.h>
-
-#if 0
-#define SPINLOCK_DEBUG 1
-#else
-#define SPINLOCK_DEBUG 0
-#endif
-
-/*
- * Your basic SMP spinlocks, allowing only a single CPU anywhere
- */
-
-typedef struct {
- volatile unsigned int lock;
-#if SPINLOCK_DEBUG
- unsigned magic;
-#endif
-} spinlock_t;
-
-#define SPINLOCK_MAGIC 0xdead4ead
-
-#if SPINLOCK_DEBUG
-#define SPINLOCK_MAGIC_INIT , SPINLOCK_MAGIC
-#else
-#define SPINLOCK_MAGIC_INIT /* */
-#endif
-
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }
-
-#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-
-/*
- * Simple spin lock operations. There are two variants, one clears IRQ's
- * on the local processor, one does not.
- *
- * We make no fairness assumptions. They have a cost.
- */
-
-#define spin_is_locked(x) (*(volatile char *)(&(x)->lock) <= 0)
-#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x))
-
-#define spin_lock_string \
- "\n1:\t" \
- "lock ; decb %0\n\t" \
- "js 2f\n" \
- ".section .text.lock,\"ax\"\n" \
- "2:\t" \
- "cmpb $0,%0\n\t" \
- "rep;nop\n\t" \
- "jle 2b\n\t" \
- "jmp 1b\n" \
- ".previous"
-
-/*
- * This works. Despite all the confusion.
- */
-#define spin_unlock_string \
- "movb $1,%0"
-
-static inline int spin_trylock(spinlock_t *lock)
-{
- char oldval;
- __asm__ __volatile__(
- "xchgb %b0,%1"
- :"=q" (oldval), "=m" (lock->lock)
- :"0" (0) : "memory");
- return oldval > 0;
-}
-
-static inline void spin_lock(spinlock_t *lock)
-{
-#if SPINLOCK_DEBUG
- __label__ here;
-here:
- if (lock->magic != SPINLOCK_MAGIC) {
-		printk("eip: %p\n", &&here);
- BUG();
- }
-#endif
- __asm__ __volatile__(
- spin_lock_string
- :"=m" (lock->lock) : : "memory");
-}
-
-static inline void spin_unlock(spinlock_t *lock)
-{
-#if SPINLOCK_DEBUG
- if (lock->magic != SPINLOCK_MAGIC)
- BUG();
- if (!spin_is_locked(lock))
- BUG();
-#endif
- __asm__ __volatile__(
- spin_unlock_string
- :"=m" (lock->lock) : : "memory");
-}
-
-/*
- * Read-write spinlocks, allowing multiple readers
- * but only one writer.
- *
- * NOTE! It is quite common to have readers in interrupts
- * but no interrupt writers. For those circumstances we
- * can "mix" irq-safe locks - any writer needs to get an
- * irq-safe write-lock, but readers can get non-irq-safe
- * read-locks.
- */
-typedef struct {
- volatile unsigned int lock;
-#if SPINLOCK_DEBUG
- unsigned magic;
-#endif
-} rwlock_t;
-
-#define RWLOCK_MAGIC 0xdeaf1eed
-
-#if SPINLOCK_DEBUG
-#define RWLOCK_MAGIC_INIT , RWLOCK_MAGIC
-#else
-#define RWLOCK_MAGIC_INIT /* */
-#endif
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }
-
-#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
-
-/*
- * On x86, we implement read-write locks as a 32-bit counter
- * with the high bit (sign) being the "contended" bit.
- *
- * The inline assembly is non-obvious. Think about it.
- *
- * Changed to use the same technique as rw semaphores. See
- * semaphore.h for details. -ben
- */
-/* the spinlock helpers are in arch/x86_64/kernel/semaphore.S */
-
-static inline void read_lock(rwlock_t *rw)
-{
-#if SPINLOCK_DEBUG
- if (rw->magic != RWLOCK_MAGIC)
- BUG();
-#endif
- __build_read_lock(rw, "__read_lock_failed");
-}
-
-static inline void write_lock(rwlock_t *rw)
-{
-#if SPINLOCK_DEBUG
- if (rw->magic != RWLOCK_MAGIC)
- BUG();
-#endif
- __build_write_lock(rw, "__write_lock_failed");
-}
-
-#define read_unlock(rw) asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
-#define write_unlock(rw) asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
-
-static inline int write_trylock(rwlock_t *lock)
-{
- atomic_t *count = (atomic_t *)lock;
- if (atomic_sub_and_test(RW_LOCK_BIAS, count))
- return 1;
- atomic_add(RW_LOCK_BIAS, count);
- return 0;
-}
-
-#endif /* __ASM_SPINLOCK_H */
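A hedged sketch of the usual reader/writer pairing built on the primitives above; 'state' and its lock are hypothetical:

/* Illustrative only. */
static rwlock_t state_rwlock = RW_LOCK_UNLOCKED;
static int state;

static int read_state(void)
{
    int v;
    read_lock(&state_rwlock);      /* any number of readers may hold this */
    v = state;
    read_unlock(&state_rwlock);
    return v;
}

static void write_state(int v)
{
    write_lock(&state_rwlock);     /* excludes readers and other writers  */
    state = v;
    write_unlock(&state_rwlock);
}

Callers that must not spin can use spin_trylock()/write_trylock() instead, which return non-zero on success.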
diff --git a/xen/include/asm-x86_64/string.h b/xen/include/asm-x86_64/string.h
deleted file mode 100644
index 875e0e2747..0000000000
--- a/xen/include/asm-x86_64/string.h
+++ /dev/null
@@ -1,50 +0,0 @@
-#ifndef _X86_64_STRING_H_
-#define _X86_64_STRING_H_
-
-#ifdef __KERNEL__
-
-/* Written 2002 by Andi Kleen */
-
-/* Only used for special circumstances. Stolen from i386/string.h */
-static inline void * __inline_memcpy(void * to, const void * from, size_t n)
-{
-	unsigned long d0, d1, d2;
-	__asm__ __volatile__(
-		"rep ; movsl\n\t"
-		"testb $2,%b4\n\t"
-		"je 1f\n\t"
-		"movsw\n"
-		"1:\ttestb $1,%b4\n\t"
-		"je 2f\n\t"
-		"movsb\n"
-		"2:"
-		: "=&c" (d0), "=&D" (d1), "=&S" (d2)
-		: "0" (n/4), "q" (n), "1" ((long) to), "2" ((long) from)
-		: "memory");
-	return (to);
-}
-
-/* Even with __builtin_ the compiler may decide to use the out of line
- function. */
-
-#define __HAVE_ARCH_MEMCPY 1
-extern void *__memcpy(void *to, const void *from, size_t len);
-#define memcpy(dst,src,len) \
- ({ size_t __len = (len); \
- void *__ret; \
- if (__builtin_constant_p(len) && __len >= 64) \
- __ret = __memcpy((dst),(src),__len); \
- else \
- __ret = __builtin_memcpy((dst),(src),__len); \
- __ret; })
-
-
-#define __HAVE_ARCH_MEMSET
-#define memset __builtin_memset
-
-#define __HAVE_ARCH_MEMMOVE
-void * memmove(void * dest,const void *src,size_t count);
-
-#endif /* __KERNEL__ */
-
-#endif
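How the memcpy() macro above dispatches, sketched with hypothetical arguments:

/* Illustrative only: 'dst', 'src' and 'n' are placeholders. */
static void copy_examples(void *dst, const void *src, size_t n)
{
    memcpy(dst, src, 16);   /* constant, < 64: __builtin_memcpy, which the
                             * compiler will likely expand inline           */
    memcpy(dst, src, 256);  /* constant, >= 64: the out-of-line __memcpy()  */
    memcpy(dst, src, n);    /* not a compile-time constant:
                             * __builtin_constant_p(n) is false, so
                             * __builtin_memcpy is used                     */
}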
diff --git a/xen/include/asm-x86_64/system.h b/xen/include/asm-x86_64/system.h
deleted file mode 100644
index b6b6172381..0000000000
--- a/xen/include/asm-x86_64/system.h
+++ /dev/null
@@ -1,220 +0,0 @@
-#ifndef __ASM_SYSTEM_H
-#define __ASM_SYSTEM_H
-
-#include <xen/config.h>
-#include <asm/bitops.h>
-
-/* Clear and set 'TS' bit respectively */
-#define clts() __asm__ __volatile__ ("clts")
-#define stts() write_cr0(X86_CR0_TS|read_cr0())
-
-#define wbinvd() \
- __asm__ __volatile__ ("wbinvd": : :"memory");
-
-static inline unsigned long get_limit(unsigned long segment)
-{
- unsigned long __limit;
- __asm__("lsll %1,%0"
- :"=r" (__limit):"r" (segment));
- return __limit+1;
-}
-
-#define nop() __asm__ __volatile__ ("nop")
-
-#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
-
-#define __xg(x) ((volatile long *)(x))
-
-extern inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
-{
- *ptr = val;
-}
-
-#define _set_64bit set_64bit
-
-/*
- * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
- * Note 2: xchg has side effect, so that attribute volatile is necessary,
- * but generally the primitive is invalid, *ptr is output argument. --ANK
- */
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
- switch (size) {
- case 1:
- __asm__ __volatile__("xchgb %b0,%1"
- :"=q" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 2:
- __asm__ __volatile__("xchgw %w0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 4:
- __asm__ __volatile__("xchgl %k0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 8:
- __asm__ __volatile__("xchgq %0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- }
- return x;
-}
-
-/*
- * Atomic compare and exchange. Compare OLD with MEM, if identical,
- * store NEW in MEM. Return the initial value in MEM. Success is
- * indicated by comparing RETURN with OLD.
- */
-
-#define __HAVE_ARCH_CMPXCHG 1
-
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
- unsigned long new, int size)
-{
- unsigned long prev;
- switch (size) {
- case 1:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 2:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 4:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 8:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- }
- return old;
-}
-
-#define cmpxchg(ptr,o,n)\
- ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
- (unsigned long)(n),sizeof(*(ptr))))
-
-
-/*
- * This macro atomically compares the quadword at location _p with _o and,
- * if they match, writes _n to _p. If the access faults we return 1,
- * otherwise we return 0. If no fault occurs then _o is updated to the
- * value we saw at _p; if this equals the initial value of _o then _n has
- * been written to location _p.
- */
-#define cmpxchg_user(_p,_o,_n) \
-({ \
- int _rc; \
- __asm__ __volatile__ ( \
- "1: " LOCK_PREFIX "cmpxchgq %2,%3\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: movl $1,%1\n" \
- " jmp 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 1b,3b\n" \
- ".previous" \
- : "=a" (_o), "=r" (_rc) \
- : "q" (_n), "m" (*__xg((volatile void *)_p)), "0" (_o), "1" (0) \
- : "memory"); \
- _rc; \
-})
-
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#endif
-
-/*
- * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
- * to devices.
- *
- * For now, "wmb()" doesn't actually do anything, as all
- * Intel CPUs follow what Intel calls a *Processor Order*,
- * in which all writes are seen in the program order even
- * outside the CPU.
- *
- * I expect future Intel CPUs to have a weaker ordering,
- * but I'd also expect them to finally get their act together
- * and add some real memory barriers if so.
- */
-#define mb() asm volatile("mfence":::"memory")
-#define rmb() asm volatile("lfence":::"memory")
-#define wmb() asm volatile("sfence":::"memory")
-#define set_mb(var, value) do { xchg(&var, value); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
-
-#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)
-
-/* interrupt control.. */
-#define __save_flags(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0)
-#define __restore_flags(x) __asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc")
-#define __cli() __asm__ __volatile__("cli": : :"memory")
-#define __sti() __asm__ __volatile__("sti": : :"memory")
-/* used in the idle loop; sti takes one instruction cycle to complete */
-#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
-
-/* For spinlocks etc */
-#define local_irq_save(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0)
-#define local_irq_set(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# local_irq_set \n\t pushfq ; popq %0 ; sti":"=g" (x): /* no input */ :"memory"); } while (0)
-#define local_irq_restore(x) __asm__ __volatile__("# local_irq_restore \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory")
-#define local_irq_disable() __cli()
-#define local_irq_enable() __sti()
-
-#ifdef CONFIG_SMP
-
-extern void __global_cli(void);
-extern void __global_sti(void);
-extern unsigned long __global_save_flags(void);
-extern void __global_restore_flags(unsigned long);
-#define cli() __global_cli()
-#define sti() __global_sti()
-#define save_flags(x) ((x)=__global_save_flags())
-#define restore_flags(x) __global_restore_flags(x)
-
-#else
-
-#define cli() __cli()
-#define sti() __sti()
-#define save_flags(x) __save_flags(x)
-#define restore_flags(x) __restore_flags(x)
-
-#endif
-
-/* Default simics "magic" breakpoint */
-#define icebp() asm volatile("xchg %%bx,%%bx" ::: "ebx")
-
-/*
- * disable hlt during certain critical i/o operations
- */
-#define HAVE_DISABLE_HLT
-void disable_hlt(void);
-void enable_hlt(void);
-
-#endif
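The cmpxchg() defined above is typically wrapped in a retry loop. A hedged sketch with a hypothetical shared counter:

/* Illustrative only: lock-free increment built on cmpxchg(). */
static unsigned long counter;

static void atomic_add_one(void)
{
    unsigned long old, seen;

    do {
        old  = counter;
        seen = cmpxchg(&counter, old, old + 1);
        /* Success is indicated by the returned (initial) value matching
         * the 'old' we compared against. */
    } while (seen != old);
}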
diff --git a/xen/include/asm-x86_64/time.h b/xen/include/asm-x86_64/time.h
deleted file mode 100644
index 40145ddb0f..0000000000
--- a/xen/include/asm-x86_64/time.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* -*- Mode:C; c-basic-offset:4; tab-width:4 -*-
- ****************************************************************************
- * (C) 2002 - Rolf Neugebauer - Intel Research Cambridge
- ****************************************************************************
- *
- * File: time.h
- * Author: Rolf Neugebauer (neugebar@dcs.gla.ac.uk)
- *
- * Environment: Xen Hypervisor
- * Description: Architecture dependent definition of time variables
- */
-
-#ifndef _ASM_TIME_H_
-#define _ASM_TIME_H_
-
-#include <asm/types.h>
-#include <asm/msr.h>
-
-typedef s64 s_time_t; /* system time */
-
-extern int using_apic_timer;
-
-#endif /* _ASM_TIME_H_ */
diff --git a/xen/include/asm-x86_64/timex.h b/xen/include/asm-x86_64/timex.h
deleted file mode 100644
index 7b6835763a..0000000000
--- a/xen/include/asm-x86_64/timex.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * linux/include/asm-x86_64/timex.h
- *
- * x86-64 architecture timex specifications
- */
-#ifndef _ASMx8664_TIMEX_H
-#define _ASMx8664_TIMEX_H
-
-#include <xen/config.h>
-#include <asm/msr.h>
-
-#define CLOCK_TICK_RATE (vxtime_hz)
-#define FINETUNE ((((((long)LATCH * HZ - CLOCK_TICK_RATE) << SHIFT_HZ) * \
- 1000000 / CLOCK_TICK_RATE) << (SHIFT_SCALE - SHIFT_HZ)) / HZ)
-
-/*
- * We only use the low 32 bits, and we'd simply better make sure
- * that we reschedule before that wraps. Scheduling at least every
- * four billion cycles just basically sounds like a good idea,
- * regardless of how fast the machine is.
- */
-typedef unsigned long long cycles_t;
-
-extern cycles_t cacheflush_time;
-
-static inline cycles_t get_cycles (void)
-{
- unsigned long long ret;
- rdtscll(ret);
- return ret;
-}
-
-extern unsigned int cpu_khz;
-
-/*
- * Documentation on HPET can be found at:
- * http://www.intel.com/ial/home/sp/pcmmspec.htm
- * ftp://download.intel.com/ial/home/sp/mmts098.pdf
- */
-
-#define HPET_ID 0x000
-#define HPET_PERIOD 0x004
-#define HPET_CFG 0x010
-#define HPET_STATUS 0x020
-#define HPET_COUNTER 0x0f0
-#define HPET_T0_CFG 0x100
-#define HPET_T0_CMP 0x108
-#define HPET_T0_ROUTE 0x110
-
-#define HPET_ID_VENDOR 0xffff0000
-#define HPET_ID_LEGSUP 0x00008000
-#define HPET_ID_NUMBER 0x00000f00
-#define HPET_ID_REV 0x000000ff
-
-#define HPET_CFG_ENABLE 0x001
-#define HPET_CFG_LEGACY 0x002
-
-#define HPET_T0_ENABLE 0x004
-#define HPET_T0_PERIODIC 0x008
-#define HPET_T0_SETVAL 0x040
-#define HPET_T0_32BIT 0x100
-
-/*extern struct vxtime_data vxtime; */
-extern unsigned long vxtime_hz;
-extern unsigned long hpet_address;
-
-#endif
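A hedged sketch of turning a get_cycles() delta into microseconds using cpu_khz; the work being timed is left as a placeholder:

/* Illustrative only. cpu_khz is cycles per millisecond, so delta/cpu_khz
 * gives milliseconds; scaling by 1000 first yields microseconds. */
static unsigned long elapsed_us(void)
{
    cycles_t start, end;

    start = get_cycles();
    /* ... work being timed ... */
    end = get_cycles();

    return (unsigned long)((end - start) * 1000 / cpu_khz);
}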
diff --git a/xen/include/asm-x86_64/types.h b/xen/include/asm-x86_64/types.h
deleted file mode 100644
index 25a78f28c6..0000000000
--- a/xen/include/asm-x86_64/types.h
+++ /dev/null
@@ -1,44 +0,0 @@
-#ifndef _X86_64_TYPES_H
-#define _X86_64_TYPES_H
-
-typedef unsigned short umode_t;
-
-typedef unsigned long size_t;
-
-/*
- * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
- * header files exported to user space
- */
-
-typedef __signed__ char __s8;
-typedef unsigned char __u8;
-
-typedef __signed__ short __s16;
-typedef unsigned short __u16;
-
-typedef __signed__ int __s32;
-typedef unsigned int __u32;
-
-typedef __signed__ long long __s64;
-typedef unsigned long long __u64;
-
-#include <xen/config.h>
-
-typedef signed char s8;
-typedef unsigned char u8;
-
-typedef signed short s16;
-typedef unsigned short u16;
-
-typedef signed int s32;
-typedef unsigned int u32;
-
-typedef signed long long s64;
-typedef unsigned long long u64;
-
-#define BITS_PER_LONG 64
-
-typedef u64 dma64_addr_t;
-typedef u64 dma_addr_t;
-
-#endif
diff --git a/xen/include/asm-x86_64/unaligned.h b/xen/include/asm-x86_64/unaligned.h
deleted file mode 100644
index d4bf78dc6f..0000000000
--- a/xen/include/asm-x86_64/unaligned.h
+++ /dev/null
@@ -1,37 +0,0 @@
-#ifndef __X8664_UNALIGNED_H
-#define __X8664_UNALIGNED_H
-
-/*
- * The x86-64 can do unaligned accesses itself.
- *
- * The strange macros are there to make sure these can't
- * be misused in a way that makes them not work on other
- * architectures where unaligned accesses aren't as simple.
- */
-
-/**
- * get_unaligned - get value from possibly mis-aligned location
- * @ptr: pointer to value
- *
- * This macro should be used for accessing values larger in size than
- * single bytes at locations that are expected to be improperly aligned,
- * e.g. retrieving a u16 value from a location not u16-aligned.
- *
- * Note that unaligned accesses can be very expensive on some architectures.
- */
-#define get_unaligned(ptr) (*(ptr))
-
-/**
- * put_unaligned - put value to a possibly mis-aligned location
- * @val: value to place
- * @ptr: pointer to location
- *
- * This macro should be used for placing values larger in size than
- * single bytes at locations that are expected to be improperly aligned,
- * e.g. writing a u16 value to a location not u16-aligned.
- *
- * Note that unaligned accesses can be very expensive on some architectures.
- */
-#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
-
-#endif
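A hedged sketch of the macros above applied to a misaligned field; the packet layout is invented for the example:

/* Illustrative only: a hypothetical wire format of a u8 type byte followed
 * by a u32 length, so the u32 sits at an odd offset. On x86-64 these macros
 * compile to plain loads/stores, but using them keeps the code portable to
 * architectures where unaligned access is not safe. */
static u32 read_len_field(unsigned char *pkt)
{
    u32 len = get_unaligned((u32 *)(pkt + 1));

    put_unaligned(len + 4, (u32 *)(pkt + 1));   /* write it back, adjusted */
    return len;
}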
diff --git a/xen/include/hypervisor-ifs/arch-i386/hypervisor-if.h b/xen/include/hypervisor-ifs/arch-x86/hypervisor-if.h
index 80055a5062..80055a5062 100644
--- a/xen/include/hypervisor-ifs/arch-i386/hypervisor-if.h
+++ b/xen/include/hypervisor-ifs/arch-x86/hypervisor-if.h
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 5202c60d4c..387af5865c 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -9,7 +9,6 @@
#include <xen/sched.h>
#include <asm/processor.h>
-#include <asm/pgalloc.h>
#include <asm/atomic.h>
#include <asm/desc.h>
#include <asm/flushtlb.h>