-rw-r--r--  BitKeeper/etc/logging_ok                    1
-rw-r--r--  xen/arch/x86/i8259.c                        2
-rw-r--r--  xen/include/asm-x86/system.h               17
-rw-r--r--  xen/include/asm-x86/x86_64/uaccess.h       38
-rw-r--r--  xen/include/hypervisor-ifs/arch-x86_64.h    6
5 files changed, 36 insertions, 28 deletions
diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok
index be1a4e4e40..35d2bada8d 100644
--- a/BitKeeper/etc/logging_ok
+++ b/BitKeeper/etc/logging_ok
@@ -21,6 +21,7 @@ jws22@gauntlet.cl.cam.ac.uk
jws@cairnwell.research
kaf24@freefall.cl.cam.ac.uk
kaf24@labyrinth.cl.cam.ac.uk
+kaf24@penguin.local
kaf24@plym.cl.cam.ac.uk
kaf24@scramble.cl.cam.ac.uk
kaf24@striker.cl.cam.ac.uk
diff --git a/xen/arch/x86/i8259.c b/xen/arch/x86/i8259.c
index 2067b628e9..a4202303c2 100644
--- a/xen/arch/x86/i8259.c
+++ b/xen/arch/x86/i8259.c
@@ -100,7 +100,7 @@ BUILD_SMP_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \
IRQ(x,c), IRQ(x,d), IRQ(x,e), IRQ(x,f)
- void (*interrupt[NR_IRQS])(void) = {
+ void *interrupt[NR_IRQS] = {
IRQLIST_16(0x0),
#ifdef CONFIG_X86_IO_APIC
diff --git a/xen/include/asm-x86/system.h b/xen/include/asm-x86/system.h
index 4835b6e236..e2c961e360 100644
--- a/xen/include/asm-x86/system.h
+++ b/xen/include/asm-x86/system.h
@@ -123,6 +123,7 @@ static always_inline unsigned long __cmpxchg(volatile void *ptr, unsigned long o
* If no fault occurs then _o is updated to the value we saw at _p. If this
* is the same as the initial value of _o then _n is written to location _p.
*/
+#ifdef __i386__
#define __cmpxchg_user(_p,_o,_n,_isuff,_oppre,_regtype) \
__asm__ __volatile__ ( \
"1: " LOCK_PREFIX "cmpxchg"_isuff" %"_oppre"2,%3\n" \
@@ -138,7 +139,6 @@ static always_inline unsigned long __cmpxchg(volatile void *ptr, unsigned long o
: "=a" (_o), "=r" (_rc) \
: _regtype (_n), "m" (*__xg((volatile void *)_p)), "0" (_o), "1" (0) \
: "memory");
-#ifdef __i386__
#define cmpxchg_user(_p,_o,_n) \
({ \
int _rc; \
@@ -156,6 +156,21 @@ static always_inline unsigned long __cmpxchg(volatile void *ptr, unsigned long o
_rc; \
})
#else
+#define __cmpxchg_user(_p,_o,_n,_isuff,_oppre,_regtype) \
+ __asm__ __volatile__ ( \
+ "1: " LOCK_PREFIX "cmpxchg"_isuff" %"_oppre"2,%3\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: movl $1,%1\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align 8\n" \
+ " .quad 1b,3b\n" \
+ ".previous" \
+ : "=a" (_o), "=r" (_rc) \
+ : _regtype (_n), "m" (*__xg((volatile void *)_p)), "0" (_o), "1" (0) \
+ : "memory");
#define cmpxchg_user(_p,_o,_n) \
({ \
int _rc; \
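
The comment above __cmpxchg_user describes the calling convention: if the access faults, the fixup code sets _rc non-zero; otherwise _o is rewritten with the value actually observed at _p, and _n is stored only when that value matches the caller's snapshot. A minimal usage sketch, not part of this changeset (guest_counter_inc() is a hypothetical helper, and get_user() is assumed to be available from the ported uaccess.h):

#include <xen/errno.h>
#include <asm/system.h>    /* cmpxchg_user() */
#include <asm/uaccess.h>   /* get_user()     */

/* Hypothetical: atomically increment a counter at a guest-supplied
 * pointer.  Returns 0 on success, -EFAULT if the pointer faults. */
static int guest_counter_inc(unsigned long *gptr)
{
    unsigned long snapshot, old, new;
    int rc;

    do {
        if ( get_user(snapshot, gptr) )   /* read the current value */
            return -EFAULT;
        old = snapshot;
        new = snapshot + 1;
        /* On return: rc != 0 means the access faulted; otherwise `old`
         * holds the value seen at *gptr, and `new` was stored only if
         * that value still equalled our snapshot. */
        rc = cmpxchg_user(gptr, old, new);
        if ( rc )
            return -EFAULT;
    } while ( old != snapshot );          /* lost the race: retry */

    return 0;
}
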
diff --git a/xen/include/asm-x86/x86_64/uaccess.h b/xen/include/asm-x86/x86_64/uaccess.h
index 29522716d5..be49ff870b 100644
--- a/xen/include/asm-x86/x86_64/uaccess.h
+++ b/xen/include/asm-x86/x86_64/uaccess.h
@@ -4,36 +4,22 @@
/*
* User space memory access functions
*/
-#include <linux/config.h>
-#include <linux/compiler.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/prefetch.h>
+#include <xen/config.h>
+#include <xen/compiler.h>
+#include <xen/errno.h>
+#include <xen/sched.h>
+#include <xen/prefetch.h>
#include <asm/page.h>
+/* No user-pointer checking. */
+#define __user
+#define __force
+#define __chk_user_ptr(_p) ((void)0)
+
#define VERIFY_READ 0
#define VERIFY_WRITE 1
-/*
- * The fs value determines whether argument validity checking should be
- * performed or not. If get_fs() == USER_DS, checking is performed, with
- * get_fs() == KERNEL_DS, checking is bypassed.
- *
- * For historical reasons, these macros are grossly misnamed.
- */
-
-#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
-
-#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFFFFFFFFFFUL)
-#define USER_DS MAKE_MM_SEG(PAGE_OFFSET)
-
-#define get_ds() (KERNEL_DS)
-#define get_fs() (current_thread_info()->addr_limit)
-#define set_fs(x) (current_thread_info()->addr_limit = (x))
-
-#define segment_eq(a,b) ((a).seg == (b).seg)
-
-#define __addr_ok(addr) (!((unsigned long)(addr) & (current_thread_info()->addr_limit.seg)))
+#define __addr_ok(addr) ((unsigned long)(addr) < HYPERVISOR_VIRT_START)
/*
* Uhhuh, this needs 65-bit arithmetic. We have a carry..
@@ -44,7 +30,7 @@
asm("# range_ok\n\r" \
"addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0" \
:"=&r" (flag), "=r" (sum) \
- :"1" (addr),"g" ((long)(size)),"g" (current_thread_info()->addr_limit.seg)); \
+ :"1" (addr),"g" ((long)(size)),"r" (HYPERVISOR_VIRT_START)); \
flag; })
#define access_ok(type, addr, size) (__range_not_ok(addr,size) == 0)
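
The rewritten __range_not_ok keeps the 65-bit carry trick that the comment above alludes to: addr + size can wrap past 2^64, and the carry alone must fail the check even though the truncated sum would compare as small. A plain-C sketch of the same logic against the new HYPERVISOR_VIRT_START limit, for illustration only (the real macro returns the flag computed by the inline asm):

/* Not part of the changeset: non-zero means the range is NOT okay,
 * matching access_ok(type, addr, size) == (__range_not_ok(addr, size) == 0). */
static inline int range_not_ok_c(unsigned long addr, unsigned long size)
{
    unsigned long sum = addr + size;

    if ( sum < addr )                    /* carry out of bit 63: wrapped */
        return 1;
    if ( sum > HYPERVISOR_VIRT_START )   /* runs into hypervisor space */
        return 1;
    return 0;
}
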
diff --git a/xen/include/hypervisor-ifs/arch-x86_64.h b/xen/include/hypervisor-ifs/arch-x86_64.h
index 7ac8cf8a73..8eab08f669 100644
--- a/xen/include/hypervisor-ifs/arch-x86_64.h
+++ b/xen/include/hypervisor-ifs/arch-x86_64.h
@@ -123,6 +123,12 @@ typedef struct {
unsigned long failsafe_callback_eip;
} PACKED full_execution_context_t;
+typedef struct {
+ u64 mfn_to_pfn_start; /* MFN of start of m2p table */
+ u64 pfn_to_mfn_frame_list; /* MFN of a table of MFNs that
+                               make up p2m table */
+} PACKED arch_shared_info_t;
+
#endif /* !__ASSEMBLY__ */
#endif /* __HYPERVISOR_IF_H__ */
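
The new arch_shared_info_t advertises two machine frames to the guest: mfn_to_pfn_start is the MFN of the first frame of the machine-to-physical table, and pfn_to_mfn_frame_list is the MFN of a frame whose entries are the MFNs holding the guest's pfn-to-mfn array. A sketch of how a control tool might walk the second field; map_machine_frame() is a hypothetical helper, the include path is an assumption, and the idea that the frame list fits in one frame of 8-byte entries is illustrative only:

#include <hypervisor-ifs/arch-x86_64.h>   /* arch_shared_info_t, u64 (assumed path) */

#define ENTRIES_PER_FRAME (PAGE_SIZE / sizeof(unsigned long))

/* Hypothetical: map one machine frame and return a pointer to it. */
extern unsigned long *map_machine_frame(u64 mfn);

/* Translate a guest pseudo-physical frame number to a machine frame
 * number via the two-level structure described by the new fields. */
static unsigned long pfn_to_mfn(arch_shared_info_t *arch, unsigned long pfn)
{
    /* Level 1: the frame-list page, one entry per p2m frame. */
    unsigned long *frame_list = map_machine_frame(arch->pfn_to_mfn_frame_list);
    /* Level 2: the p2m frame that covers this pfn. */
    unsigned long *p2m_frame =
        map_machine_frame(frame_list[pfn / ENTRIES_PER_FRAME]);

    return p2m_frame[pfn % ENTRIES_PER_FRAME];
}
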