about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--linux-2.6.9-xen-sparse/arch/xen/i386/kernel/entry.S21
-rw-r--r--xen/arch/x86/pdb-stub.c10
-rw-r--r--xen/arch/x86/traps.c21
-rw-r--r--xen/arch/x86/x86_32/entry.S175
-rw-r--r--xen/arch/x86/x86_32/seg_fixup.c2
-rw-r--r--xen/include/asm-x86/debugger.h3
-rw-r--r--xen/include/asm-x86/x86_32/asm_defns.h36
-rw-r--r--xen/include/asm-x86/x86_32/regs.h6
-rw-r--r--xen/include/public/xen.h1
9 files changed, 193 insertions, 82 deletions
diff --git a/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/entry.S b/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/entry.S
index 1d19d79864..a3189508d5 100644
--- a/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/entry.S
+++ b/linux-2.6.9-xen-sparse/arch/xen/i386/kernel/entry.S
@@ -309,6 +309,8 @@ syscall_exit:
testw $_TIF_ALLWORK_MASK, %cx # current->work
jne syscall_exit_work
restore_all:
+ testl $VM_MASK, EFLAGS(%esp)
+ jnz resume_vm86
movb EVENT_MASK(%esp), %al
notb %al # %al == ~saved_mask
andb evtchn_upcall_mask(%esi),%al
@@ -316,6 +318,13 @@ restore_all:
jnz restore_all_enable_events # != 0 => reenable event delivery
RESTORE_ALL
+resume_vm86:
+ RESTORE_REGS
+	movl %eax,(%esp)
+ movl $__HYPERVISOR_switch_vm86,%eax
+ int $0x82 # Atomically enables events delivery
+ ud2
+
# perform work that needs to be done immediately before resumption
ALIGN
work_pending:
@@ -546,7 +555,8 @@ ENTRY(failsafe_callback)
2: popl %es
3: popl %fs
4: popl %gs
-5: iret
+ SAVE_ALL
+ jmp ret_from_exception
.section .fixup,"ax"; \
6: movl $0,(%esp); \
jmp 1b; \
@@ -556,12 +566,6 @@ ENTRY(failsafe_callback)
jmp 3b; \
9: movl $0,(%esp); \
jmp 4b; \
-10: pushl %ss; \
- popl %ds; \
- pushl %ss; \
- popl %es; \
- pushl $11; \
- call do_exit; \
.previous; \
.section __ex_table,"a";\
.align 4; \
@@ -569,7 +573,6 @@ ENTRY(failsafe_callback)
.long 2b,7b; \
.long 3b,8b; \
.long 4b,9b; \
- .long 5b,10b; \
.previous
ENTRY(coprocessor_error)
@@ -887,7 +890,7 @@ ENTRY(sys_call_table)
.long sys_iopl /* 110 */
.long sys_vhangup
.long sys_ni_syscall /* old "idle" system call */
- .long sys_ni_syscall /* disable sys_vm86old */
+ .long sys_vm86old
.long sys_wait4
.long sys_swapoff /* 115 */
.long sys_sysinfo
diff --git a/xen/arch/x86/pdb-stub.c b/xen/arch/x86/pdb-stub.c
index e301bf387a..88aa373122 100644
--- a/xen/arch/x86/pdb-stub.c
+++ b/xen/arch/x86/pdb-stub.c
@@ -1083,13 +1083,17 @@ int pdb_handle_exception(int exceptionVector,
int watchdog_save;
unsigned long cr3 = read_cr3();
+ /* No vm86 handling here as yet. */
+ if ( VM86_MODE(xen_regs) )
+ return 1;
+
/* If the exception is an int3 from user space then pdb is only
interested if it re-wrote an instruction set the breakpoint.
This occurs when leaving a system call from a domain.
*/
- if ( exceptionVector == 3 &&
- (xen_regs->cs & 3) == 3 &&
- xen_regs->eip != pdb_system_call_next_addr + 1)
+ if ( (exceptionVector == 3) &&
+ RING_3(xen_regs) &&
+ (xen_regs->eip != (pdb_system_call_next_addr + 1)) )
{
TRC(printf("pdb: user bkpt (0x%x) at 0x%x:0x%lx:0x%x\n",
exceptionVector, xen_regs->cs & 3, cr3, xen_regs->eip));
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index f07740220b..842de24582 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -54,6 +54,8 @@
#if defined(__i386__)
+#define GUEST_FAULT(_r) (likely(VM86_MODE(_r) || !RING_0(_r)))
+
#define DOUBLEFAULT_STACK_SIZE 1024
static struct tss_struct doublefault_tss;
static unsigned char doublefault_stack[DOUBLEFAULT_STACK_SIZE];
@@ -164,7 +166,7 @@ void show_registers(struct xen_regs *regs)
unsigned long esp;
unsigned short ss, ds, es, fs, gs;
- if ( regs->cs & 3 )
+ if ( GUEST_FAULT(regs) )
{
esp = regs->esp;
ss = regs->ss & 0xffff;
@@ -247,7 +249,7 @@ static inline int do_trap(int trapnr, char *str,
DEBUGGER_trap_entry(trapnr, regs);
- if ( !(regs->cs & 3) )
+ if ( !GUEST_FAULT(regs) )
goto xen_fault;
ti = current->thread.traps + trapnr;
@@ -313,7 +315,7 @@ asmlinkage int do_int3(struct xen_regs *regs)
DEBUGGER_trap_entry(TRAP_int3, regs);
- if ( unlikely((regs->cs & 3) == 0) )
+ if ( !GUEST_FAULT(regs) )
{
DEBUGGER_trap_fatal(TRAP_int3, regs);
show_registers(regs);
@@ -419,7 +421,7 @@ asmlinkage int do_page_fault(struct xen_regs *regs)
return EXCRET_fault_fixed; /* successfully copied the mapping */
}
- if ( unlikely(!(regs->cs & 3)) )
+ if ( !GUEST_FAULT(regs) )
goto xen_fault;
ti = d->thread.traps + 14;
@@ -479,8 +481,10 @@ asmlinkage int do_general_protection(struct xen_regs *regs)
DEBUGGER_trap_entry(TRAP_gp_fault, regs);
- /* Badness if error in ring 0, or result of an interrupt. */
- if ( !(regs->cs & 3) || (regs->error_code & 1) )
+ if ( regs->error_code & 1 )
+ goto hardware_gp;
+
+ if ( !GUEST_FAULT(regs) )
goto gp_in_kernel;
/*
@@ -507,7 +511,7 @@ asmlinkage int do_general_protection(struct xen_regs *regs)
{
/* This fault must be due to <INT n> instruction. */
ti = current->thread.traps + (regs->error_code>>3);
- if ( TI_GET_DPL(ti) >= (regs->cs & 3) )
+ if ( TI_GET_DPL(ti) >= (VM86_MODE(regs) ? 3 : (regs->cs & 3)) )
{
tb->flags = TBF_EXCEPTION;
regs->eip += 2;
@@ -545,6 +549,7 @@ asmlinkage int do_general_protection(struct xen_regs *regs)
DEBUGGER_trap_fatal(TRAP_gp_fault, regs);
+ hardware_gp:
show_registers(regs);
panic("CPU%d GENERAL PROTECTION FAULT\n[error_code=%04x]\n",
smp_processor_id(), regs->error_code);
@@ -641,7 +646,7 @@ asmlinkage int do_debug(struct xen_regs *regs)
goto out;
}
- if ( (regs->cs & 3) == 0 )
+ if ( !GUEST_FAULT(regs) )
{
/* Clear TF just for absolute sanity. */
regs->eflags &= ~EF_TF;
diff --git a/xen/arch/x86/x86_32/entry.S b/xen/arch/x86/x86_32/entry.S
index 32915e4a05..4bfcb945d3 100644
--- a/xen/arch/x86/x86_32/entry.S
+++ b/xen/arch/x86/x86_32/entry.S
@@ -73,10 +73,13 @@ ENTRY(continue_nonidle_task)
restore_all_guest:
testb $TF_failsafe_return,DOMAIN_thread_flags(%ebx)
jnz failsafe_callback
+ testl $X86_EFLAGS_VM,XREGS_eflags(%esp)
+ jnz restore_all_vm86
FLT1: movl XREGS_ds(%esp),%ds
FLT2: movl XREGS_es(%esp),%es
FLT3: movl XREGS_fs(%esp),%fs
FLT4: movl XREGS_gs(%esp),%gs
+restore_all_vm86:
popl %ebx
popl %ecx
popl %edx
@@ -218,10 +221,11 @@ process_softirqs:
/* {EIP, CS, EFLAGS, [ESP, SS]} */
/* %edx == trap_bounce, %ebx == task_struct */
/* %eax,%ecx are clobbered. %gs:%esi contain new XREGS_ss/XREGS_esp. */
-create_bounce_frame:
+create_bounce_frame:
+ movl XREGS_eflags+4(%esp),%ecx
movb XREGS_cs+4(%esp),%cl
- testb $2,%cl
- jz 1f /* jump if returning to an existing ring-1 activation */
+ testl $(2|X86_EFLAGS_VM),%ecx
+ jz ring1 /* jump if returning to an existing ring-1 activation */
/* obtain ss/esp from TSS -- no current ring-1 activations */
movl DOMAIN_processor(%ebx),%eax
/* next 4 lines multiply %eax by 8320, which is sizeof(tss_struct) */
@@ -232,48 +236,73 @@ create_bounce_frame:
addl $init_tss + 12,%eax
movl (%eax),%esi /* tss->esp1 */
FLT7: movl 4(%eax),%gs /* tss->ss1 */
- /* base of stack frame must contain ss/esp (inter-priv iret) */
- subl $8,%esi
+ testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
+ jz nvm86_1
+ subl $16,%esi /* push ES/DS/FS/GS (VM86 stack frame) */
+ movl XREGS_es+4(%esp),%eax
+FLT8: movl %eax,%gs:(%esi)
+ movl XREGS_ds+4(%esp),%eax
+FLT9: movl %eax,%gs:4(%esi)
+ movl XREGS_fs+4(%esp),%eax
+FLT10: movl %eax,%gs:8(%esi)
+ movl XREGS_gs+4(%esp),%eax
+FLT11: movl %eax,%gs:12(%esi)
+nvm86_1:subl $8,%esi /* push SS/ESP (inter-priv iret) */
movl XREGS_esp+4(%esp),%eax
-FLT8: movl %eax,%gs:(%esi)
+FLT12: movl %eax,%gs:(%esi)
movl XREGS_ss+4(%esp),%eax
-FLT9: movl %eax,%gs:4(%esi)
- jmp 2f
-1: /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
+FLT13: movl %eax,%gs:4(%esi)
+ jmp 1f
+ring1: /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
movl XREGS_esp+4(%esp),%esi
-FLT10: movl XREGS_ss+4(%esp),%gs
-2: /* Construct a stack frame: EFLAGS, CS/EIP */
+FLT14: movl XREGS_ss+4(%esp),%gs
+1: /* Construct a stack frame: EFLAGS, CS/EIP */
subl $12,%esi
movl XREGS_eip+4(%esp),%eax
-FLT11: movl %eax,%gs:(%esi)
+FLT15: movl %eax,%gs:(%esi)
movl XREGS_cs+4(%esp),%eax
-FLT12: movl %eax,%gs:4(%esi)
+FLT16: movl %eax,%gs:4(%esi)
movl XREGS_eflags+4(%esp),%eax
-FLT13: movl %eax,%gs:8(%esi)
+FLT17: movl %eax,%gs:8(%esi)
movb TRAPBOUNCE_flags(%edx),%cl
test $TBF_EXCEPTION_ERRCODE,%cl
jz 1f
subl $4,%esi # push error_code onto guest frame
movl TRAPBOUNCE_error_code(%edx),%eax
-FLT14: movl %eax,%gs:(%esi)
+FLT18: movl %eax,%gs:(%esi)
testb $TBF_EXCEPTION_CR2,%cl
jz 2f
subl $4,%esi # push %cr2 onto guest frame
movl TRAPBOUNCE_cr2(%edx),%eax
-FLT15: movl %eax,%gs:(%esi)
+FLT19: movl %eax,%gs:(%esi)
1: testb $TBF_FAILSAFE,%cl
jz 2f
subl $16,%esi # add DS/ES/FS/GS to failsafe stack frame
- movl XREGS_ds+4(%esp),%eax
-FLT16: movl %eax,%gs:(%esi)
+ testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
+ jz nvm86_2
+ xorl %eax,%eax # VM86: we write zero selector values
+FLT20: movl %eax,%gs:(%esi)
+FLT21: movl %eax,%gs:4(%esi)
+FLT22: movl %eax,%gs:8(%esi)
+FLT23: movl %eax,%gs:12(%esi)
+ jmp 2f
+nvm86_2:movl XREGS_ds+4(%esp),%eax # non-VM86: write real selector values
+FLT24: movl %eax,%gs:(%esi)
movl XREGS_es+4(%esp),%eax
-FLT17: movl %eax,%gs:4(%esi)
+FLT25: movl %eax,%gs:4(%esi)
movl XREGS_fs+4(%esp),%eax
-FLT18: movl %eax,%gs:8(%esi)
+FLT26: movl %eax,%gs:8(%esi)
movl XREGS_gs+4(%esp),%eax
-FLT19: movl %eax,%gs:12(%esi)
+FLT27: movl %eax,%gs:12(%esi)
2: movb $0,TRAPBOUNCE_flags(%edx)
- /* Rewrite our stack frame and return to ring 1. */
+ testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp)
+ jz nvm86_3
+ xorl %eax,%eax /* zero DS-GS, just as a real CPU would */
+ movl %eax,XREGS_ds+4(%esp)
+ movl %eax,XREGS_es+4(%esp)
+ movl %eax,XREGS_fs+4(%esp)
+ movl %eax,XREGS_gs+4(%esp)
+nvm86_3:/* Rewrite our stack frame and return to ring 1. */
/* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
andl $0xfffcbeff,XREGS_eflags+4(%esp)
movl %gs,XREGS_ss+4(%esp)
@@ -297,19 +326,11 @@ FIX7: sti
DBLFLT2:jmp process_guest_exception_and_events
.previous
.section __pre_ex_table,"a"
- .long FLT7,FIX7
- .long FLT8,FIX7
- .long FLT9,FIX7
- .long FLT10,FIX7
- .long FLT11,FIX7
- .long FLT12,FIX7
- .long FLT13,FIX7
- .long FLT14,FIX7
- .long FLT15,FIX7
- .long FLT16,FIX7
- .long FLT17,FIX7
- .long FLT18,FIX7
- .long FLT19,FIX7
+ .long FLT7,FIX7 , FLT8,FIX7 , FLT9,FIX7 , FLT10,FIX7
+ .long FLT11,FIX7 , FLT12,FIX7 , FLT13,FIX7 , FLT14,FIX7
+ .long FLT15,FIX7 , FLT16,FIX7 , FLT17,FIX7 , FLT18,FIX7
+ .long FLT19,FIX7 , FLT20,FIX7 , FLT21,FIX7 , FLT22,FIX7
+ .long FLT23,FIX7 , FLT24,FIX7 , FLT25,FIX7 , FLT26,FIX7 , FLT27,FIX7
.previous
.section __ex_table,"a"
.long DBLFLT2,domain_crash
@@ -325,11 +346,12 @@ process_guest_exception_and_events:
ALIGN
ENTRY(ret_from_intr)
- GET_CURRENT(%ebx)
- movb XREGS_cs(%esp),%al
- testb $3,%al # return to non-supervisor?
- jnz test_all_events
- jmp restore_all_xen
+ GET_CURRENT(%ebx)
+ movl XREGS_eflags(%esp),%eax
+ movb XREGS_cs(%esp),%al
+ testl $(3|X86_EFLAGS_VM),%eax
+ jnz test_all_events
+ jmp restore_all_xen
ENTRY(divide_error)
pushl $TRAP_divide_error<<16
@@ -347,15 +369,18 @@ error_code:
GET_CURRENT(%ebx)
call *SYMBOL_NAME(exception_table)(,%eax,4)
addl $4,%esp
+ movl XREGS_eflags(%esp),%eax
movb XREGS_cs(%esp),%al
- testb $3,%al
+ testl $(3|X86_EFLAGS_VM),%eax
jz restore_all_xen
jmp process_guest_exception_and_events
exception_with_ints_disabled:
+ movl XREGS_eflags(%esp),%eax
movb XREGS_cs(%esp),%al
- testb $3,%al # interrupts disabled outside Xen?
- jnz 1b # it really does happen! (e.g., DOM0 X server)
+ testl $(3|X86_EFLAGS_VM),%eax # interrupts disabled outside Xen?
+ jnz 1b # it really does happen!
+ # (e.g., DOM0 X server)
pushl XREGS_eip(%esp)
call search_pre_exception_table
addl $4,%esp
@@ -469,8 +494,9 @@ ENTRY(nmi)
# In all other cases we bail without touching DS-GS, as we have
# interrupted an enclosing Xen activation in tricky prologue or
# epilogue code.
+ movl XREGS_eflags(%esp),%eax
movb XREGS_cs(%esp),%al
- testb $3,%al
+ testl $(3|X86_EFLAGS_VM),%eax
jnz do_watchdog_tick
movl XREGS_ds(%esp),%eax
cmpw $(__HYPERVISOR_DS),%ax
@@ -494,8 +520,9 @@ do_watchdog_tick:
pushl %edx # regs
call SYMBOL_NAME(do_nmi)
addl $8,%esp
+ movl XREGS_eflags(%esp),%eax
movb XREGS_cs(%esp),%al
- testb $3,%al
+ testl $(3|X86_EFLAGS_VM),%eax
jz restore_all_xen
GET_CURRENT(%ebx)
jmp restore_all_guest
@@ -539,7 +566,62 @@ nmi_io_err:
call SYMBOL_NAME(io_check_error)
addl $4,%esp
jmp ret_from_intr
-
+
+
+ENTRY(setup_vm86_frame)
+ # Copies the entire stack frame forwards by 16 bytes.
+ .macro copy_vm86_words count=18
+ .if \count
+ pushl ((\count-1)*4)(%esp)
+ popl ((\count-1)*4)+16(%esp)
+ copy_vm86_words "(\count-1)"
+ .endif
+ .endm
+ copy_vm86_words
+ addl $16,%esp
+ ret
+
+do_switch_vm86:
+ # Discard the return address
+ addl $4,%esp
+
+ movl XREGS_eflags(%esp),%ecx
+
+ # GS:ESI == Ring-1 stack activation
+ movl XREGS_esp(%esp),%esi
+VFLT1: movl XREGS_ss(%esp),%gs
+
+ # ES:EDI == Ring-0 stack activation
+ leal XREGS_eip(%esp),%edi
+
+ # Restore the hypercall-number-clobbered EAX on our stack frame
+VFLT2: movl %gs:(%esi),%eax
+ movl %eax,XREGS_eax(%esp)
+ addl $4,%esi
+
+ # Copy the VM86 activation from the ring-1 stack to the ring-0 stack
+ movl $(XREGS_user_sizeof-XREGS_eip)/4,%ecx
+VFLT3: movl %gs:(%esi),%eax
+ stosl
+ addl $4,%esi
+ loop VFLT3
+
+ # Fix up EFLAGS
+ andl $~X86_EFLAGS_IOPL,XREGS_eflags(%esp)
+ andl $X86_EFLAGS_IOPL,%ecx # Ignore attempts to change EFLAGS.IOPL
+ jnz 1f
+ orl $X86_EFLAGS_IF,%ecx # EFLAGS.IOPL=0 => no messing with EFLAGS.IF
+1: orl $X86_EFLAGS_VM,%ecx # Force EFLAGS.VM
+ orl %ecx,XREGS_eflags(%esp)
+
+ jmp test_all_events
+
+.section __ex_table,"a"
+ .long VFLT1,domain_crash
+ .long VFLT2,domain_crash
+ .long VFLT3,domain_crash
+.previous
+
.data
ENTRY(exception_table)
@@ -588,6 +670,7 @@ ENTRY(hypercall_table)
.long SYMBOL_NAME(do_grant_table_op) /* 20 */
.long SYMBOL_NAME(do_vm_assist)
.long SYMBOL_NAME(do_update_va_mapping_otherdomain)
+ .long SYMBOL_NAME(do_switch_vm86)
.rept NR_hypercalls-((.-hypercall_table)/4)
.long SYMBOL_NAME(do_ni_hypercall)
.endr
diff --git a/xen/arch/x86/x86_32/seg_fixup.c b/xen/arch/x86/x86_32/seg_fixup.c
index a102508c83..f08da34cf8 100644
--- a/xen/arch/x86/x86_32/seg_fixup.c
+++ b/xen/arch/x86/x86_32/seg_fixup.c
@@ -304,7 +304,7 @@ int gpf_emulate_4gb(struct xen_regs *regs)
unsigned int *pseg = NULL; /* segment for memory operand (NULL=default) */
/* WARNING: We only work for ring-3 segments. */
- if ( unlikely((regs->cs & 3) != 3) )
+ if ( unlikely(VM86_MODE(regs)) || unlikely(!RING_3(regs)) )
{
DPRINTK("Taken fault at bad CS %04x\n", regs->cs);
goto fail;
diff --git a/xen/include/asm-x86/debugger.h b/xen/include/asm-x86/debugger.h
index 1d377c5060..d44b2d5e62 100644
--- a/xen/include/asm-x86/debugger.h
+++ b/xen/include/asm-x86/debugger.h
@@ -55,7 +55,8 @@ static inline int debugger_trap_entry(
break;
case TRAP_gp_fault:
- if ( ((regs->cs & 3) != 0) && ((regs->error_code & 3) == 2) &&
+ if ( (VM86_MODE(regs) || !RING_0(regs)) &&
+ ((regs->error_code & 3) == 2) &&
pdb_initialized && (pdb_ctx.system_call != 0) )
{
unsigned long cr3 = read_cr3();
diff --git a/xen/include/asm-x86/x86_32/asm_defns.h b/xen/include/asm-x86/x86_32/asm_defns.h
index 01244c8e90..8231bb2dec 100644
--- a/xen/include/asm-x86/x86_32/asm_defns.h
+++ b/xen/include/asm-x86/x86_32/asm_defns.h
@@ -11,7 +11,7 @@
/* Maybe auto-generate the following two cases (quoted vs. unquoted). */
#ifndef __ASSEMBLY__
-#define __SAVE_ALL_PRE(_reg) \
+#define __SAVE_ALL_PRE \
"cld;" \
"pushl %eax;" \
"pushl %ebp;" \
@@ -20,16 +20,20 @@
"pushl %edx;" \
"pushl %ecx;" \
"pushl %ebx;" \
- "movb "STR(XREGS_cs)"(%esp),%"STR(_reg)"l;" \
- "testb $3,%"STR(_reg)"l;" \
- "je 1f;" \
+ "testl $"STR(X86_EFLAGS_VM)","STR(XREGS_eflags)"(%esp);" \
+ "jz 2f;" \
+ "call setup_vm86_frame;" \
+ "jmp 3f;" \
+ "2:testb $3,"STR(XREGS_cs)"(%esp);" \
+ "jz 1f;" \
"movl %ds,"STR(XREGS_ds)"(%esp);" \
"movl %es,"STR(XREGS_es)"(%esp);" \
"movl %fs,"STR(XREGS_fs)"(%esp);" \
- "movl %gs,"STR(XREGS_gs)"(%esp);"
+ "movl %gs,"STR(XREGS_gs)"(%esp);" \
+ "3:"
#define SAVE_ALL_NOSEGREGS(_reg) \
- __SAVE_ALL_PRE(_reg) \
+ __SAVE_ALL_PRE \
"1:"
#define SET_XEN_SEGMENTS(_reg) \
@@ -38,13 +42,13 @@
"movl %e"STR(_reg)"x,%es;"
#define SAVE_ALL(_reg) \
- __SAVE_ALL_PRE(_reg) \
+ __SAVE_ALL_PRE \
SET_XEN_SEGMENTS(_reg) \
"1:"
#else
-#define __SAVE_ALL_PRE(_reg) \
+#define __SAVE_ALL_PRE \
cld; \
pushl %eax; \
pushl %ebp; \
@@ -53,16 +57,20 @@
pushl %edx; \
pushl %ecx; \
pushl %ebx; \
- movb XREGS_cs(%esp),% ## _reg ## l; \
- testb $3,% ## _reg ## l; \
- je 1f; \
+ testl $X86_EFLAGS_VM,XREGS_eflags(%esp); \
+ jz 2f; \
+ call setup_vm86_frame; \
+ jmp 3f; \
+ 2:testb $3,XREGS_cs(%esp); \
+ jz 1f; \
movl %ds,XREGS_ds(%esp); \
movl %es,XREGS_es(%esp); \
movl %fs,XREGS_fs(%esp); \
- movl %gs,XREGS_gs(%esp);
+ movl %gs,XREGS_gs(%esp); \
+ 3:
#define SAVE_ALL_NOSEGREGS(_reg) \
- __SAVE_ALL_PRE(_reg) \
+ __SAVE_ALL_PRE \
1:
#define SET_XEN_SEGMENTS(_reg) \
@@ -71,7 +79,7 @@
movl %e ## _reg ## x,%es;
#define SAVE_ALL(_reg) \
- __SAVE_ALL_PRE(_reg) \
+ __SAVE_ALL_PRE \
SET_XEN_SEGMENTS(_reg) \
1:
diff --git a/xen/include/asm-x86/x86_32/regs.h b/xen/include/asm-x86/x86_32/regs.h
index a4fdcc663a..e03e2a8a4b 100644
--- a/xen/include/asm-x86/x86_32/regs.h
+++ b/xen/include/asm-x86/x86_32/regs.h
@@ -49,4 +49,10 @@ enum EFLAGS {
EF_ID = 0x00200000, /* id */
};
+#define VM86_MODE(_r) ((_r)->eflags & EF_VM)
+#define RING_0(_r) (((_r)->cs & 3) == 0)
+#define RING_1(_r) (((_r)->cs & 3) == 1)
+#define RING_2(_r) (((_r)->cs & 3) == 2)
+#define RING_3(_r) (((_r)->cs & 3) == 3)
+
#endif
diff --git a/xen/include/public/xen.h b/xen/include/public/xen.h
index f103509b04..905bd4c011 100644
--- a/xen/include/public/xen.h
+++ b/xen/include/public/xen.h
@@ -48,6 +48,7 @@
#define __HYPERVISOR_grant_table_op 20
#define __HYPERVISOR_vm_assist 21
#define __HYPERVISOR_update_va_mapping_otherdomain 22
+#define __HYPERVISOR_switch_vm86 23
/*
* MULTICALLS