author     kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>  2003-10-14 14:12:31 +0000
committer  kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>  2003-10-14 14:12:31 +0000
commit     1c26235d074b33bb27f0adacd5ea47f8542017f7
tree       daa5a4b1f8d1d5bbad0879e10e1814604bdbef2a
parent     8874486120eb5e00d74752b1948a2b46f041ae5a
bitkeeper revision 1.510.1.1 (3f8c044fubZknMoFWYMcHcTrrIOO3Q)
sched.h, irq.h, traps.c, entry.S, boot.S: A final cleanup of segment-register handling in Xen. We now safely propagate invalid segment register contents to the guest-OS failsafe handler in all cases.
-rw-r--r--  xen/arch/i386/boot/boot.S      2
-rw-r--r--  xen/arch/i386/entry.S        366
-rw-r--r--  xen/arch/i386/traps.c        122
-rw-r--r--  xen/include/asm-i386/irq.h     4
-rw-r--r--  xen/include/xeno/sched.h      34
5 files changed, 283 insertions, 245 deletions
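
Before the diffs, it helps to picture the guest-visible contract: when a return to the guest faults on a bad segment register, Xen bounces to the guest's failsafe handler with the offending selector values on its ring-1 stack. A hypothetical C view of that frame (the struct and field names are ours; the layout follows the sched.h comment in this patch: %ds, %es, %fs, %gs, %eip, %cs, %eflags [, %oldesp, %oldss]):

    /* Illustrative only -- no such struct exists in the source. */
    struct failsafe_frame {
        unsigned long ds, es, fs, gs;  /* segment contents at the failed return */
        unsigned long eip, cs, eflags; /* interrupted guest activation */
        unsigned long esp, ss;         /* present only on a ring transition */
    };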
diff --git a/xen/arch/i386/boot/boot.S b/xen/arch/i386/boot/boot.S
index 6179cf5a06..621ae87905 100644
--- a/xen/arch/i386/boot/boot.S
+++ b/xen/arch/i386/boot/boot.S
@@ -25,6 +25,8 @@ hal_entry:
mov $(__HYPERVISOR_DS),%ecx
mov %ecx,%ds
mov %ecx,%es
+ mov %ecx,%fs
+ mov %ecx,%gs
ljmp $(__HYPERVISOR_CS),$(1f)-__PAGE_OFFSET
1: lss stack_start-__PAGE_OFFSET,%esp
diff --git a/xen/arch/i386/entry.S b/xen/arch/i386/entry.S
index 7454b693f5..65266b80fd 100644
--- a/xen/arch/i386/entry.S
+++ b/xen/arch/i386/entry.S
@@ -104,14 +104,13 @@ OLDSS = 0x40
/* Offsets in task_struct */
PROCESSOR = 0
-STATE = 4
-HYP_EVENTS = 8
-DOMAIN = 12
-SHARED_INFO = 16
-EVENT_SEL = 20
-EVENT_ADDR = 24
-FAILSAFE_SEL = 28
-FAILSAFE_ADDR = 32
+HYP_EVENTS = 2
+SHARED_INFO = 4
+EVENT_SEL = 8
+EVENT_ADDR = 12
+FAILSAFE_BUFFER = 16
+FAILSAFE_SEL = 32
+FAILSAFE_ADDR = 36
/* Offsets in shared_info_t */
EVENTS = 0
@@ -131,77 +130,42 @@ CF_MASK = 0x00000001
IF_MASK = 0x00000200
NT_MASK = 0x00004000
-#define SAVE_ALL_NOSTI \
- cld; \
- pushl %gs; \
- pushl %fs; \
- pushl %es; \
- pushl %ds; \
- pushl %eax; \
- pushl %ebp; \
- pushl %edi; \
- pushl %esi; \
- pushl %edx; \
- pushl %ecx; \
- pushl %ebx; \
- movl $(__HYPERVISOR_DS),%edx; \
- movl %edx,%ds; \
- movl %edx,%es;
+
+
+#define SAVE_ALL_NOSEGREGS \
+ cld; \
+ pushl %gs; \
+ pushl %fs; \
+ pushl %es; \
+ pushl %ds; \
+ pushl %eax; \
+ pushl %ebp; \
+ pushl %edi; \
+ pushl %esi; \
+ pushl %edx; \
+ pushl %ecx; \
+ pushl %ebx; \
+
+#define SAVE_ALL_NOSTI \
+ SAVE_ALL_NOSEGREGS \
+ movl $(__HYPERVISOR_DS),%edx; \
+ movl %edx,%ds; \
+ movl %edx,%es; \
+ movl %edx,%fs; \
+ movl %edx,%gs;
#define SAVE_ALL \
- SAVE_ALL_NOSTI \
- sti;
-
-#define RESTORE_ALL \
- popl %ebx; \
- popl %ecx; \
- popl %edx; \
- popl %esi; \
- popl %edi; \
- popl %ebp; \
- popl %eax; \
-1: popl %ds; \
-2: popl %es; \
-3: popl %fs; \
-4: popl %gs; \
- addl $4,%esp; \
-5: iret; \
-.section .fixup,"ax"; \
-10: subl $4,%esp; \
- pushl %gs; \
-9: pushl %fs; \
-8: pushl %es; \
-7: pushl %ds; \
-6: pushl %eax; \
- pushl %ebp; \
- pushl %edi; \
- pushl %esi; \
- pushl %edx; \
- pushl %ecx; \
- pushl %ebx; \
- pushl %ss; \
- popl %ds; \
- pushl %ss; \
- popl %es; \
- jmp failsafe_callback; \
-.previous; \
-.section __ex_table,"a"; \
- .align 4; \
- .long 1b,6b; \
- .long 2b,7b; \
- .long 3b,8b; \
- .long 4b,9b; \
- .long 5b,10b; \
-.previous
+ SAVE_ALL_NOSTI \
+ sti;
#define GET_CURRENT(reg) \
- movl $4096-4, reg; \
+ movl $4096-4, reg; \
orl %esp, reg; \
movl (reg),reg \
ENTRY(continue_nonidle_task)
- GET_CURRENT(%ebx)
- jmp test_all_events
+ GET_CURRENT(%ebx)
+ jmp test_all_events
ALIGN
/*
@@ -273,8 +237,111 @@ multicall_fixup1:
.previous
ALIGN
-restore_all:
- RESTORE_ALL
+restore_all_guest:
+ # First, may need to restore %ds if clobbered by create_bounce_frame
+ pushl %ss
+ popl %ds
+ # Second, create a failsafe copy of DS,ES,FS,GS in case any are bad
+ leal DS(%esp),%esi
+ leal FAILSAFE_BUFFER(%ebx),%edi
+ movsl
+ movsl
+ movsl
+ movsl
+ # Finally, restore guest registers -- faults will cause failsafe
+ popl %ebx
+ popl %ecx
+ popl %edx
+ popl %esi
+ popl %edi
+ popl %ebp
+ popl %eax
+1: popl %ds
+2: popl %es
+3: popl %fs
+4: popl %gs
+ addl $4,%esp
+5: iret
+.section .fixup,"ax"
+10: subl $4,%esp
+ pushl %gs
+9: pushl %fs
+8: pushl %es
+7: pushl %ds
+6: pushl %eax
+ pushl %ebp
+ pushl %edi
+ pushl %esi
+ pushl %edx
+ pushl %ecx
+ pushl %ebx
+ pushl %ss
+ popl %ds
+ pushl %ss
+ popl %es
+ jmp failsafe_callback
+.previous
+.section __ex_table,"a"
+ .align 4
+ .long 1b,6b
+ .long 2b,7b
+ .long 3b,8b
+ .long 4b,9b
+ .long 5b,10b
+.previous
+
+/* No special register assumptions */
+failsafe_callback:
+ GET_CURRENT(%ebx)
+ movzwl PROCESSOR(%ebx),%eax
+ shl $4,%eax
+ lea guest_trap_bounce(%eax),%edx
+ movl FAILSAFE_ADDR(%ebx),%eax
+ movl %eax,GTB_EIP(%edx)
+ movl FAILSAFE_SEL(%ebx),%eax
+ movw %ax,GTB_CS(%edx)
+ call create_bounce_frame
+ subl $16,%esi # add DS/ES/FS/GS to failsafe stack frame
+ leal FAILSAFE_BUFFER(%ebx),%ebp
+ movl 0(%ebp),%eax # DS
+FAULT1: movl %eax,(%esi)
+ movl 4(%ebp),%eax # ES
+FAULT2: movl %eax,4(%esi)
+ movl 8(%ebp),%eax # FS
+FAULT3: movl %eax,8(%esi)
+ movl 12(%ebp),%eax # GS
+FAULT4: movl %eax,12(%esi)
+ movl %esi,OLDESP(%esp)
+ popl %ebx
+ popl %ecx
+ popl %edx
+ popl %esi
+ popl %edi
+ popl %ebp
+ popl %eax
+ addl $20,%esp # skip DS/ES/FS/GS/ORIG_EAX
+FAULT5: iret
+
+
+ ALIGN
+# Simple restore -- we should never fault as we will only interrupt ring 0
+# when sane values have been placed in all registers. The only exception is
+# NMI, which may interrupt before good values have been placed in DS-GS.
+# The NMI return code deals with this problem itself.
+restore_all_xen:
+ popl %ebx
+ popl %ecx
+ popl %edx
+ popl %esi
+ popl %edi
+ popl %ebp
+ popl %eax
+ popl %ds
+ popl %es
+ popl %fs
+ popl %gs
+ addl $4,%esp
+ iret
ALIGN
ENTRY(hypervisor_call)
@@ -292,26 +359,26 @@ test_all_events:
notl %ecx
cli # tests must not race interrupts
/*test_softirqs:*/
- mov PROCESSOR(%ebx),%eax
+ movzwl PROCESSOR(%ebx),%eax
shl $6,%eax # sizeof(irq_cpustat) == 64
test %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
jnz process_softirqs
/*test_hyp_events:*/
- test %ecx, HYP_EVENTS(%ebx)
+ testw %cx, HYP_EVENTS(%ebx)
jnz process_hyp_events
/*test_guest_events:*/
movl SHARED_INFO(%ebx),%eax
shl $31,%ecx # %ecx = EVENTS_MASTER_ENABLE_MASK
test %ecx,EVENTS_MASK(%eax)
- jz restore_all # only notify if master switch enabled
+ jz restore_all_guest # only notify if master switch enabled
movl EVENTS(%eax),%ecx
andl EVENTS_MASK(%eax),%ecx
- jz restore_all # skip if no events to deliver
+ jz restore_all_guest # skip if no events to deliver
notl %ecx
btrl $31,%ecx # NB. We clear all events that are
andl %ecx,EVENTS_MASK(%eax) # being delivered + master enable.
/*process_guest_events:*/
- mov PROCESSOR(%ebx),%edx
+ movzwl PROCESSOR(%ebx),%edx
shl $4,%edx # sizeof(guest_trap_bounce) == 16
lea guest_trap_bounce(%edx),%edx
movl EVENT_ADDR(%ebx),%eax
@@ -319,7 +386,7 @@ test_all_events:
movl EVENT_SEL(%ebx),%eax
movw %ax,GTB_CS(%edx)
call create_bounce_frame
- jmp restore_all
+ jmp restore_all_guest
ALIGN
process_softirqs:
@@ -332,53 +399,6 @@ process_hyp_events:
sti
call SYMBOL_NAME(do_hyp_events)
jmp test_all_events
-
-/* No special register assumptions */
-failsafe_callback:
- # Check that we are actually returning to ring != 0 because
- # we may fault when returning to another ring 0 activation.
- # This can only occur when restoring FS and GS, which can be avoided
- # by zeroing those registers and trying again. The outermost ring 0
- # activation will do a full failsafe callback to the guest OS.
- # Note that the outermost activation certainly has the "bad" selector
- # value saved away, since interrupts are always disabled in ring 0
- # until all segment registers have been saved.
- movb CS(%esp),%al
- test $3,%al
- jnz 1f
- xorl %eax,%eax
- movl %eax,FS(%esp)
- movl %eax,GS(%esp)
- jmp restore_all
-1: GET_CURRENT(%ebx)
- mov PROCESSOR(%ebx),%eax
- shl $4,%eax
- lea guest_trap_bounce(%eax),%edx
- movl FAILSAFE_ADDR(%ebx),%eax
- movl %eax,GTB_EIP(%edx)
- movl FAILSAFE_SEL(%ebx),%eax
- movw %ax,GTB_CS(%edx)
- call create_bounce_frame
- subl $16,%esi # add DS/ES/FS/GS to failsafe stack frame
- movl DS(%esp),%eax
-FAULT1: movl %eax,(%esi)
- movl ES(%esp),%eax
-FAULT2: movl %eax,4(%esi)
- movl FS(%esp),%eax
-FAULT3: movl %eax,8(%esi)
- movl GS(%esp),%eax
-FAULT4: movl %eax,12(%esi)
- movl %esi,OLDESP(%esp)
- popl %ebx
- popl %ecx
- popl %edx
- popl %esi
- popl %edi
- popl %ebp
- popl %eax
- addl $20,%esp # skip DS/ES/FS/GS/ORIG_EAX
-FAULT5: iret
-
/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK: */
/* {EIP, CS, EFLAGS, [ESP, SS]} */
@@ -389,7 +409,7 @@ create_bounce_frame:
test $2,%cl
jz 1f /* jump if returning to an existing ring-1 activation */
/* obtain ss/esp from TSS -- no current ring-1 activations */
- movl PROCESSOR(%ebx),%eax
+ movzwl PROCESSOR(%ebx),%eax
shll $8,%eax /* multiply by 256 */
addl $init_tss + 12,%eax
movl (%eax),%esi /* tss->esp1 */
@@ -459,7 +479,7 @@ kill_domain_fixup3:
ALIGN
process_guest_exception_and_events:
- mov PROCESSOR(%ebx),%eax
+ movzwl PROCESSOR(%ebx),%eax
shl $4,%eax
lea guest_trap_bounce(%eax),%edx
testb $~0,GTB_FLAGS(%edx)
@@ -488,7 +508,7 @@ ENTRY(ret_from_intr)
movb CS(%esp),%al
testb $3,%al # return to non-supervisor?
jne test_all_events
- jmp restore_all
+ jmp restore_all_xen
ENTRY(divide_error)
pushl $0 # no error code
@@ -524,7 +544,7 @@ error_code:
addl $8,%esp
movb CS(%esp),%al
testb $3,%al
- je restore_all
+ je restore_all_xen
jmp process_guest_exception_and_events
ENTRY(coprocessor_error)
@@ -547,16 +567,6 @@ ENTRY(debug)
pushl $ SYMBOL_NAME(do_debug)
jmp error_code
-ENTRY(nmi)
- pushl %eax
- SAVE_ALL_NOSTI
- movl %esp,%edx
- pushl $0
- pushl %edx
- call SYMBOL_NAME(do_nmi)
- addl $8,%esp
- jmp restore_all
-
ENTRY(int3)
pushl $0
pushl $ SYMBOL_NAME(do_int3)
@@ -616,6 +626,82 @@ ENTRY(spurious_interrupt_bug)
pushl $ SYMBOL_NAME(do_spurious_interrupt_bug)
jmp error_code
+ENTRY(nmi)
+ # Save state but do not trash the segment registers!
+ # We may otherwise be unable to reload them or copy them to ring 1.
+ pushl %eax
+ SAVE_ALL_NOSEGREGS
+
+ # Check for hardware problems. These are always fatal so we can
+ # reload DS and ES when handling them.
+ inb $0x61,%al
+ testb $0x80,%al
+ jne nmi_parity_err
+ testb $0x40,%al
+ jne nmi_io_err
+ movl %eax,%ebx
+
+ # Okay, it's almost a normal NMI tick. We can only process it if:
+ # 1. We're the outermost Xen activation (in which case we have
+ # the selectors safely saved on our stack)
+ # 2. DS-GS all contain sane Xen values.
+ # In all other cases we bail without touching DS-GS, as we've
+ # interrupted an enclosing Xen activation in tricky prologue or
+ # epilogue code.
+ movb CS(%esp),%al
+ testb $3,%al
+ jne do_watchdog_tick
+ movl DS(%esp),%eax
+ cmpw $(__HYPERVISOR_DS),%ax
+ jne nmi_badseg
+ movl ES(%esp),%eax
+ cmpw $(__HYPERVISOR_DS),%ax
+ jne nmi_badseg
+ movl FS(%esp),%eax
+ cmpw $(__HYPERVISOR_DS),%ax
+ jne nmi_badseg
+ movl GS(%esp),%eax
+ cmpw $(__HYPERVISOR_DS),%ax
+ jne nmi_badseg
+
+do_watchdog_tick:
+ movl $(__HYPERVISOR_DS),%edx
+ movl %edx,%ds
+ movl %edx,%es
+ movl %esp,%edx
+ pushl %ebx # reason
+ pushl %edx # regs
+ call SYMBOL_NAME(do_nmi)
+ addl $8,%esp
+ movb CS(%esp),%al
+ testb $3,%al
+ je restore_all_xen
+ GET_CURRENT(%ebx)
+ jmp restore_all_guest
+
+nmi_badseg:
+ popl %ebx
+ popl %ecx
+ popl %edx
+ popl %esi
+ popl %edi
+ popl %ebp
+ popl %eax
+ addl $20,%esp
+ iret
+
+nmi_parity_err:
+ movl $(__HYPERVISOR_DS),%edx
+ movl %edx,%ds
+ movl %edx,%es
+ jmp SYMBOL_NAME(mem_parity_error)
+
+nmi_io_err:
+ movl $(__HYPERVISOR_DS),%edx
+ movl %edx,%ds
+ movl %edx,%es
+ jmp SYMBOL_NAME(io_check_error)
+
.data
ENTRY(hypervisor_call_table)
.long SYMBOL_NAME(do_set_trap_table)
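
Read as C-like pseudocode, the reworked entry.S return path does roughly the following. This is an illustrative sketch under stated assumptions: pop_segments_and_iret() and push_failsafe_selectors() are invented stand-ins for the pop/iret sequence and its __ex_table fixups; the field and symbol names come from the diff above.

    /* Sketch of restore_all_guest / failsafe_callback -- pseudocode. */
    void restore_all_guest(struct task_struct *cur, struct pt_regs *regs)
    {
        /* 1. Buffer DS/ES/FS/GS where they survive the frame teardown. */
        memcpy(cur->failsafe_selectors, &regs->xds, 4 * sizeof(unsigned long));

        /* 2. Restore guest registers; a faulting segment pop, or the iret
         *    itself, lands in the fixup, which rebuilds the frame and
         *    falls through to failsafe_callback. */
        if ( pop_segments_and_iret(regs) != 0 )
            failsafe_callback(cur);
    }

    void failsafe_callback(struct task_struct *cur)
    {
        /* Bounce into the guest's registered failsafe handler... */
        guest_trap_bounce[cur->processor].eip = cur->failsafe_address;
        guest_trap_bounce[cur->processor].cs  = cur->failsafe_selector;
        create_bounce_frame();
        /* ...with the buffered selectors pushed below the frame, so the
         * guest sees exactly the values that faulted (FAULT1-FAULT4). */
        push_failsafe_selectors(cur->failsafe_selectors);
    }

The NMI entry, by contrast, deliberately avoids loading the segment registers (SAVE_ALL_NOSEGREGS) and only proceeds to do_nmi once it has established either that it interrupted the guest or that DS-GS already hold __HYPERVISOR_DS; otherwise it returns without touching them, leaving the enclosing Xen activation's prologue or epilogue to complete.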
diff --git a/xen/arch/i386/traps.c b/xen/arch/i386/traps.c
index a255fe9f1f..c645d3e00e 100644
--- a/xen/arch/i386/traps.c
+++ b/xen/arch/i386/traps.c
@@ -185,44 +185,6 @@ void die(const char * str, struct pt_regs * regs, long err)
panic("HYPERVISOR DEATH!!\n");
}
-#define check_selector(_s) \
- ({ int err; \
- __asm__ __volatile__ ( \
- "1: movl %2,%%gs \n" \
- "2: \n" \
- ".section .fixup,\"ax\"\n" \
- "3: incl %0 \n" \
- " jmp 2b \n" \
- ".previous \n" \
- ".section __ex_table,\"a\"\n" \
- ".align 4 \n" \
- ".long 1b,3b \n" \
- ".previous " \
- : "=&r" (err) : "0" (0), \
- "m" (*(unsigned int *)&(_s))); \
- err; })
-
-static inline void check_saved_selectors(struct pt_regs *regs)
-{
- /* Prevent recursion. */
- __asm__ __volatile__ (
- "movl %0,%%fs; movl %0,%%gs"
- : : "r" (0) );
-
- /*
- * NB. We need to check DS and ES as well, since we may have taken
- * an exception after they were restored in
- */
- if ( check_selector(regs->xds) )
- regs->xds = 0;
- if ( check_selector(regs->xes) )
- regs->xes = 0;
- if ( check_selector(regs->xfs) )
- regs->xfs = 0;
- if ( check_selector(regs->xgs) )
- regs->xgs = 0;
-}
-
static inline void do_trap(int trapnr, char *str,
struct pt_regs *regs,
@@ -247,10 +209,10 @@ static inline void do_trap(int trapnr, char *str,
fault_in_hypervisor:
- if ( (fixup = search_exception_table(regs->eip)) != 0 )
+ if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
{
regs->eip = fixup;
- check_saved_selectors(regs);
+ regs->xds = regs->xes = regs->xfs = regs->xgs = __HYPERVISOR_DS;
return;
}
@@ -331,13 +293,13 @@ asmlinkage void do_page_fault(struct pt_regs *regs, long error_code)
__asm__ __volatile__ ("movl %%cr2,%0" : "=r" (addr) : );
+ if ( unlikely(!(regs->xcs & 3)) )
+ goto fault_in_hypervisor;
+
if ( unlikely(addr > PAGE_OFFSET) )
goto fault_in_xen_space;
- bounce_fault:
-
- if ( unlikely(!(regs->xcs & 3)) )
- goto fault_in_hypervisor;
+ propagate_fault:
ti = p->thread.traps + 14;
gtb->flags = GTBF_TRAP_CR2; /* page fault pushes %cr2 */
@@ -359,7 +321,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, long error_code)
if ( (addr < LDT_VIRT_START) ||
(addr >= (LDT_VIRT_START + (p->mm.ldt_ents*LDT_ENTRY_SIZE))) )
- goto bounce_fault;
+ goto propagate_fault;
off = addr - LDT_VIRT_START;
addr = p->mm.ldt_base + off;
@@ -368,19 +330,19 @@ asmlinkage void do_page_fault(struct pt_regs *regs, long error_code)
__get_user(l1e, (unsigned long *)(linear_pg_table+(addr>>PAGE_SHIFT)));
if ( !(l1e & _PAGE_PRESENT) )
- goto unlock_and_bounce_fault;
+ goto unlock_and_propagate_fault;
page = frame_table + (l1e >> PAGE_SHIFT);
if ( (page->flags & PG_type_mask) != PGT_ldt_page )
{
if ( page->type_count != 0 )
- goto unlock_and_bounce_fault;
+ goto unlock_and_propagate_fault;
/* Check all potential LDT entries in the page. */
ldt_page = (unsigned long *)(addr & PAGE_MASK);
for ( i = 0; i < 512; i++ )
if ( !check_descriptor(ldt_page[i*2], ldt_page[i*2+1]) )
- goto unlock_and_bounce_fault;
+ goto unlock_and_propagate_fault;
if ( page->flags & PG_need_flush )
{
@@ -403,18 +365,18 @@ asmlinkage void do_page_fault(struct pt_regs *regs, long error_code)
return;
- unlock_and_bounce_fault:
+ unlock_and_propagate_fault:
spin_unlock(&p->page_lock);
- goto bounce_fault;
+ goto propagate_fault;
fault_in_hypervisor:
- if ( (fixup = search_exception_table(regs->eip)) != 0 )
+ if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
{
regs->eip = fixup;
- check_saved_selectors(regs);
+ regs->xds = regs->xes = regs->xfs = regs->xgs = __HYPERVISOR_DS;
return;
}
@@ -445,8 +407,8 @@ asmlinkage void do_general_protection(struct pt_regs *regs, long error_code)
trap_info_t *ti;
unsigned long fixup;
- /* Bad shit if error in ring 0, or result of an interrupt. */
- if (!(regs->xcs & 3) || (error_code & 1))
+ /* Badness if error in ring 0, or result of an interrupt. */
+ if ( !(regs->xcs & 3) || (error_code & 1) )
goto gp_in_kernel;
/*
@@ -494,40 +456,38 @@ asmlinkage void do_general_protection(struct pt_regs *regs, long error_code)
gp_in_kernel:
- if ( (fixup = search_exception_table(regs->eip)) != 0 )
+ if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
{
regs->eip = fixup;
- check_saved_selectors(regs);
+ regs->xds = regs->xes = regs->xfs = regs->xgs = __HYPERVISOR_DS;
return;
}
die("general protection fault", regs, error_code);
}
-static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
+asmlinkage void mem_parity_error(unsigned char reason, struct pt_regs * regs)
{
- printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
+ printk("NMI received. Dazed and confused, but trying to continue\n");
printk("You probably have a hardware problem with your RAM chips\n");
/* Clear and disable the memory parity error line. */
reason = (reason & 0xf) | 4;
outb(reason, 0x61);
+
+ show_registers(regs);
+ panic("PARITY ERROR");
}
-static void io_check_error(unsigned char reason, struct pt_regs * regs)
+asmlinkage void io_check_error(unsigned char reason, struct pt_regs * regs)
{
- unsigned long i;
-
printk("NMI: IOCK error (debug interrupt?)\n");
- show_registers(regs);
- /* Re-enable the IOCK line, wait for a few seconds */
reason = (reason & 0xf) | 8;
outb(reason, 0x61);
- i = 2000;
- while (--i) udelay(1000);
- reason &= ~8;
- outb(reason, 0x61);
+
+ show_registers(regs);
+ panic("IOCK ERROR");
}
static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
@@ -537,34 +497,16 @@ static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
printk("Do you have a strange power saving mode enabled?\n");
}
-asmlinkage void do_nmi(struct pt_regs * regs, long error_code)
+asmlinkage void do_nmi(struct pt_regs * regs, unsigned long reason)
{
- unsigned char reason = inb(0x61);
-
++nmi_count(smp_processor_id());
- if (!(reason & 0xc0)) {
#if CONFIG_X86_LOCAL_APIC
- if (nmi_watchdog) {
- nmi_watchdog_tick(regs);
- return;
- }
+ if ( nmi_watchdog )
+ nmi_watchdog_tick(regs);
+ else
#endif
- unknown_nmi_error(reason, regs);
- return;
- }
- if (reason & 0x80)
- mem_parity_error(reason, regs);
- if (reason & 0x40)
- io_check_error(reason, regs);
- /*
- * Reassert NMI in case it became active meanwhile
- * as it's edge-triggered.
- */
- outb(0x8f, 0x70);
- inb(0x71); /* dummy */
- outb(0x0f, 0x70);
- inb(0x71); /* dummy */
+ unknown_nmi_error((unsigned char)(reason&0xff), regs);
}
asmlinkage void math_state_restore(struct pt_regs *regs, long error_code)
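
Two traps.c simplifications fall out of the new entry.S guarantees. First, since every entry path now loads all four data segments with __HYPERVISOR_DS, an in-hypervisor fault fixup no longer needs the check_selector probing: it can simply force the saved selectors back to known-good values. A minimal sketch of the pattern now open-coded in do_trap, do_page_fault and do_general_protection (the helper name is ours; the real code is inlined at each fault site):

    /* Sketch of the shared fixup pattern after this patch. */
    static inline int fixup_hypervisor_fault(struct pt_regs *regs)
    {
        unsigned long fixup = search_exception_table(regs->eip);
        if ( fixup == 0 )
            return 0;  /* caller proceeds to die()/panic() */
        regs->eip = fixup;
        regs->xds = regs->xes = regs->xfs = regs->xgs = __HYPERVISOR_DS;
        return 1;
    }

Second, the NMI reason port is now read in entry.S, so do_nmi receives the reason byte as an argument, and the fatal parity/IOCK cases dump registers and panic rather than attempting to limp onwards.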
diff --git a/xen/include/asm-i386/irq.h b/xen/include/asm-i386/irq.h
index 31435c8753..d88429df4a 100644
--- a/xen/include/asm-i386/irq.h
+++ b/xen/include/asm-i386/irq.h
@@ -115,7 +115,9 @@ extern char _stext, _etext;
"pushl %ebx\n\t" \
"movl $" STR(__HYPERVISOR_DS) ",%edx\n\t" \
"movl %edx,%ds\n\t" \
- "movl %edx,%es\n\t"
+ "movl %edx,%es\n\t" \
+ "movl %edx,%fs\n\t" \
+ "movl %edx,%gs\n\t"
#define IRQ_NAME2(nr) nr##_interrupt(void)
#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
diff --git a/xen/include/xeno/sched.h b/xen/include/xeno/sched.h
index 595eb4c216..ba98b7d1f2 100644
--- a/xen/include/xeno/sched.h
+++ b/xen/include/xeno/sched.h
@@ -74,13 +74,11 @@ struct task_struct
* Their offsets are hardcoded in entry.S
*/
- int processor; /* 00: current processor */
- int state; /* 04: current run state */
- int hyp_events; /* 08: pending intra-Xen events */
- unsigned int domain; /* 12: domain id */
+ unsigned short processor; /* 00: current processor */
+ unsigned short hyp_events; /* 02: pending intra-Xen events */
/* An unsafe pointer into a shared data area. */
- shared_info_t *shared_info; /* 16: shared data area */
+ shared_info_t *shared_info; /* 04: shared data area */
/*
* Return vectors pushed to us by guest OS.
@@ -89,15 +87,25 @@ struct task_struct
* for segment registers %ds, %es, %fs and %gs:
* %ds, %es, %fs, %gs, %eip, %cs, %eflags [, %oldesp, %oldss]
*/
- unsigned long event_selector; /* 20: entry CS */
- unsigned long event_address; /* 24: entry EIP */
- unsigned long failsafe_selector; /* 28: entry CS */
- unsigned long failsafe_address; /* 32: entry EIP */
+ unsigned long event_selector; /* 08: entry CS */
+ unsigned long event_address; /* 12: entry EIP */
+
+ /* Saved DS,ES,FS,GS immediately before return to guest OS. */
+ unsigned long failsafe_selectors[4]; /* 16-32 */
+
+ /*
+ * END OF FIRST CACHELINE. Stuff above is touched a lot!
+ */
+
+ unsigned long failsafe_selector; /* 32: entry CS */
+ unsigned long failsafe_address; /* 36: entry EIP */
/*
* From here on things can be added and shuffled without special attention
*/
+ unsigned int domain; /* domain id */
+
struct list_head pg_head;
unsigned int tot_pages; /* number of pages currently possesed */
unsigned int max_pages; /* max number of pages that can be possesed */
@@ -105,6 +113,7 @@ struct task_struct
/* scheduling */
struct list_head run_list;
int has_cpu;
+ int state; /* current run state */
s_time_t lastschd; /* time this domain was last scheduled */
s_time_t cpu_time; /* total CPU time received till now */
@@ -134,17 +143,14 @@ struct task_struct
the process can do raw access
to. */
spinlock_t physdev_lock;
- segment_t *segment_list[XEN_MAX_SEGMENTS]; /* xvd */
+ segment_t *segment_list[XEN_MAX_SEGMENTS];
/* VM */
struct mm_struct mm;
/* We need this lock to check page types and frob reference counts. */
spinlock_t page_lock;
- mm_segment_t addr_limit; /* thread address space:
- 0-0xBFFFFFFF for user-thead
- 0-0xFFFFFFFF for kernel-thread
- */
+ mm_segment_t addr_limit;
char name[MAX_DOMAIN_NAME];
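
Taken together, the sched.h hunks give task_struct a hot first cacheline whose layout matches the constants at the top of entry.S. A reconstructed view showing only the fields named in this patch:

    struct task_struct {
        unsigned short processor;             /*  0: PROCESSOR       */
        unsigned short hyp_events;            /*  2: HYP_EVENTS      */
        shared_info_t *shared_info;           /*  4: SHARED_INFO     */
        unsigned long  event_selector;        /*  8: EVENT_SEL       */
        unsigned long  event_address;         /* 12: EVENT_ADDR      */
        unsigned long  failsafe_selectors[4]; /* 16: FAILSAFE_BUFFER */
        /* ---- end of first cacheline (32 bytes) ---- */
        unsigned long  failsafe_selector;     /* 32: FAILSAFE_SEL    */
        unsigned long  failsafe_address;      /* 36: FAILSAFE_ADDR   */
        /* state and domain move below this point, where fields can be
         * added and shuffled without touching entry.S */
    };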