author     kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>  2004-12-14 10:51:30 +0000
committer  kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>  2004-12-14 10:51:30 +0000
commit     3876b13db7efbf2de19cbdd05a7083f9fc907578 (patch)
tree       181aaf1f2959b05604603db60536c6d842640956
parent     2607eac3a51a032af447ad175751dd35379c37ad (diff)
bitkeeper revision 1.1159.187.64 (41bec5b2hEqxDdXD0_RNHHR7utG6gA)
schedule_tail is now an indirect function call in x86 architecture.
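For readers skimming the diff below: the patch replaces the compile-time choice between continue_cpu_idle_loop and continue_nonidle_task with a per-domain function pointer that is set once at domain creation. The following is a minimal standalone sketch of that pattern, not the actual hypervisor code; the names thread_struct, schedule_tail, continue_idle_task, continue_nonidle_task and IDLE_DOMAIN_ID mirror the patch, while the stub domain type, the printf bodies and main() are invented purely for illustration.

#include <stdio.h>

/* Simplified stand-ins for the real Xen structures. */
struct domain;

struct thread_struct {
    /* New in this patch: the tail to run after a context switch. */
    void (*schedule_tail)(struct domain *);
};

struct domain {
    int id;                       /* IDLE_DOMAIN_ID marks an idle task */
    struct thread_struct thread;
};

#define IDLE_DOMAIN_ID  0x7fff    /* illustrative value only */

/* The two possible tails; in Xen each ends with reset_stack_and_jump()
 * and never returns -- here they just print what they would do. */
static void continue_idle_task(struct domain *d)
{
    printf("domain %d: resume idle loop\n", d->id);
}

static void continue_nonidle_task(struct domain *d)
{
    printf("domain %d: return to guest via ret_from_intr\n", d->id);
}

/* arch_do_createdomain() now picks the tail once, at creation time. */
static void arch_do_createdomain(struct domain *d)
{
    d->thread.schedule_tail = (d->id == IDLE_DOMAIN_ID)
                              ? continue_idle_task
                              : continue_nonidle_task;
}

/* schedule_tail(_d) becomes a plain indirect call through the pointer. */
#define schedule_tail(_d) ((_d)->thread.schedule_tail)(_d)

int main(void)
{
    struct domain idle = { .id = IDLE_DOMAIN_ID }, guest = { .id = 1 };
    arch_do_createdomain(&idle);
    arch_do_createdomain(&guest);
    schedule_tail(&idle);
    schedule_tail(&guest);
    return 0;
}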
-rw-r--r--  .rootkeys                                2
-rw-r--r--  xen/arch/x86/boot/x86_64.S               3
-rw-r--r--  xen/arch/x86/domain.c                   84
-rw-r--r--  xen/arch/x86/setup.c                     2
-rw-r--r--  xen/arch/x86/smpboot.c                   4
-rw-r--r--  xen/arch/x86/x86_32/entry.S              4
-rw-r--r--  xen/common/domain.c                     27
-rw-r--r--  xen/include/asm-x86/processor.h          8
-rw-r--r--  xen/include/asm-x86/system.h             7
-rw-r--r--  xen/include/asm-x86/x86_32/current.h    11
-rw-r--r--  xen/include/asm-x86/x86_64/current.h    11
-rw-r--r--  xen/include/xen/domain.h (renamed from xen/include/asm-x86/domain.h)  12
-rw-r--r--  xen/include/xen/sched.h                  5
13 files changed, 79 insertions(+), 101 deletions(-)
diff --git a/.rootkeys b/.rootkeys
index 87bb718abb..b8615ff886 100644
--- a/.rootkeys
+++ b/.rootkeys
@@ -792,7 +792,6 @@
3ddb79c3r9-31dIsewPV3P3i8HALsQ xen/include/asm-x86/delay.h
3ddb79c34BFiXjBJ_cCKB0aCsV1IDw xen/include/asm-x86/desc.h
40715b2dTokMLYGSuD58BnxOqyWVew xen/include/asm-x86/div64.h
-40f2b4a2hC3HtChu-ArD8LyojxWMjg xen/include/asm-x86/domain.h
3e20b82fl1jmQiKdLy7fxMcutfpjWA xen/include/asm-x86/domain_page.h
3ddb79c3NU8Zy40OTrq3D-i30Y3t4A xen/include/asm-x86/fixmap.h
3e2d29944GI24gf7vOP_7x8EyuqxeA xen/include/asm-x86/flushtlb.h
@@ -855,6 +854,7 @@
3eb165e0eawr3R-p2ZQtSdLWtLRN_A xen/include/xen/console.h
3ddb79c1V44RD26YqCUm-kqIupM37A xen/include/xen/ctype.h
3ddb79c05DdHQ0UxX_jKsXdR4QlMCA xen/include/xen/delay.h
+40f2b4a2hC3HtChu-ArD8LyojxWMjg xen/include/xen/domain.h
3ddb79c2O729EttZTYu1c8LcsUO_GQ xen/include/xen/elf.h
3ddb79c0HIghfBF8zFUdmXhOU8i6hA xen/include/xen/errno.h
3ddb79c1W0lQca8gRV7sN6j3iY4Luw xen/include/xen/event.h
diff --git a/xen/arch/x86/boot/x86_64.S b/xen/arch/x86/boot/x86_64.S
index 98289c7ab6..b16cde8762 100644
--- a/xen/arch/x86/boot/x86_64.S
+++ b/xen/arch/x86/boot/x86_64.S
@@ -270,9 +270,8 @@ copy_user_generic:
memcmp:
idt_tables:
new_thread:
-.globl switch_to, continue_nonidle_task, __get_user_1, paging_init, trap_init
+.globl switch_to, __get_user_1, paging_init, trap_init
switch_to:
-continue_nonidle_task:
__get_user_1:
paging_init:
trap_init:
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 4a01d86351..1881afa3fd 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -48,35 +48,16 @@
#define round_pgup(_p) (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
#define round_pgdown(_p) ((_p)&PAGE_MASK)
-int hlt_counter;
-
-void disable_hlt(void)
-{
- hlt_counter++;
-}
-
-void enable_hlt(void)
-{
- hlt_counter--;
-}
-
-/*
- * We use this if we don't have any better
- * idle routine..
- */
static void default_idle(void)
{
- if ( hlt_counter == 0 )
- {
- __cli();
- if ( !softirq_pending(smp_processor_id()) )
- safe_halt();
- else
- __sti();
- }
+ __cli();
+ if ( !softirq_pending(smp_processor_id()) )
+ safe_halt();
+ else
+ __sti();
}
-void continue_cpu_idle_loop(void)
+static void idle_loop(void)
{
int cpu = smp_processor_id();
for ( ; ; )
@@ -102,7 +83,7 @@ void startup_cpu_idle_loop(void)
smp_mb();
init_idle();
- continue_cpu_idle_loop();
+ idle_loop();
}
static long no_idt[2];
@@ -216,20 +197,43 @@ void free_perdomain_pt(struct domain *d)
free_xenheap_page((unsigned long)d->mm.perdomain_pt);
}
+static void continue_idle_task(struct domain *d)
+{
+ reset_stack_and_jump(idle_loop);
+}
+
+static void continue_nonidle_task(struct domain *d)
+{
+ reset_stack_and_jump(ret_from_intr);
+}
+
void arch_do_createdomain(struct domain *d)
{
- d->shared_info = (void *)alloc_xenheap_page();
- memset(d->shared_info, 0, PAGE_SIZE);
- d->shared_info->arch.mfn_to_pfn_start =
- virt_to_phys(&machine_to_phys_mapping[0])>>PAGE_SHIFT;
- SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
- machine_to_phys_mapping[virt_to_phys(d->shared_info) >>
- PAGE_SHIFT] = INVALID_P2M_ENTRY;
-
- d->mm.perdomain_pt = (l1_pgentry_t *)alloc_xenheap_page();
- memset(d->mm.perdomain_pt, 0, PAGE_SIZE);
- machine_to_phys_mapping[virt_to_phys(d->mm.perdomain_pt) >>
- PAGE_SHIFT] = INVALID_P2M_ENTRY;
+#ifdef ARCH_HAS_FAST_TRAP
+ SET_DEFAULT_FAST_TRAP(&d->thread);
+#endif
+
+ if ( d->id == IDLE_DOMAIN_ID )
+ {
+ d->thread.schedule_tail = continue_idle_task;
+ }
+ else
+ {
+ d->thread.schedule_tail = continue_nonidle_task;
+
+ d->shared_info = (void *)alloc_xenheap_page();
+ memset(d->shared_info, 0, PAGE_SIZE);
+ d->shared_info->arch.mfn_to_pfn_start =
+ virt_to_phys(&machine_to_phys_mapping[0])>>PAGE_SHIFT;
+ SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
+ machine_to_phys_mapping[virt_to_phys(d->shared_info) >>
+ PAGE_SHIFT] = INVALID_P2M_ENTRY;
+
+ d->mm.perdomain_pt = (l1_pgentry_t *)alloc_xenheap_page();
+ memset(d->mm.perdomain_pt, 0, PAGE_SIZE);
+ machine_to_phys_mapping[virt_to_phys(d->mm.perdomain_pt) >>
+ PAGE_SHIFT] = INVALID_P2M_ENTRY;
+ }
}
int arch_final_setup_guestos(struct domain *d, full_execution_context_t *c)
@@ -263,7 +267,6 @@ int arch_final_setup_guestos(struct domain *d, full_execution_context_t *c)
sizeof(d->thread.traps));
#ifdef ARCH_HAS_FAST_TRAP
- SET_DEFAULT_FAST_TRAP(&d->thread);
if ( (rc = (int)set_fast_trap(d, c->fast_trap_idx)) != 0 )
return rc;
#endif
@@ -328,9 +331,6 @@ void new_thread(struct domain *d,
__save_flags(ec->eflags);
ec->eflags |= X86_EFLAGS_IF;
-
- /* No fast trap at start of day. */
- SET_DEFAULT_FAST_TRAP(&d->thread);
}
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index cf6f7980e1..d2109f4fb8 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -328,6 +328,8 @@ void __init start_of_day(void)
sort_exception_tables();
+ arch_do_createdomain(current);
+
/* Tell the PCI layer not to allocate too close to the RAM area.. */
low_mem_size = ((max_page << PAGE_SHIFT) + 0xfffff) & ~0xfffff;
if ( low_mem_size > pci_mem_start ) pci_mem_start = low_mem_size;
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 05d49a27d0..991b5c577a 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -662,10 +662,6 @@ static void __init do_boot_cpu (int apicid)
map_cpu_to_boot_apicid(cpu, apicid);
-#if defined(__i386__)
- SET_DEFAULT_FAST_TRAP(&idle->thread);
-#endif
-
idle_task[cpu] = idle;
/* start_eip had better be page-aligned! */
diff --git a/xen/arch/x86/x86_32/entry.S b/xen/arch/x86/x86_32/entry.S
index c85d84c649..a3ed040538 100644
--- a/xen/arch/x86/x86_32/entry.S
+++ b/xen/arch/x86/x86_32/entry.S
@@ -65,10 +65,6 @@
andl $~3,reg; \
movl (reg),reg;
-ENTRY(continue_nonidle_task)
- GET_CURRENT(%ebx)
- jmp test_all_events
-
ALIGN
restore_all_guest:
testb $TF_failsafe_return,DOMAIN_thread_flags(%ebx)
diff --git a/xen/common/domain.c b/xen/common/domain.c
index d4a97d307a..ed18817cec 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -47,20 +47,21 @@ struct domain *do_createdomain(domid_t dom_id, unsigned int cpu)
/* Per-domain PCI-device list. */
spin_lock_init(&d->pcidev_lock);
INIT_LIST_HEAD(&d->pcidev_list);
+
+ if ( (d->id != IDLE_DOMAIN_ID) &&
+ ((init_event_channels(d) != 0) || (grant_table_create(d) != 0)) )
+ {
+ destroy_event_channels(d);
+ free_domain_struct(d);
+ return NULL;
+ }
+
+ arch_do_createdomain(d);
+
+ sched_add_domain(d);
if ( d->id != IDLE_DOMAIN_ID )
{
- if ( (init_event_channels(d) != 0) || (grant_table_create(d) != 0) )
- {
- destroy_event_channels(d);
- free_domain_struct(d);
- return NULL;
- }
-
- arch_do_createdomain(d);
-
- sched_add_domain(d);
-
write_lock(&domlist_lock);
pd = &domain_list; /* NB. domain_list maintained in order of dom_id. */
for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_list )
@@ -72,10 +73,6 @@ struct domain *do_createdomain(domid_t dom_id, unsigned int cpu)
domain_hash[DOMAIN_HASH(dom_id)] = d;
write_unlock(&domlist_lock);
}
- else
- {
- sched_add_domain(d);
- }
return d;
}
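Read without the diff markers, the net effect of this hunk is that arch_do_createdomain() and sched_add_domain() now run for every domain, idle ones included; only event-channel/grant-table setup and the domain_list/domain_hash insertion stay gated on d->id != IDLE_DOMAIN_ID. (The boot CPU's idle domain is presumably created outside this path, which would explain the explicit arch_do_createdomain(current) call added to start_of_day() in setup.c above.) A condensed sketch of the resulting flow, with allocation and list handling elided:

struct domain *do_createdomain(domid_t dom_id, unsigned int cpu)
{
    struct domain *d;

    /* ... allocation and generic initialisation elided; see the hunk above ... */

    /* Event channels and grant tables remain non-idle only. */
    if ( (d->id != IDLE_DOMAIN_ID) &&
         ((init_event_channels(d) != 0) || (grant_table_create(d) != 0)) )
    {
        destroy_event_channels(d);
        free_domain_struct(d);
        return NULL;
    }

    /* Now unconditional: every domain, idle included, gets its arch
     * state -- and thus its schedule_tail pointer -- set up here. */
    arch_do_createdomain(d);
    sched_add_domain(d);

    if ( d->id != IDLE_DOMAIN_ID )
    {
        /* ... link into domain_list and domain_hash ... */
    }
    return d;
}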
diff --git a/xen/include/asm-x86/processor.h b/xen/include/asm-x86/processor.h
index c5ccb59fa3..6d3cf3036a 100644
--- a/xen/include/asm-x86/processor.h
+++ b/xen/include/asm-x86/processor.h
@@ -399,6 +399,8 @@ struct thread_struct {
/* general user-visible register state */
execution_context_t user_ctxt;
+ void (*schedule_tail) (struct domain *);
+
/*
* Return vectors pushed to us by guest OS.
* The stack frame for events is exactly that of an x86 hardware interrupt.
@@ -456,14 +458,10 @@ extern struct desc_struct *idt_tables[];
long set_fast_trap(struct domain *p, int idx);
-#define INIT_THREAD { fast_trap_idx: 0x20 }
-
-#elif defined(__x86_64__)
+#endif
#define INIT_THREAD { 0 }
-#endif /* __x86_64__ */
-
extern int gpf_emulate_4gb(struct xen_regs *regs);
struct mm_struct {
diff --git a/xen/include/asm-x86/system.h b/xen/include/asm-x86/system.h
index e2c961e360..18854edc3f 100644
--- a/xen/include/asm-x86/system.h
+++ b/xen/include/asm-x86/system.h
@@ -271,13 +271,6 @@ static inline int local_irq_is_enabled(void)
return !!(flags & (1<<9)); /* EFLAGS_IF */
}
-/*
- * disable hlt during certain critical i/o operations
- */
-#define HAVE_DISABLE_HLT
-void disable_hlt(void);
-void enable_hlt(void);
-
#define BROKEN_ACPI_Sx 0x0001
#define BROKEN_INIT_AFTER_S1 0x0002
diff --git a/xen/include/asm-x86/x86_32/current.h b/xen/include/asm-x86/x86_32/current.h
index 3450cd02cc..2c76a133aa 100644
--- a/xen/include/asm-x86/x86_32/current.h
+++ b/xen/include/asm-x86/x86_32/current.h
@@ -45,14 +45,11 @@ static inline unsigned long get_stack_top(void)
return p;
}
-#define schedule_tail(_p) \
+#define reset_stack_and_jump(__fn) \
__asm__ __volatile__ ( \
- "andl %%esp,%0; addl %2,%0; movl %0,%%esp; jmp *%1" \
- : : "r" (~(STACK_SIZE-1)), \
- "r" (unlikely(is_idle_task((_p))) ? \
- continue_cpu_idle_loop : \
- continue_nonidle_task), \
- "i" (STACK_SIZE-STACK_RESERVED) )
+ "movl %0,%%esp; jmp "STR(__fn) \
+ : : "r" (get_execution_context()) )
+#define schedule_tail(_d) ((_d)->thread.schedule_tail)(_d)
#endif /* _X86_CURRENT_H */
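The old schedule_tail macro chose between continue_cpu_idle_loop and continue_nonidle_task inside the asm itself; after this change schedule_tail(_d) is just the indirect call ((_d)->thread.schedule_tail)(_d), and each tail function ends with reset_stack_and_jump(). Assuming STR() stringifies its argument, continue_idle_task() from xen/arch/x86/domain.c above would roughly expand, on x86_32, to the following. This is an illustration of the macro expansion only, not code from the patch, and it relies on Xen-internal symbols (get_execution_context, idle_loop):

static void continue_idle_task(struct domain *d)
{
    /* Reload %esp from the saved execution context on this CPU's stack
     * and jump straight to idle_loop; control never returns here. */
    __asm__ __volatile__ (
        "movl %0,%%esp; jmp idle_loop"
        : : "r" (get_execution_context()) );
}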
diff --git a/xen/include/asm-x86/x86_64/current.h b/xen/include/asm-x86/x86_64/current.h
index 2f3a6e5665..2ee550643b 100644
--- a/xen/include/asm-x86/x86_64/current.h
+++ b/xen/include/asm-x86/x86_64/current.h
@@ -40,15 +40,12 @@ static inline unsigned long get_stack_top(void)
return p;
}
-#define schedule_tail(_p) \
+#define reset_stack_and_jump(__fn) \
__asm__ __volatile__ ( \
- "andq %%rsp,%0; addq %2,%0; movq %0,%%rsp; jmp *%1" \
- : : "r" (~(STACK_SIZE-1)), \
- "r" (unlikely(is_idle_task((_p))) ? \
- continue_cpu_idle_loop : \
- continue_nonidle_task), \
- "i" (STACK_SIZE-STACK_RESERVED) )
+ "movq %0,%%rsp; jmp "STR(__fn) \
+ : : "r" (get_execution_context()) )
+#define schedule_tail(_d) ((_d)->thread.schedule_tail)(_d)
#else
diff --git a/xen/include/asm-x86/domain.h b/xen/include/xen/domain.h
index 7dde4efa6d..5ff5ed62ad 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/xen/domain.h
@@ -1,6 +1,12 @@
-#ifndef __ASM_X86_DOMAIN_H__
-#define __ASM_X86_DOMAIN_H__
+#ifndef __XEN_DOMAIN_H__
+#define __XEN_DOMAIN_H__
+
+
+
+/*
+ * Arch-specifics.
+ */
extern void arch_do_createdomain(struct domain *d);
@@ -11,4 +17,4 @@ extern void free_perdomain_pt(struct domain *d);
extern void domain_relinquish_memory(struct domain *d);
-#endif /* __ASM_X86_DOMAIN_H__ */
+#endif /* __XEN_DOMAIN_H__ */
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index c8781f0440..d7e6f0be26 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -214,9 +214,6 @@ void domain_init(void);
int idle_cpu(int cpu); /* Is CPU 'cpu' idle right now? */
void startup_cpu_idle_loop(void);
-void continue_cpu_idle_loop(void);
-
-void continue_nonidle_task(void);
unsigned long hypercall_create_continuation(
unsigned int op, unsigned int nr_args, ...);
@@ -295,6 +292,6 @@ static inline void domain_unpause_by_systemcontroller(struct domain *d)
#define VM_ASSIST(_d,_t) (test_bit((_t), &(_d)->vm_assist))
#include <xen/slab.h>
-#include <asm/domain.h>
+#include <xen/domain.h>
#endif /* __SCHED_H__ */