author     cl349@firebug.cl.cam.ac.uk <cl349@firebug.cl.cam.ac.uk>  2005-06-02 21:25:15 +0000
committer  cl349@firebug.cl.cam.ac.uk <cl349@firebug.cl.cam.ac.uk>  2005-06-02 21:25:15 +0000
commit     b7a0fb87b9fd4ed50006c31e9aaf7a7bdff3d060 (patch)
tree       733cf55a9da86074006d331b8a0b334aa325ea6a
parent     7b9361498f418c0ddca3ee202add13d24da0f0cd (diff)
parent     550e86d463b9eda10d94642aad1a719cf0102362 (diff)
bitkeeper revision 1.1654 (429f793bJpez_x78azX5iKCsmtPMUQ)
Merge firebug.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk into firebug.cl.cam.ac.uk:/local/scratch/cl349/xen-unstable.bk
-rw-r--r--  xen/arch/ia64/asm-offsets.c | 24
-rw-r--r--  xen/arch/ia64/domain.c | 164
-rw-r--r--  xen/arch/ia64/hypercall.c | 38
-rw-r--r--  xen/arch/ia64/idle0_task.c | 2
-rw-r--r--  xen/arch/ia64/irq.c | 2
-rw-r--r--  xen/arch/ia64/mmio.c | 2
-rw-r--r--  xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c | 4
-rw-r--r--  xen/arch/ia64/patch/linux-2.6.7/current.h | 2
-rw-r--r--  xen/arch/ia64/patch/linux-2.6.7/irq_ia64.c | 4
-rw-r--r--  xen/arch/ia64/patch/linux-2.6.7/time.c | 10
-rw-r--r--  xen/arch/ia64/privop.c | 14
-rw-r--r--  xen/arch/ia64/process.c | 96
-rw-r--r--  xen/arch/ia64/regionreg.c | 62
-rw-r--r--  xen/arch/ia64/vmmu.c | 14
-rw-r--r--  xen/arch/ia64/vmx_init.c | 46
-rw-r--r--  xen/arch/ia64/vmx_process.c | 20
-rw-r--r--  xen/arch/ia64/xenirq.c | 4
-rw-r--r--  xen/arch/ia64/xenmisc.c | 22
-rw-r--r--  xen/arch/ia64/xensetup.c | 8
-rw-r--r--  xen/arch/ia64/xentime.c | 14
-rw-r--r--  xen/arch/x86/audit.c | 16
-rw-r--r--  xen/arch/x86/dom0_ops.c | 18
-rw-r--r--  xen/arch/x86/domain.c | 234
-rw-r--r--  xen/arch/x86/domain_build.c | 41
-rw-r--r--  xen/arch/x86/i387.c | 4
-rw-r--r--  xen/arch/x86/idle0_task.c | 2
-rw-r--r--  xen/arch/x86/irq.c | 18
-rw-r--r--  xen/arch/x86/mm.c | 158
-rw-r--r--  xen/arch/x86/setup.c | 4
-rw-r--r--  xen/arch/x86/shadow.c | 244
-rw-r--r--  xen/arch/x86/smpboot.c | 6
-rw-r--r--  xen/arch/x86/time.c | 10
-rw-r--r--  xen/arch/x86/traps.c | 138
-rw-r--r--  xen/arch/x86/vmx.c | 48
-rw-r--r--  xen/arch/x86/vmx_intercept.c | 8
-rw-r--r--  xen/arch/x86/vmx_io.c | 38
-rw-r--r--  xen/arch/x86/vmx_platform.c | 2
-rw-r--r--  xen/arch/x86/vmx_vmcs.c | 14
-rw-r--r--  xen/arch/x86/x86_32/asm-offsets.c | 20
-rw-r--r--  xen/arch/x86/x86_32/entry.S | 28
-rw-r--r--  xen/arch/x86/x86_32/mm.c | 2
-rw-r--r--  xen/arch/x86/x86_32/seg_fixup.c | 6
-rw-r--r--  xen/arch/x86/x86_32/traps.c | 14
-rw-r--r--  xen/arch/x86/x86_64/asm-offsets.c | 16
-rw-r--r--  xen/arch/x86/x86_64/entry.S | 40
-rw-r--r--  xen/arch/x86/x86_64/mm.c | 10
-rw-r--r--  xen/arch/x86/x86_64/traps.c | 2
-rw-r--r--  xen/common/dom0_ops.c | 82
-rw-r--r--  xen/common/domain.c | 120
-rw-r--r--  xen/common/event_channel.c | 66
-rw-r--r--  xen/common/grant_table.c | 4
-rw-r--r--  xen/common/keyhandler.c | 30
-rw-r--r--  xen/common/sched_bvt.c | 139
-rw-r--r--  xen/common/sched_sedf.c | 256
-rw-r--r--  xen/common/schedule.c | 162
-rw-r--r--  xen/drivers/char/console.c | 2
-rw-r--r--  xen/include/asm-ia64/config.h | 4
-rw-r--r--  xen/include/asm-ia64/domain.h | 8
-rw-r--r--  xen/include/asm-ia64/vcpu.h | 4
-rw-r--r--  xen/include/asm-ia64/vmmu.h | 10
-rw-r--r--  xen/include/asm-ia64/vmx.h | 6
-rw-r--r--  xen/include/asm-ia64/vmx_vpd.h | 4
-rw-r--r--  xen/include/asm-x86/current.h | 4
-rw-r--r--  xen/include/asm-x86/debugger.h | 2
-rw-r--r--  xen/include/asm-x86/domain.h | 4
-rw-r--r--  xen/include/asm-x86/i387.h | 12
-rw-r--r--  xen/include/asm-x86/ldt.h | 10
-rw-r--r--  xen/include/asm-x86/mm.h | 6
-rw-r--r--  xen/include/asm-x86/processor.h | 14
-rw-r--r--  xen/include/asm-x86/shadow.h | 132
-rw-r--r--  xen/include/asm-x86/vmx.h | 8
-rw-r--r--  xen/include/asm-x86/vmx_platform.h | 2
-rw-r--r--  xen/include/asm-x86/vmx_virpit.h | 2
-rw-r--r--  xen/include/asm-x86/vmx_vmcs.h | 4
-rw-r--r--  xen/include/public/event_channel.h | 4
-rw-r--r--  xen/include/xen/domain.h | 10
-rw-r--r--  xen/include/xen/event.h | 24
-rw-r--r--  xen/include/xen/irq.h | 4
-rw-r--r--  xen/include/xen/sched-if.h | 16
-rw-r--r--  xen/include/xen/sched.h | 64
-rw-r--r--  xen/include/xen/time.h | 2
-rw-r--r--  xen/include/xen/types.h | 2
82 files changed, 1460 insertions(+), 1456 deletions(-)
diff --git a/xen/arch/ia64/asm-offsets.c b/xen/arch/ia64/asm-offsets.c
index 45990de15e..0fd0757853 100644
--- a/xen/arch/ia64/asm-offsets.c
+++ b/xen/arch/ia64/asm-offsets.c
@@ -13,7 +13,7 @@
#include <asm/tlb.h>
#endif // CONFIG_VTI
-#define task_struct exec_domain
+#define task_struct vcpu
#define DEFINE(sym, val) \
asm volatile("\n->" #sym " %0 " #val : : "i" (val))
@@ -60,8 +60,8 @@ void foo(void)
//DEFINE(IA64_TASK_SIGHAND_OFFSET,offsetof (struct task_struct, sighand));
//DEFINE(IA64_TASK_SIGNAL_OFFSET,offsetof (struct task_struct, signal));
//DEFINE(IA64_TASK_TGID_OFFSET, offsetof (struct task_struct, tgid));
- DEFINE(IA64_TASK_THREAD_KSP_OFFSET, offsetof (struct exec_domain, arch._thread.ksp));
- DEFINE(IA64_TASK_THREAD_ON_USTACK_OFFSET, offsetof (struct exec_domain, arch._thread.on_ustack));
+ DEFINE(IA64_TASK_THREAD_KSP_OFFSET, offsetof (struct vcpu, arch._thread.ksp));
+ DEFINE(IA64_TASK_THREAD_ON_USTACK_OFFSET, offsetof (struct vcpu, arch._thread.on_ustack));
BLANK();
@@ -112,14 +112,14 @@ void foo(void)
DEFINE(IA64_PT_REGS_CR_ISR_OFFSET, offsetof (struct xen_regs, cr_isr));
DEFINE(IA64_PT_REGS_EML_UNAT_OFFSET, offsetof (struct xen_regs, eml_unat));
DEFINE(IA64_PT_REGS_RFI_PFS_OFFSET, offsetof (struct xen_regs, rfi_pfs));
- DEFINE(RFI_IIP_OFFSET, offsetof(struct exec_domain, arch.arch_vmx.rfi_iip));
- DEFINE(RFI_IPSR_OFFSET, offsetof(struct exec_domain, arch.arch_vmx.rfi_ipsr));
- DEFINE(RFI_IFS_OFFSET,offsetof(struct exec_domain ,arch.arch_vmx.rfi_ifs));
- DEFINE(RFI_PFS_OFFSET,offsetof(struct exec_domain ,arch.arch_vmx.rfi_pfs));
- DEFINE(SWITCH_MRR5_OFFSET,offsetof(struct exec_domain ,arch.arch_vmx.mrr5));
- DEFINE(SWITCH_MRR6_OFFSET,offsetof(struct exec_domain ,arch.arch_vmx.mrr6));
- DEFINE(SWITCH_MRR7_OFFSET,offsetof(struct exec_domain ,arch.arch_vmx.mrr7));
- DEFINE(SWITCH_MPTA_OFFSET,offsetof(struct exec_domain ,arch.arch_vmx.mpta));
+ DEFINE(RFI_IIP_OFFSET, offsetof(struct vcpu, arch.arch_vmx.rfi_iip));
+ DEFINE(RFI_IPSR_OFFSET, offsetof(struct vcpu, arch.arch_vmx.rfi_ipsr));
+ DEFINE(RFI_IFS_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.rfi_ifs));
+ DEFINE(RFI_PFS_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.rfi_pfs));
+ DEFINE(SWITCH_MRR5_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.mrr5));
+ DEFINE(SWITCH_MRR6_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.mrr6));
+ DEFINE(SWITCH_MRR7_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.mrr7));
+ DEFINE(SWITCH_MPTA_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.mpta));
#endif //CONFIG_VTI
DEFINE(IA64_PT_REGS_R16_OFFSET, offsetof (struct pt_regs, r16));
DEFINE(IA64_PT_REGS_R17_OFFSET, offsetof (struct pt_regs, r17));
@@ -193,7 +193,7 @@ void foo(void)
BLANK();
#ifdef CONFIG_VTI
- DEFINE(IA64_VPD_BASE_OFFSET, offsetof (struct exec_domain, arch.arch_vmx.vpd));
+ DEFINE(IA64_VPD_BASE_OFFSET, offsetof (struct vcpu, arch.arch_vmx.vpd));
DEFINE(IA64_VPD_CR_VPTA_OFFSET, offsetof (cr_t, pta));
DEFINE(XXX_THASH_SIZE, sizeof (thash_data_t));
diff --git a/xen/arch/ia64/domain.c b/xen/arch/ia64/domain.c
index 772b4bd99d..1905c7f1eb 100644
--- a/xen/arch/ia64/domain.c
+++ b/xen/arch/ia64/domain.c
@@ -154,23 +154,23 @@ void startup_cpu_idle_loop(void)
continue_cpu_idle_loop();
}
-struct exec_domain *arch_alloc_exec_domain_struct(void)
+struct vcpu *arch_alloc_vcpu_struct(void)
{
- /* Per-vp stack is used here. So we need keep exec_domain
+ /* Per-vp stack is used here. So we need keep vcpu
* same page as per-vp stack */
return alloc_xenheap_pages(KERNEL_STACK_SIZE_ORDER);
}
-void arch_free_exec_domain_struct(struct exec_domain *ed)
+void arch_free_vcpu_struct(struct vcpu *v)
{
- free_xenheap_pages(ed, KERNEL_STACK_SIZE_ORDER);
+ free_xenheap_pages(v, KERNEL_STACK_SIZE_ORDER);
}
#ifdef CONFIG_VTI
-void arch_do_createdomain(struct exec_domain *ed)
+void arch_do_createdomain(struct vcpu *v)
{
- struct domain *d = ed->domain;
- struct thread_info *ti = alloc_thread_info(ed);
+ struct domain *d = v->domain;
+ struct thread_info *ti = alloc_thread_info(v);
/* If domain is VMX domain, shared info area is created
* by domain and then domain notifies HV by specific hypercall.
@@ -187,18 +187,18 @@ void arch_do_createdomain(struct exec_domain *ed)
* normal xen convention.
*/
d->shared_info = NULL;
- ed->vcpu_info = (void *)alloc_xenheap_page();
- if (!ed->vcpu_info) {
+ v->vcpu_info = (void *)alloc_xenheap_page();
+ if (!v->vcpu_info) {
printk("ERROR/HALTING: CAN'T ALLOC PAGE\n");
while (1);
}
- memset(ed->vcpu_info, 0, PAGE_SIZE);
+ memset(v->vcpu_info, 0, PAGE_SIZE);
/* Clear thread_info to clear some important fields, like preempt_count */
memset(ti, 0, sizeof(struct thread_info));
/* Allocate per-domain vTLB and vhpt */
- ed->arch.vtlb = init_domain_tlb(ed);
+ v->arch.vtlb = init_domain_tlb(v);
/* Physical->machine page table will be allocated when
* final setup, since we have no the maximum pfn number in
@@ -215,20 +215,20 @@ void arch_do_createdomain(struct exec_domain *ed)
// stay on kernel stack because may get interrupts!
// ia64_ret_from_clone (which b0 gets in new_thread) switches
// to user stack
- ed->arch._thread.on_ustack = 0;
+ v->arch._thread.on_ustack = 0;
}
#else // CONFIG_VTI
-void arch_do_createdomain(struct exec_domain *ed)
+void arch_do_createdomain(struct vcpu *v)
{
- struct domain *d = ed->domain;
+ struct domain *d = v->domain;
d->shared_info = (void *)alloc_xenheap_page();
- ed->vcpu_info = (void *)alloc_xenheap_page();
- if (!ed->vcpu_info) {
+ v->vcpu_info = (void *)alloc_xenheap_page();
+ if (!v->vcpu_info) {
printk("ERROR/HALTING: CAN'T ALLOC PAGE\n");
while (1);
}
- memset(ed->vcpu_info, 0, PAGE_SIZE);
+ memset(v->vcpu_info, 0, PAGE_SIZE);
/* pin mapping */
// FIXME: Does this belong here? Or do only at domain switch time?
#if 0
@@ -246,7 +246,7 @@ void arch_do_createdomain(struct exec_domain *ed)
d->max_pages = (128*1024*1024)/PAGE_SIZE; // 128MB default // FIXME
if ((d->metaphysical_rid = allocate_metaphysical_rid()) == -1UL)
BUG();
- ed->vcpu_info->arch.metaphysical_mode = 1;
+ v->vcpu_info->arch.metaphysical_mode = 1;
#define DOMAIN_RID_BITS_DEFAULT 18
if (!allocate_rid_range(d,DOMAIN_RID_BITS_DEFAULT)) // FIXME
BUG();
@@ -258,22 +258,22 @@ void arch_do_createdomain(struct exec_domain *ed)
// stay on kernel stack because may get interrupts!
// ia64_ret_from_clone (which b0 gets in new_thread) switches
// to user stack
- ed->arch._thread.on_ustack = 0;
+ v->arch._thread.on_ustack = 0;
}
#endif // CONFIG_VTI
-void arch_do_boot_vcpu(struct exec_domain *p)
+void arch_do_boot_vcpu(struct vcpu *v)
{
return;
}
-int arch_set_info_guest(struct exec_domain *p, struct vcpu_guest_context *c)
+int arch_set_info_guest(struct vcpu *v, struct vcpu_guest_context *c)
{
dummy();
return 1;
}
-int arch_final_setup_guest(struct exec_domain *p, struct vcpu_guest_context *c)
+int arch_final_setup_guest(struct vcpu *v, struct vcpu_guest_context *c)
{
dummy();
return 1;
@@ -285,12 +285,12 @@ void domain_relinquish_resources(struct domain *d)
}
#ifdef CONFIG_VTI
-void new_thread(struct exec_domain *ed,
+void new_thread(struct vcpu *v,
unsigned long start_pc,
unsigned long start_stack,
unsigned long start_info)
{
- struct domain *d = ed->domain;
+ struct domain *d = v->domain;
struct switch_stack *sw;
struct xen_regs *regs;
struct ia64_boot_param *bp;
@@ -302,12 +302,12 @@ void new_thread(struct exec_domain *ed,
#ifdef CONFIG_DOMAIN0_CONTIGUOUS
if (d == dom0) start_pc += dom0_start;
#endif
- regs = (struct xen_regs *) ((unsigned long) ed + IA64_STK_OFFSET) - 1;
+ regs = (struct xen_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
sw = (struct switch_stack *) regs - 1;
/* Sanity Clear */
memset(sw, 0, sizeof(struct xen_regs) + sizeof(struct switch_stack));
- if (VMX_DOMAIN(ed)) {
+ if (VMX_DOMAIN(v)) {
/* dt/rt/it:1;i/ic:1, si:1, vm/bn:1, ac:1 */
regs->cr_ipsr = 0x501008826008; /* Need to be expanded as macro */
} else {
@@ -320,42 +320,42 @@ void new_thread(struct exec_domain *ed,
regs->ar_rsc = 0x0;
regs->cr_ifs = 0x0;
regs->ar_fpsr = sw->ar_fpsr = FPSR_DEFAULT;
- sw->ar_bspstore = (unsigned long)ed + IA64_RBS_OFFSET;
- printf("new_thread: ed=%p, regs=%p, sw=%p, new_rbs=%p, IA64_STK_OFFSET=%p, &r8=%p\n",
- ed,regs,sw,sw->ar_bspstore,IA64_STK_OFFSET,&regs->r8);
+ sw->ar_bspstore = (unsigned long)v + IA64_RBS_OFFSET;
+ printf("new_thread: v=%p, regs=%p, sw=%p, new_rbs=%p, IA64_STK_OFFSET=%p, &r8=%p\n",
+ v,regs,sw,sw->ar_bspstore,IA64_STK_OFFSET,&regs->r8);
printf("iip:0x%lx,ipsr:0x%lx\n", regs->cr_iip, regs->cr_ipsr);
sw->b0 = (unsigned long) &ia64_ret_from_clone;
- ed->arch._thread.ksp = (unsigned long) sw - 16;
+ v->arch._thread.ksp = (unsigned long) sw - 16;
printk("new_thread, about to call init_all_rr\n");
- if (VMX_DOMAIN(ed)) {
- vmx_init_all_rr(ed);
+ if (VMX_DOMAIN(v)) {
+ vmx_init_all_rr(v);
} else
- init_all_rr(ed);
+ init_all_rr(v);
// set up boot parameters (and fake firmware)
printk("new_thread, about to call dom_fw_setup\n");
- VMX_VPD(ed,vgr[12]) = dom_fw_setup(d,saved_command_line,256L); //FIXME
+ VMX_VPD(v,vgr[12]) = dom_fw_setup(d,saved_command_line,256L); //FIXME
printk("new_thread, done with dom_fw_setup\n");
- if (VMX_DOMAIN(ed)) {
+ if (VMX_DOMAIN(v)) {
/* Virtual processor context setup */
- VMX_VPD(ed, vpsr) = IA64_PSR_BN;
- VPD_CR(ed, dcr) = 0;
+ VMX_VPD(v, vpsr) = IA64_PSR_BN;
+ VPD_CR(v, dcr) = 0;
} else {
// don't forget to set this!
- ed->vcpu_info->arch.banknum = 1;
+ v->vcpu_info->arch.banknum = 1;
}
}
#else // CONFIG_VTI
// heavily leveraged from linux/arch/ia64/kernel/process.c:copy_thread()
// and linux/arch/ia64/kernel/process.c:kernel_thread()
-void new_thread(struct exec_domain *ed,
+void new_thread(struct vcpu *v,
unsigned long start_pc,
unsigned long start_stack,
unsigned long start_info)
{
- struct domain *d = ed->domain;
+ struct domain *d = v->domain;
struct switch_stack *sw;
struct pt_regs *regs;
unsigned long new_rbs;
@@ -366,10 +366,10 @@ void new_thread(struct exec_domain *ed,
#ifdef CONFIG_DOMAIN0_CONTIGUOUS
if (d == dom0) start_pc += dom0_start;
#endif
- regs = (struct pt_regs *) ((unsigned long) ed + IA64_STK_OFFSET) - 1;
+ regs = (struct pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
sw = (struct switch_stack *) regs - 1;
memset(sw,0,sizeof(struct switch_stack)+sizeof(struct pt_regs));
- new_rbs = (unsigned long) ed + IA64_RBS_OFFSET;
+ new_rbs = (unsigned long) v + IA64_RBS_OFFSET;
regs->cr_ipsr = ia64_getreg(_IA64_REG_PSR)
| IA64_PSR_BITS_TO_SET | IA64_PSR_BN
& ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS);
@@ -389,20 +389,20 @@ void new_thread(struct exec_domain *ed,
sw->caller_unat = 0;
sw->ar_pfs = 0;
sw->ar_bspstore = new_rbs;
- //regs->r13 = (unsigned long) ed;
-printf("new_thread: ed=%p, start_pc=%p, regs=%p, sw=%p, new_rbs=%p, IA64_STK_OFFSET=%p, &r8=%p\n",
-ed,start_pc,regs,sw,new_rbs,IA64_STK_OFFSET,&regs->r8);
+ //regs->r13 = (unsigned long) v;
+printf("new_thread: v=%p, start_pc=%p, regs=%p, sw=%p, new_rbs=%p, IA64_STK_OFFSET=%p, &r8=%p\n",
+v,start_pc,regs,sw,new_rbs,IA64_STK_OFFSET,&regs->r8);
sw->b0 = (unsigned long) &ia64_ret_from_clone;
- ed->arch._thread.ksp = (unsigned long) sw - 16;
- //ed->thread_info->flags = 0;
+ v->arch._thread.ksp = (unsigned long) sw - 16;
+ //v->thread_info->flags = 0;
printk("new_thread, about to call init_all_rr\n");
- init_all_rr(ed);
+ init_all_rr(v);
// set up boot parameters (and fake firmware)
printk("new_thread, about to call dom_fw_setup\n");
regs->r28 = dom_fw_setup(d,saved_command_line,256L); //FIXME
printk("new_thread, done with dom_fw_setup\n");
// don't forget to set this!
- ed->vcpu_info->arch.banknum = 1;
+ v->vcpu_info->arch.banknum = 1;
}
#endif // CONFIG_VTI
@@ -737,9 +737,9 @@ domU_staging_write_32(unsigned long at, unsigned long a, unsigned long b,
* here.
*/
void
-post_arch_do_create_domain(struct exec_domain *ed, int vmx_domain)
+post_arch_do_create_domain(struct vcpu *v, int vmx_domain)
{
- struct domain *d = ed->domain;
+ struct domain *d = v->domain;
if (!vmx_domain) {
d->shared_info = (void*)alloc_xenheap_page();
@@ -786,7 +786,7 @@ int construct_dom0(struct domain *d,
unsigned long alloc_start, alloc_end;
struct pfn_info *page = NULL;
start_info_t *si;
- struct exec_domain *ed = d->exec_domain[0];
+ struct vcpu *v = d->vcpu[0];
struct domain_setup_info dsi;
unsigned long p_start;
unsigned long pkern_start;
@@ -882,7 +882,7 @@ int construct_dom0(struct domain *d,
machine_to_phys_mapping[mfn] = mfn;
}
- post_arch_do_create_domain(ed, vmx_dom0);
+ post_arch_do_create_domain(v, vmx_dom0);
/* Load Dom0 image to its own memory */
loaddomainelfimage(d,image_start);
@@ -898,7 +898,7 @@ int construct_dom0(struct domain *d,
/* Physical mode emulation initialization, including
* emulation ID allcation and related memory request
*/
- physical_mode_init(ed);
+ physical_mode_init(v);
/* Dom0's pfn is equal to mfn, so there's no need to allocate pmt
* for dom0
*/
@@ -916,11 +916,11 @@ int construct_dom0(struct domain *d,
vmx_final_setup_domain(dom0);
/* vpd is ready now */
- vlsapic_reset(ed);
- vtm_init(ed);
+ vlsapic_reset(v);
+ vtm_init(v);
set_bit(_DOMF_constructed, &d->domain_flags);
- new_thread(ed, pkern_entry, 0, 0);
+ new_thread(v, pkern_entry, 0, 0);
// FIXME: Hack for keyboard input
#ifdef CLONE_DOMAIN0
@@ -928,12 +928,12 @@ if (d == dom0)
#endif
serial_input_init();
if (d == dom0) {
- ed->vcpu_info->arch.delivery_mask[0] = -1L;
- ed->vcpu_info->arch.delivery_mask[1] = -1L;
- ed->vcpu_info->arch.delivery_mask[2] = -1L;
- ed->vcpu_info->arch.delivery_mask[3] = -1L;
+ v->vcpu_info->arch.delivery_mask[0] = -1L;
+ v->vcpu_info->arch.delivery_mask[1] = -1L;
+ v->vcpu_info->arch.delivery_mask[2] = -1L;
+ v->vcpu_info->arch.delivery_mask[3] = -1L;
}
- else __set_bit(0x30,ed->vcpu_info->arch.delivery_mask);
+ else __set_bit(0x30,v->vcpu_info->arch.delivery_mask);
return 0;
}
@@ -953,7 +953,7 @@ int construct_dom0(struct domain *d,
//l1_pgentry_t *l1tab = NULL, *l1start = NULL;
struct pfn_info *page = NULL;
start_info_t *si;
- struct exec_domain *ed = d->exec_domain[0];
+ struct vcpu *v = d->vcpu[0];
struct domain_setup_info dsi;
unsigned long p_start;
@@ -1095,19 +1095,19 @@ int construct_dom0(struct domain *d,
set_bit(_DOMF_constructed, &d->domain_flags);
- new_thread(ed, pkern_entry, 0, 0);
+ new_thread(v, pkern_entry, 0, 0);
// FIXME: Hack for keyboard input
#ifdef CLONE_DOMAIN0
if (d == dom0)
#endif
serial_input_init();
if (d == dom0) {
- ed->vcpu_info->arch.delivery_mask[0] = -1L;
- ed->vcpu_info->arch.delivery_mask[1] = -1L;
- ed->vcpu_info->arch.delivery_mask[2] = -1L;
- ed->vcpu_info->arch.delivery_mask[3] = -1L;
+ v->vcpu_info->arch.delivery_mask[0] = -1L;
+ v->vcpu_info->arch.delivery_mask[1] = -1L;
+ v->vcpu_info->arch.delivery_mask[2] = -1L;
+ v->vcpu_info->arch.delivery_mask[3] = -1L;
}
- else __set_bit(0x30,ed->vcpu_info->arch.delivery_mask);
+ else __set_bit(0x30,v->vcpu_info->arch.delivery_mask);
return 0;
}
@@ -1120,7 +1120,7 @@ int construct_domU(struct domain *d,
char *cmdline)
{
int i, rc;
- struct exec_domain *ed = d->exec_domain[0];
+ struct vcpu *v = d->vcpu[0];
unsigned long pkern_entry;
#ifndef DOMU_AUTO_RESTART
@@ -1161,25 +1161,25 @@ int construct_domU(struct domain *d,
printk("calling new_thread, entry=%p\n",pkern_entry);
#ifdef DOMU_AUTO_RESTART
- ed->domain->arch.image_start = image_start;
- ed->domain->arch.image_len = image_len;
- ed->domain->arch.entry = pkern_entry;
+ v->domain->arch.image_start = image_start;
+ v->domain->arch.image_len = image_len;
+ v->domain->arch.entry = pkern_entry;
#endif
- new_thread(ed, pkern_entry, 0, 0);
+ new_thread(v, pkern_entry, 0, 0);
printk("new_thread returns\n");
- __set_bit(0x30,ed->vcpu_info->arch.delivery_mask);
+ __set_bit(0x30,v->vcpu_info->arch.delivery_mask);
return 0;
}
#ifdef DOMU_AUTO_RESTART
-void reconstruct_domU(struct exec_domain *ed)
+void reconstruct_domU(struct vcpu *v)
{
/* re-copy the OS image to reset data values to original */
printk("reconstruct_domU: restarting domain %d...\n",
- ed->domain->domain_id);
- loaddomainelfimage(ed->domain,ed->domain->arch.image_start);
- new_thread(ed, ed->domain->arch.entry, 0, 0);
+ v->domain->domain_id);
+ loaddomainelfimage(v->domain,v->domain->arch.image_start);
+ new_thread(v, v->domain->arch.entry, 0, 0);
}
#endif
@@ -1229,9 +1229,9 @@ void dummy(void)
#if 0
-void switch_to(struct exec_domain *prev, struct exec_domain *next)
+void switch_to(struct vcpu *prev, struct vcpu *next)
{
- struct exec_domain *last;
+ struct vcpu *last;
__switch_to(prev,next,last);
//set_current(next);
@@ -1240,7 +1240,7 @@ void switch_to(struct exec_domain *prev, struct exec_domain *next)
void domain_pend_keyboard_interrupt(int irq)
{
- vcpu_pend_interrupt(dom0->exec_domain[0],irq);
+ vcpu_pend_interrupt(dom0->vcpu[0],irq);
}
/////////////////////////////////
diff --git a/xen/arch/ia64/hypercall.c b/xen/arch/ia64/hypercall.c
index adaf7d3b44..0fcc6f7cf8 100644
--- a/xen/arch/ia64/hypercall.c
+++ b/xen/arch/ia64/hypercall.c
@@ -24,7 +24,7 @@ void fooefi(void) {}
int
ia64_hypercall (struct pt_regs *regs)
{
- struct exec_domain *ed = (struct domain *) current;
+ struct vcpu *v = (struct domain *) current;
struct ia64_sal_retval x;
unsigned long *tv, *tc;
@@ -38,7 +38,7 @@ ia64_hypercall (struct pt_regs *regs)
// to a yet-to-be-found bug where pending_interruption
// is zero when it shouldn't be. Since PAL is called
// in the idle loop, this should resolve it
- ed->vcpu_info->arch.pending_interruption = 1;
+ v->vcpu_info->arch.pending_interruption = 1;
#endif
x = pal_emulator_static(regs->r28);
if (regs->r28 == PAL_HALT_LIGHT) {
@@ -49,10 +49,10 @@ ia64_hypercall (struct pt_regs *regs)
regs->r10 = x.v1; regs->r11 = x.v2;
break;
case FW_HYPERCALL_SAL_CALL:
- x = sal_emulator(vcpu_get_gr(ed,32),vcpu_get_gr(ed,33),
- vcpu_get_gr(ed,34),vcpu_get_gr(ed,35),
- vcpu_get_gr(ed,36),vcpu_get_gr(ed,37),
- vcpu_get_gr(ed,38),vcpu_get_gr(ed,39));
+ x = sal_emulator(vcpu_get_gr(v,32),vcpu_get_gr(v,33),
+ vcpu_get_gr(v,34),vcpu_get_gr(v,35),
+ vcpu_get_gr(v,36),vcpu_get_gr(v,37),
+ vcpu_get_gr(v,38),vcpu_get_gr(v,39));
regs->r8 = x.status; regs->r9 = x.v0;
regs->r10 = x.v1; regs->r11 = x.v2;
break;
@@ -73,8 +73,8 @@ ia64_hypercall (struct pt_regs *regs)
#endif
break;
case FW_HYPERCALL_EFI_GET_TIME:
- tv = vcpu_get_gr(ed,32);
- tc = vcpu_get_gr(ed,33);
+ tv = vcpu_get_gr(v,32);
+ tc = vcpu_get_gr(v,33);
//printf("efi_get_time(%p,%p) called...",tv,tc);
tv = __va(translate_domain_mpaddr(tv));
if (tc) tc = __va(translate_domain_mpaddr(tc));
@@ -99,28 +99,28 @@ ia64_hypercall (struct pt_regs *regs)
break;
case 0xffff: // test dummy hypercall
regs->r8 = dump_privop_counts_to_user(
- vcpu_get_gr(ed,32),
- vcpu_get_gr(ed,33));
+ vcpu_get_gr(v,32),
+ vcpu_get_gr(v,33));
break;
case 0xfffe: // test dummy hypercall
regs->r8 = zero_privop_counts_to_user(
- vcpu_get_gr(ed,32),
- vcpu_get_gr(ed,33));
+ vcpu_get_gr(v,32),
+ vcpu_get_gr(v,33));
break;
case 0xfffd: // test dummy hypercall
regs->r8 = launch_domainU(
- vcpu_get_gr(ed,32));
+ vcpu_get_gr(v,32));
break;
case 0xfffc: // test dummy hypercall
regs->r8 = domU_staging_write_32(
- vcpu_get_gr(ed,32),
- vcpu_get_gr(ed,33),
- vcpu_get_gr(ed,34),
- vcpu_get_gr(ed,35),
- vcpu_get_gr(ed,36));
+ vcpu_get_gr(v,32),
+ vcpu_get_gr(v,33),
+ vcpu_get_gr(v,34),
+ vcpu_get_gr(v,35),
+ vcpu_get_gr(v,36));
break;
case 0xfffb: // test dummy hypercall
- regs->r8 = domU_staging_read_8(vcpu_get_gr(ed,32));
+ regs->r8 = domU_staging_read_8(vcpu_get_gr(v,32));
break;
}
return 1;
diff --git a/xen/arch/ia64/idle0_task.c b/xen/arch/ia64/idle0_task.c
index 8aa41a131c..bfb49f7591 100644
--- a/xen/arch/ia64/idle0_task.c
+++ b/xen/arch/ia64/idle0_task.c
@@ -31,7 +31,7 @@ EXPORT_SYMBOL(init_mm);
struct domain idle0_domain = IDLE0_DOMAIN(idle0_domain);
#if 0
-struct exec_domain idle0_exec_domain = IDLE0_EXEC_DOMAIN(idle0_exec_domain,
+struct vcpu idle0_vcpu = IDLE0_EXEC_DOMAIN(idle0_vcpu,
&idle0_domain);
#endif
diff --git a/xen/arch/ia64/irq.c b/xen/arch/ia64/irq.c
index b04d49cc70..5d4fda28a4 100644
--- a/xen/arch/ia64/irq.c
+++ b/xen/arch/ia64/irq.c
@@ -1364,7 +1364,7 @@ int pirq_guest_unmask(struct domain *d)
return 0;
}
-int pirq_guest_bind(struct exec_domain *d, int irq, int will_share)
+int pirq_guest_bind(struct vcpu *d, int irq, int will_share)
{
irq_desc_t *desc = &irq_desc[irq];
irq_guest_action_t *action;
diff --git a/xen/arch/ia64/mmio.c b/xen/arch/ia64/mmio.c
index 88146c8d8e..2fb5c34251 100644
--- a/xen/arch/ia64/mmio.c
+++ b/xen/arch/ia64/mmio.c
@@ -211,7 +211,7 @@ static inline VCPU *lid_2_vcpu (struct domain *d, u64 id, u64 eid)
LID lid;
for (i=0; i<MAX_VIRT_CPUS; i++) {
- vcpu = d->exec_domain[i];
+ vcpu = d->vcpu[i];
lid.val = VPD_CR(vcpu, lid);
if ( lid.id == id && lid.eid == eid ) {
return vcpu;
diff --git a/xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c b/xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c
index 1daf4a3100..51f8fe05cf 100644
--- a/xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c
+++ b/xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c
@@ -82,7 +82,7 @@
+
+ if (vector != IA64_TIMER_VECTOR) {
+ /* FIXME: Leave IRQ re-route later */
-+ vmx_vcpu_pend_interrupt(dom0->exec_domain[0],vector);
++ vmx_vcpu_pend_interrupt(dom0->vcpu[0],vector);
+ wake_dom0 = 1;
+ }
+ else { // FIXME: Handle Timer only now
@@ -108,7 +108,7 @@
+ */
+ irq_exit();
+ if ( wake_dom0 && current != dom0 )
-+ domain_wake(dom0->exec_domain[0]);
++ domain_wake(dom0->vcpu[0]);
+}
+#endif
+
diff --git a/xen/arch/ia64/patch/linux-2.6.7/current.h b/xen/arch/ia64/patch/linux-2.6.7/current.h
index d61b6fda58..cf11820756 100644
--- a/xen/arch/ia64/patch/linux-2.6.7/current.h
+++ b/xen/arch/ia64/patch/linux-2.6.7/current.h
@@ -6,7 +6,7 @@
*/
+#ifdef XEN
+struct domain;
-+#define get_current() ((struct exec_domain *) ia64_getreg(_IA64_REG_TP))
++#define get_current() ((struct vcpu *) ia64_getreg(_IA64_REG_TP))
+#define current get_current()
+//#define set_current(d) ia64_setreg(_IA64_REG_TP,(void *)d);
+#define set_current(d) (ia64_r13 = (void *)d)
diff --git a/xen/arch/ia64/patch/linux-2.6.7/irq_ia64.c b/xen/arch/ia64/patch/linux-2.6.7/irq_ia64.c
index e1dfafadb8..8d991ddd2c 100644
--- a/xen/arch/ia64/patch/linux-2.6.7/irq_ia64.c
+++ b/xen/arch/ia64/patch/linux-2.6.7/irq_ia64.c
@@ -72,8 +72,8 @@
+ }
+#endif
+ //FIXME: TEMPORARY HACK!!!!
-+ vcpu_pend_interrupt(dom0->exec_domain[0],vector);
-+ domain_wake(dom0->exec_domain[0]);
++ vcpu_pend_interrupt(dom0->vcpu[0],vector);
++ domain_wake(dom0->vcpu[0]);
+ }
+ else
+#endif
diff --git a/xen/arch/ia64/patch/linux-2.6.7/time.c b/xen/arch/ia64/patch/linux-2.6.7/time.c
index 3552cb8598..1b6263ce4f 100644
--- a/xen/arch/ia64/patch/linux-2.6.7/time.c
+++ b/xen/arch/ia64/patch/linux-2.6.7/time.c
@@ -70,7 +70,7 @@
+ return now;
+}
+
-+void update_dom_time(struct exec_domain *ed)
++void update_dom_time(struct vcpu *v)
+{
+// FIXME: implement this?
+// printf("update_dom_time: called, not implemented, skipping\n");
@@ -206,10 +206,10 @@
+ // call vcpu_timer_expired on it
+ //domain0_ready = 1; // moved to xensetup.c
+ }
-+ if (domain0_ready && vcpu_timer_expired(dom0->exec_domain[0])) {
-+ vcpu_pend_timer(dom0->exec_domain[0]);
-+ //vcpu_set_next_timer(dom0->exec_domain[0]);
-+ domain_wake(dom0->exec_domain[0]);
++ if (domain0_ready && vcpu_timer_expired(dom0->vcpu[0])) {
++ vcpu_pend_timer(dom0->vcpu[0]);
++ //vcpu_set_next_timer(dom0->vcpu[0]);
++ domain_wake(dom0->vcpu[0]);
+ }
+ if (!is_idle_task(current->domain) && current->domain != dom0) {
+ if (vcpu_timer_expired(current)) {
diff --git a/xen/arch/ia64/privop.c b/xen/arch/ia64/privop.c
index 0fba1d502f..19ff98fc8d 100644
--- a/xen/arch/ia64/privop.c
+++ b/xen/arch/ia64/privop.c
@@ -761,7 +761,7 @@ unsigned long hyperpriv_cnt[HYPERPRIVOP_MAX+1] = { 0 };
int
ia64_hyperprivop(unsigned long iim, REGS *regs)
{
- struct exec_domain *ed = (struct domain *) current;
+ struct vcpu *v = (struct domain *) current;
INST64 inst;
UINT64 val;
@@ -774,24 +774,24 @@ ia64_hyperprivop(unsigned long iim, REGS *regs)
hyperpriv_cnt[iim]++;
switch(iim) {
case HYPERPRIVOP_RFI:
- (void)vcpu_rfi(ed);
+ (void)vcpu_rfi(v);
return 0; // don't update iip
case HYPERPRIVOP_RSM_DT:
- (void)vcpu_reset_psr_dt(ed);
+ (void)vcpu_reset_psr_dt(v);
return 1;
case HYPERPRIVOP_SSM_DT:
- (void)vcpu_set_psr_dt(ed);
+ (void)vcpu_set_psr_dt(v);
return 1;
case HYPERPRIVOP_COVER:
- (void)vcpu_cover(ed);
+ (void)vcpu_cover(v);
return 1;
case HYPERPRIVOP_ITC_D:
inst.inst = 0;
- (void)priv_itc_d(ed,inst);
+ (void)priv_itc_d(v,inst);
return 1;
case HYPERPRIVOP_ITC_I:
inst.inst = 0;
- (void)priv_itc_i(ed,inst);
+ (void)priv_itc_i(v,inst);
return 1;
}
return 0;
diff --git a/xen/arch/ia64/process.c b/xen/arch/ia64/process.c
index 60a2f30f76..414880882a 100644
--- a/xen/arch/ia64/process.c
+++ b/xen/arch/ia64/process.c
@@ -31,7 +31,7 @@
#include <asm/hpsim_ssc.h>
#include <asm/dom_fw.h>
-extern unsigned long vcpu_get_itir_on_fault(struct exec_domain *, UINT64);
+extern unsigned long vcpu_get_itir_on_fault(struct vcpu *, UINT64);
extern struct ia64_sal_retval pal_emulator_static(UINT64);
extern struct ia64_sal_retval sal_emulator(UINT64,UINT64,UINT64,UINT64,UINT64,UINT64,UINT64,UINT64);
@@ -61,7 +61,7 @@ long do_iopl(domid_t domain, unsigned int new_io_pl)
return 0;
}
-void schedule_tail(struct exec_domain *next)
+void schedule_tail(struct vcpu *next)
{
unsigned long rr7;
//printk("current=%lx,shared_info=%lx\n",current,current->vcpu_info);
@@ -76,7 +76,7 @@ void schedule_tail(struct exec_domain *next)
#endif // CONFIG_VTI
}
-extern TR_ENTRY *match_tr(struct exec_domain *ed, unsigned long ifa);
+extern TR_ENTRY *match_tr(struct vcpu *v, unsigned long ifa);
void tdpfoo(void) { }
@@ -132,10 +132,10 @@ unsigned long translate_domain_mpaddr(unsigned long mpaddr)
void reflect_interruption(unsigned long ifa, unsigned long isr, unsigned long itiriim, struct pt_regs *regs, unsigned long vector)
{
- unsigned long vcpu_get_ipsr_int_state(struct exec_domain *,unsigned long);
- unsigned long vcpu_get_rr_ve(struct exec_domain *,unsigned long);
+ unsigned long vcpu_get_ipsr_int_state(struct vcpu *,unsigned long);
+ unsigned long vcpu_get_rr_ve(struct vcpu *,unsigned long);
struct domain *d = current->domain;
- struct exec_domain *ed = current;
+ struct vcpu *v = current;
if (vector == IA64_EXTINT_VECTOR) {
@@ -147,8 +147,8 @@ void reflect_interruption(unsigned long ifa, unsigned long isr, unsigned long it
first_extint = 0;
}
}
- if (!PSCB(ed,interrupt_collection_enabled)) {
- if (!(PSCB(ed,ipsr) & IA64_PSR_DT)) {
+ if (!PSCB(v,interrupt_collection_enabled)) {
+ if (!(PSCB(v,ipsr) & IA64_PSR_DT)) {
panic_domain(regs,"psr.dt off, trying to deliver nested dtlb!\n");
}
vector &= ~0xf;
@@ -156,45 +156,45 @@ void reflect_interruption(unsigned long ifa, unsigned long isr, unsigned long it
vector != IA64_ALT_DATA_TLB_VECTOR &&
vector != IA64_VHPT_TRANS_VECTOR) {
panic_domain(regs,"psr.ic off, delivering fault=%lx,iip=%p,ifa=%p,isr=%p,PSCB.iip=%p\n",
- vector,regs->cr_iip,ifa,isr,PSCB(ed,iip));
+ vector,regs->cr_iip,ifa,isr,PSCB(v,iip));
}
//printf("Delivering NESTED DATA TLB fault\n");
vector = IA64_DATA_NESTED_TLB_VECTOR;
- regs->cr_iip = ((unsigned long) PSCBX(ed,iva) + vector) & ~0xffUL;
+ regs->cr_iip = ((unsigned long) PSCBX(v,iva) + vector) & ~0xffUL;
regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
// NOTE: nested trap must NOT pass PSCB address
- //regs->r31 = (unsigned long) &PSCB(ed);
+ //regs->r31 = (unsigned long) &PSCB(v);
return;
}
if ((vector & 0xf) == IA64_FORCED_IFA)
- ifa = PSCB(ed,tmp[0]);
+ ifa = PSCB(v,tmp[0]);
vector &= ~0xf;
- PSCB(ed,ifa) = ifa;
+ PSCB(v,ifa) = ifa;
if (vector < IA64_DATA_NESTED_TLB_VECTOR) /* VHPT miss, TLB miss, Alt TLB miss */
- vcpu_thash(ed,ifa,&PSCB(current,iha));
- PSCB(ed,unat) = regs->ar_unat; // not sure if this is really needed?
- PSCB(ed,precover_ifs) = regs->cr_ifs;
- vcpu_bsw0(ed);
- PSCB(ed,ipsr) = vcpu_get_ipsr_int_state(ed,regs->cr_ipsr);
+ vcpu_thash(v,ifa,&PSCB(current,iha));
+ PSCB(v,unat) = regs->ar_unat; // not sure if this is really needed?
+ PSCB(v,precover_ifs) = regs->cr_ifs;
+ vcpu_bsw0(v);
+ PSCB(v,ipsr) = vcpu_get_ipsr_int_state(v,regs->cr_ipsr);
if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR)
- PSCB(ed,iim) = itiriim;
- else PSCB(ed,itir) = vcpu_get_itir_on_fault(ed,ifa);
- PSCB(ed,isr) = isr; // this is unnecessary except for interrupts!
- PSCB(ed,iip) = regs->cr_iip;
- PSCB(ed,ifs) = 0;
- PSCB(ed,incomplete_regframe) = 0;
-
- regs->cr_iip = ((unsigned long) PSCBX(ed,iva) + vector) & ~0xffUL;
+ PSCB(v,iim) = itiriim;
+ else PSCB(v,itir) = vcpu_get_itir_on_fault(v,ifa);
+ PSCB(v,isr) = isr; // this is unnecessary except for interrupts!
+ PSCB(v,iip) = regs->cr_iip;
+ PSCB(v,ifs) = 0;
+ PSCB(v,incomplete_regframe) = 0;
+
+ regs->cr_iip = ((unsigned long) PSCBX(v,iva) + vector) & ~0xffUL;
regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
#ifdef CONFIG_SMP
#error "sharedinfo doesn't handle smp yet"
#endif
regs->r31 = &((shared_info_t *)SHAREDINFO_ADDR)->vcpu_data[0].arch;
- PSCB(ed,interrupt_delivery_enabled) = 0;
- PSCB(ed,interrupt_collection_enabled) = 0;
+ PSCB(v,interrupt_delivery_enabled) = 0;
+ PSCB(v,interrupt_collection_enabled) = 0;
}
void foodpi(void) {}
@@ -205,26 +205,26 @@ void foodpi(void) {}
void deliver_pending_interrupt(struct pt_regs *regs)
{
struct domain *d = current->domain;
- struct exec_domain *ed = current;
+ struct vcpu *v = current;
// FIXME: Will this work properly if doing an RFI???
if (!is_idle_task(d) && user_mode(regs)) {
- //vcpu_poke_timer(ed);
- if (vcpu_deliverable_interrupts(ed)) {
+ //vcpu_poke_timer(v);
+ if (vcpu_deliverable_interrupts(v)) {
unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
- if (vcpu_timer_pending_early(ed))
-printf("*#*#*#* about to deliver early timer to domain %d!!!\n",ed->domain->domain_id);
+ if (vcpu_timer_pending_early(v))
+printf("*#*#*#* about to deliver early timer to domain %d!!!\n",v->domain->domain_id);
reflect_interruption(0,isr,0,regs,IA64_EXTINT_VECTOR);
}
}
}
-int handle_lazy_cover(struct exec_domain *ed, unsigned long isr, struct pt_regs *regs)
+int handle_lazy_cover(struct vcpu *v, unsigned long isr, struct pt_regs *regs)
{
- if (!PSCB(ed,interrupt_collection_enabled)) {
+ if (!PSCB(v,interrupt_collection_enabled)) {
if (isr & IA64_ISR_IR) {
// printf("Handling lazy cover\n");
- PSCB(ed,ifs) = regs->cr_ifs;
- PSCB(ed,incomplete_regframe) = 1;
+ PSCB(v,ifs) = regs->cr_ifs;
+ PSCB(v,incomplete_regframe) = 1;
regs->cr_ifs = 0;
return(1); // retry same instruction with cr.ifs off
}
@@ -237,14 +237,14 @@ int handle_lazy_cover(struct exec_domain *ed, unsigned long isr, struct pt_regs
void xen_handle_domain_access(unsigned long address, unsigned long isr, struct pt_regs *regs, unsigned long itir)
{
struct domain *d = (struct domain *) current->domain;
- struct domain *ed = (struct exec_domain *) current;
+ struct domain *ed = (struct vcpu *) current;
TR_ENTRY *trp;
unsigned long psr = regs->cr_ipsr, mask, flags;
unsigned long iip = regs->cr_iip;
// FIXME should validate address here
unsigned long pteval, mpaddr, ps;
unsigned long lookup_domain_mpa(struct domain *,unsigned long);
- unsigned long match_dtlb(struct exec_domain *,unsigned long, unsigned long *, unsigned long *);
+ unsigned long match_dtlb(struct vcpu *,unsigned long, unsigned long *, unsigned long *);
IA64FAULT fault;
// NEED TO HANDLE THREE CASES:
@@ -736,7 +736,7 @@ ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long isr, u
{
static int first_time = 1;
struct domain *d = (struct domain *) current->domain;
- struct exec_domain *ed = (struct domain *) current;
+ struct vcpu *v = (struct domain *) current;
extern unsigned long running_on_sim;
if (first_time) {
@@ -752,7 +752,7 @@ ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long isr, u
if (ia64_hypercall(regs))
vcpu_increment_iip(current);
}
- else if (!PSCB(ed,interrupt_collection_enabled)) {
+ else if (!PSCB(v,interrupt_collection_enabled)) {
if (ia64_hyperprivop(iim,regs))
vcpu_increment_iip(current);
}
@@ -764,11 +764,11 @@ ia64_handle_privop (unsigned long ifa, struct pt_regs *regs, unsigned long isr,
{
IA64FAULT vector;
struct domain *d = current->domain;
- struct exec_domain *ed = current;
+ struct vcpu *v = current;
// FIXME: no need to pass itir in to this routine as we need to
// compute the virtual itir anyway (based on domain's RR.ps)
// AND ACTUALLY reflect_interruption doesn't use it anyway!
- itir = vcpu_get_itir_on_fault(ed,ifa);
+ itir = vcpu_get_itir_on_fault(v,ifa);
vector = priv_emulate(current,regs,isr);
if (vector != IA64_NO_FAULT && vector != IA64_RFI_IN_PROGRESS) {
reflect_interruption(ifa,isr,itir,regs,vector);
@@ -782,10 +782,10 @@ void
ia64_handle_reflection (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim, unsigned long vector)
{
struct domain *d = (struct domain *) current->domain;
- struct exec_domain *ed = (struct domain *) current;
+ struct vcpu *v = (struct domain *) current;
unsigned long check_lazy_cover = 0;
unsigned long psr = regs->cr_ipsr;
- unsigned long itir = vcpu_get_itir_on_fault(ed,ifa);
+ unsigned long itir = vcpu_get_itir_on_fault(v,ifa);
if (!(psr & IA64_PSR_CPL)) {
printk("ia64_handle_reflection: reflecting with priv=0!!\n");
@@ -793,7 +793,7 @@ ia64_handle_reflection (unsigned long ifa, struct pt_regs *regs, unsigned long i
// FIXME: no need to pass itir in to this routine as we need to
// compute the virtual itir anyway (based on domain's RR.ps)
// AND ACTUALLY reflect_interruption doesn't use it anyway!
- itir = vcpu_get_itir_on_fault(ed,ifa);
+ itir = vcpu_get_itir_on_fault(v,ifa);
switch(vector) {
case 8:
vector = IA64_DIRTY_BIT_VECTOR; break;
@@ -814,7 +814,7 @@ ia64_handle_reflection (unsigned long ifa, struct pt_regs *regs, unsigned long i
vector = IA64_DISABLED_FPREG_VECTOR; break;
case 26:
printf("*** NaT fault... attempting to handle as privop\n");
- vector = priv_emulate(ed,regs,isr);
+ vector = priv_emulate(v,regs,isr);
if (vector == IA64_NO_FAULT) {
printf("*** Handled privop masquerading as NaT fault\n");
return;
@@ -832,6 +832,6 @@ printf("*** Handled privop masquerading as NaT fault\n");
while(vector);
return;
}
- if (check_lazy_cover && handle_lazy_cover(ed, isr, regs)) return;
+ if (check_lazy_cover && handle_lazy_cover(v, isr, regs)) return;
reflect_interruption(ifa,isr,itir,regs,vector);
}
diff --git a/xen/arch/ia64/regionreg.c b/xen/arch/ia64/regionreg.c
index bed58aec66..6742d4f602 100644
--- a/xen/arch/ia64/regionreg.c
+++ b/xen/arch/ia64/regionreg.c
@@ -251,7 +251,7 @@ static inline int validate_page_size(unsigned long ps)
// NOTE: DOES NOT SET VCPU's rrs[x] value!!
int set_one_rr(unsigned long rr, unsigned long val)
{
- struct exec_domain *ed = current;
+ struct vcpu *v = current;
unsigned long rreg = REGION_NUMBER(rr);
ia64_rr rrv, newrrv, memrrv;
unsigned long newrid;
@@ -260,12 +260,12 @@ int set_one_rr(unsigned long rr, unsigned long val)
rrv.rrval = val;
newrrv.rrval = 0;
- newrid = ed->domain->starting_rid + rrv.rid;
+ newrid = v->domain->starting_rid + rrv.rid;
- if (newrid > ed->domain->ending_rid) {
+ if (newrid > v->domain->ending_rid) {
printk("can't set rr%d to %lx, starting_rid=%lx,"
"ending_rid=%lx, val=%lx\n", rreg, newrid,
- ed->domain->starting_rid,ed->domain->ending_rid,val);
+ v->domain->starting_rid,v->domain->ending_rid,val);
return 0;
}
@@ -274,7 +274,7 @@ int set_one_rr(unsigned long rr, unsigned long val)
newrrv.rid = newrid;
newrrv.ve = VHPT_ENABLED_REGION_7;
newrrv.ps = IA64_GRANULE_SHIFT;
- ia64_new_rr7(vmMangleRID(newrrv.rrval),ed->vcpu_info);
+ ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info);
}
else {
newrrv.rid = newrid;
@@ -317,45 +317,45 @@ int set_all_rr( u64 rr0, u64 rr1, u64 rr2, u64 rr3,
return 1;
}
-void init_all_rr(struct exec_domain *ed)
+void init_all_rr(struct vcpu *v)
{
ia64_rr rrv;
rrv.rrval = 0;
- rrv.rid = ed->domain->metaphysical_rid;
+ rrv.rid = v->domain->metaphysical_rid;
rrv.ps = PAGE_SHIFT;
rrv.ve = 1;
-if (!ed->vcpu_info) { printf("Stopping in init_all_rr\n"); dummy(); }
- ed->vcpu_info->arch.rrs[0] = -1;
- ed->vcpu_info->arch.rrs[1] = rrv.rrval;
- ed->vcpu_info->arch.rrs[2] = rrv.rrval;
- ed->vcpu_info->arch.rrs[3] = rrv.rrval;
- ed->vcpu_info->arch.rrs[4] = rrv.rrval;
- ed->vcpu_info->arch.rrs[5] = rrv.rrval;
+if (!v->vcpu_info) { printf("Stopping in init_all_rr\n"); dummy(); }
+ v->vcpu_info->arch.rrs[0] = -1;
+ v->vcpu_info->arch.rrs[1] = rrv.rrval;
+ v->vcpu_info->arch.rrs[2] = rrv.rrval;
+ v->vcpu_info->arch.rrs[3] = rrv.rrval;
+ v->vcpu_info->arch.rrs[4] = rrv.rrval;
+ v->vcpu_info->arch.rrs[5] = rrv.rrval;
rrv.ve = 0;
- ed->vcpu_info->arch.rrs[6] = rrv.rrval;
-// ed->shared_info->arch.rrs[7] = rrv.rrval;
+ v->vcpu_info->arch.rrs[6] = rrv.rrval;
+// v->shared_info->arch.rrs[7] = rrv.rrval;
}
/* XEN/ia64 INTERNAL ROUTINES */
-unsigned long physicalize_rid(struct exec_domain *ed, unsigned long rrval)
+unsigned long physicalize_rid(struct vcpu *v, unsigned long rrval)
{
ia64_rr rrv;
rrv.rrval = rrval;
- rrv.rid += ed->domain->starting_rid;
+ rrv.rid += v->domain->starting_rid;
return rrv.rrval;
}
unsigned long
-virtualize_rid(struct exec_domain *ed, unsigned long rrval)
+virtualize_rid(struct vcpu *v, unsigned long rrval)
{
ia64_rr rrv;
rrv.rrval = rrval;
- rrv.rid -= ed->domain->starting_rid;
+ rrv.rid -= v->domain->starting_rid;
return rrv.rrval;
}
@@ -366,17 +366,17 @@ virtualize_rid(struct exec_domain *ed, unsigned long rrval)
// rr7 (because we have to to assembly and physical mode
// to change rr7). If no change to rr7 is required, returns 0.
//
-unsigned long load_region_regs(struct exec_domain *ed)
+unsigned long load_region_regs(struct vcpu *v)
{
unsigned long rr0, rr1,rr2, rr3, rr4, rr5, rr6, rr7;
// TODO: These probably should be validated
unsigned long bad = 0;
- if (ed->vcpu_info->arch.metaphysical_mode) {
+ if (v->vcpu_info->arch.metaphysical_mode) {
ia64_rr rrv;
rrv.rrval = 0;
- rrv.rid = ed->domain->metaphysical_rid;
+ rrv.rid = v->domain->metaphysical_rid;
rrv.ps = PAGE_SHIFT;
rrv.ve = 1;
rr0 = rrv.rrval;
@@ -384,16 +384,16 @@ unsigned long load_region_regs(struct exec_domain *ed)
ia64_srlz_d();
}
else {
- rr0 = ed->vcpu_info->arch.rrs[0];
+ rr0 = v->vcpu_info->arch.rrs[0];
if (!set_one_rr(0x0000000000000000L, rr0)) bad |= 1;
}
- rr1 = ed->vcpu_info->arch.rrs[1];
- rr2 = ed->vcpu_info->arch.rrs[2];
- rr3 = ed->vcpu_info->arch.rrs[3];
- rr4 = ed->vcpu_info->arch.rrs[4];
- rr5 = ed->vcpu_info->arch.rrs[5];
- rr6 = ed->vcpu_info->arch.rrs[6];
- rr7 = ed->vcpu_info->arch.rrs[7];
+ rr1 = v->vcpu_info->arch.rrs[1];
+ rr2 = v->vcpu_info->arch.rrs[2];
+ rr3 = v->vcpu_info->arch.rrs[3];
+ rr4 = v->vcpu_info->arch.rrs[4];
+ rr5 = v->vcpu_info->arch.rrs[5];
+ rr6 = v->vcpu_info->arch.rrs[6];
+ rr7 = v->vcpu_info->arch.rrs[7];
if (!set_one_rr(0x2000000000000000L, rr1)) bad |= 2;
if (!set_one_rr(0x4000000000000000L, rr2)) bad |= 4;
if (!set_one_rr(0x6000000000000000L, rr3)) bad |= 8;
diff --git a/xen/arch/ia64/vmmu.c b/xen/arch/ia64/vmmu.c
index d2b2b30153..69f8cc7279 100644
--- a/xen/arch/ia64/vmmu.c
+++ b/xen/arch/ia64/vmmu.c
@@ -136,7 +136,7 @@ purge_machine_tc_by_domid(domid_t domid)
#endif
}
-static thash_cb_t *init_domain_vhpt(struct exec_domain *d)
+static thash_cb_t *init_domain_vhpt(struct vcpu *d)
{
struct pfn_info *page;
void *vbase,*vcur;
@@ -179,7 +179,7 @@ static thash_cb_t *init_domain_vhpt(struct exec_domain *d)
}
-thash_cb_t *init_domain_tlb(struct exec_domain *d)
+thash_cb_t *init_domain_tlb(struct vcpu *d)
{
struct pfn_info *page;
void *vbase,*vcur;
@@ -234,7 +234,7 @@ alloc_pmt(struct domain *d)
* Insert guest TLB to machine TLB.
* data: In TLB format
*/
-void machine_tlb_insert(struct exec_domain *d, thash_data_t *tlb)
+void machine_tlb_insert(struct vcpu *d, thash_data_t *tlb)
{
u64 saved_itir, saved_ifa, saved_rr;
u64 pages;
@@ -285,7 +285,7 @@ u64 machine_thash(PTA pta, u64 va, u64 rid, u64 ps)
u64 saved_pta, saved_rr0;
u64 hash_addr, tag;
unsigned long psr;
- struct exec_domain *ed = current;
+ struct vcpu *v = current;
rr_t vrr;
@@ -299,7 +299,7 @@ u64 machine_thash(PTA pta, u64 va, u64 rid, u64 ps)
// TODO: Set to enforce lazy mode
local_irq_save(psr);
ia64_setreg(_IA64_REG_CR_PTA, pta.val);
- ia64_set_rr(0, vmx_vrrtomrr(ed, vrr.value));
+ ia64_set_rr(0, vmx_vrrtomrr(v, vrr.value));
ia64_srlz_d();
hash_addr = ia64_thash(va);
@@ -316,7 +316,7 @@ u64 machine_ttag(PTA pta, u64 va, u64 rid, u64 ps)
u64 saved_pta, saved_rr0;
u64 hash_addr, tag;
u64 psr;
- struct exec_domain *ed = current;
+ struct vcpu *v = current;
rr_t vrr;
// TODO: Set to enforce lazy mode
@@ -329,7 +329,7 @@ u64 machine_ttag(PTA pta, u64 va, u64 rid, u64 ps)
va = (va << 3) >> 3; // set VRN to 0.
local_irq_save(psr);
ia64_setreg(_IA64_REG_CR_PTA, pta.val);
- ia64_set_rr(0, vmx_vrrtomrr(ed, vrr.value));
+ ia64_set_rr(0, vmx_vrrtomrr(v, vrr.value));
ia64_srlz_d();
tag = ia64_ttag(va);
diff --git a/xen/arch/ia64/vmx_init.c b/xen/arch/ia64/vmx_init.c
index e4c31ae6e9..87c1339ea4 100644
--- a/xen/arch/ia64/vmx_init.c
+++ b/xen/arch/ia64/vmx_init.c
@@ -174,10 +174,10 @@ static vpd_t *alloc_vpd(void)
* Create a VP on intialized VMX environment.
*/
static void
-vmx_create_vp(struct exec_domain *ed)
+vmx_create_vp(struct vcpu *v)
{
u64 ret;
- vpd_t *vpd = ed->arch.arch_vmx.vpd;
+ vpd_t *vpd = v->arch.arch_vmx.vpd;
u64 ivt_base;
extern char vmx_ia64_ivt;
/* ia64_ivt is function pointer, so need this tranlation */
@@ -207,55 +207,55 @@ void vmx_init_double_mapping_stub(void)
/* Other non-context related tasks can be done in context switch */
void
-vmx_save_state(struct exec_domain *ed)
+vmx_save_state(struct vcpu *v)
{
u64 status, psr;
u64 old_rr0, dom_rr7, rr0_xen_start, rr0_vhpt;
/* FIXME: about setting of pal_proc_vector... time consuming */
- status = ia64_pal_vp_save(ed->arch.arch_vmx.vpd, 0);
+ status = ia64_pal_vp_save(v->arch.arch_vmx.vpd, 0);
if (status != PAL_STATUS_SUCCESS)
panic("Save vp status failed\n");
- /* FIXME: Do we really need purge double mapping for old ed?
+ /* FIXME: Do we really need purge double mapping for old vcpu?
* Since rid is completely different between prev and next,
* it's not overlap and thus no MCA possible... */
- dom_rr7 = vmx_vrrtomrr(ed, VMX(ed, vrr[7]));
+ dom_rr7 = vmx_vrrtomrr(v, VMX(v, vrr[7]));
vmx_purge_double_mapping(dom_rr7, KERNEL_START,
- (u64)ed->arch.vtlb->ts->vhpt->hash);
+ (u64)v->arch.vtlb->ts->vhpt->hash);
}
/* Even guest is in physical mode, we still need such double mapping */
void
-vmx_load_state(struct exec_domain *ed)
+vmx_load_state(struct vcpu *v)
{
u64 status, psr;
u64 old_rr0, dom_rr7, rr0_xen_start, rr0_vhpt;
u64 pte_xen, pte_vhpt;
- status = ia64_pal_vp_restore(ed->arch.arch_vmx.vpd, 0);
+ status = ia64_pal_vp_restore(v->arch.arch_vmx.vpd, 0);
if (status != PAL_STATUS_SUCCESS)
panic("Restore vp status failed\n");
- dom_rr7 = vmx_vrrtomrr(ed, VMX(ed, vrr[7]));
+ dom_rr7 = vmx_vrrtomrr(v, VMX(v, vrr[7]));
pte_xen = pte_val(pfn_pte((xen_pstart >> PAGE_SHIFT), PAGE_KERNEL));
- pte_vhpt = pte_val(pfn_pte((__pa(ed->arch.vtlb->ts->vhpt->hash) >> PAGE_SHIFT), PAGE_KERNEL));
+ pte_vhpt = pte_val(pfn_pte((__pa(v->arch.vtlb->ts->vhpt->hash) >> PAGE_SHIFT), PAGE_KERNEL));
vmx_insert_double_mapping(dom_rr7, KERNEL_START,
- (u64)ed->arch.vtlb->ts->vhpt->hash,
+ (u64)v->arch.vtlb->ts->vhpt->hash,
pte_xen, pte_vhpt);
/* Guest vTLB is not required to be switched explicitly, since
- * anchored in exec_domain */
+ * anchored in vcpu */
}
/* Purge old double mapping and insert new one, due to rr7 change */
void
-vmx_change_double_mapping(struct exec_domain *ed, u64 oldrr7, u64 newrr7)
+vmx_change_double_mapping(struct vcpu *v, u64 oldrr7, u64 newrr7)
{
u64 pte_xen, pte_vhpt, vhpt_base;
- vhpt_base = (u64)ed->arch.vtlb->ts->vhpt->hash;
+ vhpt_base = (u64)v->arch.vtlb->ts->vhpt->hash;
vmx_purge_double_mapping(oldrr7, KERNEL_START,
vhpt_base);
@@ -267,29 +267,29 @@ vmx_change_double_mapping(struct exec_domain *ed, u64 oldrr7, u64 newrr7)
}
/*
- * Initialize VMX envirenment for guest. Only the 1st vp/exec_domain
+ * Initialize VMX envirenment for guest. Only the 1st vp/vcpu
* is registered here.
*/
void
vmx_final_setup_domain(struct domain *d)
{
- struct exec_domain *ed = d->exec_domain[0];
+ struct vcpu *v = d->vcpu[0];
vpd_t *vpd;
- /* Allocate resources for exec_domain 0 */
- //memset(&ed->arch.arch_vmx, 0, sizeof(struct arch_vmx_struct));
+ /* Allocate resources for vcpu 0 */
+ //memset(&v->arch.arch_vmx, 0, sizeof(struct arch_vmx_struct));
vpd = alloc_vpd();
ASSERT(vpd);
- ed->arch.arch_vmx.vpd = vpd;
+ v->arch.arch_vmx.vpd = vpd;
vpd->virt_env_vaddr = vm_buffer;
- /* ed->arch.schedule_tail = arch_vmx_do_launch; */
- vmx_create_vp(ed);
+ /* v->arch.schedule_tail = arch_vmx_do_launch; */
+ vmx_create_vp(v);
/* Set this ed to be vmx */
- ed->arch.arch_vmx.flags = 1;
+ v->arch.arch_vmx.flags = 1;
/* Other vmx specific initialization work */
}
diff --git a/xen/arch/ia64/vmx_process.c b/xen/arch/ia64/vmx_process.c
index 99701e36ef..fdcce0c151 100644
--- a/xen/arch/ia64/vmx_process.c
+++ b/xen/arch/ia64/vmx_process.c
@@ -59,7 +59,7 @@ vmx_ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long is
{
static int first_time = 1;
struct domain *d = (struct domain *) current->domain;
- struct exec_domain *ed = (struct domain *) current;
+ struct vcpu *v = (struct domain *) current;
extern unsigned long running_on_sim;
unsigned long i, sal_param[8];
@@ -80,18 +80,18 @@ vmx_ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long is
case FW_HYPERCALL_PAL_CALL:
//printf("*** PAL hypercall: index=%d\n",regs->r28);
//FIXME: This should call a C routine
- x = pal_emulator_static(VMX_VPD(ed, vgr[12]));
+ x = pal_emulator_static(VMX_VPD(v, vgr[12]));
regs->r8 = x.status; regs->r9 = x.v0;
regs->r10 = x.v1; regs->r11 = x.v2;
#if 0
if (regs->r8)
printk("Failed vpal emulation, with index:0x%lx\n",
- VMX_VPD(ed, vgr[12]));
+ VMX_VPD(v, vgr[12]));
#endif
break;
case FW_HYPERCALL_SAL_CALL:
for (i = 0; i < 8; i++)
- vmx_vcpu_get_gr(ed, 32+i, &sal_param[i]);
+ vmx_vcpu_get_gr(v, 32+i, &sal_param[i]);
x = sal_emulator(sal_param[0], sal_param[1],
sal_param[2], sal_param[3],
sal_param[4], sal_param[5],
@@ -117,8 +117,8 @@ vmx_ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long is
{
unsigned long *tv, *tc;
fooefi();
- vmx_vcpu_get_gr(ed, 32, &tv);
- vmx_vcpu_get_gr(ed, 33, &tc);
+ vmx_vcpu_get_gr(v, 32, &tv);
+ vmx_vcpu_get_gr(v, 33, &tc);
printf("efi_get_time(%p,%p) called...",tv,tc);
tv = __va(translate_domain_mpaddr(tv));
if (tc) tc = __va(translate_domain_mpaddr(tc));
@@ -191,11 +191,11 @@ void vmx_reflect_interruption(UINT64 ifa,UINT64 isr,UINT64 iim,
void vmx_deliver_pending_interrupt(struct pt_regs *regs)
{
struct domain *d = current->domain;
- struct exec_domain *ed = current;
+ struct vcpu *v = current;
// FIXME: Will this work properly if doing an RFI???
if (!is_idle_task(d) ) { // always comes from guest
- //vcpu_poke_timer(ed);
- //if (vcpu_deliverable_interrupts(ed)) {
+ //vcpu_poke_timer(v);
+ //if (vcpu_deliverable_interrupts(v)) {
// unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
// foodpi();
// reflect_interruption(0,isr,0,regs,IA64_EXTINT_VECTOR);
@@ -207,7 +207,7 @@ void vmx_deliver_pending_interrupt(struct pt_regs *regs)
printk("WARNING: checking pending interrupt in nested interrupt!!!\n");
if (regs->cr_iip == *(unsigned long *)vmx_dorfirfi)
return;
- vmx_check_pending_irq(ed);
+ vmx_check_pending_irq(v);
}
}
diff --git a/xen/arch/ia64/xenirq.c b/xen/arch/ia64/xenirq.c
index 8809122751..5bf09171c8 100644
--- a/xen/arch/ia64/xenirq.c
+++ b/xen/arch/ia64/xenirq.c
@@ -49,8 +49,8 @@ xen_do_IRQ(ia64_vector vector)
}
#endif
//FIXME: TEMPORARY HACK!!!!
- vcpu_pend_interrupt(dom0->exec_domain[0],vector);
- domain_wake(dom0->exec_domain[0]);
+ vcpu_pend_interrupt(dom0->vcpu[0],vector);
+ domain_wake(dom0->vcpu[0]);
return(1);
}
return(0);
diff --git a/xen/arch/ia64/xenmisc.c b/xen/arch/ia64/xenmisc.c
index e82dd8482d..b25bb67375 100644
--- a/xen/arch/ia64/xenmisc.c
+++ b/xen/arch/ia64/xenmisc.c
@@ -91,14 +91,14 @@ unsigned long __hypercall_create_continuation(
int reprogram_ac_timer(s_time_t timeout)
{
- struct exec_domain *ed = current;
+ struct vcpu *v = current;
#ifdef CONFIG_VTI
- if(VMX_DOMAIN(ed))
+ if(VMX_DOMAIN(v))
return 1;
#endif // CONFIG_VTI
local_cpu_data->itm_next = timeout;
- if (is_idle_task(ed->domain)) vcpu_safe_set_itm(timeout);
+ if (is_idle_task(v->domain)) vcpu_safe_set_itm(timeout);
else vcpu_set_next_timer(current);
return 1;
}
@@ -232,7 +232,7 @@ void cs10foo(void) {}
void cs01foo(void) {}
// context_switch
-void context_switch(struct exec_domain *prev, struct exec_domain *next)
+void context_switch(struct vcpu *prev, struct vcpu *next)
{
//printk("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
//printk("@@@@@@ context switch from domain %d (%x) to domain %d (%x)\n",
@@ -261,7 +261,7 @@ void context_switch(struct exec_domain *prev, struct exec_domain *next)
{
static long cnt[16] = { 50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50};
static int i = 100;
-int id = ((struct exec_domain *)current)->domain->domain_id & 0xf;
+int id = ((struct vcpu *)current)->domain->domain_id & 0xf;
if (!cnt[id]--) { printk("%x",id); cnt[id] = 500; }
if (!i--) { printk("+",id); cnt[id] = 1000; }
}
@@ -281,7 +281,7 @@ if (!i--) { printk("+",id); cnt[id] = 1000; }
#endif // CONFIG_VTI
}
-void continue_running(struct exec_domain *same)
+void continue_running(struct vcpu *same)
{
/* nothing to do */
}
@@ -290,23 +290,23 @@ void panic_domain(struct pt_regs *regs, const char *fmt, ...)
{
va_list args;
char buf[128];
- struct exec_domain *ed = current;
+ struct vcpu *v = current;
static volatile int test = 1; // so can continue easily in debug
extern spinlock_t console_lock;
unsigned long flags;
loop:
printf("$$$$$ PANIC in domain %d (k6=%p): ",
- ed->domain->domain_id, ia64_get_kr(IA64_KR_CURRENT));
+ v->domain->domain_id, ia64_get_kr(IA64_KR_CURRENT));
va_start(args, fmt);
(void)vsnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
printf(buf);
if (regs) show_registers(regs);
domain_pause_by_systemcontroller(current->domain);
- ed->domain->shutdown_code = SHUTDOWN_crash;
- set_bit(_DOMF_shutdown, ed->domain->domain_flags);
- if (ed->domain->domain_id == 0) {
+ v->domain->shutdown_code = SHUTDOWN_crash;
+ set_bit(_DOMF_shutdown, v->domain->domain_flags);
+ if (v->domain->domain_id == 0) {
int i = 1000000000L;
// if domain0 crashes, just periodically print out panic
// message to make post-mortem easier
diff --git a/xen/arch/ia64/xensetup.c b/xen/arch/ia64/xensetup.c
index f1f10a1f15..cb65ff67a2 100644
--- a/xen/arch/ia64/xensetup.c
+++ b/xen/arch/ia64/xensetup.c
@@ -24,7 +24,7 @@ unsigned long xenheap_phys_end;
char saved_command_line[COMMAND_LINE_SIZE];
-struct exec_domain *idle_task[NR_CPUS] = { &idle0_exec_domain };
+struct vcpu *idle_task[NR_CPUS] = { &idle0_vcpu };
#ifdef CLONE_DOMAIN0
struct domain *clones[CLONE_DOMAIN0];
@@ -147,9 +147,9 @@ void start_kernel(void)
xen_pstart = ia64_tpa(KERNEL_START);
/* Must do this early -- e.g., spinlocks rely on get_current(). */
- //set_current(&idle0_exec_domain);
- ia64_r13 = (void *)&idle0_exec_domain;
- idle0_exec_domain.domain = &idle0_domain;
+ //set_current(&idle0_vcpu);
+ ia64_r13 = (void *)&idle0_vcpu;
+ idle0_vcpu.domain = &idle0_domain;
early_setup_arch(&cmdline);
diff --git a/xen/arch/ia64/xentime.c b/xen/arch/ia64/xentime.c
index 22aa437ebe..8031bedf34 100644
--- a/xen/arch/ia64/xentime.c
+++ b/xen/arch/ia64/xentime.c
@@ -94,7 +94,7 @@ s_time_t get_s_time(void)
return now;
}
-void update_dom_time(struct exec_domain *ed)
+void update_dom_time(struct vcpu *v)
{
// FIXME: implement this?
// printf("update_dom_time: called, not implemented, skipping\n");
@@ -161,10 +161,10 @@ xen_timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
//domain0_ready = 1; // moved to xensetup.c
current->vcpu_info->arch.pending_interruption = 1;
}
- if (domain0_ready && vcpu_timer_expired(dom0->exec_domain[0])) {
- vcpu_pend_timer(dom0->exec_domain[0]);
- //vcpu_set_next_timer(dom0->exec_domain[0]);
- domain_wake(dom0->exec_domain[0]);
+ if (domain0_ready && vcpu_timer_expired(dom0->vcpu[0])) {
+ vcpu_pend_timer(dom0->vcpu[0]);
+ //vcpu_set_next_timer(dom0->vcpu[0]);
+ domain_wake(dom0->vcpu[0]);
}
if (!is_idle_task(current->domain) && current->domain != dom0) {
if (vcpu_timer_expired(current)) {
@@ -304,7 +304,7 @@ static irqreturn_t
vmx_timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
{
unsigned long new_itm;
- struct exec_domain *ed = current;
+ struct vcpu *v = current;
new_itm = local_cpu_data->itm_next;
@@ -319,7 +319,7 @@ vmx_timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
* fixing that would require updates to all
* platforms.
*/
- update_process_times(user_mode(ed, regs));
+ update_process_times(user_mode(v, regs));
#endif
new_itm += local_cpu_data->itm_delta;
diff --git a/xen/arch/x86/audit.c b/xen/arch/x86/audit.c
index d389758a93..ef58f52eff 100644
--- a/xen/arch/x86/audit.c
+++ b/xen/arch/x86/audit.c
@@ -404,16 +404,16 @@ int audit_adjust_pgtables(struct domain *d, int dir, int noisy)
void adjust_for_pgtbase()
{
- struct exec_domain *ed;
+ struct vcpu *v;
- for_each_exec_domain(d, ed)
+ for_each_vcpu(d, v)
{
- if ( pagetable_get_paddr(ed->arch.guest_table) )
- adjust(&frame_table[pagetable_get_pfn(ed->arch.guest_table)], 1);
- if ( pagetable_get_paddr(ed->arch.shadow_table) )
- adjust(&frame_table[pagetable_get_pfn(ed->arch.shadow_table)], 0);
- if ( ed->arch.monitor_shadow_ref )
- adjust(&frame_table[ed->arch.monitor_shadow_ref], 0);
+ if ( pagetable_get_paddr(v->arch.guest_table) )
+ adjust(&frame_table[pagetable_get_pfn(v->arch.guest_table)], 1);
+ if ( pagetable_get_paddr(v->arch.shadow_table) )
+ adjust(&frame_table[pagetable_get_pfn(v->arch.shadow_table)], 0);
+ if ( v->arch.monitor_shadow_ref )
+ adjust(&frame_table[v->arch.monitor_shadow_ref], 0);
}
}
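The audit loop above walks a domain's VCPUs with for_each_vcpu(). A self-contained sketch of that list walk, with simplified stand-in structures instead of Xen's real ones (only vcpu[0] and next_in_list are taken from the code shown above):

#include <stdio.h>

#define MAX_VIRT_CPUS 4

struct vcpu {
    int vcpu_id;
    struct vcpu *next_in_list;
};

struct domain {
    struct vcpu *vcpu[MAX_VIRT_CPUS];
};

/* Mirrors the shape of the macro: start at vcpu[0], follow next_in_list. */
#define for_each_vcpu(d, v) \
    for ( (v) = (d)->vcpu[0]; (v) != NULL; (v) = (v)->next_in_list )

int main(void)
{
    struct vcpu v1 = { 1, NULL }, v0 = { 0, &v1 };
    struct domain d = { { &v0, &v1, NULL, NULL } };
    struct vcpu *v;

    for_each_vcpu(&d, v)
        printf("visiting vcpu %d\n", v->vcpu_id);
    return 0;
}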
diff --git a/xen/arch/x86/dom0_ops.c b/xen/arch/x86/dom0_ops.c
index 70138111fd..a26e41abfe 100644
--- a/xen/arch/x86/dom0_ops.c
+++ b/xen/arch/x86/dom0_ops.c
@@ -374,7 +374,7 @@ long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op)
}
void arch_getdomaininfo_ctxt(
- struct exec_domain *ed, struct vcpu_guest_context *c)
+ struct vcpu *v, struct vcpu_guest_context *c)
{
#ifdef __i386__ /* Remove when x86_64 VMX is implemented */
#ifdef CONFIG_VMX
@@ -382,30 +382,30 @@ void arch_getdomaininfo_ctxt(
#endif
#endif
- memcpy(c, &ed->arch.guest_context, sizeof(*c));
+ memcpy(c, &v->arch.guest_context, sizeof(*c));
/* IOPL privileges are virtualised -- merge back into returned eflags. */
BUG_ON((c->user_regs.eflags & EF_IOPL) != 0);
- c->user_regs.eflags |= ed->arch.iopl << 12;
+ c->user_regs.eflags |= v->arch.iopl << 12;
#ifdef __i386__
#ifdef CONFIG_VMX
- if ( VMX_DOMAIN(ed) )
+ if ( VMX_DOMAIN(v) )
save_vmx_cpu_user_regs(&c->user_regs);
#endif
#endif
c->flags = 0;
- if ( test_bit(_VCPUF_fpu_initialised, &ed->vcpu_flags) )
+ if ( test_bit(_VCPUF_fpu_initialised, &v->vcpu_flags) )
c->flags |= VGCF_I387_VALID;
- if ( KERNEL_MODE(ed, &ed->arch.guest_context.user_regs) )
+ if ( KERNEL_MODE(v, &v->arch.guest_context.user_regs) )
c->flags |= VGCF_IN_KERNEL;
#ifdef CONFIG_VMX
- if (VMX_DOMAIN(ed))
+ if (VMX_DOMAIN(v))
c->flags |= VGCF_VMX_GUEST;
#endif
- c->pt_base = pagetable_get_paddr(ed->arch.guest_table);
+ c->pt_base = pagetable_get_paddr(v->arch.guest_table);
- c->vm_assist = ed->domain->vm_assist;
+ c->vm_assist = v->domain->vm_assist;
}
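arch_getdomaininfo_ctxt() above merges the virtualised IOPL back into the returned eflags, and arch_set_info_guest() later strips it again. IOPL occupies bits 12-13 of EFLAGS, so both directions are simple bit arithmetic; a minimal sketch of the two operations in isolation:

#include <assert.h>

#define EF_IOPL (3u << 12)   /* IOPL field: bits 12-13 of EFLAGS */

static unsigned int strip_iopl(unsigned int eflags, unsigned int *iopl)
{
    *iopl = (eflags >> 12) & 3;   /* remember the guest's requested IOPL */
    return eflags & ~EF_IOPL;     /* keep eflags with IOPL cleared */
}

static unsigned int merge_iopl(unsigned int eflags, unsigned int iopl)
{
    assert((eflags & EF_IOPL) == 0);  /* mirrors the BUG_ON() above */
    return eflags | (iopl << 12);     /* report the virtualised IOPL back */
}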
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 25f0125609..36dec5d29e 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -47,16 +47,16 @@ static int opt_noreboot = 0;
boolean_param("noreboot", opt_noreboot);
struct percpu_ctxt {
- struct exec_domain *curr_ed;
+ struct vcpu *curr_vcpu;
} __cacheline_aligned;
static struct percpu_ctxt percpu_ctxt[NR_CPUS];
-static void continue_idle_task(struct exec_domain *ed)
+static void continue_idle_task(struct vcpu *v)
{
reset_stack_and_jump(idle_loop);
}
-static void continue_nonidle_task(struct exec_domain *ed)
+static void continue_nonidle_task(struct vcpu *v)
{
reset_stack_and_jump(ret_from_intr);
}
@@ -90,12 +90,12 @@ void idle_loop(void)
void startup_cpu_idle_loop(void)
{
- struct exec_domain *ed = current;
+ struct vcpu *v = current;
- ASSERT(is_idle_task(ed->domain));
- percpu_ctxt[smp_processor_id()].curr_ed = ed;
- set_bit(smp_processor_id(), &ed->domain->cpuset);
- ed->arch.schedule_tail = continue_idle_task;
+ ASSERT(is_idle_task(v->domain));
+ percpu_ctxt[smp_processor_id()].curr_vcpu = v;
+ set_bit(smp_processor_id(), &v->domain->cpuset);
+ v->arch.schedule_tail = continue_idle_task;
idle_loop();
}
@@ -206,14 +206,14 @@ void dump_pageframe_info(struct domain *d)
page->u.inuse.type_info);
}
-struct exec_domain *arch_alloc_exec_domain_struct(void)
+struct vcpu *arch_alloc_vcpu_struct(void)
{
- return xmalloc(struct exec_domain);
+ return xmalloc(struct vcpu);
}
-void arch_free_exec_domain_struct(struct exec_domain *ed)
+void arch_free_vcpu_struct(struct vcpu *v)
{
- xfree(ed);
+ xfree(v);
}
void free_perdomain_pt(struct domain *d)
@@ -225,21 +225,21 @@ void free_perdomain_pt(struct domain *d)
#endif
}
-void arch_do_createdomain(struct exec_domain *ed)
+void arch_do_createdomain(struct vcpu *v)
{
- struct domain *d = ed->domain;
+ struct domain *d = v->domain;
- ed->arch.flags = TF_kernel_mode;
+ v->arch.flags = TF_kernel_mode;
if ( is_idle_task(d) )
return;
- ed->arch.schedule_tail = continue_nonidle_task;
+ v->arch.schedule_tail = continue_nonidle_task;
d->shared_info = (void *)alloc_xenheap_page();
memset(d->shared_info, 0, PAGE_SIZE);
- ed->vcpu_info = &d->shared_info->vcpu_data[ed->vcpu_id];
- ed->cpumap = CPUMAP_RUNANYWHERE;
+ v->vcpu_info = &d->shared_info->vcpu_data[v->vcpu_id];
+ v->cpumap = CPUMAP_RUNANYWHERE;
SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
machine_to_phys_mapping[virt_to_phys(d->shared_info) >>
PAGE_SHIFT] = INVALID_M2P_ENTRY;
@@ -248,16 +248,16 @@ void arch_do_createdomain(struct exec_domain *ed)
memset(d->arch.mm_perdomain_pt, 0, PAGE_SIZE);
machine_to_phys_mapping[virt_to_phys(d->arch.mm_perdomain_pt) >>
PAGE_SHIFT] = INVALID_M2P_ENTRY;
- ed->arch.perdomain_ptes = d->arch.mm_perdomain_pt;
- ed->arch.perdomain_ptes[FIRST_RESERVED_GDT_PAGE] =
+ v->arch.perdomain_ptes = d->arch.mm_perdomain_pt;
+ v->arch.perdomain_ptes[FIRST_RESERVED_GDT_PAGE] =
l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
- ed->arch.guest_vtable = __linear_l2_table;
- ed->arch.shadow_vtable = __shadow_linear_l2_table;
+ v->arch.guest_vtable = __linear_l2_table;
+ v->arch.shadow_vtable = __shadow_linear_l2_table;
#ifdef __x86_64__
- ed->arch.guest_vl3table = __linear_l3_table;
- ed->arch.guest_vl4table = __linear_l4_table;
+ v->arch.guest_vl3table = __linear_l3_table;
+ v->arch.guest_vl4table = __linear_l4_table;
d->arch.mm_perdomain_l2 = (l2_pgentry_t *)alloc_xenheap_page();
memset(d->arch.mm_perdomain_l2, 0, PAGE_SIZE);
@@ -277,41 +277,41 @@ void arch_do_createdomain(struct exec_domain *ed)
INIT_LIST_HEAD(&d->arch.free_shadow_frames);
}
-void arch_do_boot_vcpu(struct exec_domain *ed)
+void arch_do_boot_vcpu(struct vcpu *v)
{
- struct domain *d = ed->domain;
+ struct domain *d = v->domain;
- ed->arch.flags = TF_kernel_mode;
+ v->arch.flags = TF_kernel_mode;
- ed->arch.schedule_tail = d->exec_domain[0]->arch.schedule_tail;
+ v->arch.schedule_tail = d->vcpu[0]->arch.schedule_tail;
- ed->arch.perdomain_ptes =
- d->arch.mm_perdomain_pt + (ed->vcpu_id << PDPT_VCPU_SHIFT);
- ed->arch.perdomain_ptes[FIRST_RESERVED_GDT_PAGE] =
+ v->arch.perdomain_ptes =
+ d->arch.mm_perdomain_pt + (v->vcpu_id << PDPT_VCPU_SHIFT);
+ v->arch.perdomain_ptes[FIRST_RESERVED_GDT_PAGE] =
l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
}
#ifdef CONFIG_VMX
-void arch_vmx_do_resume(struct exec_domain *ed)
+void arch_vmx_do_resume(struct vcpu *v)
{
- u64 vmcs_phys_ptr = (u64) virt_to_phys(ed->arch.arch_vmx.vmcs);
+ u64 vmcs_phys_ptr = (u64) virt_to_phys(v->arch.arch_vmx.vmcs);
- load_vmcs(&ed->arch.arch_vmx, vmcs_phys_ptr);
- vmx_do_resume(ed);
+ load_vmcs(&v->arch.arch_vmx, vmcs_phys_ptr);
+ vmx_do_resume(v);
reset_stack_and_jump(vmx_asm_do_resume);
}
-void arch_vmx_do_launch(struct exec_domain *ed)
+void arch_vmx_do_launch(struct vcpu *v)
{
- u64 vmcs_phys_ptr = (u64) virt_to_phys(ed->arch.arch_vmx.vmcs);
+ u64 vmcs_phys_ptr = (u64) virt_to_phys(v->arch.arch_vmx.vmcs);
- load_vmcs(&ed->arch.arch_vmx, vmcs_phys_ptr);
- vmx_do_launch(ed);
+ load_vmcs(&v->arch.arch_vmx, vmcs_phys_ptr);
+ vmx_do_launch(v);
reset_stack_and_jump(vmx_asm_do_launch);
}
static int vmx_final_setup_guest(
- struct exec_domain *ed, struct vcpu_guest_context *ctxt)
+ struct vcpu *v, struct vcpu_guest_context *ctxt)
{
int error;
struct cpu_user_regs *regs;
@@ -327,36 +327,36 @@ static int vmx_final_setup_guest(
return -ENOMEM;
}
- memset(&ed->arch.arch_vmx, 0, sizeof (struct arch_vmx_struct));
+ memset(&v->arch.arch_vmx, 0, sizeof (struct arch_vmx_struct));
- ed->arch.arch_vmx.vmcs = vmcs;
+ v->arch.arch_vmx.vmcs = vmcs;
error = construct_vmcs(
- &ed->arch.arch_vmx, regs, ctxt, VMCS_USE_HOST_ENV);
+ &v->arch.arch_vmx, regs, ctxt, VMCS_USE_HOST_ENV);
if ( error < 0 )
{
printk("Failed to construct a new VMCS\n");
goto out;
}
- ed->arch.schedule_tail = arch_vmx_do_launch;
+ v->arch.schedule_tail = arch_vmx_do_launch;
#if defined (__i386)
- ed->arch.arch_vmx.vmx_platform.real_mode_data =
+ v->arch.arch_vmx.vmx_platform.real_mode_data =
(unsigned long *) regs->esi;
#endif
- if (ed == ed->domain->exec_domain[0]) {
+ if (v == v->domain->vcpu[0]) {
/*
* Required to do this once per domain
* XXX todo: add a seperate function to do these.
*/
- memset(&ed->domain->shared_info->evtchn_mask[0], 0xff,
- sizeof(ed->domain->shared_info->evtchn_mask));
- clear_bit(IOPACKET_PORT, &ed->domain->shared_info->evtchn_mask[0]);
+ memset(&v->domain->shared_info->evtchn_mask[0], 0xff,
+ sizeof(v->domain->shared_info->evtchn_mask));
+ clear_bit(IOPACKET_PORT, &v->domain->shared_info->evtchn_mask[0]);
/* Put the domain in shadow mode even though we're going to be using
* the shared 1:1 page table initially. It shouldn't hurt */
- shadow_mode_enable(ed->domain,
+ shadow_mode_enable(v->domain,
SHM_enable|SHM_refcounts|
SHM_translate|SHM_external);
}
@@ -365,7 +365,7 @@ static int vmx_final_setup_guest(
out:
free_vmcs(vmcs);
- ed->arch.arch_vmx.vmcs = 0;
+ v->arch.arch_vmx.vmcs = 0;
return error;
}
#endif
@@ -373,9 +373,9 @@ out:
/* This is called by arch_final_setup_guest and do_boot_vcpu */
int arch_set_info_guest(
- struct exec_domain *ed, struct vcpu_guest_context *c)
+ struct vcpu *v, struct vcpu_guest_context *c)
{
- struct domain *d = ed->domain;
+ struct domain *d = v->domain;
unsigned long phys_basetab;
int i, rc;
@@ -391,45 +391,45 @@ int arch_set_info_guest(
return -EINVAL;
}
- clear_bit(_VCPUF_fpu_initialised, &ed->vcpu_flags);
+ clear_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
if ( c->flags & VGCF_I387_VALID )
- set_bit(_VCPUF_fpu_initialised, &ed->vcpu_flags);
+ set_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
- ed->arch.flags &= ~TF_kernel_mode;
+ v->arch.flags &= ~TF_kernel_mode;
if ( c->flags & VGCF_IN_KERNEL )
- ed->arch.flags |= TF_kernel_mode;
+ v->arch.flags |= TF_kernel_mode;
- memcpy(&ed->arch.guest_context, c, sizeof(*c));
+ memcpy(&v->arch.guest_context, c, sizeof(*c));
if ( !(c->flags & VGCF_VMX_GUEST) )
{
/* IOPL privileges are virtualised. */
- ed->arch.iopl = (ed->arch.guest_context.user_regs.eflags >> 12) & 3;
- ed->arch.guest_context.user_regs.eflags &= ~EF_IOPL;
+ v->arch.iopl = (v->arch.guest_context.user_regs.eflags >> 12) & 3;
+ v->arch.guest_context.user_regs.eflags &= ~EF_IOPL;
/* Ensure real hardware interrupts are enabled. */
- ed->arch.guest_context.user_regs.eflags |= EF_IE;
+ v->arch.guest_context.user_regs.eflags |= EF_IE;
} else {
- __vmwrite(GUEST_EFLAGS, ed->arch.guest_context.user_regs.eflags);
- if (ed->arch.guest_context.user_regs.eflags & EF_TF)
+ __vmwrite(GUEST_EFLAGS, v->arch.guest_context.user_regs.eflags);
+ if (v->arch.guest_context.user_regs.eflags & EF_TF)
__vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
else
__vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
}
- if ( test_bit(_VCPUF_initialised, &ed->vcpu_flags) )
+ if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
return 0;
- memset(ed->arch.guest_context.debugreg, 0,
- sizeof(ed->arch.guest_context.debugreg));
+ memset(v->arch.guest_context.debugreg, 0,
+ sizeof(v->arch.guest_context.debugreg));
for ( i = 0; i < 8; i++ )
- (void)set_debugreg(ed, i, c->debugreg[i]);
+ (void)set_debugreg(v, i, c->debugreg[i]);
- if ( ed->vcpu_id == 0 )
+ if ( v->vcpu_id == 0 )
d->vm_assist = c->vm_assist;
phys_basetab = c->pt_base;
- ed->arch.guest_table = mk_pagetable(phys_basetab);
+ v->arch.guest_table = mk_pagetable(phys_basetab);
if ( shadow_mode_refcounts(d) )
{
@@ -443,7 +443,7 @@ int arch_set_info_guest(
return -EINVAL;
}
- if ( (rc = (int)set_gdt(ed, c->gdt_frames, c->gdt_ents)) != 0 )
+ if ( (rc = (int)set_gdt(v, c->gdt_frames, c->gdt_ents)) != 0 )
{
put_page_and_type(&frame_table[phys_basetab>>PAGE_SHIFT]);
return rc;
@@ -461,23 +461,23 @@ int arch_set_info_guest(
// page table, and/or build the table itself, or ???
//
if ( !pagetable_get_paddr(d->arch.phys_table) )
- d->arch.phys_table = ed->arch.guest_table;
+ d->arch.phys_table = v->arch.guest_table;
- if ( (error = vmx_final_setup_guest(ed, c)) )
+ if ( (error = vmx_final_setup_guest(v, c)) )
return error;
}
#endif
- update_pagetables(ed);
+ update_pagetables(v);
/* Don't redo final setup */
- set_bit(_VCPUF_initialised, &ed->vcpu_flags);
+ set_bit(_VCPUF_initialised, &v->vcpu_flags);
return 0;
}
-void new_thread(struct exec_domain *d,
+void new_thread(struct vcpu *d,
unsigned long start_pc,
unsigned long start_stack,
unsigned long start_info)
@@ -506,12 +506,12 @@ void new_thread(struct exec_domain *d,
#ifdef __x86_64__
-void toggle_guest_mode(struct exec_domain *ed)
+void toggle_guest_mode(struct vcpu *v)
{
- ed->arch.flags ^= TF_kernel_mode;
+ v->arch.flags ^= TF_kernel_mode;
__asm__ __volatile__ ( "swapgs" );
- update_pagetables(ed);
- write_ptbase(ed);
+ update_pagetables(v);
+ write_ptbase(v);
}
#define loadsegment(seg,value) ({ \
@@ -530,7 +530,7 @@ void toggle_guest_mode(struct exec_domain *ed)
: "=r" (__r) : "r" (value), "0" (__r) );\
__r; })
-static void load_segments(struct exec_domain *p, struct exec_domain *n)
+static void load_segments(struct vcpu *p, struct vcpu *n)
{
struct vcpu_guest_context *pctxt = &p->arch.guest_context;
struct vcpu_guest_context *nctxt = &n->arch.guest_context;
@@ -632,9 +632,9 @@ static void load_segments(struct exec_domain *p, struct exec_domain *n)
}
}
-static void save_segments(struct exec_domain *ed)
+static void save_segments(struct vcpu *v)
{
- struct cpu_user_regs *regs = &ed->arch.guest_context.user_regs;
+ struct cpu_user_regs *regs = &v->arch.guest_context.user_regs;
__asm__ __volatile__ ( "movl %%ds,%0" : "=m" (regs->ds) );
__asm__ __volatile__ ( "movl %%es,%0" : "=m" (regs->es) );
__asm__ __volatile__ ( "movl %%fs,%0" : "=m" (regs->fs) );
@@ -657,13 +657,13 @@ long do_switch_to_user(void)
{
struct cpu_user_regs *regs = guest_cpu_user_regs();
struct switch_to_user stu;
- struct exec_domain *ed = current;
+ struct vcpu *v = current;
if ( unlikely(copy_from_user(&stu, (void *)regs->rsp, sizeof(stu))) ||
- unlikely(pagetable_get_paddr(ed->arch.guest_table_user) == 0) )
+ unlikely(pagetable_get_paddr(v->arch.guest_table_user) == 0) )
return -EFAULT;
- toggle_guest_mode(ed);
+ toggle_guest_mode(v);
regs->rip = stu.rip;
regs->cs = stu.cs | 3; /* force guest privilege */
@@ -690,7 +690,7 @@ long do_switch_to_user(void)
#define save_segments(_p) ((void)0)
#define clear_segments() ((void)0)
-static inline void switch_kernel_stack(struct exec_domain *n, unsigned int cpu)
+static inline void switch_kernel_stack(struct vcpu *n, unsigned int cpu)
{
struct tss_struct *tss = &init_tss[cpu];
tss->esp1 = n->arch.guest_context.kernel_sp;
@@ -699,15 +699,15 @@ static inline void switch_kernel_stack(struct exec_domain *n, unsigned int cpu)
#endif
-#define loaddebug(_ed,_reg) \
- __asm__ __volatile__ ("mov %0,%%db" #_reg : : "r" ((_ed)->debugreg[_reg]))
+#define loaddebug(_v,_reg) \
+ __asm__ __volatile__ ("mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]))
static void __context_switch(void)
{
struct cpu_user_regs *stack_regs = guest_cpu_user_regs();
unsigned int cpu = smp_processor_id();
- struct exec_domain *p = percpu_ctxt[cpu].curr_ed;
- struct exec_domain *n = current;
+ struct vcpu *p = percpu_ctxt[cpu].curr_vcpu;
+ struct vcpu *n = current;
if ( !is_idle_task(p->domain) )
{
@@ -759,19 +759,19 @@ static void __context_switch(void)
if ( p->domain != n->domain )
clear_bit(cpu, &p->domain->cpuset);
- percpu_ctxt[cpu].curr_ed = n;
+ percpu_ctxt[cpu].curr_vcpu = n;
}
-void context_switch(struct exec_domain *prev, struct exec_domain *next)
+void context_switch(struct vcpu *prev, struct vcpu *next)
{
- struct exec_domain *realprev;
+ struct vcpu *realprev;
local_irq_disable();
set_current(next);
- if ( ((realprev = percpu_ctxt[smp_processor_id()].curr_ed) == next) ||
+ if ( ((realprev = percpu_ctxt[smp_processor_id()].curr_vcpu) == next) ||
is_idle_task(next->domain) )
{
local_irq_enable();
@@ -801,7 +801,7 @@ void context_switch(struct exec_domain *prev, struct exec_domain *next)
BUG();
}
-void continue_running(struct exec_domain *same)
+void continue_running(struct vcpu *same)
{
schedule_tail(same);
BUG();
@@ -809,7 +809,7 @@ void continue_running(struct exec_domain *same)
int __sync_lazy_execstate(void)
{
- if ( percpu_ctxt[smp_processor_id()].curr_ed == current )
+ if ( percpu_ctxt[smp_processor_id()].curr_vcpu == current )
return 0;
__context_switch();
load_LDT(current);
@@ -893,20 +893,20 @@ unsigned long __hypercall_create_continuation(
}
#ifdef CONFIG_VMX
-static void vmx_relinquish_resources(struct exec_domain *ed)
+static void vmx_relinquish_resources(struct vcpu *v)
{
- if ( !VMX_DOMAIN(ed) )
+ if ( !VMX_DOMAIN(v) )
return;
- BUG_ON(ed->arch.arch_vmx.vmcs == NULL);
- free_vmcs(ed->arch.arch_vmx.vmcs);
- ed->arch.arch_vmx.vmcs = 0;
+ BUG_ON(v->arch.arch_vmx.vmcs == NULL);
+ free_vmcs(v->arch.arch_vmx.vmcs);
+ v->arch.arch_vmx.vmcs = 0;
- free_monitor_pagetable(ed);
- rem_ac_timer(&ed->arch.arch_vmx.vmx_platform.vmx_pit.pit_timer);
+ free_monitor_pagetable(v);
+ rem_ac_timer(&v->arch.arch_vmx.vmx_platform.vmx_pit.pit_timer);
}
#else
-#define vmx_relinquish_resources(_ed) ((void)0)
+#define vmx_relinquish_resources(_v) ((void)0)
#endif
static void relinquish_memory(struct domain *d, struct list_head *list)
@@ -969,7 +969,7 @@ static void relinquish_memory(struct domain *d, struct list_head *list)
void domain_relinquish_resources(struct domain *d)
{
- struct exec_domain *ed;
+ struct vcpu *v;
BUG_ON(d->cpuset != 0);
@@ -981,29 +981,29 @@ void domain_relinquish_resources(struct domain *d)
gnttab_release_dev_mappings(d->grant_table);
/* Drop the in-use references to page-table bases. */
- for_each_exec_domain ( d, ed )
+ for_each_vcpu ( d, v )
{
- if ( pagetable_get_paddr(ed->arch.guest_table) != 0 )
+ if ( pagetable_get_paddr(v->arch.guest_table) != 0 )
{
if ( shadow_mode_refcounts(d) )
- put_page(&frame_table[pagetable_get_pfn(ed->arch.guest_table)]);
+ put_page(&frame_table[pagetable_get_pfn(v->arch.guest_table)]);
else
- put_page_and_type(&frame_table[pagetable_get_pfn(ed->arch.guest_table)]);
+ put_page_and_type(&frame_table[pagetable_get_pfn(v->arch.guest_table)]);
- ed->arch.guest_table = mk_pagetable(0);
+ v->arch.guest_table = mk_pagetable(0);
}
- if ( pagetable_get_paddr(ed->arch.guest_table_user) != 0 )
+ if ( pagetable_get_paddr(v->arch.guest_table_user) != 0 )
{
if ( shadow_mode_refcounts(d) )
- put_page(&frame_table[pagetable_get_pfn(ed->arch.guest_table_user)]);
+ put_page(&frame_table[pagetable_get_pfn(v->arch.guest_table_user)]);
else
- put_page_and_type(&frame_table[pagetable_get_pfn(ed->arch.guest_table_user)]);
+ put_page_and_type(&frame_table[pagetable_get_pfn(v->arch.guest_table_user)]);
- ed->arch.guest_table_user = mk_pagetable(0);
+ v->arch.guest_table_user = mk_pagetable(0);
}
- vmx_relinquish_resources(ed);
+ vmx_relinquish_resources(v);
}
shadow_mode_disable(d);
@@ -1012,8 +1012,8 @@ void domain_relinquish_resources(struct domain *d)
* Relinquish GDT mappings. No need for explicit unmapping of the LDT as
* it automatically gets squashed when the guest's mappings go away.
*/
- for_each_exec_domain(d, ed)
- destroy_gdt(ed);
+ for_each_vcpu(d, v)
+ destroy_gdt(v);
/* Relinquish every page of memory. */
relinquish_memory(d, &d->xenpage_list);
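The domain.c changes above rename the per-CPU "whose state is loaded here" pointer from curr_ed to curr_vcpu; context_switch() and __sync_lazy_execstate() use it to avoid reloading state when the incoming VCPU already owns the CPU. A rough, single-CPU sketch of that lazy-switch idea, with all Xen types reduced to placeholders:

struct vcpu;                              /* opaque for this sketch */

struct percpu_ctxt {
    struct vcpu *curr_vcpu;               /* VCPU whose state is loaded on this CPU */
};

static struct percpu_ctxt percpu_ctxt[1]; /* one CPU, for illustration */
static struct vcpu *current_vcpu;         /* stand-in for Xen's 'current' */

static void __context_switch_stub(void)
{
    /* save the outgoing VCPU's registers, load the incoming one's */
}

static int sync_lazy_execstate(void)
{
    /* Nothing to do if the loaded state already belongs to 'current'. */
    if ( percpu_ctxt[0].curr_vcpu == current_vcpu )
        return 0;
    __context_switch_stub();
    percpu_ctxt[0].curr_vcpu = current_vcpu;
    return 1;
}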
diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
index a19bbd16d7..b4012c1660 100644
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -84,7 +84,7 @@ int construct_dom0(struct domain *d,
unsigned long count;
struct pfn_info *page = NULL;
start_info_t *si;
- struct exec_domain *ed = d->exec_domain[0];
+ struct vcpu *v = d->vcpu[0];
#if defined(__i386__)
char *image_start = (char *)_image_start; /* use lowmem mappings */
char *initrd_start = (char *)_initrd_start; /* use lowmem mappings */
@@ -238,14 +238,14 @@ int construct_dom0(struct domain *d,
* We're basically forcing default RPLs to 1, so that our "what privilege
* level are we returning to?" logic works.
*/
- ed->arch.guest_context.kernel_ss = FLAT_KERNEL_SS;
+ v->arch.guest_context.kernel_ss = FLAT_KERNEL_SS;
for ( i = 0; i < 256; i++ )
- ed->arch.guest_context.trap_ctxt[i].cs = FLAT_KERNEL_CS;
+ v->arch.guest_context.trap_ctxt[i].cs = FLAT_KERNEL_CS;
#if defined(__i386__)
- ed->arch.guest_context.failsafe_callback_cs = FLAT_KERNEL_CS;
- ed->arch.guest_context.event_callback_cs = FLAT_KERNEL_CS;
+ v->arch.guest_context.failsafe_callback_cs = FLAT_KERNEL_CS;
+ v->arch.guest_context.event_callback_cs = FLAT_KERNEL_CS;
/*
* Protect the lowest 1GB of memory. We use a temporary mapping there
@@ -267,14 +267,17 @@ int construct_dom0(struct domain *d,
l2tab[(LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT)+i] =
l2e_from_paddr((u32)l2tab + i*PAGE_SIZE, __PAGE_HYPERVISOR);
}
- unsigned long v;
- for (v = PERDOMAIN_VIRT_START; v < PERDOMAIN_VIRT_END;
- v += (1 << L2_PAGETABLE_SHIFT)) {
- l2tab[v >> L2_PAGETABLE_SHIFT] =
- l2e_from_paddr(__pa(d->arch.mm_perdomain_pt) + (v-PERDOMAIN_VIRT_START),
- __PAGE_HYPERVISOR);
+ {
+ unsigned long va;
+ for (va = PERDOMAIN_VIRT_START; va < PERDOMAIN_VIRT_END;
+ va += (1 << L2_PAGETABLE_SHIFT)) {
+ l2tab[va >> L2_PAGETABLE_SHIFT] =
+ l2e_from_paddr(__pa(d->arch.mm_perdomain_pt) +
+ (va-PERDOMAIN_VIRT_START),
+ __PAGE_HYPERVISOR);
+ }
}
- ed->arch.guest_table = mk_pagetable((unsigned long)l3start);
+ v->arch.guest_table = mk_pagetable((unsigned long)l3start);
#else
l2start = l2tab = (l2_pgentry_t *)mpt_alloc; mpt_alloc += PAGE_SIZE;
memcpy(l2tab, &idle_pg_table[0], PAGE_SIZE);
@@ -282,7 +285,7 @@ int construct_dom0(struct domain *d,
l2e_from_paddr((unsigned long)l2start, __PAGE_HYPERVISOR);
l2tab[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
l2e_from_paddr(__pa(d->arch.mm_perdomain_pt), __PAGE_HYPERVISOR);
- ed->arch.guest_table = mk_pagetable((unsigned long)l2start);
+ v->arch.guest_table = mk_pagetable((unsigned long)l2start);
#endif
l2tab += l2_linear_offset(dsi.v_start);
@@ -405,7 +408,7 @@ int construct_dom0(struct domain *d,
l4e_from_paddr(__pa(l4start), __PAGE_HYPERVISOR);
l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
l4e_from_paddr(__pa(d->arch.mm_perdomain_l3), __PAGE_HYPERVISOR);
- ed->arch.guest_table = mk_pagetable(__pa(l4start));
+ v->arch.guest_table = mk_pagetable(__pa(l4start));
l4tab += l4_table_offset(dsi.v_start);
mfn = alloc_start >> PAGE_SHIFT;
@@ -498,11 +501,11 @@ int construct_dom0(struct domain *d,
d->shared_info->n_vcpu = num_online_cpus();
/* Set up monitor table */
- update_pagetables(ed);
+ update_pagetables(v);
/* Install the new page tables. */
local_irq_disable();
- write_ptbase(ed);
+ write_ptbase(v);
/* Copy the OS image and free temporary buffer. */
(void)loadelfimage(&dsi);
@@ -604,7 +607,7 @@ int construct_dom0(struct domain *d,
set_bit(_DOMF_constructed, &d->domain_flags);
- new_thread(ed, dsi.v_kernentry, vstack_end, vstartinfo_start);
+ new_thread(v, dsi.v_kernentry, vstack_end, vstartinfo_start);
if ( opt_dom0_shadow || opt_dom0_translate )
{
@@ -638,13 +641,13 @@ int construct_dom0(struct domain *d,
idle_pg_table[1] = root_from_paddr(
pagetable_get_paddr(d->arch.phys_table), __PAGE_HYPERVISOR);
translate_l2pgtable(d, (l1_pgentry_t *)(1u << L2_PAGETABLE_SHIFT),
- pagetable_get_pfn(ed->arch.guest_table));
+ pagetable_get_pfn(v->arch.guest_table));
idle_pg_table[1] = root_empty();
local_flush_tlb();
#endif
}
- update_pagetables(ed); /* XXX SMP */
+ update_pagetables(v); /* XXX SMP */
}
return 0;
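The reworked loop in construct_dom0() above fills one L2 slot per large-page step of the PERDOMAIN_VIRT_START..PERDOMAIN_VIRT_END range. The index and offset arithmetic, shown in isolation with made-up addresses and the page flags omitted:

#define L2_PAGETABLE_SHIFT   22                 /* one L2 entry covers 4MB here */
#define PERDOMAIN_VIRT_START 0xFC000000UL       /* illustrative addresses only */
#define PERDOMAIN_VIRT_END   0xFC400000UL

static unsigned long l2tab[1024];               /* stand-in L2 table */

static void map_perdomain(unsigned long perdomain_pt_pa)
{
    unsigned long va;

    for ( va = PERDOMAIN_VIRT_START; va < PERDOMAIN_VIRT_END;
          va += (1UL << L2_PAGETABLE_SHIFT) )
        /* the physical source advances by the same amount as the virtual
         * address; the real code also ORs in __PAGE_HYPERVISOR flags */
        l2tab[va >> L2_PAGETABLE_SHIFT] =
            perdomain_pt_pa + (va - PERDOMAIN_VIRT_START);
}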
diff --git a/xen/arch/x86/i387.c b/xen/arch/x86/i387.c
index bc75be99a5..eb5ff9479b 100644
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -22,7 +22,7 @@ void init_fpu(void)
set_bit(_VCPUF_fpu_initialised, &current->vcpu_flags);
}
-void save_init_fpu(struct exec_domain *tsk)
+void save_init_fpu(struct vcpu *tsk)
{
/*
* The guest OS may have set the 'virtual STTS' flag.
@@ -45,7 +45,7 @@ void save_init_fpu(struct exec_domain *tsk)
stts();
}
-void restore_fpu(struct exec_domain *tsk)
+void restore_fpu(struct vcpu *tsk)
{
/*
* FXRSTOR can fault if passed a corrupted data block. We handle this
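i387.c handles FPU state lazily: stts() sets CR0.TS when a VCPU's state is saved, so the next FPU instruction traps, and only that trap restores the guest's FPU context. A rough sketch of the state machine, with a plain flag standing in for the real CR0 manipulation:

struct fpu_vcpu {
    int fpu_initialised;   /* _VCPUF_fpu_initialised in the real code */
    int fpu_dirtied;       /* has this VCPU touched the FPU since restore? */
};

static int cr0_ts;         /* stand-in for the CR0.TS bit */

static void on_context_switch_out(struct fpu_vcpu *v)
{
    if ( v->fpu_dirtied )
        /* fxsave/fnsave would go here */ ;
    cr0_ts = 1;            /* stts(): the next FPU use will fault */
}

static void on_fpu_fault(struct fpu_vcpu *v)
{
    cr0_ts = 0;            /* clts(): allow FPU use again */
    if ( v->fpu_initialised )
        /* fxrstor/frstor would go here */ ;
    v->fpu_dirtied = 1;
}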
diff --git a/xen/arch/x86/idle0_task.c b/xen/arch/x86/idle0_task.c
index 8ed04ea072..b876c619ef 100644
--- a/xen/arch/x86/idle0_task.c
+++ b/xen/arch/x86/idle0_task.c
@@ -9,7 +9,7 @@ struct domain idle0_domain = {
refcnt: ATOMIC_INIT(1)
};
-struct exec_domain idle0_exec_domain = {
+struct vcpu idle0_vcpu = {
processor: 0,
domain: &idle0_domain
};
diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index 4381331eeb..a5657e72f8 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -184,22 +184,22 @@ typedef struct {
u8 nr_guests;
u8 in_flight;
u8 shareable;
- struct exec_domain *guest[IRQ_MAX_GUESTS];
+ struct vcpu *guest[IRQ_MAX_GUESTS];
} irq_guest_action_t;
static void __do_IRQ_guest(int irq)
{
irq_desc_t *desc = &irq_desc[irq];
irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
- struct exec_domain *ed;
+ struct vcpu *v;
int i;
for ( i = 0; i < action->nr_guests; i++ )
{
- ed = action->guest[i];
- if ( !test_and_set_bit(irq, &ed->domain->pirq_mask) )
+ v = action->guest[i];
+ if ( !test_and_set_bit(irq, &v->domain->pirq_mask) )
action->in_flight++;
- send_guest_pirq(ed, irq);
+ send_guest_pirq(v, irq);
}
}
@@ -231,9 +231,9 @@ int pirq_guest_unmask(struct domain *d)
return 0;
}
-int pirq_guest_bind(struct exec_domain *ed, int irq, int will_share)
+int pirq_guest_bind(struct vcpu *v, int irq, int will_share)
{
- struct domain *d = ed->domain;
+ struct domain *d = v->domain;
irq_desc_t *desc = &irq_desc[irq];
irq_guest_action_t *action;
unsigned long flags;
@@ -275,7 +275,7 @@ int pirq_guest_bind(struct exec_domain *ed, int irq, int will_share)
desc->handler->startup(irq);
/* Attempt to bind the interrupt target to the correct CPU. */
- cpu_set(ed->processor, cpumask);
+ cpu_set(v->processor, cpumask);
if ( desc->handler->set_affinity != NULL )
desc->handler->set_affinity(irq, cpumask);
}
@@ -294,7 +294,7 @@ int pirq_guest_bind(struct exec_domain *ed, int irq, int will_share)
goto out;
}
- action->guest[action->nr_guests++] = ed;
+ action->guest[action->nr_guests++] = v;
out:
spin_unlock_irqrestore(&desc->lock, flags);
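__do_IRQ_guest() above delivers one physical IRQ to every VCPU bound to it and counts, in in_flight, how many domains still need to acknowledge it; the bit in pirq_mask is only newly set once per domain. A simplified, non-atomic version of that accounting with stand-in types:

#define IRQ_MAX_GUESTS 7

struct guest {
    unsigned long pirq_mask;     /* one bit per pending physical IRQ */
};

struct irq_guest_action {
    int nr_guests;
    int in_flight;               /* domains that still have to unmask */
    struct guest *guest[IRQ_MAX_GUESTS];
};

static void do_irq_guest(struct irq_guest_action *action, int irq)
{
    int i;

    for ( i = 0; i < action->nr_guests; i++ )
    {
        struct guest *g = action->guest[i];
        unsigned long bit = 1UL << irq;

        if ( !(g->pirq_mask & bit) )   /* test_and_set_bit() in the real code */
        {
            g->pirq_mask |= bit;
            action->in_flight++;
        }
        /* send_guest_pirq(g, irq) would raise the event channel here */
    }
}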
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 8a60d435ee..d1fb86327a 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -232,35 +232,35 @@ void arch_init_memory(void)
subarch_init_memory(dom_xen);
}
-void write_ptbase(struct exec_domain *ed)
+void write_ptbase(struct vcpu *v)
{
- write_cr3(pagetable_get_paddr(ed->arch.monitor_table));
+ write_cr3(pagetable_get_paddr(v->arch.monitor_table));
}
-void invalidate_shadow_ldt(struct exec_domain *d)
+void invalidate_shadow_ldt(struct vcpu *v)
{
int i;
unsigned long pfn;
struct pfn_info *page;
- if ( d->arch.shadow_ldt_mapcnt == 0 )
+ if ( v->arch.shadow_ldt_mapcnt == 0 )
return;
- d->arch.shadow_ldt_mapcnt = 0;
+ v->arch.shadow_ldt_mapcnt = 0;
for ( i = 16; i < 32; i++ )
{
- pfn = l1e_get_pfn(d->arch.perdomain_ptes[i]);
+ pfn = l1e_get_pfn(v->arch.perdomain_ptes[i]);
if ( pfn == 0 ) continue;
- d->arch.perdomain_ptes[i] = l1e_empty();
+ v->arch.perdomain_ptes[i] = l1e_empty();
page = &frame_table[pfn];
ASSERT_PAGE_IS_TYPE(page, PGT_ldt_page);
- ASSERT_PAGE_IS_DOMAIN(page, d->domain);
+ ASSERT_PAGE_IS_DOMAIN(page, v->domain);
put_page_and_type(page);
}
/* Dispose of the (now possibly invalid) mappings from the TLB. */
- percpu_info[d->processor].deferred_ops |= DOP_FLUSH_TLB | DOP_RELOAD_LDT;
+ percpu_info[v->processor].deferred_ops |= DOP_FLUSH_TLB | DOP_RELOAD_LDT;
}
@@ -287,25 +287,25 @@ static int alloc_segdesc_page(struct pfn_info *page)
/* Map shadow page at offset @off. */
int map_ldt_shadow_page(unsigned int off)
{
- struct exec_domain *ed = current;
- struct domain *d = ed->domain;
+ struct vcpu *v = current;
+ struct domain *d = v->domain;
unsigned long gpfn, gmfn;
l1_pgentry_t l1e, nl1e;
- unsigned gva = ed->arch.guest_context.ldt_base + (off << PAGE_SHIFT);
+ unsigned gva = v->arch.guest_context.ldt_base + (off << PAGE_SHIFT);
int res;
#if defined(__x86_64__)
/* If in user mode, switch to kernel mode just to read LDT mapping. */
- extern void toggle_guest_mode(struct exec_domain *);
- int user_mode = !(ed->arch.flags & TF_kernel_mode);
-#define TOGGLE_MODE() if ( user_mode ) toggle_guest_mode(ed)
+ extern void toggle_guest_mode(struct vcpu *);
+ int user_mode = !(v->arch.flags & TF_kernel_mode);
+#define TOGGLE_MODE() if ( user_mode ) toggle_guest_mode(v)
#elif defined(__i386__)
#define TOGGLE_MODE() ((void)0)
#endif
BUG_ON(unlikely(in_irq()));
- shadow_sync_va(ed, gva);
+ shadow_sync_va(v, gva);
TOGGLE_MODE();
__copy_from_user(&l1e, &linear_pg_table[l1_linear_offset(gva)],
@@ -335,8 +335,8 @@ int map_ldt_shadow_page(unsigned int off)
nl1e = l1e_from_pfn(gmfn, l1e_get_flags(l1e) | _PAGE_RW);
- ed->arch.perdomain_ptes[off + 16] = nl1e;
- ed->arch.shadow_ldt_mapcnt++;
+ v->arch.perdomain_ptes[off + 16] = nl1e;
+ v->arch.shadow_ldt_mapcnt++;
return 1;
}
@@ -615,7 +615,7 @@ void put_page_from_l1e(l1_pgentry_t l1e, struct domain *d)
unlikely(((page->u.inuse.type_info & PGT_count_mask) != 0)) )
// XXX SMP BUG?
- invalidate_shadow_ldt(e->exec_domain[0]);
+ invalidate_shadow_ldt(e->vcpu[0]);
put_page(page);
}
}
@@ -1433,8 +1433,8 @@ int get_page_type(struct pfn_info *page, u32 type)
int new_guest_cr3(unsigned long mfn)
{
- struct exec_domain *ed = current;
- struct domain *d = ed->domain;
+ struct vcpu *v = current;
+ struct domain *d = v->domain;
int okay;
unsigned long old_base_mfn;
@@ -1445,13 +1445,13 @@ int new_guest_cr3(unsigned long mfn)
if ( likely(okay) )
{
- invalidate_shadow_ldt(ed);
+ invalidate_shadow_ldt(v);
- old_base_mfn = pagetable_get_pfn(ed->arch.guest_table);
- ed->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
- update_pagetables(ed); /* update shadow_table and monitor_table */
+ old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
+ v->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
+ update_pagetables(v); /* update shadow_table and monitor_table */
- write_ptbase(ed);
+ write_ptbase(v);
if ( shadow_mode_refcounts(d) )
put_page(&frame_table[old_base_mfn]);
@@ -1461,12 +1461,12 @@ int new_guest_cr3(unsigned long mfn)
/* CR3 also holds a ref to its shadow... */
if ( shadow_mode_enabled(d) )
{
- if ( ed->arch.monitor_shadow_ref )
- put_shadow_ref(ed->arch.monitor_shadow_ref);
- ed->arch.monitor_shadow_ref =
- pagetable_get_pfn(ed->arch.monitor_table);
- ASSERT(!page_get_owner(&frame_table[ed->arch.monitor_shadow_ref]));
- get_shadow_ref(ed->arch.monitor_shadow_ref);
+ if ( v->arch.monitor_shadow_ref )
+ put_shadow_ref(v->arch.monitor_shadow_ref);
+ v->arch.monitor_shadow_ref =
+ pagetable_get_pfn(v->arch.monitor_table);
+ ASSERT(!page_get_owner(&frame_table[v->arch.monitor_shadow_ref]));
+ get_shadow_ref(v->arch.monitor_shadow_ref);
}
}
else
@@ -1560,15 +1560,15 @@ static inline unsigned long vcpuset_to_pcpuset(
{
unsigned int vcpu;
unsigned long pset = 0;
- struct exec_domain *ed;
+ struct vcpu *v;
while ( vset != 0 )
{
vcpu = find_first_set_bit(vset);
vset &= ~(1UL << vcpu);
if ( (vcpu < MAX_VIRT_CPUS) &&
- ((ed = d->exec_domain[vcpu]) != NULL) )
- pset |= 1UL << ed->processor;
+ ((v = d->vcpu[vcpu]) != NULL) )
+ pset |= 1UL << v->processor;
}
return pset;
@@ -1584,8 +1584,8 @@ int do_mmuext_op(
int rc = 0, i = 0, okay, cpu = smp_processor_id();
unsigned int type, done = 0;
struct pfn_info *page;
- struct exec_domain *ed = current;
- struct domain *d = ed->domain, *e;
+ struct vcpu *v = current;
+ struct domain *d = v->domain, *e;
u32 x, y, _d, _nd;
LOCK_BIGLOCK(d);
@@ -1710,8 +1710,8 @@ int do_mmuext_op(
else
{
unsigned long old_mfn =
- pagetable_get_pfn(ed->arch.guest_table_user);
- ed->arch.guest_table_user = mk_pagetable(op.mfn << PAGE_SHIFT);
+ pagetable_get_pfn(v->arch.guest_table_user);
+ v->arch.guest_table_user = mk_pagetable(op.mfn << PAGE_SHIFT);
if ( old_mfn != 0 )
put_page_and_type(&frame_table[old_mfn]);
}
@@ -1724,7 +1724,7 @@ int do_mmuext_op(
case MMUEXT_INVLPG_LOCAL:
if ( shadow_mode_enabled(d) )
- shadow_invlpg(ed, op.linear_addr);
+ shadow_invlpg(v, op.linear_addr);
local_flush_tlb_one(op.linear_addr);
break;
@@ -1792,13 +1792,13 @@ int do_mmuext_op(
okay = 0;
MEM_LOG("Bad args to SET_LDT: ptr=%lx, ents=%lx", ptr, ents);
}
- else if ( (ed->arch.guest_context.ldt_ents != ents) ||
- (ed->arch.guest_context.ldt_base != ptr) )
+ else if ( (v->arch.guest_context.ldt_ents != ents) ||
+ (v->arch.guest_context.ldt_base != ptr) )
{
- invalidate_shadow_ldt(ed);
- ed->arch.guest_context.ldt_base = ptr;
- ed->arch.guest_context.ldt_ents = ents;
- load_LDT(ed);
+ invalidate_shadow_ldt(v);
+ v->arch.guest_context.ldt_base = ptr;
+ v->arch.guest_context.ldt_ents = ents;
+ load_LDT(v);
percpu_info[cpu].deferred_ops &= ~DOP_RELOAD_LDT;
if ( ents != 0 )
percpu_info[cpu].deferred_ops |= DOP_RELOAD_LDT;
@@ -1943,8 +1943,8 @@ int do_mmu_update(
struct pfn_info *page;
int rc = 0, okay = 1, i = 0, cpu = smp_processor_id();
unsigned int cmd, done = 0;
- struct exec_domain *ed = current;
- struct domain *d = ed->domain;
+ struct vcpu *v = current;
+ struct domain *d = v->domain;
u32 type_info;
struct map_dom_mem_cache mapcache, sh_mapcache;
@@ -1953,7 +1953,7 @@ int do_mmu_update(
cleanup_writable_pagetable(d);
if ( unlikely(shadow_mode_enabled(d)) )
- check_pagetable(ed, "pre-mmu"); /* debug */
+ check_pagetable(v, "pre-mmu"); /* debug */
if ( unlikely(count & MMU_UPDATE_PREEMPTED) )
{
@@ -2096,7 +2096,7 @@ int do_mmu_update(
if ( page_is_page_table(page) &&
!page_out_of_sync(page) )
{
- shadow_mark_mfn_out_of_sync(ed, gpfn, mfn);
+ shadow_mark_mfn_out_of_sync(v, gpfn, mfn);
}
}
@@ -2185,7 +2185,7 @@ int do_mmu_update(
__put_user(done + i, pdone);
if ( unlikely(shadow_mode_enabled(d)) )
- check_pagetable(ed, "post-mmu"); /* debug */
+ check_pagetable(v, "post-mmu"); /* debug */
UNLOCK_BIGLOCK(d);
return rc;
@@ -2197,7 +2197,7 @@ int do_mmu_update(
int update_grant_va_mapping(unsigned long va,
l1_pgentry_t _nl1e,
struct domain *d,
- struct exec_domain *ed)
+ struct vcpu *v)
{
/* Caller must:
* . own d's BIGLOCK
@@ -2216,7 +2216,7 @@ int update_grant_va_mapping(unsigned long va,
// just everything involved in getting to this L1 (i.e. we need
// linear_pg_table[l1_linear_offset(va)] to be in sync)...
//
- __shadow_sync_va(ed, va);
+ __shadow_sync_va(v, va);
pl1e = &linear_pg_table[l1_linear_offset(va)];
@@ -2242,7 +2242,7 @@ int update_grant_va_mapping(unsigned long va,
}
if ( unlikely(shadow_mode_enabled(d)) )
- shadow_do_update_va_mapping(va, _nl1e, ed);
+ shadow_do_update_va_mapping(va, _nl1e, v);
return rc;
}
@@ -2252,12 +2252,12 @@ int do_update_va_mapping(unsigned long va,
unsigned long val32,
unsigned long flags)
{
- l1_pgentry_t val = l1e_from_intpte(val32);
- struct exec_domain *ed = current;
- struct domain *d = ed->domain;
- unsigned int cpu = ed->processor;
- unsigned long vset, pset, bmap_ptr;
- int rc = 0;
+ l1_pgentry_t val = l1e_from_intpte(val32);
+ struct vcpu *v = current;
+ struct domain *d = v->domain;
+ unsigned int cpu = v->processor;
+ unsigned long vset, pset, bmap_ptr;
+ int rc = 0;
perfc_incrc(calls_to_update_va);
@@ -2269,7 +2269,7 @@ int do_update_va_mapping(unsigned long va,
cleanup_writable_pagetable(d);
if ( unlikely(shadow_mode_enabled(d)) )
- check_pagetable(ed, "pre-va"); /* debug */
+ check_pagetable(v, "pre-va"); /* debug */
if ( unlikely(!mod_l1_entry(&linear_pg_table[l1_linear_offset(va)],
val)) )
@@ -2288,9 +2288,9 @@ int do_update_va_mapping(unsigned long va,
domain_crash();
}
- rc = shadow_do_update_va_mapping(va, val, ed);
+ rc = shadow_do_update_va_mapping(va, val, v);
- check_pagetable(ed, "post-va"); /* debug */
+ check_pagetable(v, "post-va"); /* debug */
}
switch ( flags & UVMF_FLUSHTYPE_MASK )
@@ -2376,27 +2376,27 @@ int do_update_va_mapping_otherdomain(unsigned long va,
* Descriptor Tables
*/
-void destroy_gdt(struct exec_domain *ed)
+void destroy_gdt(struct vcpu *v)
{
int i;
unsigned long pfn;
- ed->arch.guest_context.gdt_ents = 0;
+ v->arch.guest_context.gdt_ents = 0;
for ( i = 0; i < FIRST_RESERVED_GDT_PAGE; i++ )
{
- if ( (pfn = l1e_get_pfn(ed->arch.perdomain_ptes[i])) != 0 )
+ if ( (pfn = l1e_get_pfn(v->arch.perdomain_ptes[i])) != 0 )
put_page_and_type(&frame_table[pfn]);
- ed->arch.perdomain_ptes[i] = l1e_empty();
- ed->arch.guest_context.gdt_frames[i] = 0;
+ v->arch.perdomain_ptes[i] = l1e_empty();
+ v->arch.guest_context.gdt_frames[i] = 0;
}
}
-long set_gdt(struct exec_domain *ed,
+long set_gdt(struct vcpu *v,
unsigned long *frames,
unsigned int entries)
{
- struct domain *d = ed->domain;
+ struct domain *d = v->domain;
/* NB. There are 512 8-byte entries per GDT page. */
int i, nr_pages = (entries + 511) / 512;
unsigned long pfn;
@@ -2413,14 +2413,14 @@ long set_gdt(struct exec_domain *ed,
goto fail;
/* Tear down the old GDT. */
- destroy_gdt(ed);
+ destroy_gdt(v);
/* Install the new GDT. */
- ed->arch.guest_context.gdt_ents = entries;
+ v->arch.guest_context.gdt_ents = entries;
for ( i = 0; i < nr_pages; i++ )
{
- ed->arch.guest_context.gdt_frames[i] = frames[i];
- ed->arch.perdomain_ptes[i] =
+ v->arch.guest_context.gdt_frames[i] = frames[i];
+ v->arch.perdomain_ptes[i] =
l1e_from_pfn(frames[i], __PAGE_HYPERVISOR);
}
@@ -2610,8 +2610,8 @@ void ptwr_flush(struct domain *d, const int which)
ASSERT(!shadow_mode_enabled(d));
- if ( unlikely(d->arch.ptwr[which].ed != current) )
- write_ptbase(d->arch.ptwr[which].ed);
+ if ( unlikely(d->arch.ptwr[which].vcpu != current) )
+ write_ptbase(d->arch.ptwr[which].vcpu);
l1va = d->arch.ptwr[which].l1va;
ptep = (unsigned long *)&linear_pg_table[l1_linear_offset(l1va)];
@@ -2676,7 +2676,7 @@ void ptwr_flush(struct domain *d, const int which)
d->arch.ptwr[which].l1va = 0;
- if ( unlikely(d->arch.ptwr[which].ed != current) )
+ if ( unlikely(d->arch.ptwr[which].vcpu != current) )
write_ptbase(current);
}
@@ -2871,7 +2871,7 @@ int ptwr_do_page_fault(struct domain *d, unsigned long addr)
* If this is a multi-processor guest then ensure that the page is hooked
* into at most one L2 table, which must be the one running on this VCPU.
*/
- if ( (d->exec_domain[0]->next_in_list != NULL) &&
+ if ( (d->vcpu[0]->next_in_list != NULL) &&
((page->u.inuse.type_info & PGT_count_mask) !=
(!!(page->u.inuse.type_info & PGT_pinned) +
(which == PTWR_PT_ACTIVE))) )
@@ -2905,7 +2905,7 @@ int ptwr_do_page_fault(struct domain *d, unsigned long addr)
d->arch.ptwr[which].l1va = addr | 1;
d->arch.ptwr[which].l2_idx = l2_idx;
- d->arch.ptwr[which].ed = current;
+ d->arch.ptwr[which].vcpu = current;
/* For safety, disconnect the L1 p.t. page from current space. */
if ( which == PTWR_PT_ACTIVE )
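new_guest_cr3() above follows a "swap with references held" ordering: validate and reference the new base frame, switch guest_table over and reload, and only then drop the reference on the old base. A stripped-down sketch of that ordering, with refcounting reduced to plain counters and no shadow-mode handling:

struct frame { int refcnt; };

struct vcpu_mm {
    struct frame *guest_table;        /* current pagetable base frame */
};

static int  get_ref(struct frame *f) { f->refcnt++; return 1; }
static void put_ref(struct frame *f) { f->refcnt--; }

static int switch_guest_cr3(struct vcpu_mm *v, struct frame *new_base)
{
    struct frame *old_base;

    if ( !get_ref(new_base) )         /* get_page_and_type() in the real code */
        return 0;

    old_base = v->guest_table;
    v->guest_table = new_base;        /* point at the new base... */
    /* update_pagetables() / write_ptbase() would reload here */

    if ( old_base != NULL )
        put_ref(old_base);            /* ...then drop the old reference */
    return 1;
}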
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 5a6221c05d..599389aaad 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -95,7 +95,7 @@ unsigned long mmu_cr4_features = X86_CR4_PSE | X86_CR4_PGE;
#endif
EXPORT_SYMBOL(mmu_cr4_features);
-struct exec_domain *idle_task[NR_CPUS] = { &idle0_exec_domain };
+struct vcpu *idle_task[NR_CPUS] = { &idle0_vcpu };
int acpi_disabled;
@@ -252,7 +252,7 @@ void __init __start_xen(multiboot_info_t *mbi)
cmdline_parse(__va(mbi->cmdline));
/* Must do this early -- e.g., spinlocks rely on get_current(). */
- set_current(&idle0_exec_domain);
+ set_current(&idle0_vcpu);
set_processor_id(0);
smp_prepare_boot_cpu();
diff --git a/xen/arch/x86/shadow.c b/xen/arch/x86/shadow.c
index 944d1f6f3d..97353a0ab1 100644
--- a/xen/arch/x86/shadow.c
+++ b/xen/arch/x86/shadow.c
@@ -553,7 +553,7 @@ static void free_shadow_pages(struct domain *d)
{
int i;
struct shadow_status *x;
- struct exec_domain *ed;
+ struct vcpu *v;
/*
* WARNING! The shadow page table must not currently be in use!
@@ -568,21 +568,21 @@ static void free_shadow_pages(struct domain *d)
//
free_out_of_sync_state(d);
- // second, remove any outstanding refs from ed->arch.shadow_table
+ // second, remove any outstanding refs from v->arch.shadow_table
// and CR3.
//
- for_each_exec_domain(d, ed)
+ for_each_vcpu(d, v)
{
- if ( pagetable_get_paddr(ed->arch.shadow_table) )
+ if ( pagetable_get_paddr(v->arch.shadow_table) )
{
- put_shadow_ref(pagetable_get_pfn(ed->arch.shadow_table));
- ed->arch.shadow_table = mk_pagetable(0);
+ put_shadow_ref(pagetable_get_pfn(v->arch.shadow_table));
+ v->arch.shadow_table = mk_pagetable(0);
}
- if ( ed->arch.monitor_shadow_ref )
+ if ( v->arch.monitor_shadow_ref )
{
- put_shadow_ref(ed->arch.monitor_shadow_ref);
- ed->arch.monitor_shadow_ref = 0;
+ put_shadow_ref(v->arch.monitor_shadow_ref);
+ v->arch.monitor_shadow_ref = 0;
}
}
@@ -590,9 +590,9 @@ static void free_shadow_pages(struct domain *d)
//
if ( shadow_mode_external(d) )
{
- for_each_exec_domain(d, ed)
+ for_each_vcpu(d, v)
{
- l2_pgentry_t *mpl2e = ed->arch.monitor_vtable;
+ l2_pgentry_t *mpl2e = v->arch.monitor_vtable;
if ( mpl2e )
{
@@ -677,14 +677,14 @@ int _shadow_mode_refcounts(struct domain *d)
return shadow_mode_refcounts(d);
}
-static void alloc_monitor_pagetable(struct exec_domain *ed)
+static void alloc_monitor_pagetable(struct vcpu *v)
{
unsigned long mmfn;
l2_pgentry_t *mpl2e;
struct pfn_info *mmfn_info;
- struct domain *d = ed->domain;
+ struct domain *d = v->domain;
- ASSERT(pagetable_get_paddr(ed->arch.monitor_table) == 0);
+ ASSERT(pagetable_get_paddr(v->arch.monitor_table) == 0);
mmfn_info = alloc_domheap_page(NULL);
ASSERT(mmfn_info != NULL);
@@ -714,21 +714,21 @@ static void alloc_monitor_pagetable(struct exec_domain *ed)
mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)] = l2e_empty();
mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] = l2e_empty();
- ed->arch.monitor_table = mk_pagetable(mmfn << PAGE_SHIFT);
- ed->arch.monitor_vtable = mpl2e;
+ v->arch.monitor_table = mk_pagetable(mmfn << PAGE_SHIFT);
+ v->arch.monitor_vtable = mpl2e;
}
/*
* Free the pages for monitor_table and hl2_table
*/
-void free_monitor_pagetable(struct exec_domain *ed)
+void free_monitor_pagetable(struct vcpu *v)
{
l2_pgentry_t *mpl2e, hl2e, sl2e;
unsigned long mfn;
- ASSERT( pagetable_get_paddr(ed->arch.monitor_table) );
+ ASSERT( pagetable_get_paddr(v->arch.monitor_table) );
- mpl2e = ed->arch.monitor_vtable;
+ mpl2e = v->arch.monitor_vtable;
/*
* First get the mfn for hl2_table by looking at monitor_table
@@ -754,11 +754,11 @@ void free_monitor_pagetable(struct exec_domain *ed)
/*
* Then free monitor_table.
*/
- mfn = pagetable_get_pfn(ed->arch.monitor_table);
+ mfn = pagetable_get_pfn(v->arch.monitor_table);
free_domheap_page(&frame_table[mfn]);
- ed->arch.monitor_table = mk_pagetable(0);
- ed->arch.monitor_vtable = 0;
+ v->arch.monitor_table = mk_pagetable(0);
+ v->arch.monitor_vtable = 0;
}
int
@@ -866,7 +866,7 @@ free_p2m_table(struct domain *d)
int __shadow_mode_enable(struct domain *d, unsigned int mode)
{
- struct exec_domain *ed;
+ struct vcpu *v;
int new_modes = (mode & ~d->arch.shadow_mode);
// Gotta be adding something to call this function.
@@ -875,9 +875,9 @@ int __shadow_mode_enable(struct domain *d, unsigned int mode)
// can't take anything away by calling this function.
ASSERT(!(d->arch.shadow_mode & ~mode));
- for_each_exec_domain(d, ed)
+ for_each_vcpu(d, v)
{
- invalidate_shadow_ldt(ed);
+ invalidate_shadow_ldt(v);
// We need to set these up for __update_pagetables().
// See the comment there.
@@ -885,52 +885,52 @@ int __shadow_mode_enable(struct domain *d, unsigned int mode)
/*
* arch.guest_vtable
*/
- if ( ed->arch.guest_vtable &&
- (ed->arch.guest_vtable != __linear_l2_table) )
+ if ( v->arch.guest_vtable &&
+ (v->arch.guest_vtable != __linear_l2_table) )
{
- unmap_domain_mem(ed->arch.guest_vtable);
+ unmap_domain_mem(v->arch.guest_vtable);
}
if ( (mode & (SHM_translate | SHM_external)) == SHM_translate )
- ed->arch.guest_vtable = __linear_l2_table;
+ v->arch.guest_vtable = __linear_l2_table;
else
- ed->arch.guest_vtable = NULL;
+ v->arch.guest_vtable = NULL;
/*
* arch.shadow_vtable
*/
- if ( ed->arch.shadow_vtable &&
- (ed->arch.shadow_vtable != __shadow_linear_l2_table) )
+ if ( v->arch.shadow_vtable &&
+ (v->arch.shadow_vtable != __shadow_linear_l2_table) )
{
- unmap_domain_mem(ed->arch.shadow_vtable);
+ unmap_domain_mem(v->arch.shadow_vtable);
}
if ( !(mode & SHM_external) )
- ed->arch.shadow_vtable = __shadow_linear_l2_table;
+ v->arch.shadow_vtable = __shadow_linear_l2_table;
else
- ed->arch.shadow_vtable = NULL;
+ v->arch.shadow_vtable = NULL;
/*
* arch.hl2_vtable
*/
- if ( ed->arch.hl2_vtable &&
- (ed->arch.hl2_vtable != __linear_hl2_table) )
+ if ( v->arch.hl2_vtable &&
+ (v->arch.hl2_vtable != __linear_hl2_table) )
{
- unmap_domain_mem(ed->arch.hl2_vtable);
+ unmap_domain_mem(v->arch.hl2_vtable);
}
if ( (mode & (SHM_translate | SHM_external)) == SHM_translate )
- ed->arch.hl2_vtable = __linear_hl2_table;
+ v->arch.hl2_vtable = __linear_hl2_table;
else
- ed->arch.hl2_vtable = NULL;
+ v->arch.hl2_vtable = NULL;
/*
* arch.monitor_table & arch.monitor_vtable
*/
- if ( ed->arch.monitor_vtable )
+ if ( v->arch.monitor_vtable )
{
- free_monitor_pagetable(ed);
+ free_monitor_pagetable(v);
}
if ( mode & SHM_external )
{
- alloc_monitor_pagetable(ed);
+ alloc_monitor_pagetable(v);
}
}
@@ -1205,10 +1205,10 @@ void __shadow_mode_disable(struct domain *d)
free_shadow_ht_entries(d);
free_out_of_sync_entries(d);
- struct exec_domain *ed;
- for_each_exec_domain(d, ed)
+ struct vcpu *v;
+ for_each_vcpu(d, v)
{
- update_pagetables(ed);
+ update_pagetables(v);
}
}
@@ -1217,13 +1217,13 @@ static int shadow_mode_table_op(
{
unsigned int op = sc->op;
int i, rc = 0;
- struct exec_domain *ed;
+ struct vcpu *v;
ASSERT(shadow_lock_is_acquired(d));
SH_VLOG("shadow mode table op %lx %lx count %d",
- (unsigned long)pagetable_get_pfn(d->exec_domain[0]->arch.guest_table), /* XXX SMP */
- (unsigned long)pagetable_get_pfn(d->exec_domain[0]->arch.shadow_table), /* XXX SMP */
+ (unsigned long)pagetable_get_pfn(d->vcpu[0]->arch.guest_table), /* XXX SMP */
+ (unsigned long)pagetable_get_pfn(d->vcpu[0]->arch.shadow_table), /* XXX SMP */
d->arch.shadow_page_count);
shadow_audit(d, 1);
@@ -1323,8 +1323,8 @@ static int shadow_mode_table_op(
SH_VLOG("shadow mode table op : page count %d", d->arch.shadow_page_count);
shadow_audit(d, 1);
- for_each_exec_domain(d,ed)
- __update_pagetables(ed);
+ for_each_vcpu(d,v)
+ __update_pagetables(v);
return rc;
}
@@ -1333,7 +1333,7 @@ int shadow_mode_control(struct domain *d, dom0_shadow_control_t *sc)
{
unsigned int op = sc->op;
int rc = 0;
- struct exec_domain *ed;
+ struct vcpu *v;
if ( unlikely(d == current->domain) )
{
@@ -1376,8 +1376,8 @@ int shadow_mode_control(struct domain *d, dom0_shadow_control_t *sc)
shadow_unlock(d);
- for_each_exec_domain(d,ed)
- update_pagetables(ed);
+ for_each_vcpu(d,v)
+ update_pagetables(v);
domain_unpause(d);
@@ -1393,7 +1393,7 @@ void vmx_shadow_clear_state(struct domain *d)
shadow_lock(d);
free_shadow_pages(d);
shadow_unlock(d);
- update_pagetables(d->exec_domain[0]);
+ update_pagetables(d->vcpu[0]);
}
unsigned long
@@ -1573,14 +1573,14 @@ static unsigned long shadow_l2_table(
void shadow_map_l1_into_current_l2(unsigned long va)
{
- struct exec_domain *ed = current;
- struct domain *d = ed->domain;
+ struct vcpu *v = current;
+ struct domain *d = v->domain;
l1_pgentry_t *gpl1e, *spl1e;
l2_pgentry_t gl2e, sl2e;
unsigned long gl1pfn, gl1mfn, sl1mfn;
int i, init_table = 0;
- __guest_get_l2e(ed, va, &gl2e);
+ __guest_get_l2e(v, va, &gl2e);
ASSERT(l2e_get_flags(gl2e) & _PAGE_PRESENT);
gl1pfn = l2e_get_pfn(gl2e);
@@ -1616,15 +1616,15 @@ void shadow_map_l1_into_current_l2(unsigned long va)
#ifndef NDEBUG
l2_pgentry_t old_sl2e;
- __shadow_get_l2e(ed, va, &old_sl2e);
+ __shadow_get_l2e(v, va, &old_sl2e);
ASSERT( !(l2e_get_flags(old_sl2e) & _PAGE_PRESENT) );
#endif
if ( !get_shadow_ref(sl1mfn) )
BUG();
l2pde_general(d, &gl2e, &sl2e, sl1mfn);
- __guest_set_l2e(ed, va, gl2e);
- __shadow_set_l2e(ed, va, sl2e);
+ __guest_set_l2e(v, va, gl2e);
+ __shadow_set_l2e(v, va, sl2e);
if ( init_table )
{
@@ -1667,16 +1667,16 @@ void shadow_map_l1_into_current_l2(unsigned long va)
}
}
-void shadow_invlpg(struct exec_domain *ed, unsigned long va)
+void shadow_invlpg(struct vcpu *v, unsigned long va)
{
- struct domain *d = ed->domain;
+ struct domain *d = v->domain;
l1_pgentry_t gpte, spte;
ASSERT(shadow_mode_enabled(d));
shadow_lock(d);
- __shadow_sync_va(ed, va);
+ __shadow_sync_va(v, va);
// XXX mafetter: will need to think about 4MB pages...
@@ -1808,10 +1808,10 @@ shadow_free_snapshot(struct domain *d, struct out_of_sync_entry *entry)
}
struct out_of_sync_entry *
-shadow_mark_mfn_out_of_sync(struct exec_domain *ed, unsigned long gpfn,
+shadow_mark_mfn_out_of_sync(struct vcpu *v, unsigned long gpfn,
unsigned long mfn)
{
- struct domain *d = ed->domain;
+ struct domain *d = v->domain;
struct pfn_info *page = &frame_table[mfn];
struct out_of_sync_entry *entry = shadow_alloc_oos_entry(d);
@@ -1864,22 +1864,22 @@ shadow_mark_mfn_out_of_sync(struct exec_domain *ed, unsigned long gpfn,
}
void shadow_mark_va_out_of_sync(
- struct exec_domain *ed, unsigned long gpfn, unsigned long mfn, unsigned long va)
+ struct vcpu *v, unsigned long gpfn, unsigned long mfn, unsigned long va)
{
struct out_of_sync_entry *entry =
- shadow_mark_mfn_out_of_sync(ed, gpfn, mfn);
+ shadow_mark_mfn_out_of_sync(v, gpfn, mfn);
l2_pgentry_t sl2e;
// We need the address of shadow PTE that maps @va.
// It might not exist yet. Make sure it's there.
//
- __shadow_get_l2e(ed, va, &sl2e);
+ __shadow_get_l2e(v, va, &sl2e);
if ( !(l2e_get_flags(sl2e) & _PAGE_PRESENT) )
{
// either this L1 isn't shadowed yet, or the shadow isn't linked into
// the current L2.
shadow_map_l1_into_current_l2(va);
- __shadow_get_l2e(ed, va, &sl2e);
+ __shadow_get_l2e(v, va, &sl2e);
}
ASSERT(l2e_get_flags(sl2e) & _PAGE_PRESENT);
@@ -1937,10 +1937,10 @@ static int snapshot_entry_matches(
* Returns 1 if va's shadow mapping is out-of-sync.
* Returns 0 otherwise.
*/
-int __shadow_out_of_sync(struct exec_domain *ed, unsigned long va)
+int __shadow_out_of_sync(struct vcpu *v, unsigned long va)
{
- struct domain *d = ed->domain;
- unsigned long l2mfn = pagetable_get_pfn(ed->arch.guest_table);
+ struct domain *d = v->domain;
+ unsigned long l2mfn = pagetable_get_pfn(v->arch.guest_table);
unsigned long l2pfn = __mfn_to_gpfn(d, l2mfn);
l2_pgentry_t l2e;
unsigned long l1pfn, l1mfn;
@@ -1951,11 +1951,11 @@ int __shadow_out_of_sync(struct exec_domain *ed, unsigned long va)
perfc_incrc(shadow_out_of_sync_calls);
if ( page_out_of_sync(&frame_table[l2mfn]) &&
- !snapshot_entry_matches(d, (l1_pgentry_t *)ed->arch.guest_vtable,
+ !snapshot_entry_matches(d, (l1_pgentry_t *)v->arch.guest_vtable,
l2pfn, l2_table_offset(va)) )
return 1;
- __guest_get_l2e(ed, va, &l2e);
+ __guest_get_l2e(v, va, &l2e);
if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
return 0;
@@ -2552,8 +2552,8 @@ void __shadow_sync_all(struct domain *d)
int shadow_fault(unsigned long va, struct cpu_user_regs *regs)
{
l1_pgentry_t gpte, spte, orig_gpte;
- struct exec_domain *ed = current;
- struct domain *d = ed->domain;
+ struct vcpu *v = current;
+ struct domain *d = v->domain;
l2_pgentry_t gpde;
spte = l1e_empty();
@@ -2562,7 +2562,7 @@ int shadow_fault(unsigned long va, struct cpu_user_regs *regs)
va, (unsigned long)regs->error_code);
perfc_incrc(shadow_fault_calls);
- check_pagetable(ed, "pre-sf");
+ check_pagetable(v, "pre-sf");
/*
* Don't let someone else take the guest's table pages out-of-sync.
@@ -2574,12 +2574,12 @@ int shadow_fault(unsigned long va, struct cpu_user_regs *regs)
* out-of-sync table page entry, or if we should pass this
* fault onto the guest.
*/
- __shadow_sync_va(ed, va);
+ __shadow_sync_va(v, va);
/*
* STEP 2. Check the guest PTE.
*/
- __guest_get_l2e(ed, va, &gpde);
+ __guest_get_l2e(v, va, &gpde);
if ( unlikely(!(l2e_get_flags(gpde) & _PAGE_PRESENT)) )
{
SH_VVLOG("shadow_fault - EXIT: L1 not present");
@@ -2622,7 +2622,7 @@ int shadow_fault(unsigned long va, struct cpu_user_regs *regs)
}
}
- if ( !l1pte_write_fault(ed, &gpte, &spte, va) )
+ if ( !l1pte_write_fault(v, &gpte, &spte, va) )
{
SH_VVLOG("shadow_fault - EXIT: l1pte_write_fault failed");
perfc_incrc(write_fault_bail);
@@ -2671,7 +2671,7 @@ int shadow_fault(unsigned long va, struct cpu_user_regs *regs)
shadow_unlock(d);
- check_pagetable(ed, "post-sf");
+ check_pagetable(v, "post-sf");
return EXCRET_fault_fixed;
fail:
@@ -2750,9 +2750,9 @@ void shadow_l4_normal_pt_update(
int shadow_do_update_va_mapping(unsigned long va,
l1_pgentry_t val,
- struct exec_domain *ed)
+ struct vcpu *v)
{
- struct domain *d = ed->domain;
+ struct domain *d = v->domain;
l1_pgentry_t spte;
int rc = 0;
@@ -2764,7 +2764,7 @@ int shadow_do_update_va_mapping(unsigned long va,
// just everything involved in getting to this L1 (i.e. we need
// linear_pg_table[l1_linear_offset(va)] to be in sync)...
//
- __shadow_sync_va(ed, va);
+ __shadow_sync_va(v, va);
l1pte_propagate_from_guest(d, val, &spte);
shadow_set_l1e(va, spte, 0);
@@ -2775,7 +2775,7 @@ int shadow_do_update_va_mapping(unsigned long va,
* for this.
*/
if ( shadow_mode_log_dirty(d) )
- __mark_dirty(d, va_to_l1mfn(ed, va));
+ __mark_dirty(d, va_to_l1mfn(v, va));
// out:
shadow_unlock(d);
@@ -2810,10 +2810,10 @@ int shadow_do_update_va_mapping(unsigned long va,
* shadow_l2_table(), shadow_hl2_table(), and alloc_monitor_pagetable()
* all play a part in maintaining these mappings.
*/
-void __update_pagetables(struct exec_domain *ed)
+void __update_pagetables(struct vcpu *v)
{
- struct domain *d = ed->domain;
- unsigned long gmfn = pagetable_get_pfn(ed->arch.guest_table);
+ struct domain *d = v->domain;
+ unsigned long gmfn = pagetable_get_pfn(v->arch.guest_table);
unsigned long gpfn = __mfn_to_gpfn(d, gmfn);
unsigned long smfn, hl2mfn, old_smfn;
@@ -2830,9 +2830,9 @@ void __update_pagetables(struct exec_domain *ed)
*/
if ( max_mode & (SHM_enable | SHM_external) )
{
- if ( likely(ed->arch.guest_vtable != NULL) )
- unmap_domain_mem(ed->arch.guest_vtable);
- ed->arch.guest_vtable = map_domain_mem(gmfn << PAGE_SHIFT);
+ if ( likely(v->arch.guest_vtable != NULL) )
+ unmap_domain_mem(v->arch.guest_vtable);
+ v->arch.guest_vtable = map_domain_mem(gmfn << PAGE_SHIFT);
}
/*
@@ -2842,8 +2842,8 @@ void __update_pagetables(struct exec_domain *ed)
smfn = shadow_l2_table(d, gpfn, gmfn);
if ( !get_shadow_ref(smfn) )
BUG();
- old_smfn = pagetable_get_pfn(ed->arch.shadow_table);
- ed->arch.shadow_table = mk_pagetable(smfn << PAGE_SHIFT);
+ old_smfn = pagetable_get_pfn(v->arch.shadow_table);
+ v->arch.shadow_table = mk_pagetable(smfn << PAGE_SHIFT);
if ( old_smfn )
put_shadow_ref(old_smfn);
@@ -2854,9 +2854,9 @@ void __update_pagetables(struct exec_domain *ed)
*/
if ( max_mode == SHM_external )
{
- if ( ed->arch.shadow_vtable )
- unmap_domain_mem(ed->arch.shadow_vtable);
- ed->arch.shadow_vtable = map_domain_mem(smfn << PAGE_SHIFT);
+ if ( v->arch.shadow_vtable )
+ unmap_domain_mem(v->arch.shadow_vtable);
+ v->arch.shadow_vtable = map_domain_mem(smfn << PAGE_SHIFT);
}
/*
@@ -2870,9 +2870,9 @@ void __update_pagetables(struct exec_domain *ed)
{
if ( unlikely(!(hl2mfn = __shadow_status(d, gpfn, PGT_hl2_shadow))) )
hl2mfn = shadow_hl2_table(d, gpfn, gmfn, smfn);
- if ( ed->arch.hl2_vtable )
- unmap_domain_mem(ed->arch.hl2_vtable);
- ed->arch.hl2_vtable = map_domain_mem(hl2mfn << PAGE_SHIFT);
+ if ( v->arch.hl2_vtable )
+ unmap_domain_mem(v->arch.hl2_vtable);
+ v->arch.hl2_vtable = map_domain_mem(hl2mfn << PAGE_SHIFT);
}
/*
@@ -2880,7 +2880,7 @@ void __update_pagetables(struct exec_domain *ed)
*/
if ( max_mode == SHM_external )
{
- l2_pgentry_t *mpl2e = ed->arch.monitor_vtable;
+ l2_pgentry_t *mpl2e = v->arch.monitor_vtable;
l2_pgentry_t old_hl2e = mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)];
l2_pgentry_t old_sl2e = mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)];
@@ -2959,9 +2959,9 @@ static int sh_l1_present;
char * sh_check_name;
int shadow_status_noswap;
-#define v2m(_ed, _adr) ({ \
+#define v2m(_v, _adr) ({ \
unsigned long _a = (unsigned long)(_adr); \
- l2_pgentry_t _pde = shadow_linear_l2_table(_ed)[l2_table_offset(_a)]; \
+ l2_pgentry_t _pde = shadow_linear_l2_table(_v)[l2_table_offset(_a)]; \
unsigned long _pa = -1; \
if ( l2e_get_flags(_pde) & _PAGE_PRESENT ) \
{ \
@@ -2985,21 +2985,21 @@ int shadow_status_noswap;
l1e_get_intpte(guest_pte), l1e_get_intpte(eff_guest_pte), \
l1e_get_intpte(shadow_pte), l1e_get_intpte(snapshot_pte), \
p_guest_pte, p_shadow_pte, p_snapshot_pte, \
- (void *)v2m(ed, p_guest_pte), (void *)v2m(ed, p_shadow_pte), \
- (void *)v2m(ed, p_snapshot_pte), \
+ (void *)v2m(v, p_guest_pte), (void *)v2m(v, p_shadow_pte), \
+ (void *)v2m(v, p_snapshot_pte), \
(l2_idx << L2_PAGETABLE_SHIFT) | \
(l1_idx << L1_PAGETABLE_SHIFT)); \
errors++; \
} while ( 0 )
static int check_pte(
- struct exec_domain *ed,
+ struct vcpu *v,
l1_pgentry_t *p_guest_pte,
l1_pgentry_t *p_shadow_pte,
l1_pgentry_t *p_snapshot_pte,
int level, int l2_idx, int l1_idx)
{
- struct domain *d = ed->domain;
+ struct domain *d = v->domain;
l1_pgentry_t guest_pte = *p_guest_pte;
l1_pgentry_t shadow_pte = *p_shadow_pte;
l1_pgentry_t snapshot_pte = p_snapshot_pte ? *p_snapshot_pte : l1e_empty();
@@ -3104,10 +3104,10 @@ static int check_pte(
#undef v2m
static int check_l1_table(
- struct exec_domain *ed, unsigned long gpfn,
+ struct vcpu *v, unsigned long gpfn,
unsigned long gmfn, unsigned long smfn, unsigned l2_idx)
{
- struct domain *d = ed->domain;
+ struct domain *d = v->domain;
int i;
unsigned long snapshot_mfn;
l1_pgentry_t *p_guest, *p_shadow, *p_snapshot = NULL;
@@ -3124,7 +3124,7 @@ static int check_l1_table(
p_shadow = map_domain_mem(smfn << PAGE_SHIFT);
for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
- errors += check_pte(ed, p_guest+i, p_shadow+i,
+ errors += check_pte(v, p_guest+i, p_shadow+i,
p_snapshot ? p_snapshot+i : NULL,
1, l2_idx, i);
@@ -3143,9 +3143,9 @@ static int check_l1_table(
} while ( 0 )
int check_l2_table(
- struct exec_domain *ed, unsigned long gmfn, unsigned long smfn, int oos_pdes)
+ struct vcpu *v, unsigned long gmfn, unsigned long smfn, int oos_pdes)
{
- struct domain *d = ed->domain;
+ struct domain *d = v->domain;
l2_pgentry_t *gpl2e = (l2_pgentry_t *)map_domain_mem(gmfn << PAGE_SHIFT);
l2_pgentry_t *spl2e = (l2_pgentry_t *)map_domain_mem(smfn << PAGE_SHIFT);
l2_pgentry_t match;
@@ -3213,7 +3213,7 @@ int check_l2_table(
/* Check the whole L2. */
for ( i = 0; i < limit; i++ )
- errors += check_pte(ed,
+ errors += check_pte(v,
(l1_pgentry_t*)(&gpl2e[i]), /* Hmm, dirty ... */
(l1_pgentry_t*)(&spl2e[i]),
NULL,
@@ -3231,10 +3231,10 @@ int check_l2_table(
}
#undef FAILPT
-int _check_pagetable(struct exec_domain *ed, char *s)
+int _check_pagetable(struct vcpu *v, char *s)
{
- struct domain *d = ed->domain;
- pagetable_t pt = ed->arch.guest_table;
+ struct domain *d = v->domain;
+ pagetable_t pt = v->arch.guest_table;
unsigned long gptbase = pagetable_get_paddr(pt);
unsigned long ptbase_pfn, smfn;
unsigned long i;
@@ -3265,7 +3265,7 @@ int _check_pagetable(struct exec_domain *ed, char *s)
ASSERT(ptbase_mfn);
}
- errors += check_l2_table(ed, ptbase_mfn, smfn, oos_pdes);
+ errors += check_l2_table(v, ptbase_mfn, smfn, oos_pdes);
gpl2e = (l2_pgentry_t *) map_domain_mem( ptbase_mfn << PAGE_SHIFT );
spl2e = (l2_pgentry_t *) map_domain_mem( smfn << PAGE_SHIFT );
@@ -3288,7 +3288,7 @@ int _check_pagetable(struct exec_domain *ed, char *s)
if ( l2e_get_intpte(spl2e[i]) != 0 ) /* FIXME: check flags? */
{
- errors += check_l1_table(ed, gl1pfn, gl1mfn, sl1mfn, i);
+ errors += check_l1_table(v, gl1pfn, gl1mfn, sl1mfn, i);
}
}
@@ -3309,9 +3309,9 @@ int _check_pagetable(struct exec_domain *ed, char *s)
return errors;
}
-int _check_all_pagetables(struct exec_domain *ed, char *s)
+int _check_all_pagetables(struct vcpu *v, char *s)
{
- struct domain *d = ed->domain;
+ struct domain *d = v->domain;
int i;
struct shadow_status *a;
unsigned long gmfn;
@@ -3334,11 +3334,11 @@ int _check_all_pagetables(struct exec_domain *ed, char *s)
switch ( a->gpfn_and_flags & PGT_type_mask )
{
case PGT_l1_shadow:
- errors += check_l1_table(ed, a->gpfn_and_flags & PGT_mfn_mask,
+ errors += check_l1_table(v, a->gpfn_and_flags & PGT_mfn_mask,
gmfn, a->smfn, 0);
break;
case PGT_l2_shadow:
- errors += check_l2_table(ed, gmfn, a->smfn,
+ errors += check_l2_table(v, gmfn, a->smfn,
page_out_of_sync(pfn_to_page(gmfn)));
break;
case PGT_l3_shadow:
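
The shadow-mode hunks above thread a struct vcpu through the page-table audit helpers (check_pte, check_l1_table, check_l2_table, _check_pagetable). The walk itself is just a nested scan of L2 and L1 entries; a minimal sketch of that shape, with the Xen types reduced to placeholders and a deliberately simplified comparison, is:

#include <stddef.h>

struct vcpu;                              /* opaque in this sketch */
typedef unsigned long l1_pgentry_t;       /* stand-in for the real entry type */

#define L1_PAGETABLE_ENTRIES 1024         /* 32-bit, non-PAE layout */

/* Placeholder comparison of one guest entry with its shadow. */
static int check_pte(struct vcpu *v, const l1_pgentry_t *guest,
                     const l1_pgentry_t *shadow, int level, int l2_idx, int l1_idx)
{
    (void)v; (void)level; (void)l2_idx; (void)l1_idx;
    return (*guest >> 12) != (*shadow >> 12);
}

/* Audit one L1 page table entry by entry, as check_l1_table() does above. */
static int check_l1_table(struct vcpu *v, const l1_pgentry_t *guest,
                          const l1_pgentry_t *shadow, int l2_idx)
{
    int i, errors = 0;
    for (i = 0; i < L1_PAGETABLE_ENTRIES; i++)
        errors += check_pte(v, &guest[i], &shadow[i], 1, l2_idx, i);
    return errors;
}
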
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index cfc2a57e46..382fee8d15 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -757,7 +757,7 @@ static int __init do_boot_cpu(int apicid)
*/
{
struct domain *idle;
- struct exec_domain *ed;
+ struct vcpu *v;
void *stack;
unsigned long boot_error;
int timeout, cpu;
@@ -769,11 +769,11 @@ static int __init do_boot_cpu(int apicid)
if ( (idle = do_createdomain(IDLE_DOMAIN_ID, cpu)) == NULL )
panic("failed 'createdomain' for CPU %d", cpu);
- ed = idle_task[cpu] = idle->exec_domain[0];
+ v = idle_task[cpu] = idle->vcpu[0];
set_bit(_DOMF_idle_domain, &idle->domain_flags);
- ed->arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
+ v->arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
/* start_eip had better be page-aligned! */
start_eip = setup_trampoline();
diff --git a/xen/arch/x86/time.c b/xen/arch/x86/time.c
index 3e3b770ae4..2efd0187b0 100644
--- a/xen/arch/x86/time.c
+++ b/xen/arch/x86/time.c
@@ -273,9 +273,9 @@ s_time_t get_s_time(void)
return now;
}
-static inline void __update_dom_time(struct exec_domain *ed)
+static inline void __update_dom_time(struct vcpu *v)
{
- struct domain *d = ed->domain;
+ struct domain *d = v->domain;
shared_info_t *si = d->shared_info;
spin_lock(&d->time_lock);
@@ -295,14 +295,14 @@ static inline void __update_dom_time(struct exec_domain *ed)
spin_unlock(&d->time_lock);
}
-void update_dom_time(struct exec_domain *ed)
+void update_dom_time(struct vcpu *v)
{
unsigned long flags;
- if ( ed->domain->shared_info->tsc_timestamp != full_tsc_irq )
+ if ( v->domain->shared_info->tsc_timestamp != full_tsc_irq )
{
read_lock_irqsave(&time_lock, flags);
- __update_dom_time(ed);
+ __update_dom_time(v);
read_unlock_irqrestore(&time_lock, flags);
}
}
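
In the time.c hunk, update_dom_time() now takes a struct vcpu and only refreshes the shared_info time record when the domain's published TSC stamp is stale. A minimal sketch of that staleness check, with the structure layouts simplified and the lock handling elided:

#include <stdint.h>

struct shared_info { uint64_t tsc_timestamp; };
struct domain      { struct shared_info *shared_info; };
struct vcpu        { struct domain *domain; };

static uint64_t full_tsc_irq;     /* TSC sampled on the last timer interrupt */

static void publish_time(struct domain *d)
{
    /* Under d->time_lock in the real code: copy system time and the new
     * TSC stamp into d->shared_info so the guest can extrapolate. */
    d->shared_info->tsc_timestamp = full_tsc_irq;
}

static void update_dom_time(struct vcpu *v)
{
    if (v->domain->shared_info->tsc_timestamp != full_tsc_irq)
        publish_time(v->domain);          /* only rewrite when stale */
}
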
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index f9d0b1893e..35940b0df8 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -236,8 +236,8 @@ static inline int do_trap(int trapnr, char *str,
struct cpu_user_regs *regs,
int use_error_code)
{
- struct exec_domain *ed = current;
- struct trap_bounce *tb = &ed->arch.trap_bounce;
+ struct vcpu *v = current;
+ struct trap_bounce *tb = &v->arch.trap_bounce;
trap_info_t *ti;
unsigned long fixup;
@@ -303,8 +303,8 @@ DO_ERROR_NOCODE(19, "simd error", simd_coprocessor_error)
asmlinkage int do_int3(struct cpu_user_regs *regs)
{
- struct exec_domain *ed = current;
- struct trap_bounce *tb = &ed->arch.trap_bounce;
+ struct vcpu *v = current;
+ struct trap_bounce *tb = &v->arch.trap_bounce;
trap_info_t *ti;
DEBUGGER_trap_entry(TRAP_int3, regs);
@@ -335,10 +335,10 @@ asmlinkage int do_machine_check(struct cpu_user_regs *regs)
void propagate_page_fault(unsigned long addr, u16 error_code)
{
trap_info_t *ti;
- struct exec_domain *ed = current;
- struct trap_bounce *tb = &ed->arch.trap_bounce;
+ struct vcpu *v = current;
+ struct trap_bounce *tb = &v->arch.trap_bounce;
- ti = &ed->arch.guest_context.trap_ctxt[TRAP_page_fault];
+ ti = &v->arch.guest_context.trap_ctxt[TRAP_page_fault];
tb->flags = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE | TBF_EXCEPTION_CR2;
tb->cr2 = addr;
tb->error_code = error_code;
@@ -347,7 +347,7 @@ void propagate_page_fault(unsigned long addr, u16 error_code)
if ( TI_GET_IF(ti) )
tb->flags |= TBF_INTERRUPT;
- ed->arch.guest_cr2 = addr;
+ v->arch.guest_cr2 = addr;
}
static int handle_perdomain_mapping_fault(
@@ -355,8 +355,8 @@ static int handle_perdomain_mapping_fault(
{
extern int map_ldt_shadow_page(unsigned int);
- struct exec_domain *ed = current;
- struct domain *d = ed->domain;
+ struct vcpu *v = current;
+ struct domain *d = v->domain;
int ret;
/* Which vcpu's area did we fault in, and is it in the ldt sub-area? */
@@ -383,7 +383,7 @@ static int handle_perdomain_mapping_fault(
return 0;
/* In guest mode? Propagate #PF to guest, with adjusted %cr2. */
propagate_page_fault(
- ed->arch.guest_context.ldt_base + offset, regs->error_code);
+ v->arch.guest_context.ldt_base + offset, regs->error_code);
}
}
else
@@ -399,8 +399,8 @@ static int handle_perdomain_mapping_fault(
asmlinkage int do_page_fault(struct cpu_user_regs *regs)
{
unsigned long addr, fixup;
- struct exec_domain *ed = current;
- struct domain *d = ed->domain;
+ struct vcpu *v = current;
+ struct domain *d = v->domain;
__asm__ __volatile__ ("mov %%cr2,%0" : "=r" (addr) : );
@@ -433,7 +433,7 @@ asmlinkage int do_page_fault(struct cpu_user_regs *regs)
if ( unlikely(shadow_mode_enabled(d)) &&
((addr < HYPERVISOR_VIRT_START) ||
- (shadow_mode_external(d) && GUEST_CONTEXT(ed, regs))) &&
+ (shadow_mode_external(d) && GUEST_CONTEXT(v, regs))) &&
shadow_fault(addr, regs) )
return EXCRET_fault_fixed;
@@ -472,17 +472,17 @@ asmlinkage int do_page_fault(struct cpu_user_regs *regs)
long do_fpu_taskswitch(int set)
{
- struct exec_domain *ed = current;
+ struct vcpu *v = current;
if ( set )
{
- set_bit(_VCPUF_guest_stts, &ed->vcpu_flags);
+ set_bit(_VCPUF_guest_stts, &v->vcpu_flags);
stts();
}
else
{
- clear_bit(_VCPUF_guest_stts, &ed->vcpu_flags);
- if ( test_bit(_VCPUF_fpu_dirtied, &ed->vcpu_flags) )
+ clear_bit(_VCPUF_guest_stts, &v->vcpu_flags);
+ if ( test_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) )
clts();
}
@@ -492,25 +492,25 @@ long do_fpu_taskswitch(int set)
/* Has the guest requested sufficient permission for this I/O access? */
static inline int guest_io_okay(
unsigned int port, unsigned int bytes,
- struct exec_domain *ed, struct cpu_user_regs *regs)
+ struct vcpu *v, struct cpu_user_regs *regs)
{
u16 x;
#if defined(__x86_64__)
/* If in user mode, switch to kernel mode just to read I/O bitmap. */
- extern void toggle_guest_mode(struct exec_domain *);
- int user_mode = !(ed->arch.flags & TF_kernel_mode);
-#define TOGGLE_MODE() if ( user_mode ) toggle_guest_mode(ed)
+ extern void toggle_guest_mode(struct vcpu *);
+ int user_mode = !(v->arch.flags & TF_kernel_mode);
+#define TOGGLE_MODE() if ( user_mode ) toggle_guest_mode(v)
#elif defined(__i386__)
#define TOGGLE_MODE() ((void)0)
#endif
- if ( ed->arch.iopl >= (KERNEL_MODE(ed, regs) ? 1 : 3) )
+ if ( v->arch.iopl >= (KERNEL_MODE(v, regs) ? 1 : 3) )
return 1;
- if ( ed->arch.iobmp_limit > (port + bytes) )
+ if ( v->arch.iobmp_limit > (port + bytes) )
{
TOGGLE_MODE();
- __get_user(x, (u16 *)(ed->arch.iobmp+(port>>3)));
+ __get_user(x, (u16 *)(v->arch.iobmp+(port>>3)));
TOGGLE_MODE();
if ( (x & (((1<<bytes)-1) << (port&7))) == 0 )
return 1;
@@ -522,9 +522,9 @@ static inline int guest_io_okay(
/* Has the administrator granted sufficient permission for this I/O access? */
static inline int admin_io_okay(
unsigned int port, unsigned int bytes,
- struct exec_domain *ed, struct cpu_user_regs *regs)
+ struct vcpu *v, struct cpu_user_regs *regs)
{
- struct domain *d = ed->domain;
+ struct domain *d = v->domain;
u16 x;
if ( d->arch.iobmp_mask != NULL )
@@ -565,7 +565,7 @@ static inline int admin_io_okay(
static int emulate_privileged_op(struct cpu_user_regs *regs)
{
- struct exec_domain *ed = current;
+ struct vcpu *v = current;
unsigned long *reg, eip = regs->eip;
u8 opcode, modrm_reg = 0, rep_prefix = 0;
unsigned int port, i, op_bytes = 4, data;
@@ -619,22 +619,22 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
case 0x6c: /* INSB */
op_bytes = 1;
case 0x6d: /* INSW/INSL */
- if ( !guest_io_okay((u16)regs->edx, op_bytes, ed, regs) )
+ if ( !guest_io_okay((u16)regs->edx, op_bytes, v, regs) )
goto fail;
switch ( op_bytes )
{
case 1:
- data = (u8)inb_user((u16)regs->edx, ed, regs);
+ data = (u8)inb_user((u16)regs->edx, v, regs);
if ( put_user((u8)data, (u8 *)regs->edi) )
PAGE_FAULT(regs->edi, USER_WRITE_FAULT);
break;
case 2:
- data = (u16)inw_user((u16)regs->edx, ed, regs);
+ data = (u16)inw_user((u16)regs->edx, v, regs);
if ( put_user((u16)data, (u16 *)regs->edi) )
PAGE_FAULT(regs->edi, USER_WRITE_FAULT);
break;
case 4:
- data = (u32)inl_user((u16)regs->edx, ed, regs);
+ data = (u32)inl_user((u16)regs->edx, v, regs);
if ( put_user((u32)data, (u32 *)regs->edi) )
PAGE_FAULT(regs->edi, USER_WRITE_FAULT);
break;
@@ -645,24 +645,24 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
case 0x6e: /* OUTSB */
op_bytes = 1;
case 0x6f: /* OUTSW/OUTSL */
- if ( !guest_io_okay((u16)regs->edx, op_bytes, ed, regs) )
+ if ( !guest_io_okay((u16)regs->edx, op_bytes, v, regs) )
goto fail;
switch ( op_bytes )
{
case 1:
if ( get_user(data, (u8 *)regs->esi) )
PAGE_FAULT(regs->esi, USER_READ_FAULT);
- outb_user((u8)data, (u16)regs->edx, ed, regs);
+ outb_user((u8)data, (u16)regs->edx, v, regs);
break;
case 2:
if ( get_user(data, (u16 *)regs->esi) )
PAGE_FAULT(regs->esi, USER_READ_FAULT);
- outw_user((u16)data, (u16)regs->edx, ed, regs);
+ outw_user((u16)data, (u16)regs->edx, v, regs);
break;
case 4:
if ( get_user(data, (u32 *)regs->esi) )
PAGE_FAULT(regs->esi, USER_READ_FAULT);
- outl_user((u32)data, (u16)regs->edx, ed, regs);
+ outl_user((u32)data, (u16)regs->edx, v, regs);
break;
}
regs->esi += (regs->eflags & EF_DF) ? -op_bytes : op_bytes;
@@ -687,20 +687,20 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
case 0xe5: /* IN imm8,%eax */
port = insn_fetch(u8, 1, eip);
exec_in:
- if ( !guest_io_okay(port, op_bytes, ed, regs) )
+ if ( !guest_io_okay(port, op_bytes, v, regs) )
goto fail;
switch ( op_bytes )
{
case 1:
regs->eax &= ~0xffUL;
- regs->eax |= (u8)inb_user(port, ed, regs);
+ regs->eax |= (u8)inb_user(port, v, regs);
break;
case 2:
regs->eax &= ~0xffffUL;
- regs->eax |= (u16)inw_user(port, ed, regs);
+ regs->eax |= (u16)inw_user(port, v, regs);
break;
case 4:
- regs->eax = (u32)inl_user(port, ed, regs);
+ regs->eax = (u32)inl_user(port, v, regs);
break;
}
goto done;
@@ -716,18 +716,18 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
case 0xe7: /* OUT %eax,imm8 */
port = insn_fetch(u8, 1, eip);
exec_out:
- if ( !guest_io_okay(port, op_bytes, ed, regs) )
+ if ( !guest_io_okay(port, op_bytes, v, regs) )
goto fail;
switch ( op_bytes )
{
case 1:
- outb_user((u8)regs->eax, port, ed, regs);
+ outb_user((u8)regs->eax, port, v, regs);
break;
case 2:
- outw_user((u16)regs->eax, port, ed, regs);
+ outw_user((u16)regs->eax, port, v, regs);
break;
case 4:
- outl_user((u32)regs->eax, port, ed, regs);
+ outl_user((u32)regs->eax, port, v, regs);
break;
}
goto done;
@@ -740,7 +740,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
case 0xfa: /* CLI */
case 0xfb: /* STI */
- if ( ed->arch.iopl < (KERNEL_MODE(ed, regs) ? 1 : 3) )
+ if ( v->arch.iopl < (KERNEL_MODE(v, regs) ? 1 : 3) )
goto fail;
/*
* This is just too dangerous to allow, in my opinion. Consider if the
@@ -748,7 +748,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
* that and we'll end up with hard-to-debug lockups. Fast & loose will
* do for us. :-)
*/
- /*ed->vcpu_info->evtchn_upcall_mask = (opcode == 0xfa);*/
+ /*v->vcpu_info->evtchn_upcall_mask = (opcode == 0xfa);*/
goto done;
case 0x0f: /* Two-byte opcode */
@@ -759,7 +759,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
}
/* Remaining instructions only emulated from guest kernel. */
- if ( !KERNEL_MODE(ed, regs) )
+ if ( !KERNEL_MODE(v, regs) )
goto fail;
/* Privileged (ring 0) instructions. */
@@ -772,7 +772,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
case 0x09: /* WBINVD */
/* Ignore the instruction if unprivileged. */
- if ( !IS_CAPABLE_PHYSDEV(ed->domain) )
+ if ( !IS_CAPABLE_PHYSDEV(v->domain) )
DPRINTK("Non-physdev domain attempted WBINVD.\n");
else
wbinvd();
@@ -789,15 +789,15 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
case 0: /* Read CR0 */
*reg =
(read_cr0() & ~X86_CR0_TS) |
- (test_bit(_VCPUF_guest_stts, &ed->vcpu_flags) ? X86_CR0_TS:0);
+ (test_bit(_VCPUF_guest_stts, &v->vcpu_flags) ? X86_CR0_TS:0);
break;
case 2: /* Read CR2 */
- *reg = ed->arch.guest_cr2;
+ *reg = v->arch.guest_cr2;
break;
case 3: /* Read CR3 */
- *reg = pagetable_get_paddr(ed->arch.guest_table);
+ *reg = pagetable_get_paddr(v->arch.guest_table);
break;
default:
@@ -818,13 +818,13 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
break;
case 2: /* Write CR2 */
- ed->arch.guest_cr2 = *reg;
+ v->arch.guest_cr2 = *reg;
break;
case 3: /* Write CR3 */
- LOCK_BIGLOCK(ed->domain);
+ LOCK_BIGLOCK(v->domain);
(void)new_guest_cr3(*reg);
- UNLOCK_BIGLOCK(ed->domain);
+ UNLOCK_BIGLOCK(v->domain);
break;
default:
@@ -834,7 +834,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
case 0x30: /* WRMSR */
/* Ignore the instruction if unprivileged. */
- if ( !IS_PRIV(ed->domain) )
+ if ( !IS_PRIV(v->domain) )
DPRINTK("Non-priv domain attempted WRMSR(%p,%08lx,%08lx).\n",
_p(regs->ecx), (long)regs->eax, (long)regs->edx);
else if ( wrmsr_user(regs->ecx, regs->eax, regs->edx) )
@@ -842,7 +842,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
break;
case 0x32: /* RDMSR */
- if ( !IS_PRIV(ed->domain) )
+ if ( !IS_PRIV(v->domain) )
DPRINTK("Non-priv domain attempted RDMSR(%p,%08lx,%08lx).\n",
_p(regs->ecx), (long)regs->eax, (long)regs->edx);
/* Everyone can read the MSR space. */
@@ -864,8 +864,8 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
asmlinkage int do_general_protection(struct cpu_user_regs *regs)
{
- struct exec_domain *ed = current;
- struct trap_bounce *tb = &ed->arch.trap_bounce;
+ struct vcpu *v = current;
+ struct trap_bounce *tb = &v->arch.trap_bounce;
trap_info_t *ti;
unsigned long fixup;
@@ -901,7 +901,7 @@ asmlinkage int do_general_protection(struct cpu_user_regs *regs)
{
/* This fault must be due to <INT n> instruction. */
ti = &current->arch.guest_context.trap_ctxt[regs->error_code>>3];
- if ( PERMIT_SOFTINT(TI_GET_DPL(ti), ed, regs) )
+ if ( PERMIT_SOFTINT(TI_GET_DPL(ti), v, regs) )
{
tb->flags = TBF_EXCEPTION;
regs->eip += 2;
@@ -915,7 +915,7 @@ asmlinkage int do_general_protection(struct cpu_user_regs *regs)
return 0;
#if defined(__i386__)
- if ( VM_ASSIST(ed->domain, VMASST_TYPE_4gb_segments) &&
+ if ( VM_ASSIST(v->domain, VMASST_TYPE_4gb_segments) &&
(regs->error_code == 0) &&
gpf_emulate_4gb(regs) )
return 0;
@@ -958,10 +958,10 @@ static void nmi_softirq(void)
return;
if ( test_and_clear_bit(0, &nmi_softirq_reason) )
- send_guest_virq(dom0->exec_domain[0], VIRQ_PARITY_ERR);
+ send_guest_virq(dom0->vcpu[0], VIRQ_PARITY_ERR);
if ( test_and_clear_bit(1, &nmi_softirq_reason) )
- send_guest_virq(dom0->exec_domain[0], VIRQ_IO_ERR);
+ send_guest_virq(dom0->vcpu[0], VIRQ_IO_ERR);
}
asmlinkage void mem_parity_error(struct cpu_user_regs *regs)
@@ -1045,14 +1045,14 @@ asmlinkage int math_state_restore(struct cpu_user_regs *regs)
asmlinkage int do_debug(struct cpu_user_regs *regs)
{
unsigned long condition;
- struct exec_domain *ed = current;
- struct trap_bounce *tb = &ed->arch.trap_bounce;
+ struct vcpu *v = current;
+ struct trap_bounce *tb = &v->arch.trap_bounce;
__asm__ __volatile__("mov %%db6,%0" : "=r" (condition));
/* Mask out spurious debug traps due to lazy DR7 setting */
if ( (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) &&
- (ed->arch.guest_context.debugreg[7] == 0) )
+ (v->arch.guest_context.debugreg[7] == 0) )
{
__asm__("mov %0,%%db7" : : "r" (0UL));
goto out;
@@ -1074,11 +1074,11 @@ asmlinkage int do_debug(struct cpu_user_regs *regs)
}
/* Save debug status register where guest OS can peek at it */
- ed->arch.guest_context.debugreg[6] = condition;
+ v->arch.guest_context.debugreg[6] = condition;
tb->flags = TBF_EXCEPTION;
- tb->cs = ed->arch.guest_context.trap_ctxt[TRAP_debug].cs;
- tb->eip = ed->arch.guest_context.trap_ctxt[TRAP_debug].address;
+ tb->cs = v->arch.guest_context.trap_ctxt[TRAP_debug].cs;
+ tb->eip = v->arch.guest_context.trap_ctxt[TRAP_debug].address;
out:
return EXCRET_not_a_fault;
@@ -1208,7 +1208,7 @@ long do_set_trap_table(trap_info_t *traps)
}
-long set_debugreg(struct exec_domain *p, int reg, unsigned long value)
+long set_debugreg(struct vcpu *p, int reg, unsigned long value)
{
int i;
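
Most of the traps.c churn is the same rename, but guest_io_okay() is worth spelling out: a port access is allowed either because the vcpu's virtual IOPL covers it or because the guest's I/O permission bitmap has the relevant bits clear. A self-contained sketch of that test, with field names following the hunk but the bitmap access simplified (the real code fetches a 16-bit word from guest memory with __get_user()):

#include <stdint.h>

struct vcpu_io {
    unsigned int   iopl;          /* virtual IOPL granted to the guest       */
    unsigned int   iobmp_limit;   /* ports covered by the guest's I/O bitmap */
    const uint8_t *iobmp;         /* guest I/O permission bitmap (1 = deny)  */
};

static int guest_io_okay(unsigned int port, unsigned int bytes,
                         const struct vcpu_io *vio, int kernel_mode)
{
    if (vio->iopl >= (kernel_mode ? 1u : 3u))
        return 1;                                   /* IOPL alone grants it */

    if (vio->iobmp_limit > port + bytes) {
        uint16_t x = (uint16_t)(vio->iobmp[port >> 3] |
                                (vio->iobmp[(port >> 3) + 1] << 8));
        if ((x & (((1u << bytes) - 1u) << (port & 7))) == 0)
            return 1;                               /* all bits clear: allowed */
    }
    return 0;
}
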
diff --git a/xen/arch/x86/vmx.c b/xen/arch/x86/vmx.c
index 09088ab2de..8aa476f3c6 100644
--- a/xen/arch/x86/vmx.c
+++ b/xen/arch/x86/vmx.c
@@ -243,7 +243,7 @@ static void vmx_dr_access (unsigned long exit_qualification, struct cpu_user_reg
{
unsigned int reg;
unsigned long *reg_p = 0;
- struct exec_domain *ed = current;
+ struct vcpu *v = current;
unsigned long eip;
__vmread(GUEST_EIP, &eip);
@@ -272,18 +272,18 @@ static void vmx_dr_access (unsigned long exit_qualification, struct cpu_user_reg
case TYPE_MOV_TO_DR:
/* don't need to check the range */
if (reg != REG_ESP)
- ed->arch.guest_context.debugreg[reg] = *reg_p;
+ v->arch.guest_context.debugreg[reg] = *reg_p;
else {
unsigned long value;
__vmread(GUEST_ESP, &value);
- ed->arch.guest_context.debugreg[reg] = value;
+ v->arch.guest_context.debugreg[reg] = value;
}
break;
case TYPE_MOV_FROM_DR:
if (reg != REG_ESP)
- *reg_p = ed->arch.guest_context.debugreg[reg];
+ *reg_p = v->arch.guest_context.debugreg[reg];
else {
- __vmwrite(GUEST_ESP, ed->arch.guest_context.debugreg[reg]);
+ __vmwrite(GUEST_ESP, v->arch.guest_context.debugreg[reg]);
}
break;
}
@@ -296,7 +296,7 @@ static void vmx_dr_access (unsigned long exit_qualification, struct cpu_user_reg
static void vmx_vmexit_do_invlpg(unsigned long va)
{
unsigned long eip;
- struct exec_domain *ed = current;
+ struct vcpu *v = current;
__vmread(GUEST_EIP, &eip);
@@ -307,7 +307,7 @@ static void vmx_vmexit_do_invlpg(unsigned long va)
* We do the safest things first, then try to update the shadow
* copying from guest
*/
- shadow_invlpg(ed, va);
+ shadow_invlpg(v, va);
}
static int check_for_null_selector(unsigned long eip)
@@ -362,7 +362,7 @@ static int check_for_null_selector(unsigned long eip)
static void vmx_io_instruction(struct cpu_user_regs *regs,
unsigned long exit_qualification, unsigned long inst_len)
{
- struct exec_domain *d = current;
+ struct vcpu *d = current;
vcpu_iodata_t *vio;
ioreq_t *p;
unsigned long addr;
@@ -489,7 +489,7 @@ vmx_copy(void *buf, unsigned long laddr, int size, int dir)
}
int
-vmx_world_save(struct exec_domain *d, struct vmx_assist_context *c)
+vmx_world_save(struct vcpu *d, struct vmx_assist_context *c)
{
unsigned long inst_len;
int error = 0;
@@ -554,7 +554,7 @@ vmx_world_save(struct exec_domain *d, struct vmx_assist_context *c)
}
int
-vmx_world_restore(struct exec_domain *d, struct vmx_assist_context *c)
+vmx_world_restore(struct vcpu *d, struct vmx_assist_context *c)
{
unsigned long mfn, old_cr4;
int error = 0;
@@ -664,7 +664,7 @@ skip_cr3:
enum { VMX_ASSIST_INVOKE = 0, VMX_ASSIST_RESTORE };
int
-vmx_assist(struct exec_domain *d, int mode)
+vmx_assist(struct vcpu *d, int mode)
{
struct vmx_assist_context c;
unsigned long magic, cp;
@@ -731,7 +731,7 @@ error:
static int vmx_set_cr0(unsigned long value)
{
- struct exec_domain *d = current;
+ struct vcpu *d = current;
unsigned long old_base_mfn, mfn;
unsigned long eip;
int paging_enabled;
@@ -821,7 +821,7 @@ static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
{
unsigned long value;
unsigned long old_cr;
- struct exec_domain *d = current;
+ struct vcpu *d = current;
switch (gp) {
CASE_GET_REG(EAX, eax);
@@ -937,7 +937,7 @@ static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
static void mov_from_cr(int cr, int gp, struct cpu_user_regs *regs)
{
unsigned long value;
- struct exec_domain *d = current;
+ struct vcpu *d = current;
if (cr != 3)
__vmx_bug(regs);
@@ -1046,7 +1046,7 @@ static inline void vmx_vmexit_do_mwait(void)
char print_buf[BUF_SIZ];
static int index;
-static void vmx_print_line(const char c, struct exec_domain *d)
+static void vmx_print_line(const char c, struct vcpu *d)
{
if (index == MAX_LINE || c == '\n') {
@@ -1109,7 +1109,7 @@ asmlinkage void vmx_vmexit_handler(struct cpu_user_regs regs)
{
unsigned int exit_reason, idtv_info_field;
unsigned long exit_qualification, eip, inst_len = 0;
- struct exec_domain *ed = current;
+ struct vcpu *v = current;
int error;
if ((error = __vmread(VM_EXIT_REASON, &exit_reason)))
@@ -1143,7 +1143,7 @@ asmlinkage void vmx_vmexit_handler(struct cpu_user_regs regs)
}
__vmread(GUEST_EIP, &eip);
- TRACE_3D(TRC_VMX_VMEXIT, ed->domain->domain_id, eip, exit_reason);
+ TRACE_3D(TRC_VMX_VMEXIT, v->domain->domain_id, eip, exit_reason);
switch (exit_reason) {
case EXIT_REASON_EXCEPTION_NMI:
@@ -1164,7 +1164,7 @@ asmlinkage void vmx_vmexit_handler(struct cpu_user_regs regs)
perfc_incra(cause_vector, vector);
- TRACE_3D(TRC_VMX_VECTOR, ed->domain->domain_id, eip, vector);
+ TRACE_3D(TRC_VMX_VECTOR, v->domain->domain_id, eip, vector);
switch (vector) {
#ifdef XEN_DEBUGGER
case TRAP_debug:
@@ -1216,7 +1216,7 @@ asmlinkage void vmx_vmexit_handler(struct cpu_user_regs regs)
(unsigned long)regs.eax, (unsigned long)regs.ebx,
(unsigned long)regs.ecx, (unsigned long)regs.edx,
(unsigned long)regs.esi, (unsigned long)regs.edi);
- ed->arch.arch_vmx.vmx_platform.mpci.inst_decoder_regs = &regs;
+ v->arch.arch_vmx.vmx_platform.mpci.inst_decoder_regs = &regs;
if (!(error = vmx_do_page_fault(va, &regs))) {
/*
@@ -1230,8 +1230,8 @@ asmlinkage void vmx_vmexit_handler(struct cpu_user_regs regs)
TRAP_page_fault);
__vmwrite(VM_ENTRY_INTR_INFO_FIELD, intr_fields);
__vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, regs.error_code);
- ed->arch.arch_vmx.cpu_cr2 = va;
- TRACE_3D(TRC_VMX_INT, ed->domain->domain_id, TRAP_page_fault, va);
+ v->arch.arch_vmx.cpu_cr2 = va;
+ TRACE_3D(TRC_VMX_INT, v->domain->domain_id, TRAP_page_fault, va);
}
break;
}
@@ -1300,7 +1300,7 @@ asmlinkage void vmx_vmexit_handler(struct cpu_user_regs regs)
__vmread(GUEST_EIP, &eip);
__vmread(EXIT_QUALIFICATION, &exit_qualification);
- vmx_print_line(regs.eax, ed); /* provides the current domain */
+ vmx_print_line(regs.eax, v); /* provides the current domain */
__update_guest_eip(inst_len);
break;
case EXIT_REASON_CR_ACCESS:
@@ -1348,13 +1348,13 @@ asmlinkage void vmx_vmexit_handler(struct cpu_user_regs regs)
__vmx_bug(&regs); /* should not happen */
}
- vmx_intr_assist(ed);
+ vmx_intr_assist(v);
return;
}
asmlinkage void load_cr2(void)
{
- struct exec_domain *d = current;
+ struct vcpu *d = current;
local_irq_disable();
#ifdef __i386__
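
Within vmx.c, vmx_dr_access() emulates MOV to/from the debug registers by shuttling values between the faulting general-purpose register and the vcpu's saved guest context; only %esp needs the extra detour through the VMCS. A reduced sketch of the copy direction, with the VMCS reads and writes elided:

enum dr_access_type { TYPE_MOV_TO_DR, TYPE_MOV_FROM_DR };

struct guest_ctx { unsigned long debugreg[8]; };

/* reg selects DR0..DR7; gpr points at the guest GPR named by the exit info. */
static void emulate_dr_access(enum dr_access_type type, int reg,
                              unsigned long *gpr, struct guest_ctx *gc)
{
    if (type == TYPE_MOV_TO_DR)
        gc->debugreg[reg] = *gpr;     /* guest writes DRn: cache it           */
    else
        *gpr = gc->debugreg[reg];     /* guest reads DRn: return cached value */
}
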
diff --git a/xen/arch/x86/vmx_intercept.c b/xen/arch/x86/vmx_intercept.c
index d694811b87..cd7e464904 100644
--- a/xen/arch/x86/vmx_intercept.c
+++ b/xen/arch/x86/vmx_intercept.c
@@ -34,7 +34,7 @@
/* for intercepting io request after vm_exit, return value: 0--not handle; 1--handled */
int vmx_io_intercept(ioreq_t *p)
{
- struct exec_domain *d = current;
+ struct vcpu *d = current;
struct vmx_handler_t *handler = &(d->arch.arch_vmx.vmx_platform.vmx_handler);
int i;
unsigned long addr, offset;
@@ -50,7 +50,7 @@ int vmx_io_intercept(ioreq_t *p)
int register_io_handler(unsigned long addr, unsigned long offset, intercept_action_t action)
{
- struct exec_domain *d = current;
+ struct vcpu *d = current;
struct vmx_handler_t *handler = &(d->arch.arch_vmx.vmx_platform.vmx_handler);
int num = handler->num_slot;
@@ -162,7 +162,7 @@ static void resume_pit_io(ioreq_t *p)
/* the intercept action for PIT DM retval:0--not handled; 1--handled */
int intercept_pit_io(ioreq_t *p)
{
- struct exec_domain *d = current;
+ struct vcpu *d = current;
struct vmx_virpit_t *vpit = &(d->arch.arch_vmx.vmx_platform.vmx_pit);
if (p->size != 1 ||
@@ -204,7 +204,7 @@ static void pit_timer_fn(void *data)
/* Only some PIT operations such as load init counter need a hypervisor hook.
* leave all other operations in user space DM
*/
-void vmx_hooks_assist(struct exec_domain *d)
+void vmx_hooks_assist(struct vcpu *d)
{
vcpu_iodata_t *vio = (vcpu_iodata_t *) d->arch.arch_vmx.vmx_platform.shared_page_va;
ioreq_t *p = &vio->vp_ioreq;
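
The intercept table touched above is what lets Xen keep a few devices (notably the PIT) inside the hypervisor instead of bouncing every access to the device model: each registered handler covers an address range, and vmx_io_intercept() claims a request only if it falls inside one. A sketch of that range check; the slot layout here is an assumption:

struct io_handler { unsigned long addr; unsigned long size; };

/* Returns 1 if the access at 'addr' is handled in Xen, 0 if it should be
 * forwarded to the user-space device model. */
static int io_intercept(const struct io_handler *slots, int num_slot,
                        unsigned long addr)
{
    int i;
    for (i = 0; i < num_slot; i++)
        if (addr >= slots[i].addr && addr < slots[i].addr + slots[i].size)
            return 1;
    return 0;
}
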
diff --git a/xen/arch/x86/vmx_io.c b/xen/arch/x86/vmx_io.c
index 8b28217489..95fa95aec8 100644
--- a/xen/arch/x86/vmx_io.c
+++ b/xen/arch/x86/vmx_io.c
@@ -180,7 +180,7 @@ static void set_reg_value (int size, int index, int seg, struct cpu_user_regs *r
}
#endif
-void vmx_io_assist(struct exec_domain *ed)
+void vmx_io_assist(struct vcpu *v)
{
vcpu_iodata_t *vio;
ioreq_t *p;
@@ -190,10 +190,10 @@ void vmx_io_assist(struct exec_domain *ed)
struct mi_per_cpu_info *mpci_p;
struct cpu_user_regs *inst_decoder_regs;
- mpci_p = &ed->arch.arch_vmx.vmx_platform.mpci;
+ mpci_p = &v->arch.arch_vmx.vmx_platform.mpci;
inst_decoder_regs = mpci_p->inst_decoder_regs;
- vio = (vcpu_iodata_t *) ed->arch.arch_vmx.vmx_platform.shared_page_va;
+ vio = (vcpu_iodata_t *) v->arch.arch_vmx.vmx_platform.shared_page_va;
if (vio == 0) {
VMX_DBG_LOG(DBG_LEVEL_1,
"bad shared page: %lx", (unsigned long) vio);
@@ -202,18 +202,18 @@ void vmx_io_assist(struct exec_domain *ed)
p = &vio->vp_ioreq;
if (p->state == STATE_IORESP_HOOK){
- vmx_hooks_assist(ed);
+ vmx_hooks_assist(v);
}
/* clear IO wait VMX flag */
- if (test_bit(ARCH_VMX_IO_WAIT, &ed->arch.arch_vmx.flags)) {
+ if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags)) {
if (p->state != STATE_IORESP_READY) {
/* An interrupt send event raced us */
return;
} else {
p->state = STATE_INVALID;
}
- clear_bit(ARCH_VMX_IO_WAIT, &ed->arch.arch_vmx.flags);
+ clear_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
} else {
return;
}
@@ -229,10 +229,10 @@ void vmx_io_assist(struct exec_domain *ed)
}
int size = -1, index = -1;
- size = operand_size(ed->arch.arch_vmx.vmx_platform.mpci.mmio_target);
- index = operand_index(ed->arch.arch_vmx.vmx_platform.mpci.mmio_target);
+ size = operand_size(v->arch.arch_vmx.vmx_platform.mpci.mmio_target);
+ index = operand_index(v->arch.arch_vmx.vmx_platform.mpci.mmio_target);
- if (ed->arch.arch_vmx.vmx_platform.mpci.mmio_target & WZEROEXTEND) {
+ if (v->arch.arch_vmx.vmx_platform.mpci.mmio_target & WZEROEXTEND) {
p->u.data = p->u.data & 0xffff;
}
set_reg_value(size, index, 0, regs, p->u.data);
@@ -273,17 +273,17 @@ void vmx_io_assist(struct exec_domain *ed)
}
}
-int vmx_clear_pending_io_event(struct exec_domain *ed)
+int vmx_clear_pending_io_event(struct vcpu *v)
{
- struct domain *d = ed->domain;
+ struct domain *d = v->domain;
/* evtchn_pending is shared by other event channels in 0-31 range */
if (!d->shared_info->evtchn_pending[IOPACKET_PORT>>5])
- clear_bit(IOPACKET_PORT>>5, &ed->vcpu_info->evtchn_pending_sel);
+ clear_bit(IOPACKET_PORT>>5, &v->vcpu_info->evtchn_pending_sel);
/* Note: VMX domains may need upcalls as well */
- if (!ed->vcpu_info->evtchn_pending_sel)
- ed->vcpu_info->evtchn_upcall_pending = 0;
+ if (!v->vcpu_info->evtchn_pending_sel)
+ v->vcpu_info->evtchn_upcall_pending = 0;
/* clear the pending bit for IOPACKET_PORT */
return test_and_clear_bit(IOPACKET_PORT,
@@ -296,7 +296,7 @@ int vmx_clear_pending_io_event(struct exec_domain *ed)
* interrupts are guaranteed to be checked before resuming guest.
* VMX upcalls have been already arranged for if necessary.
*/
-void vmx_check_events(struct exec_domain *d)
+void vmx_check_events(struct vcpu *d)
{
/* clear the event *before* checking for work. This should avoid
the set-and-check races */
@@ -384,7 +384,7 @@ static __inline__ int find_highest_irq(u32 *pintr)
* Return 0-255 for pending irq.
* -1 when no pending.
*/
-static inline int find_highest_pending_irq(struct exec_domain *d)
+static inline int find_highest_pending_irq(struct vcpu *d)
{
vcpu_iodata_t *vio;
@@ -398,7 +398,7 @@ static inline int find_highest_pending_irq(struct exec_domain *d)
return find_highest_irq((unsigned int *)&vio->vp_intr[0]);
}
-static inline void clear_highest_bit(struct exec_domain *d, int vector)
+static inline void clear_highest_bit(struct vcpu *d, int vector)
{
vcpu_iodata_t *vio;
@@ -417,7 +417,7 @@ static inline int irq_masked(unsigned long eflags)
return ((eflags & X86_EFLAGS_IF) == 0);
}
-void vmx_intr_assist(struct exec_domain *d)
+void vmx_intr_assist(struct vcpu *d)
{
int highest_vector = find_highest_pending_irq(d);
unsigned long intr_fields, eflags;
@@ -463,7 +463,7 @@ void vmx_intr_assist(struct exec_domain *d)
return;
}
-void vmx_do_resume(struct exec_domain *d)
+void vmx_do_resume(struct vcpu *d)
{
vmx_stts();
if ( vmx_paging_enabled(d) )
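
vmx_intr_assist() above injects the highest-priority interrupt raised by the device model; the priority rule is simply the highest set bit in the per-vcpu pending-vector bitmap. A standalone sketch of that scan, assuming the usual 256-vector / 8-word layout:

/* vp_intr[] holds 256 vector bits; return the highest pending vector, or -1. */
static int find_highest_irq(const unsigned int vp_intr[8])
{
    int word, bit;
    for (word = 7; word >= 0; word--) {
        if (vp_intr[word] == 0)
            continue;
        for (bit = 31; bit >= 0; bit--)
            if (vp_intr[word] & (1u << bit))
                return word * 32 + bit;
    }
    return -1;
}
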
diff --git a/xen/arch/x86/vmx_platform.c b/xen/arch/x86/vmx_platform.c
index 47f4ffd9fb..7d976a42e8 100644
--- a/xen/arch/x86/vmx_platform.c
+++ b/xen/arch/x86/vmx_platform.c
@@ -481,7 +481,7 @@ static int read_from_mmio(struct instruction *inst_p)
static void send_mmio_req(unsigned long gpa,
struct instruction *inst_p, long value, int dir, int pvalid)
{
- struct exec_domain *d = current;
+ struct vcpu *d = current;
vcpu_iodata_t *vio;
ioreq_t *p;
int vm86;
diff --git a/xen/arch/x86/vmx_vmcs.c b/xen/arch/x86/vmx_vmcs.c
index fd332873e9..fa01316aa2 100644
--- a/xen/arch/x86/vmx_vmcs.c
+++ b/xen/arch/x86/vmx_vmcs.c
@@ -103,7 +103,7 @@ struct host_execution_env {
#define round_pgdown(_p) ((_p)&PAGE_MASK) /* coped from domain.c */
-int vmx_setup_platform(struct exec_domain *d, struct cpu_user_regs *regs)
+int vmx_setup_platform(struct vcpu *d, struct cpu_user_regs *regs)
{
int i;
unsigned int n;
@@ -157,7 +157,7 @@ int vmx_setup_platform(struct exec_domain *d, struct cpu_user_regs *regs)
return 0;
}
-void vmx_do_launch(struct exec_domain *ed)
+void vmx_do_launch(struct vcpu *v)
{
/* Update CR3, GDT, LDT, TR */
unsigned int tr, cpu, error = 0;
@@ -168,14 +168,14 @@ void vmx_do_launch(struct exec_domain *ed)
struct cpu_user_regs *regs = guest_cpu_user_regs();
vmx_stts();
- set_bit(_VCPUF_guest_stts, &ed->vcpu_flags);
+ set_bit(_VCPUF_guest_stts, &v->vcpu_flags);
cpu = smp_processor_id();
page = (struct pfn_info *) alloc_domheap_page(NULL);
pfn = (unsigned long) (page - frame_table);
- vmx_setup_platform(ed, regs);
+ vmx_setup_platform(v, regs);
__asm__ __volatile__ ("sgdt (%0) \n" :: "a"(&desc) : "memory");
host_env.gdtr_limit = desc.size;
@@ -197,11 +197,11 @@ void vmx_do_launch(struct exec_domain *ed)
error |= __vmwrite(GUEST_TR_BASE, 0);
error |= __vmwrite(GUEST_TR_LIMIT, 0xff);
- __vmwrite(GUEST_CR3, pagetable_get_paddr(ed->arch.guest_table));
- __vmwrite(HOST_CR3, pagetable_get_paddr(ed->arch.monitor_table));
+ __vmwrite(GUEST_CR3, pagetable_get_paddr(v->arch.guest_table));
+ __vmwrite(HOST_CR3, pagetable_get_paddr(v->arch.monitor_table));
__vmwrite(HOST_ESP, (unsigned long)get_stack_bottom());
- ed->arch.schedule_tail = arch_vmx_do_resume;
+ v->arch.schedule_tail = arch_vmx_do_resume;
}
/*
diff --git a/xen/arch/x86/x86_32/asm-offsets.c b/xen/arch/x86/x86_32/asm-offsets.c
index c4444a5cb3..2a9f84ba4c 100644
--- a/xen/arch/x86/x86_32/asm-offsets.c
+++ b/xen/arch/x86/x86_32/asm-offsets.c
@@ -49,21 +49,21 @@ void __dummy__(void)
DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs));
BLANK();
- OFFSET(EDOMAIN_processor, struct exec_domain, processor);
- OFFSET(EDOMAIN_vcpu_info, struct exec_domain, vcpu_info);
- OFFSET(EDOMAIN_trap_bounce, struct exec_domain, arch.trap_bounce);
- OFFSET(EDOMAIN_thread_flags, struct exec_domain, arch.flags);
- OFFSET(EDOMAIN_event_sel, struct exec_domain,
+ OFFSET(VCPU_processor, struct vcpu, processor);
+ OFFSET(VCPU_vcpu_info, struct vcpu, vcpu_info);
+ OFFSET(VCPU_trap_bounce, struct vcpu, arch.trap_bounce);
+ OFFSET(VCPU_thread_flags, struct vcpu, arch.flags);
+ OFFSET(VCPU_event_sel, struct vcpu,
arch.guest_context.event_callback_cs);
- OFFSET(EDOMAIN_event_addr, struct exec_domain,
+ OFFSET(VCPU_event_addr, struct vcpu,
arch.guest_context.event_callback_eip);
- OFFSET(EDOMAIN_failsafe_sel, struct exec_domain,
+ OFFSET(VCPU_failsafe_sel, struct vcpu,
arch.guest_context.failsafe_callback_cs);
- OFFSET(EDOMAIN_failsafe_addr, struct exec_domain,
+ OFFSET(VCPU_failsafe_addr, struct vcpu,
arch.guest_context.failsafe_callback_eip);
- OFFSET(EDOMAIN_kernel_ss, struct exec_domain,
+ OFFSET(VCPU_kernel_ss, struct vcpu,
arch.guest_context.kernel_ss);
- OFFSET(EDOMAIN_kernel_sp, struct exec_domain,
+ OFFSET(VCPU_kernel_sp, struct vcpu,
arch.guest_context.kernel_sp);
BLANK();
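
The EDOMAIN_* to VCPU_* switch above is purely a renaming of generated assembler constants: asm-offsets.c is compiled to assembly only, and each OFFSET() invocation plants an offsetof() value that a later build step turns into a header consumed by entry.S. A minimal illustration of the trick, with the macro spelling and the post-processing simplified rather than copied from the Xen build glue:

#include <stddef.h>

struct vcpu_info_stub { unsigned char evtchn_upcall_pending, evtchn_upcall_mask; };
struct vcpu_stub {
    int                    processor;
    struct vcpu_info_stub *vcpu_info;
};

/* The "->" marker survives into the generated .s file, where a text pass
 * rewrites each line into "#define VCPU_processor <offset>". */
#define OFFSET(sym, str, mem) \
    asm volatile("\n->" #sym " %0 " #str "." #mem : : "i"(offsetof(str, mem)))

void __dummy__(void)
{
    OFFSET(VCPU_processor, struct vcpu_stub, processor);
    OFFSET(VCPU_vcpu_info, struct vcpu_stub, vcpu_info);
}
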
diff --git a/xen/arch/x86/x86_32/entry.S b/xen/arch/x86/x86_32/entry.S
index 279925a1ce..622833ed33 100644
--- a/xen/arch/x86/x86_32/entry.S
+++ b/xen/arch/x86/x86_32/entry.S
@@ -139,7 +139,7 @@ vmx_test_all_events:
notl %ecx
cli # tests must not race interrupts
/*test_softirqs:*/
- movl EDOMAIN_processor(%ebx),%eax
+ movl VCPU_processor(%ebx),%eax
shl $IRQSTAT_shift,%eax
test %ecx,irq_stat(%eax,1)
jnz vmx_process_softirqs
@@ -216,10 +216,10 @@ DBLFLT1:GET_CURRENT(%ebx)
jmp test_all_events
failsafe_callback:
GET_CURRENT(%ebx)
- leal EDOMAIN_trap_bounce(%ebx),%edx
- movl EDOMAIN_failsafe_addr(%ebx),%eax
+ leal VCPU_trap_bounce(%ebx),%edx
+ movl VCPU_failsafe_addr(%ebx),%eax
movl %eax,TRAPBOUNCE_eip(%edx)
- movl EDOMAIN_failsafe_sel(%ebx),%eax
+ movl VCPU_failsafe_sel(%ebx),%eax
movw %ax,TRAPBOUNCE_cs(%edx)
movw $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx)
call create_bounce_frame
@@ -269,22 +269,22 @@ test_all_events:
notl %ecx
cli # tests must not race interrupts
/*test_softirqs:*/
- movl EDOMAIN_processor(%ebx),%eax
+ movl VCPU_processor(%ebx),%eax
shl $IRQSTAT_shift,%eax
test %ecx,irq_stat(%eax,1)
jnz process_softirqs
/*test_guest_events:*/
- movl EDOMAIN_vcpu_info(%ebx),%eax
+ movl VCPU_vcpu_info(%ebx),%eax
testb $0xFF,VCPUINFO_upcall_mask(%eax)
jnz restore_all_guest
testb $0xFF,VCPUINFO_upcall_pending(%eax)
jz restore_all_guest
/*process_guest_events:*/
sti
- leal EDOMAIN_trap_bounce(%ebx),%edx
- movl EDOMAIN_event_addr(%ebx),%eax
+ leal VCPU_trap_bounce(%ebx),%edx
+ movl VCPU_event_addr(%ebx),%eax
movl %eax,TRAPBOUNCE_eip(%edx)
- movl EDOMAIN_event_sel(%ebx),%eax
+ movl VCPU_event_sel(%ebx),%eax
movw %ax,TRAPBOUNCE_cs(%edx)
movw $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
call create_bounce_frame
@@ -298,15 +298,15 @@ process_softirqs:
/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK: */
/* {EIP, CS, EFLAGS, [ESP, SS]} */
-/* %edx == trap_bounce, %ebx == struct exec_domain */
+/* %edx == trap_bounce, %ebx == struct vcpu */
/* %eax,%ecx are clobbered. %gs:%esi contain new UREGS_ss/UREGS_esp. */
create_bounce_frame:
movl UREGS_eflags+4(%esp),%ecx
movb UREGS_cs+4(%esp),%cl
testl $(2|X86_EFLAGS_VM),%ecx
jz ring1 /* jump if returning to an existing ring-1 activation */
- movl EDOMAIN_kernel_sp(%ebx),%esi
-FLT6: movl EDOMAIN_kernel_ss(%ebx),%gs
+ movl VCPU_kernel_sp(%ebx),%esi
+FLT6: movl VCPU_kernel_ss(%ebx),%gs
testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
jz nvm86_1
subl $16,%esi /* push ES/DS/FS/GS (VM86 stack frame) */
@@ -334,7 +334,7 @@ FLT13: movl UREGS_ss+4(%esp),%gs
test %eax,%eax
jz domain_crash_synchronous
FLT14: movl %eax,%gs:(%esi)
- movl EDOMAIN_vcpu_info(%ebx),%eax
+ movl VCPU_vcpu_info(%ebx),%eax
pushl VCPUINFO_upcall_mask(%eax)
testb $TBF_INTERRUPT,%cl
setnz VCPUINFO_upcall_mask(%eax) # TBF_INTERRUPT -> clear upcall mask
@@ -407,7 +407,7 @@ nvm86_3:/* Rewrite our stack frame and return to ring 1. */
ALIGN
process_guest_exception_and_events:
- leal EDOMAIN_trap_bounce(%ebx),%edx
+ leal VCPU_trap_bounce(%ebx),%edx
testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%edx)
jz test_all_events
call create_bounce_frame
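
The entry.S changes are the consumers of those VCPU_* offsets: create_bounce_frame reads the per-vcpu trap_bounce that C code filled in earlier and builds the guest's exception frame from it. The C side of that hand-off follows the pattern below; the flag values are illustrative rather than copied from the headers:

#define TBF_EXCEPTION  0x01u
#define TBF_INTERRUPT  0x08u
#define TBF_FAILSAFE   0x10u

struct trap_bounce {
    unsigned long eip;        /* guest handler to enter            */
    unsigned int  cs;         /* guest code selector               */
    unsigned int  flags;      /* TBF_* bits interpreted by entry.S */
    unsigned int  error_code;
};

/* e.g. do_int3() points the bounce at the guest's breakpoint handler. */
static void set_bounce(struct trap_bounce *tb, unsigned int cs,
                       unsigned long handler, unsigned int flags)
{
    tb->cs    = cs;
    tb->eip   = handler;
    tb->flags = flags;
}
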
diff --git a/xen/arch/x86/x86_32/mm.c b/xen/arch/x86/x86_32/mm.c
index b6786407cb..89537fe7af 100644
--- a/xen/arch/x86/x86_32/mm.c
+++ b/xen/arch/x86/x86_32/mm.c
@@ -70,7 +70,7 @@ void __init paging_init(void)
printk("PAE disabled.\n");
#endif
- idle0_exec_domain.arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
+ idle0_vcpu.arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
/*
* Allocate and map the machine-to-phys table and create read-only mapping
diff --git a/xen/arch/x86/x86_32/seg_fixup.c b/xen/arch/x86/x86_32/seg_fixup.c
index b15eb9c3f6..9931825f60 100644
--- a/xen/arch/x86/x86_32/seg_fixup.c
+++ b/xen/arch/x86/x86_32/seg_fixup.c
@@ -108,7 +108,7 @@ static unsigned char insn_decode[256] = {
*/
int get_baselimit(u16 seg, unsigned long *base, unsigned long *limit)
{
- struct exec_domain *d = current;
+ struct vcpu *d = current;
unsigned long *table, a, b;
int ldt = !!(seg & 4);
int idx = (seg >> 3) & 8191;
@@ -174,7 +174,7 @@ int linearise_address(u16 seg, unsigned long off, unsigned long *linear)
int fixup_seg(u16 seg, unsigned long offset)
{
- struct exec_domain *d = current;
+ struct vcpu *d = current;
unsigned long *table, a, b, base, limit;
int ldt = !!(seg & 4);
int idx = (seg >> 3) & 8191;
@@ -267,7 +267,7 @@ int fixup_seg(u16 seg, unsigned long offset)
*/
int gpf_emulate_4gb(struct cpu_user_regs *regs)
{
- struct exec_domain *d = current;
+ struct vcpu *d = current;
trap_info_t *ti;
struct trap_bounce *tb;
u8 modrm, mod, reg, rm, decode;
diff --git a/xen/arch/x86/x86_32/traps.c b/xen/arch/x86/x86_32/traps.c
index 0507f3be0a..b950707989 100644
--- a/xen/arch/x86/x86_32/traps.c
+++ b/xen/arch/x86/x86_32/traps.c
@@ -196,9 +196,9 @@ void __init percpu_traps_init(void)
set_task_gate(TRAP_double_fault, __DOUBLEFAULT_TSS_ENTRY<<3);
}
-void init_int80_direct_trap(struct exec_domain *ed)
+void init_int80_direct_trap(struct vcpu *v)
{
- trap_info_t *ti = &ed->arch.guest_context.trap_ctxt[0x80];
+ trap_info_t *ti = &v->arch.guest_context.trap_ctxt[0x80];
/*
* We can't virtualise interrupt gates, as there's no way to get
@@ -207,12 +207,12 @@ void init_int80_direct_trap(struct exec_domain *ed)
if ( TI_GET_IF(ti) )
return;
- ed->arch.int80_desc.a = (ti->cs << 16) | (ti->address & 0xffff);
- ed->arch.int80_desc.b =
+ v->arch.int80_desc.a = (ti->cs << 16) | (ti->address & 0xffff);
+ v->arch.int80_desc.b =
(ti->address & 0xffff0000) | 0x8f00 | ((TI_GET_DPL(ti) & 3) << 13);
- if ( ed == current )
- set_int80_direct_trap(ed);
+ if ( v == current )
+ set_int80_direct_trap(v);
}
long do_set_callbacks(unsigned long event_selector,
@@ -220,7 +220,7 @@ long do_set_callbacks(unsigned long event_selector,
unsigned long failsafe_selector,
unsigned long failsafe_address)
{
- struct exec_domain *d = current;
+ struct vcpu *d = current;
if ( !VALID_CODESEL(event_selector) || !VALID_CODESEL(failsafe_selector) )
return -EPERM;
diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c
index ff2855aeee..d19610a99f 100644
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -53,17 +53,17 @@ void __dummy__(void)
DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs));
BLANK();
- OFFSET(EDOMAIN_processor, struct exec_domain, processor);
- OFFSET(EDOMAIN_vcpu_info, struct exec_domain, vcpu_info);
- OFFSET(EDOMAIN_trap_bounce, struct exec_domain, arch.trap_bounce);
- OFFSET(EDOMAIN_thread_flags, struct exec_domain, arch.flags);
- OFFSET(EDOMAIN_event_addr, struct exec_domain,
+ OFFSET(VCPU_processor, struct vcpu, processor);
+ OFFSET(VCPU_vcpu_info, struct vcpu, vcpu_info);
+ OFFSET(VCPU_trap_bounce, struct vcpu, arch.trap_bounce);
+ OFFSET(VCPU_thread_flags, struct vcpu, arch.flags);
+ OFFSET(VCPU_event_addr, struct vcpu,
arch.guest_context.event_callback_eip);
- OFFSET(EDOMAIN_failsafe_addr, struct exec_domain,
+ OFFSET(VCPU_failsafe_addr, struct vcpu,
arch.guest_context.failsafe_callback_eip);
- OFFSET(EDOMAIN_syscall_addr, struct exec_domain,
+ OFFSET(VCPU_syscall_addr, struct vcpu,
arch.guest_context.syscall_callback_eip);
- OFFSET(EDOMAIN_kernel_sp, struct exec_domain,
+ OFFSET(VCPU_kernel_sp, struct vcpu,
arch.guest_context.kernel_sp);
BLANK();
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index ed2e41dc6f..fcf9201e4e 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -19,15 +19,15 @@
movq (reg),reg;
ALIGN
-/* %rbx: struct exec_domain, interrupts disabled */
+/* %rbx: struct vcpu, interrupts disabled */
switch_to_kernel:
- leaq EDOMAIN_trap_bounce(%rbx),%rdx
- movq EDOMAIN_syscall_addr(%rbx),%rax
+ leaq VCPU_trap_bounce(%rbx),%rdx
+ movq VCPU_syscall_addr(%rbx),%rax
movq %rax,TRAPBOUNCE_eip(%rdx)
movw $0,TRAPBOUNCE_flags(%rdx)
call create_bounce_frame
-/* %rbx: struct exec_domain */
+/* %rbx: struct vcpu */
restore_all_guest:
RESTORE_ALL
testw $TRAP_syscall,4(%rsp)
@@ -68,8 +68,8 @@ DBLFLT1:GET_CURRENT(%rbx)
jmp test_all_events
failsafe_callback:
GET_CURRENT(%rbx)
- leaq EDOMAIN_trap_bounce(%rbx),%rdx
- movq EDOMAIN_failsafe_addr(%rbx),%rax
+ leaq VCPU_trap_bounce(%rbx),%rdx
+ movq VCPU_failsafe_addr(%rbx),%rax
movq %rax,TRAPBOUNCE_eip(%rdx)
movw $TBF_FAILSAFE,TRAPBOUNCE_flags(%rdx)
call create_bounce_frame
@@ -113,7 +113,7 @@ ENTRY(syscall_enter)
movl $TRAP_syscall,4(%rsp)
SAVE_ALL
GET_CURRENT(%rbx)
- testb $TF_kernel_mode,EDOMAIN_thread_flags(%rbx)
+ testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
jz switch_to_kernel
/*hypercall:*/
@@ -125,25 +125,25 @@ ENTRY(syscall_enter)
callq *(%r10,%rax,8)
movq %rax,UREGS_rax(%rsp) # save the return value
-/* %rbx: struct exec_domain */
+/* %rbx: struct vcpu */
test_all_events:
cli # tests must not race interrupts
/*test_softirqs:*/
- movl EDOMAIN_processor(%rbx),%eax
+ movl VCPU_processor(%rbx),%eax
shl $IRQSTAT_shift,%rax
leaq irq_stat(%rip),%rcx
testl $~0,(%rcx,%rax,1)
jnz process_softirqs
/*test_guest_events:*/
- movq EDOMAIN_vcpu_info(%rbx),%rax
+ movq VCPU_vcpu_info(%rbx),%rax
testb $0xFF,VCPUINFO_upcall_mask(%rax)
jnz restore_all_guest
testb $0xFF,VCPUINFO_upcall_pending(%rax)
jz restore_all_guest
/*process_guest_events:*/
sti
- leaq EDOMAIN_trap_bounce(%rbx),%rdx
- movq EDOMAIN_event_addr(%rbx),%rax
+ leaq VCPU_trap_bounce(%rbx),%rdx
+ movq VCPU_event_addr(%rbx),%rax
movq %rax,TRAPBOUNCE_eip(%rdx)
movw $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
call create_bounce_frame
@@ -231,7 +231,7 @@ vmx_test_all_events:
/* test_all_events: */
cli # tests must not race interrupts
/*test_softirqs:*/
- movl EDOMAIN_processor(%rbx),%eax
+ movl VCPU_processor(%rbx),%eax
shl $IRQSTAT_shift,%rax
leaq irq_stat(%rip), %rdx
testl $~0,(%rdx,%rax,1)
@@ -274,7 +274,7 @@ vmx_process_softirqs:
#endif
ALIGN
-/* %rbx: struct exec_domain */
+/* %rbx: struct vcpu */
process_softirqs:
sti
call do_softirq
@@ -282,17 +282,17 @@ process_softirqs:
/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS STACK: */
/* { RCX, R11, [DS-GS,] [CR2,] [ERRCODE,] RIP, CS, RFLAGS, RSP, SS } */
-/* %rdx: trap_bounce, %rbx: struct exec_domain */
+/* %rdx: trap_bounce, %rbx: struct vcpu */
/* On return only %rbx is guaranteed non-clobbered. */
create_bounce_frame:
- testb $TF_kernel_mode,EDOMAIN_thread_flags(%rbx)
+ testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
jnz 1f
/* Push new frame at registered guest-OS stack base. */
pushq %rdx
movq %rbx,%rdi
call toggle_guest_mode
popq %rdx
- movq EDOMAIN_kernel_sp(%rbx),%rsi
+ movq VCPU_kernel_sp(%rbx),%rsi
jmp 2f
1: /* In kernel context already: push new frame at existing %rsp. */
movq UREGS_rsp+8(%rsp),%rsi
@@ -311,7 +311,7 @@ FLT2: movq %rax,32(%rsi) # SS
FLT3: movq %rax,24(%rsi) # RSP
movq UREGS_eflags+8(%rsp),%rax
FLT4: movq %rax,16(%rsi) # RFLAGS
- movq EDOMAIN_vcpu_info(%rbx),%rax
+ movq VCPU_vcpu_info(%rbx),%rax
pushq VCPUINFO_upcall_mask(%rax)
testb $TBF_INTERRUPT,%cl
setnz VCPUINFO_upcall_mask(%rax)# TBF_INTERRUPT -> clear upcall mask
@@ -371,9 +371,9 @@ FLT14: movq %rax,(%rsi) # RCX
.previous
ALIGN
-/* %rbx: struct exec_domain */
+/* %rbx: struct vcpu */
process_guest_exception_and_events:
- leaq EDOMAIN_trap_bounce(%rbx),%rdx
+ leaq VCPU_trap_bounce(%rbx),%rdx
testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
jz test_all_events
call create_bounce_frame
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index fcc570ad7d..66d3e96d61 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -79,7 +79,7 @@ void __init paging_init(void)
l2_pgentry_t *l2_ro_mpt;
struct pfn_info *pg;
- idle0_exec_domain.arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
+ idle0_vcpu.arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
/* Create user-accessible L2 directory to map the MPT for guests. */
l3_ro_mpt = (l3_pgentry_t *)alloc_xenheap_page();
@@ -182,7 +182,7 @@ long do_stack_switch(unsigned long ss, unsigned long esp)
long do_set_segment_base(unsigned int which, unsigned long base)
{
- struct exec_domain *ed = current;
+ struct vcpu *v = current;
long ret = 0;
switch ( which )
@@ -191,21 +191,21 @@ long do_set_segment_base(unsigned int which, unsigned long base)
if ( wrmsr_user(MSR_FS_BASE, base, base>>32) )
ret = -EFAULT;
else
- ed->arch.guest_context.fs_base = base;
+ v->arch.guest_context.fs_base = base;
break;
case SEGBASE_GS_USER:
if ( wrmsr_user(MSR_SHADOW_GS_BASE, base, base>>32) )
ret = -EFAULT;
else
- ed->arch.guest_context.gs_base_user = base;
+ v->arch.guest_context.gs_base_user = base;
break;
case SEGBASE_GS_KERNEL:
if ( wrmsr_user(MSR_GS_BASE, base, base>>32) )
ret = -EFAULT;
else
- ed->arch.guest_context.gs_base_kernel = base;
+ v->arch.guest_context.gs_base_kernel = base;
break;
case SEGBASE_GS_USER_SEL:
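
do_set_segment_base() above shows the usual 64-bit pattern: write the segment-base MSR for the currently running context and mirror the value into the vcpu's saved guest context so it is restored on the next world switch. A sketch under those assumptions; wrmsr_checked() stands in for the real wrmsr_user() helper:

#include <stdint.h>

#define MSR_FS_BASE 0xc0000100u
#define EFAULT      14

struct guest_context { uint64_t fs_base, gs_base_user, gs_base_kernel; };

/* Hypothetical checked-MSR-write helper; returns non-zero on fault. */
static int wrmsr_checked(uint32_t msr, uint64_t val) { (void)msr; (void)val; return 0; }

static long set_fs_base(struct guest_context *gc, uint64_t base)
{
    if (wrmsr_checked(MSR_FS_BASE, base))
        return -EFAULT;
    gc->fs_base = base;     /* reloaded for this vcpu at the next switch */
    return 0;
}
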
diff --git a/xen/arch/x86/x86_64/traps.c b/xen/arch/x86/x86_64/traps.c
index 6fc5b32fea..0d7429cf14 100644
--- a/xen/arch/x86/x86_64/traps.c
+++ b/xen/arch/x86/x86_64/traps.c
@@ -182,7 +182,7 @@ long do_set_callbacks(unsigned long event_address,
unsigned long failsafe_address,
unsigned long syscall_address)
{
- struct exec_domain *d = current;
+ struct vcpu *d = current;
d->arch.guest_context.event_callback_eip = event_address;
d->arch.guest_context.failsafe_callback_eip = failsafe_address;
diff --git a/xen/common/dom0_ops.c b/xen/common/dom0_ops.c
index 74830d3dc7..89654c7c45 100644
--- a/xen/common/dom0_ops.c
+++ b/xen/common/dom0_ops.c
@@ -22,7 +22,7 @@
extern long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op);
extern void arch_getdomaininfo_ctxt(
- struct exec_domain *, struct vcpu_guest_context *);
+ struct vcpu *, struct vcpu_guest_context *);
static inline int is_free_domid(domid_t dom)
{
@@ -153,11 +153,11 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
case DOM0_CREATEDOMAIN:
{
- struct domain *d;
- unsigned int pro;
- domid_t dom;
- struct exec_domain *ed;
- unsigned int i, cnt[NR_CPUS] = { 0 };
+ struct domain *d;
+ unsigned int pro;
+ domid_t dom;
+ struct vcpu *v;
+ unsigned int i, cnt[NR_CPUS] = { 0 };
dom = op->u.createdomain.domain;
@@ -175,8 +175,8 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
/* Do an initial CPU placement. Pick the least-populated CPU. */
read_lock(&domlist_lock);
for_each_domain ( d )
- for_each_exec_domain ( d, ed )
- cnt[ed->processor]++;
+ for_each_vcpu ( d, v )
+ cnt[v->processor]++;
read_unlock(&domlist_lock);
/*
@@ -221,7 +221,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
{
domid_t dom = op->u.pincpudomain.domain;
struct domain *d = find_domain_by_id(dom);
- struct exec_domain *ed;
+ struct vcpu *v;
cpumap_t cpumap;
@@ -232,22 +232,22 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
}
if ( (op->u.pincpudomain.vcpu >= MAX_VIRT_CPUS) ||
- !d->exec_domain[op->u.pincpudomain.vcpu] )
+ !d->vcpu[op->u.pincpudomain.vcpu] )
{
ret = -EINVAL;
put_domain(d);
break;
}
- ed = d->exec_domain[op->u.pincpudomain.vcpu];
- if ( ed == NULL )
+ v = d->vcpu[op->u.pincpudomain.vcpu];
+ if ( v == NULL )
{
ret = -ESRCH;
put_domain(d);
break;
}
- if ( ed == current )
+ if ( v == current )
{
ret = -EINVAL;
put_domain(d);
@@ -262,22 +262,22 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
break;
}
- /* update cpumap for this ed */
- ed->cpumap = cpumap;
+ /* update cpumap for this vcpu */
+ v->cpumap = cpumap;
if ( cpumap == CPUMAP_RUNANYWHERE )
- clear_bit(_VCPUF_cpu_pinned, &ed->vcpu_flags);
+ clear_bit(_VCPUF_cpu_pinned, &v->vcpu_flags);
else
{
/* pick a new cpu from the usable map */
int new_cpu = (int)find_first_set_bit(cpumap) % num_online_cpus();
- exec_domain_pause(ed);
- if ( ed->processor != new_cpu )
- set_bit(_VCPUF_cpu_migrated, &ed->vcpu_flags);
- set_bit(_VCPUF_cpu_pinned, &ed->vcpu_flags);
- ed->processor = new_cpu;
- exec_domain_unpause(ed);
+ vcpu_pause(v);
+ if ( v->processor != new_cpu )
+ set_bit(_VCPUF_cpu_migrated, &v->vcpu_flags);
+ set_bit(_VCPUF_cpu_pinned, &v->vcpu_flags);
+ v->processor = new_cpu;
+ vcpu_unpause(v);
}
put_domain(d);
@@ -300,8 +300,8 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
case DOM0_GETDOMAININFO:
{
- struct domain *d;
- struct exec_domain *ed;
+ struct domain *d;
+ struct vcpu *v;
u64 cpu_time = 0;
int vcpu_count = 0;
int flags = DOMFLAGS_PAUSED | DOMFLAGS_BLOCKED;
@@ -335,17 +335,17 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
* are paused or blocked
* - domain is marked as running if any of its vcpus is running
*/
- for_each_exec_domain ( d, ed ) {
- op->u.getdomaininfo.vcpu_to_cpu[ed->vcpu_id] = ed->processor;
- op->u.getdomaininfo.cpumap[ed->vcpu_id] = ed->cpumap;
- if ( !(ed->vcpu_flags & VCPUF_ctrl_pause) )
+ for_each_vcpu ( d, v ) {
+ op->u.getdomaininfo.vcpu_to_cpu[v->vcpu_id] = v->processor;
+ op->u.getdomaininfo.cpumap[v->vcpu_id] = v->cpumap;
+ if ( !(v->vcpu_flags & VCPUF_ctrl_pause) )
flags &= ~DOMFLAGS_PAUSED;
- if ( !(ed->vcpu_flags & VCPUF_blocked) )
+ if ( !(v->vcpu_flags & VCPUF_blocked) )
flags &= ~DOMFLAGS_BLOCKED;
- if ( ed->vcpu_flags & VCPUF_running )
+ if ( v->vcpu_flags & VCPUF_running )
flags |= DOMFLAGS_RUNNING;
- if ( ed->cpu_time > cpu_time )
- cpu_time += ed->cpu_time;
+ if ( v->cpu_time > cpu_time )
+ cpu_time += v->cpu_time;
vcpu_count++;
}
@@ -373,7 +373,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
{
struct vcpu_guest_context *c;
struct domain *d;
- struct exec_domain *ed;
+ struct vcpu *v;
d = find_domain_by_id(op->u.getvcpucontext.domain);
if ( d == NULL )
@@ -389,15 +389,15 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
break;
}
- ed = d->exec_domain[op->u.getvcpucontext.vcpu];
- if ( ed == NULL )
+ v = d->vcpu[op->u.getvcpucontext.vcpu];
+ if ( v == NULL )
{
ret = -ESRCH;
put_domain(d);
break;
}
- op->u.getvcpucontext.cpu_time = ed->cpu_time;
+ op->u.getvcpucontext.cpu_time = v->cpu_time;
if ( op->u.getvcpucontext.ctxt != NULL )
{
@@ -408,13 +408,13 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
break;
}
- if ( ed != current )
- exec_domain_pause(ed);
+ if ( v != current )
+ vcpu_pause(v);
- arch_getdomaininfo_ctxt(ed,c);
+ arch_getdomaininfo_ctxt(v,c);
- if ( ed != current )
- exec_domain_unpause(ed);
+ if ( v != current )
+ vcpu_unpause(v);
if ( copy_to_user(op->u.getvcpucontext.ctxt, c, sizeof(*c)) )
ret = -EINVAL;
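
DOM0_GETDOMAININFO above aggregates per-vcpu state into domain-level flags with for_each_vcpu: the domain counts as paused or blocked only if every vcpu is, and as running if any vcpu is. The rule in isolation, with placeholder flag values and a flattened per-vcpu state:

#define DOMFLAGS_PAUSED  0x1u
#define DOMFLAGS_BLOCKED 0x2u
#define DOMFLAGS_RUNNING 0x4u

struct vcpu_state { int paused, blocked, running; };

static unsigned int summarise_flags(const struct vcpu_state *vs, int nr_vcpus)
{
    unsigned int flags = DOMFLAGS_PAUSED | DOMFLAGS_BLOCKED;
    int i;
    for (i = 0; i < nr_vcpus; i++) {
        if (!vs[i].paused)  flags &= ~DOMFLAGS_PAUSED;
        if (!vs[i].blocked) flags &= ~DOMFLAGS_BLOCKED;
        if (vs[i].running)  flags |= DOMFLAGS_RUNNING;
    }
    return flags;
}
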
diff --git a/xen/common/domain.c b/xen/common/domain.c
index fd32c54ac5..7332cb816b 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -29,18 +29,18 @@ struct domain *dom0;
struct domain *do_createdomain(domid_t dom_id, unsigned int cpu)
{
struct domain *d, **pd;
- struct exec_domain *ed;
+ struct vcpu *v;
if ( (d = alloc_domain_struct()) == NULL )
return NULL;
- ed = d->exec_domain[0];
+ v = d->vcpu[0];
atomic_set(&d->refcnt, 1);
- atomic_set(&ed->pausecnt, 0);
+ atomic_set(&v->pausecnt, 0);
d->domain_id = dom_id;
- ed->processor = cpu;
+ v->processor = cpu;
spin_lock_init(&d->time_lock);
@@ -61,9 +61,9 @@ struct domain *do_createdomain(domid_t dom_id, unsigned int cpu)
return NULL;
}
- arch_do_createdomain(ed);
+ arch_do_createdomain(v);
- sched_add_domain(ed);
+ sched_add_domain(v);
if ( !is_idle_task(d) )
{
@@ -107,13 +107,13 @@ struct domain *find_domain_by_id(domid_t dom)
void domain_kill(struct domain *d)
{
- struct exec_domain *ed;
+ struct vcpu *v;
domain_pause(d);
if ( !test_and_set_bit(_DOMF_dying, &d->domain_flags) )
{
- for_each_exec_domain(d, ed)
- sched_rem_domain(ed);
+ for_each_vcpu(d, v)
+ sched_rem_domain(v);
domain_relinquish_resources(d);
put_domain(d);
}
@@ -151,7 +151,7 @@ static struct domain *domain_shuttingdown[NR_CPUS];
static void domain_shutdown_finalise(void)
{
struct domain *d;
- struct exec_domain *ed;
+ struct vcpu *v;
d = domain_shuttingdown[smp_processor_id()];
domain_shuttingdown[smp_processor_id()] = NULL;
@@ -162,8 +162,8 @@ static void domain_shutdown_finalise(void)
BUG_ON(test_bit(_DOMF_shutdown, &d->domain_flags));
/* Make sure that every vcpu is descheduled before we finalise. */
- for_each_exec_domain ( d, ed )
- while ( test_bit(_VCPUF_running, &ed->vcpu_flags) )
+ for_each_vcpu ( d, v )
+ while ( test_bit(_VCPUF_running, &v->vcpu_flags) )
cpu_relax();
sync_lazy_execstate_cpuset(d->cpuset);
@@ -174,7 +174,7 @@ static void domain_shutdown_finalise(void)
set_bit(_DOMF_shutdown, &d->domain_flags);
clear_bit(_DOMF_shuttingdown, &d->domain_flags);
- send_guest_virq(dom0->exec_domain[0], VIRQ_DOM_EXC);
+ send_guest_virq(dom0->vcpu[0], VIRQ_DOM_EXC);
}
static __init int domain_shutdown_finaliser_init(void)
@@ -188,7 +188,7 @@ __initcall(domain_shutdown_finaliser_init);
void domain_shutdown(u8 reason)
{
struct domain *d = current->domain;
- struct exec_domain *ed;
+ struct vcpu *v;
if ( d->domain_id == 0 )
{
@@ -219,8 +219,8 @@ void domain_shutdown(u8 reason)
}
/* Put every vcpu to sleep, but don't wait (avoids inter-vcpu deadlock). */
- for_each_exec_domain ( d, ed )
- domain_sleep_nosync(ed);
+ for_each_vcpu ( d, v )
+ domain_sleep_nosync(v);
}
@@ -259,63 +259,63 @@ void domain_destruct(struct domain *d)
free_domain_struct(d);
- send_guest_virq(dom0->exec_domain[0], VIRQ_DOM_EXC);
+ send_guest_virq(dom0->vcpu[0], VIRQ_DOM_EXC);
}
-void exec_domain_pause(struct exec_domain *ed)
+void vcpu_pause(struct vcpu *v)
{
- BUG_ON(ed == current);
- atomic_inc(&ed->pausecnt);
- domain_sleep_sync(ed);
+ BUG_ON(v == current);
+ atomic_inc(&v->pausecnt);
+ domain_sleep_sync(v);
}
void domain_pause(struct domain *d)
{
- struct exec_domain *ed;
+ struct vcpu *v;
- for_each_exec_domain( d, ed )
+ for_each_vcpu( d, v )
{
- BUG_ON(ed == current);
- atomic_inc(&ed->pausecnt);
- domain_sleep_sync(ed);
+ BUG_ON(v == current);
+ atomic_inc(&v->pausecnt);
+ domain_sleep_sync(v);
}
}
-void exec_domain_unpause(struct exec_domain *ed)
+void vcpu_unpause(struct vcpu *v)
{
- BUG_ON(ed == current);
- if ( atomic_dec_and_test(&ed->pausecnt) )
- domain_wake(ed);
+ BUG_ON(v == current);
+ if ( atomic_dec_and_test(&v->pausecnt) )
+ domain_wake(v);
}
void domain_unpause(struct domain *d)
{
- struct exec_domain *ed;
+ struct vcpu *v;
- for_each_exec_domain( d, ed )
- exec_domain_unpause(ed);
+ for_each_vcpu( d, v )
+ vcpu_unpause(v);
}
void domain_pause_by_systemcontroller(struct domain *d)
{
- struct exec_domain *ed;
+ struct vcpu *v;
- for_each_exec_domain ( d, ed )
+ for_each_vcpu ( d, v )
{
- BUG_ON(ed == current);
- if ( !test_and_set_bit(_VCPUF_ctrl_pause, &ed->vcpu_flags) )
- domain_sleep_sync(ed);
+ BUG_ON(v == current);
+ if ( !test_and_set_bit(_VCPUF_ctrl_pause, &v->vcpu_flags) )
+ domain_sleep_sync(v);
}
}
void domain_unpause_by_systemcontroller(struct domain *d)
{
- struct exec_domain *ed;
+ struct vcpu *v;
- for_each_exec_domain ( d, ed )
+ for_each_vcpu ( d, v )
{
- if ( test_and_clear_bit(_VCPUF_ctrl_pause, &ed->vcpu_flags) )
- domain_wake(ed);
+ if ( test_and_clear_bit(_VCPUF_ctrl_pause, &v->vcpu_flags) )
+ domain_wake(v);
}
}
@@ -330,13 +330,13 @@ int set_info_guest(struct domain *d, dom0_setdomaininfo_t *setdomaininfo)
int rc = 0;
struct vcpu_guest_context *c = NULL;
unsigned long vcpu = setdomaininfo->vcpu;
- struct exec_domain *ed;
+ struct vcpu *v;
- if ( (vcpu >= MAX_VIRT_CPUS) || ((ed = d->exec_domain[vcpu]) == NULL) )
+ if ( (vcpu >= MAX_VIRT_CPUS) || ((v = d->vcpu[vcpu]) == NULL) )
return -EINVAL;
if (test_bit(_DOMF_constructed, &d->domain_flags) &&
- !test_bit(_VCPUF_ctrl_pause, &ed->vcpu_flags))
+ !test_bit(_VCPUF_ctrl_pause, &v->vcpu_flags))
return -EINVAL;
if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
@@ -348,7 +348,7 @@ int set_info_guest(struct domain *d, dom0_setdomaininfo_t *setdomaininfo)
goto out;
}
- if ( (rc = arch_set_info_guest(ed, c)) != 0 )
+ if ( (rc = arch_set_info_guest(v, c)) != 0 )
goto out;
set_bit(_DOMF_constructed, &d->domain_flags);
@@ -366,14 +366,14 @@ int set_info_guest(struct domain *d, dom0_setdomaininfo_t *setdomaininfo)
long do_boot_vcpu(unsigned long vcpu, struct vcpu_guest_context *ctxt)
{
struct domain *d = current->domain;
- struct exec_domain *ed;
+ struct vcpu *v;
int rc = 0;
struct vcpu_guest_context *c;
- if ( (vcpu >= MAX_VIRT_CPUS) || (d->exec_domain[vcpu] != NULL) )
+ if ( (vcpu >= MAX_VIRT_CPUS) || (d->vcpu[vcpu] != NULL) )
return -EINVAL;
- if ( alloc_exec_domain_struct(d, vcpu) == NULL )
+ if ( alloc_vcpu_struct(d, vcpu) == NULL )
return -ENOMEM;
if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
@@ -388,31 +388,31 @@ long do_boot_vcpu(unsigned long vcpu, struct vcpu_guest_context *ctxt)
goto out;
}
- ed = d->exec_domain[vcpu];
+ v = d->vcpu[vcpu];
- atomic_set(&ed->pausecnt, 0);
- ed->cpumap = CPUMAP_RUNANYWHERE;
+ atomic_set(&v->pausecnt, 0);
+ v->cpumap = CPUMAP_RUNANYWHERE;
- memcpy(&ed->arch, &idle0_exec_domain.arch, sizeof(ed->arch));
+ memcpy(&v->arch, &idle0_vcpu.arch, sizeof(v->arch));
- arch_do_boot_vcpu(ed);
+ arch_do_boot_vcpu(v);
- if ( (rc = arch_set_info_guest(ed, c)) != 0 )
+ if ( (rc = arch_set_info_guest(v, c)) != 0 )
goto out;
- sched_add_domain(ed);
+ sched_add_domain(v);
/* domain_unpause_by_systemcontroller */
- if ( test_and_clear_bit(_VCPUF_ctrl_pause, &ed->vcpu_flags) )
- domain_wake(ed);
+ if ( test_and_clear_bit(_VCPUF_ctrl_pause, &v->vcpu_flags) )
+ domain_wake(v);
xfree(c);
return 0;
out:
xfree(c);
- arch_free_exec_domain_struct(d->exec_domain[vcpu]);
- d->exec_domain[vcpu] = NULL;
+ arch_free_vcpu_struct(d->vcpu[vcpu]);
+ d->vcpu[vcpu] = NULL;
return rc;
}
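Aside, for illustration only (not part of the patch above): the renamed vcpu_pause()/vcpu_unpause() pair keeps the counting semantics visible in the hunks here — pausing increments a per-vcpu counter and synchronously deschedules the vcpu, and only the unpause that drops the counter back to zero wakes it again. A minimal stand-alone sketch of that pattern, with a hypothetical simplified struct and stubbed sleep/wake helpers in place of the hypervisor's scheduler calls:

    /* Hypothetical, simplified model of the pause refcount; not Xen code. */
    #include <assert.h>

    struct vcpu_sketch {
        int pausecnt;   /* pause nesting count (atomic_t in the hypervisor) */
        int running;    /* 1 while the vcpu is scheduled on a CPU */
    };

    static void sleep_sync(struct vcpu_sketch *v) { v->running = 0; } /* stub */
    static void wake(struct vcpu_sketch *v)       { v->running = 1; } /* stub */

    static void vcpu_pause_sketch(struct vcpu_sketch *v)
    {
        v->pausecnt++;            /* nested pauses stack */
        sleep_sync(v);            /* return only once the vcpu is descheduled */
    }

    static void vcpu_unpause_sketch(struct vcpu_sketch *v)
    {
        assert(v->pausecnt > 0);
        if (--v->pausecnt == 0)   /* only the final unpause wakes the vcpu */
            wake(v);
    }

    int main(void)
    {
        struct vcpu_sketch v = { 0, 1 };
        vcpu_pause_sketch(&v);
        vcpu_pause_sketch(&v);
        vcpu_unpause_sketch(&v);  /* still paused: count is 1 */
        vcpu_unpause_sketch(&v);  /* count reaches 0, vcpu wakes */
        return v.running ? 0 : 1;
    }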
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index 73fee6c38c..d2f0c47452 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -32,16 +32,16 @@
#define EVENT_CHANNELS_SPREAD 32
-static int get_free_port(struct exec_domain *ed)
+static int get_free_port(struct vcpu *v)
{
- struct domain *d = ed->domain;
+ struct domain *d = v->domain;
int max, port;
event_channel_t *chn;
max = d->max_event_channel;
chn = d->event_channel;
- for ( port = ed->vcpu_id * EVENT_CHANNELS_SPREAD; port < max; port++ )
+ for ( port = v->vcpu_id * EVENT_CHANNELS_SPREAD; port < max; port++ )
if ( chn[port].state == ECS_FREE )
break;
@@ -103,7 +103,7 @@ static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
{
#define ERROR_EXIT(_errno) do { rc = (_errno); goto out; } while ( 0 )
struct domain *d1, *d2;
- struct exec_domain *ed1, *ed2;
+ struct vcpu *v1, *v2;
int port1 = bind->port1, port2 = bind->port2;
domid_t dom1 = bind->dom1, dom2 = bind->dom2;
long rc = 0;
@@ -127,8 +127,8 @@ static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
return -ESRCH;
}
- ed1 = d1->exec_domain[0]; /* XXX */
- ed2 = d2->exec_domain[0]; /* XXX */
+ v1 = d1->vcpu[0]; /* XXX */
+ v2 = d2->vcpu[0]; /* XXX */
/* Avoid deadlock by first acquiring lock of domain with smaller id. */
if ( d1 < d2 )
@@ -146,7 +146,7 @@ static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
/* Obtain, or ensure that we already have, a valid <port1>. */
if ( port1 == 0 )
{
- if ( (port1 = get_free_port(ed1)) < 0 )
+ if ( (port1 = get_free_port(v1)) < 0 )
ERROR_EXIT(port1);
}
else if ( port1 >= d1->max_event_channel )
@@ -158,7 +158,7 @@ static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
/* Make port1 non-free while we allocate port2 (in case dom1==dom2). */
u16 tmp = d1->event_channel[port1].state;
d1->event_channel[port1].state = ECS_INTERDOMAIN;
- port2 = get_free_port(ed2);
+ port2 = get_free_port(v2);
d1->event_channel[port1].state = tmp;
if ( port2 < 0 )
ERROR_EXIT(port2);
@@ -178,7 +178,7 @@ static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
break;
case ECS_INTERDOMAIN:
- if ( d1->event_channel[port1].u.interdomain.remote_dom != ed2 )
+ if ( d1->event_channel[port1].u.interdomain.remote_dom != v2 )
ERROR_EXIT(-EINVAL);
if ( (d1->event_channel[port1].u.interdomain.remote_port != port2) &&
(bind->port2 != 0) )
@@ -204,7 +204,7 @@ static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
break;
case ECS_INTERDOMAIN:
- if ( d2->event_channel[port2].u.interdomain.remote_dom != ed1 )
+ if ( d2->event_channel[port2].u.interdomain.remote_dom != v1 )
ERROR_EXIT(-EINVAL);
if ( (d2->event_channel[port2].u.interdomain.remote_port != port1) &&
(bind->port1 != 0) )
@@ -220,11 +220,11 @@ static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
* Everything checked out okay -- bind <dom1,port1> to <dom2,port2>.
*/
- d1->event_channel[port1].u.interdomain.remote_dom = ed2;
+ d1->event_channel[port1].u.interdomain.remote_dom = v2;
d1->event_channel[port1].u.interdomain.remote_port = (u16)port2;
d1->event_channel[port1].state = ECS_INTERDOMAIN;
- d2->event_channel[port2].u.interdomain.remote_dom = ed1;
+ d2->event_channel[port2].u.interdomain.remote_dom = v1;
d2->event_channel[port2].u.interdomain.remote_port = (u16)port1;
d2->event_channel[port2].state = ECS_INTERDOMAIN;
@@ -246,11 +246,11 @@ static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
{
- struct exec_domain *ed = current;
- struct domain *d = ed->domain;
+ struct vcpu *v = current;
+ struct domain *d = v->domain;
int port, virq = bind->virq;
- if ( virq >= ARRAY_SIZE(ed->virq_to_evtchn) )
+ if ( virq >= ARRAY_SIZE(v->virq_to_evtchn) )
return -EINVAL;
spin_lock(&d->event_channel_lock);
@@ -259,14 +259,14 @@ static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
* Port 0 is the fallback port for VIRQs that haven't been explicitly
* bound yet.
*/
- if ( ((port = ed->virq_to_evtchn[virq]) != 0) ||
- ((port = get_free_port(ed)) < 0) )
+ if ( ((port = v->virq_to_evtchn[virq]) != 0) ||
+ ((port = get_free_port(v)) < 0) )
goto out;
d->event_channel[port].state = ECS_VIRQ;
d->event_channel[port].u.virq = virq;
- ed->virq_to_evtchn[virq] = port;
+ v->virq_to_evtchn[virq] = port;
out:
spin_unlock(&d->event_channel_lock);
@@ -280,19 +280,19 @@ static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind)
{
- struct exec_domain *ed = current;
- struct domain *d = ed->domain;
- int port, ipi_edom = bind->ipi_edom;
+ struct vcpu *v = current;
+ struct domain *d = v->domain;
+ int port, ipi_vcpu = bind->ipi_vcpu;
- if ( ipi_edom >= MAX_VIRT_CPUS )
+ if ( ipi_vcpu >= MAX_VIRT_CPUS )
return -EINVAL;
spin_lock(&d->event_channel_lock);
- if ( (port = get_free_port(ed)) >= 0 )
+ if ( (port = get_free_port(v)) >= 0 )
{
d->event_channel[port].state = ECS_IPI;
- d->event_channel[port].u.ipi_edom = ipi_edom;
+ d->event_channel[port].u.ipi_vcpu = ipi_vcpu;
}
spin_unlock(&d->event_channel_lock);
@@ -345,7 +345,7 @@ static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
static long __evtchn_close(struct domain *d1, int port1)
{
struct domain *d2 = NULL;
- struct exec_domain *ed;
+ struct vcpu *v;
event_channel_t *chn1, *chn2;
int port2;
long rc = 0;
@@ -377,10 +377,10 @@ static long __evtchn_close(struct domain *d1, int port1)
break;
case ECS_VIRQ:
- /* XXX could store exec_domain in chn1[port1].u */
- for_each_exec_domain(d1, ed)
- if (ed->virq_to_evtchn[chn1[port1].u.virq] == port1)
- ed->virq_to_evtchn[chn1[port1].u.virq] = 0;
+ /* XXX could store vcpu in chn1[port1].u */
+ for_each_vcpu ( d1, v )
+ if (v->virq_to_evtchn[chn1[port1].u.virq] == port1)
+ v->virq_to_evtchn[chn1[port1].u.virq] = 0;
break;
case ECS_IPI:
@@ -477,7 +477,7 @@ static long evtchn_close(evtchn_close_t *close)
long evtchn_send(int lport)
{
struct domain *ld = current->domain;
- struct exec_domain *rd;
+ struct vcpu *rd;
int rport, ret = 0;
spin_lock(&ld->event_channel_lock);
@@ -498,7 +498,7 @@ long evtchn_send(int lport)
evtchn_set_pending(rd, rport);
break;
case ECS_IPI:
- rd = ld->exec_domain[ld->event_channel[lport].u.ipi_edom];
+ rd = ld->vcpu[ld->event_channel[lport].u.ipi_vcpu];
if ( rd )
evtchn_set_pending(rd, lport);
else
@@ -566,7 +566,7 @@ static long evtchn_status(evtchn_status_t *status)
break;
case ECS_IPI:
status->status = EVTCHNSTAT_ipi;
- status->u.ipi_edom = chn[port].u.ipi_edom;
+ status->u.ipi_vcpu = chn[port].u.ipi_vcpu;
break;
default:
BUG();
@@ -646,7 +646,7 @@ int init_event_channels(struct domain *d)
{
spin_lock_init(&d->event_channel_lock);
/* Call get_free_port to initialize d->event_channel */
- if ( get_free_port(d->exec_domain[0]) != 0 )
+ if ( get_free_port(d->vcpu[0]) != 0 )
return -EINVAL;
d->event_channel[0].state = ECS_RESERVED;
return 0;
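Aside, for illustration only (not part of the patch above): get_free_port() still starts its scan at vcpu_id * EVENT_CHANNELS_SPREAD, so ports bound by different vcpus tend to land in separate 32-entry regions of the per-domain table. A hypothetical, simplified sketch of that scan over a fixed-size table (the real routine grows d->event_channel when it runs out of free ports, which is omitted here):

    #include <stdio.h>

    #define EVENT_CHANNELS_SPREAD 32
    #define MAX_PORTS             256    /* hypothetical fixed table size */

    enum { ECS_FREE = 0, ECS_RESERVED };

    static int chn_state[MAX_PORTS];     /* stand-in for d->event_channel[] */

    /* Look for a free port, starting in the region "belonging" to this vcpu. */
    static int get_free_port_sketch(int vcpu_id)
    {
        int port;
        for (port = vcpu_id * EVENT_CHANNELS_SPREAD; port < MAX_PORTS; port++)
            if (chn_state[port] == ECS_FREE)
                return port;
        return -1;                       /* real code would grow the table */
    }

    int main(void)
    {
        chn_state[0] = ECS_RESERVED;     /* port 0 is reserved, as above */
        printf("vcpu0 -> port %d\n", get_free_port_sketch(0));   /* 1 */
        printf("vcpu2 -> port %d\n", get_free_port_sketch(2));   /* 64 */
        return 0;
    }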
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index 4efd650546..94eaf8b275 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -62,7 +62,7 @@ put_maptrack_handle(
static int
__gnttab_activate_grant_ref(
struct domain *mapping_d, /* IN */
- struct exec_domain *mapping_ed,
+ struct vcpu *mapping_ed,
struct domain *granting_d,
grant_ref_t ref,
u16 dev_hst_ro_flags,
@@ -319,7 +319,7 @@ __gnttab_map_grant_ref(
domid_t dom;
grant_ref_t ref;
struct domain *ld, *rd;
- struct exec_domain *led;
+ struct vcpu *led;
u16 dev_hst_ro_flags;
int handle;
unsigned long frame = 0, host_virt_addr;
diff --git a/xen/common/keyhandler.c b/xen/common/keyhandler.c
index a40cea65bc..f7136addda 100644
--- a/xen/common/keyhandler.c
+++ b/xen/common/keyhandler.c
@@ -99,7 +99,7 @@ static void halt_machine(unsigned char key, struct cpu_user_regs *regs)
static void do_task_queues(unsigned char key)
{
struct domain *d;
- struct exec_domain *ed;
+ struct vcpu *v;
s_time_t now = NOW();
printk("'%c' pressed -> dumping task queues (now=0x%X:%08X)\n", key,
@@ -115,24 +115,24 @@ static void do_task_queues(unsigned char key)
dump_pageframe_info(d);
- for_each_exec_domain ( d, ed ) {
+ for_each_vcpu ( d, v ) {
printk("Guest: %p CPU %d [has=%c] flags=%lx "
- "upcall_pend = %02x, upcall_mask = %02x\n", ed,
- ed->processor,
- test_bit(_VCPUF_running, &ed->vcpu_flags) ? 'T':'F',
- ed->vcpu_flags,
- ed->vcpu_info->evtchn_upcall_pending,
- ed->vcpu_info->evtchn_upcall_mask);
- printk("Notifying guest... %d/%d\n", d->domain_id, ed->vcpu_id);
+ "upcall_pend = %02x, upcall_mask = %02x\n", v,
+ v->processor,
+ test_bit(_VCPUF_running, &v->vcpu_flags) ? 'T':'F',
+ v->vcpu_flags,
+ v->vcpu_info->evtchn_upcall_pending,
+ v->vcpu_info->evtchn_upcall_mask);
+ printk("Notifying guest... %d/%d\n", d->domain_id, v->vcpu_id);
printk("port %d/%d stat %d %d %d\n",
- VIRQ_DEBUG, ed->virq_to_evtchn[VIRQ_DEBUG],
- test_bit(ed->virq_to_evtchn[VIRQ_DEBUG],
+ VIRQ_DEBUG, v->virq_to_evtchn[VIRQ_DEBUG],
+ test_bit(v->virq_to_evtchn[VIRQ_DEBUG],
&d->shared_info->evtchn_pending[0]),
- test_bit(ed->virq_to_evtchn[VIRQ_DEBUG],
+ test_bit(v->virq_to_evtchn[VIRQ_DEBUG],
&d->shared_info->evtchn_mask[0]),
- test_bit(ed->virq_to_evtchn[VIRQ_DEBUG]>>5,
- &ed->vcpu_info->evtchn_pending_sel));
- send_guest_virq(ed, VIRQ_DEBUG);
+ test_bit(v->virq_to_evtchn[VIRQ_DEBUG]>>5,
+ &v->vcpu_info->evtchn_pending_sel));
+ send_guest_virq(v, VIRQ_DEBUG);
}
}
diff --git a/xen/common/sched_bvt.c b/xen/common/sched_bvt.c
index 35f901fccc..c37b4bf2f9 100644
--- a/xen/common/sched_bvt.c
+++ b/xen/common/sched_bvt.c
@@ -26,12 +26,12 @@
#include <xen/softirq.h>
/* all per-domain BVT-specific scheduling info is stored here */
-struct bvt_edom_info
+struct bvt_vcpu_info
{
struct list_head run_list; /* runqueue list pointers */
u32 avt; /* actual virtual time */
u32 evt; /* effective virtual time */
- struct exec_domain *exec_domain;
+ struct vcpu *vcpu;
struct bvt_dom_info *inf;
};
@@ -48,7 +48,7 @@ struct bvt_dom_info
s_time_t warpu; /* unwarp time requirement */
struct ac_timer unwarp_timer; /* deals with warpu */
- struct bvt_edom_info ed_inf[MAX_VIRT_CPUS];
+ struct bvt_vcpu_info vcpu_inf[MAX_VIRT_CPUS];
};
struct bvt_cpu_info
@@ -58,7 +58,7 @@ struct bvt_cpu_info
};
#define BVT_INFO(p) ((struct bvt_dom_info *)(p)->sched_priv)
-#define EBVT_INFO(p) ((struct bvt_edom_info *)(p)->sched_priv)
+#define EBVT_INFO(p) ((struct bvt_vcpu_info *)(p)->sched_priv)
#define CPU_INFO(cpu) ((struct bvt_cpu_info *)(schedule_data[cpu]).sched_priv)
#define RUNLIST(p) ((struct list_head *)&(EBVT_INFO(p)->run_list))
#define RUNQUEUE(cpu) ((struct list_head *)&(CPU_INFO(cpu)->runqueue))
@@ -69,24 +69,24 @@ struct bvt_cpu_info
#define TIME_SLOP (s32)MICROSECS(50) /* allow time to slip a bit */
static s32 ctx_allow = (s32)MILLISECS(5); /* context switch allowance */
-static inline void __add_to_runqueue_head(struct exec_domain *d)
+static inline void __add_to_runqueue_head(struct vcpu *d)
{
list_add(RUNLIST(d), RUNQUEUE(d->processor));
}
-static inline void __add_to_runqueue_tail(struct exec_domain *d)
+static inline void __add_to_runqueue_tail(struct vcpu *d)
{
list_add_tail(RUNLIST(d), RUNQUEUE(d->processor));
}
-static inline void __del_from_runqueue(struct exec_domain *d)
+static inline void __del_from_runqueue(struct vcpu *d)
{
struct list_head *runlist = RUNLIST(d);
list_del(runlist);
runlist->next = NULL;
}
-static inline int __task_on_runqueue(struct exec_domain *d)
+static inline int __task_on_runqueue(struct vcpu *d)
{
return (RUNLIST(d))->next != NULL;
}
@@ -96,7 +96,7 @@ static inline int __task_on_runqueue(struct exec_domain *d)
static void warp_timer_fn(void *data)
{
struct bvt_dom_info *inf = data;
- unsigned int cpu = inf->domain->exec_domain[0]->processor;
+ unsigned int cpu = inf->domain->vcpu[0]->processor;
spin_lock_irq(&schedule_data[cpu].schedule_lock);
@@ -117,7 +117,7 @@ static void warp_timer_fn(void *data)
static void unwarp_timer_fn(void *data)
{
struct bvt_dom_info *inf = data;
- unsigned int cpu = inf->domain->exec_domain[0]->processor;
+ unsigned int cpu = inf->domain->vcpu[0]->processor;
spin_lock_irq(&schedule_data[cpu].schedule_lock);
@@ -130,11 +130,11 @@ static void unwarp_timer_fn(void *data)
spin_unlock_irq(&schedule_data[cpu].schedule_lock);
}
-static inline u32 calc_avt(struct exec_domain *d, s_time_t now)
+static inline u32 calc_avt(struct vcpu *d, s_time_t now)
{
u32 ranfor, mcus;
struct bvt_dom_info *inf = BVT_INFO(d->domain);
- struct bvt_edom_info *einf = EBVT_INFO(d);
+ struct bvt_vcpu_info *einf = EBVT_INFO(d);
ranfor = (u32)(now - d->lastschd);
mcus = (ranfor + MCU - 1)/MCU;
@@ -146,7 +146,7 @@ static inline u32 calc_avt(struct exec_domain *d, s_time_t now)
* Calculate the effective virtual time for a domain. Take into account
* warping limits
*/
-static inline u32 calc_evt(struct exec_domain *d, u32 avt)
+static inline u32 calc_evt(struct vcpu *d, u32 avt)
{
struct bvt_dom_info *inf = BVT_INFO(d->domain);
/* TODO The warp routines need to be rewritten GM */
@@ -163,9 +163,9 @@ static inline u32 calc_evt(struct exec_domain *d, u32 avt)
*
* Returns non-zero on failure.
*/
-static int bvt_alloc_task(struct exec_domain *ed)
+static int bvt_alloc_task(struct vcpu *v)
{
- struct domain *d = ed->domain;
+ struct domain *d = v->domain;
if ( (d->sched_priv == NULL) )
{
@@ -174,10 +174,10 @@ static int bvt_alloc_task(struct exec_domain *ed)
memset(d->sched_priv, 0, sizeof(struct bvt_dom_info));
}
- ed->sched_priv = &BVT_INFO(d)->ed_inf[ed->vcpu_id];
+ v->sched_priv = &BVT_INFO(d)->vcpu_inf[v->vcpu_id];
- BVT_INFO(d)->ed_inf[ed->vcpu_id].inf = BVT_INFO(d);
- BVT_INFO(d)->ed_inf[ed->vcpu_id].exec_domain = ed;
+ BVT_INFO(d)->vcpu_inf[v->vcpu_id].inf = BVT_INFO(d);
+ BVT_INFO(d)->vcpu_inf[v->vcpu_id].vcpu = v;
return 0;
}
@@ -185,26 +185,26 @@ static int bvt_alloc_task(struct exec_domain *ed)
/*
* Add and remove a domain
*/
-static void bvt_add_task(struct exec_domain *d)
+static void bvt_add_task(struct vcpu *v)
{
- struct bvt_dom_info *inf = BVT_INFO(d->domain);
- struct bvt_edom_info *einf = EBVT_INFO(d);
+ struct bvt_dom_info *inf = BVT_INFO(v->domain);
+ struct bvt_vcpu_info *einf = EBVT_INFO(v);
ASSERT(inf != NULL);
- ASSERT(d != NULL);
+ ASSERT(v != NULL);
/* Allocate per-CPU context if this is the first domain to be added. */
- if ( CPU_INFO(d->processor) == NULL )
+ if ( CPU_INFO(v->processor) == NULL )
{
- schedule_data[d->processor].sched_priv = xmalloc(struct bvt_cpu_info);
- BUG_ON(CPU_INFO(d->processor) == NULL);
- INIT_LIST_HEAD(RUNQUEUE(d->processor));
- CPU_SVT(d->processor) = 0;
+ schedule_data[v->processor].sched_priv = xmalloc(struct bvt_cpu_info);
+ BUG_ON(CPU_INFO(v->processor) == NULL);
+ INIT_LIST_HEAD(RUNQUEUE(v->processor));
+ CPU_SVT(v->processor) = 0;
}
- if ( d->vcpu_id == 0 )
+ if ( v->vcpu_id == 0 )
{
inf->mcu_advance = MCU_ADVANCE;
- inf->domain = d->domain;
+ inf->domain = v->domain;
inf->warpback = 0;
/* Set some default values here. */
inf->warp = 0;
@@ -212,49 +212,49 @@ static void bvt_add_task(struct exec_domain *d)
inf->warpl = MILLISECS(2000);
inf->warpu = MILLISECS(1000);
/* Initialise the warp timers. */
- init_ac_timer(&inf->warp_timer, warp_timer_fn, inf, d->processor);
- init_ac_timer(&inf->unwarp_timer, unwarp_timer_fn, inf, d->processor);
+ init_ac_timer(&inf->warp_timer, warp_timer_fn, inf, v->processor);
+ init_ac_timer(&inf->unwarp_timer, unwarp_timer_fn, inf, v->processor);
}
- einf->exec_domain = d;
+ einf->vcpu = v;
- if ( is_idle_task(d->domain) )
+ if ( is_idle_task(v->domain) )
{
einf->avt = einf->evt = ~0U;
- BUG_ON(__task_on_runqueue(d));
- __add_to_runqueue_head(d);
+ BUG_ON(__task_on_runqueue(v));
+ __add_to_runqueue_head(v);
}
else
{
/* Set avt and evt to system virtual time. */
- einf->avt = CPU_SVT(d->processor);
- einf->evt = CPU_SVT(d->processor);
+ einf->avt = CPU_SVT(v->processor);
+ einf->evt = CPU_SVT(v->processor);
}
}
-static void bvt_wake(struct exec_domain *ed)
+static void bvt_wake(struct vcpu *v)
{
- struct bvt_edom_info *einf = EBVT_INFO(ed);
- struct exec_domain *curr;
+ struct bvt_vcpu_info *einf = EBVT_INFO(v);
+ struct vcpu *curr;
s_time_t now, r_time;
- int cpu = ed->processor;
+ int cpu = v->processor;
u32 curr_evt;
- if ( unlikely(__task_on_runqueue(ed)) )
+ if ( unlikely(__task_on_runqueue(v)) )
return;
- __add_to_runqueue_head(ed);
+ __add_to_runqueue_head(v);
now = NOW();
/* Set the BVT parameters. AVT should always be updated
if CPU migration occurred.*/
if ( einf->avt < CPU_SVT(cpu) ||
- unlikely(test_bit(_VCPUF_cpu_migrated, &ed->vcpu_flags)) )
+ unlikely(test_bit(_VCPUF_cpu_migrated, &v->vcpu_flags)) )
einf->avt = CPU_SVT(cpu);
/* Deal with warping here. */
- einf->evt = calc_evt(ed, einf->avt);
+ einf->evt = calc_evt(v, einf->avt);
curr = schedule_data[cpu].curr;
curr_evt = calc_evt(curr, calc_avt(curr, now));
@@ -271,12 +271,12 @@ static void bvt_wake(struct exec_domain *ed)
}
-static void bvt_sleep(struct exec_domain *ed)
+static void bvt_sleep(struct vcpu *v)
{
- if ( test_bit(_VCPUF_running, &ed->vcpu_flags) )
- cpu_raise_softirq(ed->processor, SCHEDULE_SOFTIRQ);
- else if ( __task_on_runqueue(ed) )
- __del_from_runqueue(ed);
+ if ( test_bit(_VCPUF_running, &v->vcpu_flags) )
+ cpu_raise_softirq(v->processor, SCHEDULE_SOFTIRQ);
+ else if ( __task_on_runqueue(v) )
+ __del_from_runqueue(v);
}
/**
@@ -360,15 +360,15 @@ static int bvt_adjdom(
static struct task_slice bvt_do_schedule(s_time_t now)
{
struct domain *d;
- struct exec_domain *prev = current, *next = NULL, *next_prime, *ed;
+ struct vcpu *prev = current, *next = NULL, *next_prime, *ed;
int cpu = prev->processor;
s32 r_time; /* time for new dom to run */
u32 next_evt, next_prime_evt, min_avt;
struct bvt_dom_info *prev_inf = BVT_INFO(prev->domain);
- struct bvt_edom_info *prev_einf = EBVT_INFO(prev);
- struct bvt_edom_info *p_einf = NULL;
- struct bvt_edom_info *next_einf = NULL;
- struct bvt_edom_info *next_prime_einf = NULL;
+ struct bvt_vcpu_info *prev_einf = EBVT_INFO(prev);
+ struct bvt_vcpu_info *p_einf = NULL;
+ struct bvt_vcpu_info *next_einf = NULL;
+ struct bvt_vcpu_info *next_prime_einf = NULL;
struct task_slice ret;
ASSERT(prev->sched_priv != NULL);
@@ -434,8 +434,8 @@ static struct task_slice bvt_do_schedule(s_time_t now)
set_ac_timer(&next_einf->inf->warp_timer, now + next_einf->inf->warpl);
/* Extract the domain pointers from the dom infos */
- next = next_einf->exec_domain;
- next_prime = next_prime_einf->exec_domain;
+ next = next_einf->vcpu;
+ next_prime = next_prime_einf->vcpu;
/* Update system virtual time. */
if ( min_avt != ~0U )
@@ -450,7 +450,7 @@ static struct task_slice bvt_do_schedule(s_time_t now)
for_each_domain ( d )
{
- for_each_exec_domain (d, ed) {
+ for_each_vcpu (d, ed) {
if ( ed->processor == cpu )
{
p_einf = EBVT_INFO(ed);
@@ -498,9 +498,9 @@ static struct task_slice bvt_do_schedule(s_time_t now)
}
-static void bvt_dump_runq_el(struct exec_domain *p)
+static void bvt_dump_runq_el(struct vcpu *p)
{
- struct bvt_edom_info *inf = EBVT_INFO(p);
+ struct bvt_vcpu_info *inf = EBVT_INFO(p);
printk("mcua=%d ev=0x%08X av=0x%08X ",
inf->inf->mcu_advance, inf->evt, inf->avt);
@@ -515,8 +515,8 @@ static void bvt_dump_cpu_state(int i)
{
struct list_head *queue;
int loop = 0;
- struct bvt_edom_info *ed_inf;
- struct exec_domain *ed;
+ struct bvt_vcpu_info *vcpu_inf;
+ struct vcpu *v;
printk("svt=0x%08lX ", CPU_SVT(i));
@@ -524,15 +524,16 @@ static void bvt_dump_cpu_state(int i)
printk("QUEUE rq %lx n: %lx, p: %lx\n", (unsigned long)queue,
(unsigned long) queue->next, (unsigned long) queue->prev);
- list_for_each_entry ( ed_inf, queue, run_list )
+ list_for_each_entry ( vcpu_inf, queue, run_list )
{
- ed = ed_inf->exec_domain;
- printk("%3d: %u has=%c ", loop++, ed->domain->domain_id,
- test_bit(_VCPUF_running, &ed->vcpu_flags) ? 'T':'F');
- bvt_dump_runq_el(ed);
- printk("c=0x%X%08X\n", (u32)(ed->cpu_time>>32), (u32)ed->cpu_time);
+ v = vcpu_inf->vcpu;
+ printk("%3d: %u has=%c ", loop++, v->domain->domain_id,
+ test_bit(_VCPUF_running, &v->vcpu_flags) ? 'T':'F');
+ bvt_dump_runq_el(v);
+ printk("c=0x%X%08X\n", (u32)(v->cpu_time>>32), (u32)v->cpu_time);
printk(" l: %p n: %p p: %p\n",
- &ed_inf->run_list, ed_inf->run_list.next, ed_inf->run_list.prev);
+ &vcpu_inf->run_list, vcpu_inf->run_list.next,
+ vcpu_inf->run_list.prev);
}
}
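Aside, for illustration only (not part of the patch above): in BVT the per-domain block still embeds one record per virtual CPU, and bvt_alloc_task() simply points each vcpu's sched_priv at its own slot — the rename swaps ed_inf[]/exec_domain for vcpu_inf[]/vcpu without changing that layout. A simplified, hypothetical sketch of the same wiring:

    #include <stdlib.h>

    #define MAX_VIRT_CPUS 4                  /* hypothetical small limit */

    struct bvt_vcpu_info_sk { int avt, evt; void *vcpu; };
    struct bvt_dom_info_sk  { struct bvt_vcpu_info_sk vcpu_inf[MAX_VIRT_CPUS]; };

    struct bvt_dom_sk  { void *sched_priv; };
    struct bvt_vcpu_sk { int vcpu_id; void *sched_priv; struct bvt_dom_sk *domain; };

    /* First vcpu allocates the shared per-domain block; every vcpu then
     * points its sched_priv at the matching slot of the embedded array. */
    static int alloc_task_sketch(struct bvt_vcpu_sk *v)
    {
        struct bvt_dom_sk *d = v->domain;
        struct bvt_dom_info_sk *inf;

        if (d->sched_priv == NULL &&
            (d->sched_priv = calloc(1, sizeof(struct bvt_dom_info_sk))) == NULL)
            return -1;

        inf = d->sched_priv;
        inf->vcpu_inf[v->vcpu_id].vcpu = v;
        v->sched_priv = &inf->vcpu_inf[v->vcpu_id];
        return 0;
    }

    int main(void)
    {
        struct bvt_dom_sk d = { NULL };
        struct bvt_vcpu_sk v0 = { 0, NULL, &d }, v1 = { 1, NULL, &d };
        return (alloc_task_sketch(&v0) || alloc_task_sketch(&v1)) ? 1 : 0;
    }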
diff --git a/xen/common/sched_sedf.c b/xen/common/sched_sedf.c
index 15420e6eb4..ab64af3a64 100644
--- a/xen/common/sched_sedf.c
+++ b/xen/common/sched_sedf.c
@@ -64,9 +64,9 @@
struct sedf_dom_info {
struct domain *domain;
};
-struct sedf_edom_info
+struct sedf_vcpu_info
{
- struct exec_domain *exec_domain;
+ struct vcpu *vcpu;
struct list_head list;
struct list_head extralist[2];
@@ -119,14 +119,14 @@ struct sedf_cpu_info {
s_time_t current_slice_expires;
};
-#define EDOM_INFO(d) ((struct sedf_edom_info *)((d)->sched_priv))
+#define EDOM_INFO(d) ((struct sedf_vcpu_info *)((d)->sched_priv))
#define CPU_INFO(cpu) ((struct sedf_cpu_info *)schedule_data[cpu].sched_priv)
#define LIST(d) (&EDOM_INFO(d)->list)
#define EXTRALIST(d,i) (&(EDOM_INFO(d)->extralist[i]))
#define RUNQ(cpu) (&CPU_INFO(cpu)->runnableq)
#define WAITQ(cpu) (&CPU_INFO(cpu)->waitq)
#define EXTRAQ(cpu,i) (&(CPU_INFO(cpu)->extraq[i]))
-#define IDLETASK(cpu) ((struct exec_domain *)schedule_data[cpu].idle)
+#define IDLETASK(cpu) ((struct vcpu *)schedule_data[cpu].idle)
#define PERIOD_BEGIN(inf) ((inf)->deadl_abs - (inf)->period)
@@ -140,24 +140,24 @@ struct sedf_cpu_info {
static void sedf_dump_cpu_state(int i);
-static inline int extraq_on(struct exec_domain *d, int i) {
+static inline int extraq_on(struct vcpu *d, int i) {
return ((EXTRALIST(d,i)->next != NULL) &&
(EXTRALIST(d,i)->next != EXTRALIST(d,i)));
}
-static inline void extraq_add_head(struct exec_domain *d, int i)
+static inline void extraq_add_head(struct vcpu *d, int i)
{
list_add(EXTRALIST(d,i), EXTRAQ(d->processor,i));
ASSERT(extraq_on(d, i));
}
-static inline void extraq_add_tail(struct exec_domain *d, int i)
+static inline void extraq_add_tail(struct vcpu *d, int i)
{
list_add_tail(EXTRALIST(d,i), EXTRAQ(d->processor,i));
ASSERT(extraq_on(d, i));
}
-static inline void extraq_del(struct exec_domain *d, int i)
+static inline void extraq_del(struct vcpu *d, int i)
{
struct list_head *list = EXTRALIST(d,i);
ASSERT(extraq_on(d,i));
@@ -174,9 +174,9 @@ static inline void extraq_del(struct exec_domain *d, int i)
each entry, in order to avoid overflow. The algorithm works by simply
charging each domain that received extratime with an inverse of its weight.
*/
-static inline void extraq_add_sort_update(struct exec_domain *d, int i, int sub) {
+static inline void extraq_add_sort_update(struct vcpu *d, int i, int sub) {
struct list_head *cur;
- struct sedf_edom_info *curinf;
+ struct sedf_vcpu_info *curinf;
ASSERT(!extraq_on(d,i));
PRINT(3, "Adding domain %i.%i (score= %i, short_pen= %"PRIi64")"
@@ -186,14 +186,14 @@ static inline void extraq_add_sort_update(struct exec_domain *d, int i, int sub)
/*iterate through all elements to find our "hole" and on our way
update all the other scores*/
list_for_each(cur,EXTRAQ(d->processor,i)){
- curinf = list_entry(cur,struct sedf_edom_info,extralist[i]);
+ curinf = list_entry(cur,struct sedf_vcpu_info,extralist[i]);
curinf->score[i] -= sub;
if (EDOM_INFO(d)->score[i] < curinf->score[i])
break;
else
PRINT(4,"\tbehind domain %i.%i (score= %i)\n",
- curinf->exec_domain->domain->domain_id,
- curinf->exec_domain->vcpu_id, curinf->score[i]);
+ curinf->vcpu->domain->domain_id,
+ curinf->vcpu->vcpu_id, curinf->score[i]);
}
/*cur now contains the element, before which we'll enqueue*/
PRINT(3, "\tlist_add to %p\n", cur->prev);
@@ -203,16 +203,16 @@ static inline void extraq_add_sort_update(struct exec_domain *d, int i, int sub)
if ((cur != EXTRAQ(d->processor,i)) && sub)
for (cur = cur->next; cur != EXTRAQ(d->processor,i);
cur = cur-> next) {
- curinf = list_entry(cur,struct sedf_edom_info,
+ curinf = list_entry(cur,struct sedf_vcpu_info,
extralist[i]);
curinf->score[i] -= sub;
PRINT(4, "\tupdating domain %i.%i (score= %u)\n",
- curinf->exec_domain->domain->domain_id,
- curinf->exec_domain->vcpu_id, curinf->score[i]);
+ curinf->vcpu->domain->domain_id,
+ curinf->vcpu->vcpu_id, curinf->score[i]);
}
ASSERT(extraq_on(d,i));
}
-static inline void extraq_check(struct exec_domain *d) {
+static inline void extraq_check(struct vcpu *d) {
if (extraq_on(d, EXTRA_UTIL_Q)) {
PRINT(2,"Dom %i.%i is on L1 extraQ\n",d->domain->domain_id, d->vcpu_id);
if (!(EDOM_INFO(d)->status & EXTRA_AWARE) &&
@@ -240,9 +240,9 @@ static inline void extraq_check(struct exec_domain *d) {
}
}
-static inline void extraq_check_add_unblocked(struct exec_domain *d,
+static inline void extraq_check_add_unblocked(struct vcpu *d,
int priority) {
- struct sedf_edom_info *inf = EDOM_INFO(d);
+ struct sedf_vcpu_info *inf = EDOM_INFO(d);
if (inf->status & EXTRA_AWARE)
#if (EXTRA == EXTRA_ROUNDR)
if (priority)
@@ -259,10 +259,10 @@ static inline void extraq_check_add_unblocked(struct exec_domain *d,
#endif
}
-static inline int __task_on_queue(struct exec_domain *d) {
+static inline int __task_on_queue(struct vcpu *d) {
return (((LIST(d))->next != NULL) && (LIST(d)->next != LIST(d)));
}
-static inline void __del_from_queue(struct exec_domain *d)
+static inline void __del_from_queue(struct vcpu *d)
{
struct list_head *list = LIST(d);
ASSERT(__task_on_queue(d));
@@ -290,9 +290,9 @@ static inline void list_insert_sort(struct list_head *list,
#define DOMAIN_COMPARER(name, field, comp1, comp2) \
int name##_comp(struct list_head* el1, struct list_head* el2) \
{ \
- struct sedf_edom_info *d1, *d2; \
- d1 = list_entry(el1,struct sedf_edom_info, field); \
- d2 = list_entry(el2,struct sedf_edom_info, field); \
+ struct sedf_vcpu_info *d1, *d2; \
+ d1 = list_entry(el1,struct sedf_vcpu_info, field); \
+ d2 = list_entry(el2,struct sedf_vcpu_info, field); \
if ((comp1) == (comp2)) \
return 0; \
if ((comp1) < (comp2)) \
@@ -305,7 +305,7 @@ int name##_comp(struct list_head* el1, struct list_head* el2) \
absol. deadline - period
*/
DOMAIN_COMPARER(waitq, list, PERIOD_BEGIN(d1), PERIOD_BEGIN(d2))
- static inline void __add_to_waitqueue_sort(struct exec_domain *d) {
+ static inline void __add_to_waitqueue_sort(struct vcpu *d) {
ASSERT(!__task_on_queue(d));
PRINT(3,"Adding domain %i.%i (bop= %"PRIu64") to waitq\n",
d->domain->domain_id, d->vcpu_id, PERIOD_BEGIN(EDOM_INFO(d)));
@@ -319,14 +319,14 @@ DOMAIN_COMPARER(waitq, list, PERIOD_BEGIN(d1), PERIOD_BEGIN(d2))
task will run. As we are implementing EDF, this list is sorted by deadlines.
*/
DOMAIN_COMPARER(runq, list, d1->deadl_abs, d2->deadl_abs)
- static inline void __add_to_runqueue_sort(struct exec_domain *d) {
+ static inline void __add_to_runqueue_sort(struct vcpu *d) {
PRINT(3,"Adding domain %i.%i (deadl= %"PRIu64") to runq\n",
d->domain->domain_id, d->vcpu_id, EDOM_INFO(d)->deadl_abs);
list_insert_sort(RUNQ(d->processor), LIST(d), runq_comp);
}
/* Allocates memory for per domain private scheduling data*/
-static int sedf_alloc_task(struct exec_domain *d) {
+static int sedf_alloc_task(struct vcpu *d) {
PRINT(2,"sedf_alloc_task was called, domain-id %i.%i\n",d->domain->domain_id,
d->vcpu_id);
if (d->domain->sched_priv == NULL) {
@@ -335,17 +335,17 @@ static int sedf_alloc_task(struct exec_domain *d) {
return -1;
memset(d->domain->sched_priv, 0, sizeof(struct sedf_dom_info));
}
- if ((d->sched_priv = xmalloc(struct sedf_edom_info)) == NULL )
+ if ((d->sched_priv = xmalloc(struct sedf_vcpu_info)) == NULL )
return -1;
- memset(d->sched_priv, 0, sizeof(struct sedf_edom_info));
+ memset(d->sched_priv, 0, sizeof(struct sedf_vcpu_info));
return 0;
}
/* Setup the sedf_dom_info */
-static void sedf_add_task(struct exec_domain *d)
+static void sedf_add_task(struct vcpu *d)
{
- struct sedf_edom_info *inf = EDOM_INFO(d);
- inf->exec_domain = d;
+ struct sedf_vcpu_info *inf = EDOM_INFO(d);
+ inf->vcpu = d;
PRINT(2,"sedf_add_task was called, domain-id %i.%i\n",d->domain->domain_id,
d->vcpu_id);
@@ -401,15 +401,15 @@ static void sedf_free_task(struct domain *d)
xfree(d->sched_priv);
for (i = 0; i < MAX_VIRT_CPUS; i++)
- if ( d->exec_domain[i] ) {
- ASSERT(d->exec_domain[i]->sched_priv != NULL);
- xfree(d->exec_domain[i]->sched_priv);
+ if ( d->vcpu[i] ) {
+ ASSERT(d->vcpu[i]->sched_priv != NULL);
+ xfree(d->vcpu[i]->sched_priv);
}
}
/* handles the rescheduling, bookkeeping of domains running in their realtime-time :)*/
-static inline void desched_edf_dom (s_time_t now, struct exec_domain* d) {
- struct sedf_edom_info* inf = EDOM_INFO(d);
+static inline void desched_edf_dom (s_time_t now, struct vcpu* d) {
+ struct sedf_vcpu_info* inf = EDOM_INFO(d);
/*current domain is running in real time mode*/
ASSERT(__task_on_queue(d));
@@ -470,18 +470,18 @@ static inline void desched_edf_dom (s_time_t now, struct exec_domain* d) {
static inline void update_queues(s_time_t now, struct list_head* runq,
struct list_head* waitq) {
struct list_head *cur,*tmp;
- struct sedf_edom_info *curinf;
+ struct sedf_vcpu_info *curinf;
PRINT(3,"Updating waitq..\n");
/*check for the first elements of the waitqueue, whether their
next period has already started*/
list_for_each_safe(cur, tmp, waitq) {
- curinf = list_entry(cur, struct sedf_edom_info, list);
+ curinf = list_entry(cur, struct sedf_vcpu_info, list);
PRINT(4,"\tLooking @ dom %i.%i\n",
- curinf->exec_domain->domain->domain_id, curinf->exec_domain->vcpu_id);
+ curinf->vcpu->domain->domain_id, curinf->vcpu->vcpu_id);
if (PERIOD_BEGIN(curinf) <= now) {
- __del_from_queue(curinf->exec_domain);
- __add_to_runqueue_sort(curinf->exec_domain);
+ __del_from_queue(curinf->vcpu);
+ __add_to_runqueue_sort(curinf->vcpu);
}
else
break;
@@ -491,20 +491,20 @@ static inline void update_queues(s_time_t now, struct list_head* runq,
/*process the runq, find domains that are on
the runqueue which shouldn't be there*/
list_for_each_safe(cur, tmp, runq) {
- curinf = list_entry(cur,struct sedf_edom_info,list);
+ curinf = list_entry(cur,struct sedf_vcpu_info,list);
PRINT(4,"\tLooking @ dom %i.%i\n",
- curinf->exec_domain->domain->domain_id, curinf->exec_domain->vcpu_id);
+ curinf->vcpu->domain->domain_id, curinf->vcpu->vcpu_id);
if (unlikely(curinf->slice == 0)) {
/*ignore domains with empty slice*/
PRINT(4,"\tUpdating zero-slice domain %i.%i\n",
- curinf->exec_domain->domain->domain_id,
- curinf->exec_domain->vcpu_id);
- __del_from_queue(curinf->exec_domain);
+ curinf->vcpu->domain->domain_id,
+ curinf->vcpu->vcpu_id);
+ __del_from_queue(curinf->vcpu);
/*move them to their next period*/
curinf->deadl_abs += curinf->period;
/*and put them back into the queue*/
- __add_to_waitqueue_sort(curinf->exec_domain);
+ __add_to_waitqueue_sort(curinf->vcpu);
continue;
}
if (unlikely((curinf->deadl_abs < now) ||
@@ -515,11 +515,11 @@ static inline void update_queues(s_time_t now, struct list_head* runq,
PRINT(4,"\tDomain %i.%i exceeded its deadline/"
"slice (%"PRIu64" / %"PRIu64") now: %"PRIu64
" cputime: %"PRIu64"\n",
- curinf->exec_domain->domain->domain_id,
- curinf->exec_domain->vcpu_id,
+ curinf->vcpu->domain->domain_id,
+ curinf->vcpu->vcpu_id,
curinf->deadl_abs, curinf->slice, now,
curinf->cputime);
- __del_from_queue(curinf->exec_domain);
+ __del_from_queue(curinf->vcpu);
/*common case: we miss one period!*/
curinf->deadl_abs += curinf->period;
@@ -534,9 +534,9 @@ static inline void update_queues(s_time_t now, struct list_head* runq,
/*give a fresh slice*/
curinf->cputime = 0;
if (PERIOD_BEGIN(curinf) > now)
- __add_to_waitqueue_sort(curinf->exec_domain);
+ __add_to_waitqueue_sort(curinf->vcpu);
else
- __add_to_runqueue_sort(curinf->exec_domain);
+ __add_to_runqueue_sort(curinf->vcpu);
}
else
break;
@@ -551,8 +551,8 @@ static inline void update_queues(s_time_t now, struct list_head* runq,
weighted ext.: insert in sorted list by score
if the domain is blocked / has regained its short-block-loss
time it is not put on any queue */
-static inline void desched_extra_dom(s_time_t now, struct exec_domain* d) {
- struct sedf_edom_info *inf = EDOM_INFO(d);
+static inline void desched_extra_dom(s_time_t now, struct vcpu* d) {
+ struct sedf_vcpu_info *inf = EDOM_INFO(d);
int i = extra_get_cur_q(inf);
#if (EXTRA == EXTRA_SLICE_WEIGHT || EXTRA == EXTRA_BLOCK_WEIGHT)
@@ -582,11 +582,11 @@ static inline void desched_extra_dom(s_time_t now, struct exec_domain* d) {
/*inf->short_block_lost_tot -= EXTRA_QUANTUM;*/
inf->short_block_lost_tot -= now - inf->sched_start_abs;
PRINT(3,"Domain %i.%i: Short_block_loss: %"PRIi64"\n",
- inf->exec_domain->domain->domain_id, inf->exec_domain->vcpu_id,
+ inf->vcpu->domain->domain_id, inf->vcpu->vcpu_id,
inf->short_block_lost_tot);
if (inf->short_block_lost_tot <= 0) {
PRINT(4,"Domain %i.%i compensated short block loss!\n",
- inf->exec_domain->domain->domain_id, inf->exec_domain->vcpu_id);
+ inf->vcpu->domain->domain_id, inf->vcpu->vcpu_id);
/*we have (over-)compensated our block penalty*/
inf->short_block_lost_tot = 0;
/*we don't want a place on the penalty queue anymore!*/
@@ -646,7 +646,7 @@ static inline void desched_extra_dom(s_time_t now, struct exec_domain* d) {
static inline struct task_slice sedf_do_extra_schedule (s_time_t now,
s_time_t end_xt, struct list_head *extraq[], int cpu) {
struct task_slice ret;
- struct sedf_edom_info *runinf;
+ struct sedf_vcpu_info *runinf;
/* Enough time left to use for extratime? */
if (end_xt - now < EXTRA_QUANTUM)
@@ -656,9 +656,9 @@ static inline struct task_slice sedf_do_extra_schedule (s_time_t now,
/*we still have elements on the level 0 extraq
=> let those run first!*/
runinf = list_entry(extraq[EXTRA_PEN_Q]->next,
- struct sedf_edom_info, extralist[EXTRA_PEN_Q]);
+ struct sedf_vcpu_info, extralist[EXTRA_PEN_Q]);
runinf->status |= EXTRA_RUN_PEN;
- ret.task = runinf->exec_domain;
+ ret.task = runinf->vcpu;
ret.time = EXTRA_QUANTUM;
#ifdef SEDF_STATS
runinf->pen_extra_slices++;
@@ -668,9 +668,9 @@ static inline struct task_slice sedf_do_extra_schedule (s_time_t now,
if (!list_empty(extraq[EXTRA_UTIL_Q])) {
/*use elements from the normal extraqueue*/
runinf = list_entry(extraq[EXTRA_UTIL_Q]->next,
- struct sedf_edom_info, extralist[EXTRA_UTIL_Q]);
+ struct sedf_vcpu_info, extralist[EXTRA_UTIL_Q]);
runinf->status |= EXTRA_RUN_UTIL;
- ret.task = runinf->exec_domain;
+ ret.task = runinf->vcpu;
ret.time = EXTRA_QUANTUM;
}
else
@@ -698,7 +698,7 @@ static struct task_slice sedf_do_schedule(s_time_t now)
struct list_head *runq = RUNQ(cpu);
struct list_head *waitq = WAITQ(cpu);
#if (EXTRA > EXTRA_OFF)
- struct sedf_edom_info *inf = EDOM_INFO(current);
+ struct sedf_vcpu_info *inf = EDOM_INFO(current);
struct list_head *extraq[] = {EXTRAQ(cpu, EXTRA_PEN_Q),
EXTRAQ(cpu, EXTRA_UTIL_Q)};
#endif
@@ -732,14 +732,14 @@ static struct task_slice sedf_do_schedule(s_time_t now)
/*now simply pick the first domain from the runqueue, which has the
earliest deadline, because the list is sorted*/
- struct sedf_edom_info *runinf, *waitinf;
+ struct sedf_vcpu_info *runinf, *waitinf;
if (!list_empty(runq)) {
- runinf = list_entry(runq->next,struct sedf_edom_info,list);
- ret.task = runinf->exec_domain;
+ runinf = list_entry(runq->next,struct sedf_vcpu_info,list);
+ ret.task = runinf->vcpu;
if (!list_empty(waitq)) {
waitinf = list_entry(waitq->next,
- struct sedf_edom_info,list);
+ struct sedf_vcpu_info,list);
/*rerun scheduler, when scheduled domain reaches its
end of slice or the first domain from the waitqueue
gets ready*/
@@ -754,7 +754,7 @@ static struct task_slice sedf_do_schedule(s_time_t now)
}
if (!list_empty(waitq)) {
- waitinf = list_entry(waitq->next,struct sedf_edom_info, list);
+ waitinf = list_entry(waitq->next,struct sedf_vcpu_info, list);
/*we could not find any suitable domain
=> look for domains that are aware of extratime*/
#if (EXTRA > EXTRA_OFF)
@@ -789,7 +789,7 @@ static struct task_slice sedf_do_schedule(s_time_t now)
return ret;
}
-static void sedf_sleep(struct exec_domain *d) {
+static void sedf_sleep(struct vcpu *d) {
PRINT(2,"sedf_sleep was called, domain-id %i.%i\n",d->domain->domain_id, d->vcpu_id);
if (is_idle_task(d->domain))
@@ -886,12 +886,12 @@ static void sedf_sleep(struct exec_domain *d) {
* opposed to approaches 1,2a,2b
*/
static inline void unblock_short_vcons
-(struct sedf_edom_info* inf, s_time_t now) {
+(struct sedf_vcpu_info* inf, s_time_t now) {
inf->deadl_abs += inf->period;
inf->cputime = 0;
}
-static inline void unblock_short_cons(struct sedf_edom_info* inf, s_time_t now)
+static inline void unblock_short_cons(struct sedf_vcpu_info* inf, s_time_t now)
{
/*treat blocked time as consumed by the domain*/
inf->cputime += now - inf->block_abs;
@@ -905,7 +905,7 @@ static inline void unblock_short_cons(struct sedf_edom_info* inf, s_time_t now)
inf->short_cont++;
#endif
}
-static inline void unblock_short_extra_support (struct sedf_edom_info* inf,
+static inline void unblock_short_extra_support (struct sedf_vcpu_info* inf,
s_time_t now) {
/*this unblocking scheme tries to support the domain, by assigning it
a priority in extratime distribution according to the loss of time
@@ -933,9 +933,9 @@ static inline void unblock_short_extra_support (struct sedf_edom_info* inf,
#ifdef SEDF_STATS
inf->pen_extra_blocks++;
#endif
- if (extraq_on(inf->exec_domain, EXTRA_PEN_Q))
+ if (extraq_on(inf->vcpu, EXTRA_PEN_Q))
/*remove domain for possible resorting!*/
- extraq_del(inf->exec_domain, EXTRA_PEN_Q);
+ extraq_del(inf->vcpu, EXTRA_PEN_Q);
else
/*remember that we want to be on the penalty q
so that we can continue when we (un-)block
@@ -943,14 +943,14 @@ static inline void unblock_short_extra_support (struct sedf_edom_info* inf,
inf->status |= EXTRA_WANT_PEN_Q;
/*(re-)add domain to the penalty extraq*/
- extraq_add_sort_update(inf->exec_domain,
+ extraq_add_sort_update(inf->vcpu,
EXTRA_PEN_Q, 0);
}
}
/*give it a fresh slice in the next period!*/
inf->cputime = 0;
}
-static inline void unblock_long_vcons(struct sedf_edom_info* inf, s_time_t now)
+static inline void unblock_long_vcons(struct sedf_vcpu_info* inf, s_time_t now)
{
/* align to next future period */
inf->deadl_abs += (DIV_UP(now - inf->deadl_abs, inf->period) +1)
@@ -958,7 +958,7 @@ static inline void unblock_long_vcons(struct sedf_edom_info* inf, s_time_t now)
inf->cputime = 0;
}
-static inline void unblock_long_cons_a (struct sedf_edom_info* inf,
+static inline void unblock_long_cons_a (struct sedf_vcpu_info* inf,
s_time_t now) {
/*treat the time the domain was blocked in the
CURRENT period as consumed by the domain*/
@@ -969,13 +969,13 @@ static inline void unblock_long_cons_a (struct sedf_edom_info* inf,
unblock_long_vcons(inf, now);
}
}
-static inline void unblock_long_cons_b(struct sedf_edom_info* inf,s_time_t now) {
+static inline void unblock_long_cons_b(struct sedf_vcpu_info* inf,s_time_t now) {
/*Conservative 2b*/
/*Treat the unblocking time as a start of a new period */
inf->deadl_abs = now + inf->period;
inf->cputime = 0;
}
-static inline void unblock_long_cons_c(struct sedf_edom_info* inf,s_time_t now) {
+static inline void unblock_long_cons_c(struct sedf_vcpu_info* inf,s_time_t now) {
if (likely(inf->latency)) {
/*scale the slice and period according to the latency hint*/
/*reduce period temporarily to the latency hint*/
@@ -995,7 +995,7 @@ static inline void unblock_long_cons_c(struct sedf_edom_info* inf,s_time_t now)
}
}
/*a new idea of dealing with short blocks: burst period scaling*/
-static inline void unblock_short_burst(struct sedf_edom_info* inf, s_time_t now)
+static inline void unblock_short_burst(struct sedf_vcpu_info* inf, s_time_t now)
{
/*treat blocked time as consumed by the domain*/
inf->cputime += now - inf->block_abs;
@@ -1035,7 +1035,7 @@ static inline void unblock_short_burst(struct sedf_edom_info* inf, s_time_t now)
}
inf->unblock_abs = now;
}
-static inline void unblock_long_burst(struct sedf_edom_info* inf, s_time_t now) {
+static inline void unblock_long_burst(struct sedf_vcpu_info* inf, s_time_t now) {
if (unlikely(inf->latency && (inf->period > inf->latency))) {
/*scale the slice and period according to the latency hint*/
inf->period = inf->latency;
@@ -1062,8 +1062,8 @@ static inline void unblock_long_burst(struct sedf_edom_info* inf, s_time_t now)
#define DOMAIN_EXTRA_PEN 2
#define DOMAIN_EXTRA_UTIL 3
#define DOMAIN_IDLE 4
-static inline int get_run_type(struct exec_domain* d) {
- struct sedf_edom_info* inf = EDOM_INFO(d);
+static inline int get_run_type(struct vcpu* d) {
+ struct sedf_vcpu_info* inf = EDOM_INFO(d);
if (is_idle_task(d->domain))
return DOMAIN_IDLE;
if (inf->status & EXTRA_RUN_PEN)
@@ -1081,9 +1081,9 @@ static inline int get_run_type(struct exec_domain* d) {
In the same class priorities are assigned as following:
EDF: early deadline > late deadline
L0 extra-time: lower score > higher score*/
-static inline int should_switch(struct exec_domain* cur,
- struct exec_domain* other, s_time_t now) {
- struct sedf_edom_info *cur_inf, *other_inf;
+static inline int should_switch(struct vcpu* cur,
+ struct vcpu* other, s_time_t now) {
+ struct sedf_vcpu_info *cur_inf, *other_inf;
cur_inf = EDOM_INFO(cur);
other_inf = EDOM_INFO(other);
@@ -1115,9 +1115,9 @@ static inline int should_switch(struct exec_domain* cur,
}
return 1;
}
-void sedf_wake(struct exec_domain *d) {
+void sedf_wake(struct vcpu *d) {
s_time_t now = NOW();
- struct sedf_edom_info* inf = EDOM_INFO(d);
+ struct sedf_vcpu_info* inf = EDOM_INFO(d);
PRINT(3, "sedf_wake was called, domain-id %i.%i\n",d->domain->domain_id, d->vcpu_id);
@@ -1231,7 +1231,7 @@ void sedf_wake(struct exec_domain *d) {
}
/*Print a lot of use-{full, less} information about the domains in the system*/
-static void sedf_dump_domain(struct exec_domain *d) {
+static void sedf_dump_domain(struct vcpu *d) {
printk("%i.%i has=%c ", d->domain->domain_id, d->vcpu_id,
test_bit(_VCPUF_running, &d->vcpu_flags) ? 'T':'F');
printk("p=%"PRIu64" sl=%"PRIu64" ddl=%"PRIu64" w=%hu c=%"PRIu64" sc=%i xtr(%s)=%"PRIu64" ew=%hu",
@@ -1267,9 +1267,9 @@ static void sedf_dump_domain(struct exec_domain *d) {
static void sedf_dump_cpu_state(int i)
{
struct list_head *list, *queue, *tmp;
- struct sedf_edom_info *d_inf;
+ struct sedf_vcpu_info *d_inf;
struct domain *d;
- struct exec_domain *ed;
+ struct vcpu *ed;
int loop = 0;
printk("now=%"PRIu64"\n",NOW());
@@ -1278,8 +1278,8 @@ static void sedf_dump_cpu_state(int i)
(unsigned long) queue->next, (unsigned long) queue->prev);
list_for_each_safe ( list, tmp, queue ) {
printk("%3d: ",loop++);
- d_inf = list_entry(list, struct sedf_edom_info, list);
- sedf_dump_domain(d_inf->exec_domain);
+ d_inf = list_entry(list, struct sedf_vcpu_info, list);
+ sedf_dump_domain(d_inf->vcpu);
}
queue = WAITQ(i); loop = 0;
@@ -1287,8 +1287,8 @@ static void sedf_dump_cpu_state(int i)
(unsigned long) queue->next, (unsigned long) queue->prev);
list_for_each_safe ( list, tmp, queue ) {
printk("%3d: ",loop++);
- d_inf = list_entry(list, struct sedf_edom_info, list);
- sedf_dump_domain(d_inf->exec_domain);
+ d_inf = list_entry(list, struct sedf_vcpu_info, list);
+ sedf_dump_domain(d_inf->vcpu);
}
queue = EXTRAQ(i,EXTRA_PEN_Q); loop = 0;
@@ -1296,10 +1296,10 @@ static void sedf_dump_cpu_state(int i)
(unsigned long)queue, (unsigned long) queue->next,
(unsigned long) queue->prev);
list_for_each_safe ( list, tmp, queue ) {
- d_inf = list_entry(list, struct sedf_edom_info,
+ d_inf = list_entry(list, struct sedf_vcpu_info,
extralist[EXTRA_PEN_Q]);
printk("%3d: ",loop++);
- sedf_dump_domain(d_inf->exec_domain);
+ sedf_dump_domain(d_inf->vcpu);
}
queue = EXTRAQ(i,EXTRA_UTIL_Q); loop = 0;
@@ -1307,16 +1307,16 @@ static void sedf_dump_cpu_state(int i)
(unsigned long)queue, (unsigned long) queue->next,
(unsigned long) queue->prev);
list_for_each_safe ( list, tmp, queue ) {
- d_inf = list_entry(list, struct sedf_edom_info,
+ d_inf = list_entry(list, struct sedf_vcpu_info,
extralist[EXTRA_UTIL_Q]);
printk("%3d: ",loop++);
- sedf_dump_domain(d_inf->exec_domain);
+ sedf_dump_domain(d_inf->vcpu);
}
loop = 0;
printk("\nnot on Q\n");
for_each_domain(d)
- for_each_exec_domain(d, ed)
+ for_each_vcpu(d, ed)
{
if (!__task_on_queue(ed) && (ed->processor == i)) {
printk("%3d: ",loop++);
@@ -1326,7 +1326,7 @@ static void sedf_dump_cpu_state(int i)
}
/*Adjusts periods and slices of the domains according to their weights*/
static inline int sedf_adjust_weights(struct sched_adjdom_cmd *cmd) {
- struct exec_domain *p;
+ struct vcpu *p;
struct domain *d;
int sumw[NR_CPUS];
s_time_t sumt[NR_CPUS];
@@ -1338,7 +1338,7 @@ static inline int sedf_adjust_weights(struct sched_adjdom_cmd *cmd) {
}
/*sum up all weights*/
for_each_domain(d)
- for_each_exec_domain(d, p) {
+ for_each_vcpu(d, p) {
if (EDOM_INFO(p)->weight)
sumw[p->processor] += EDOM_INFO(p)->weight;
else {
@@ -1356,7 +1356,7 @@ static inline int sedf_adjust_weights(struct sched_adjdom_cmd *cmd) {
}
/*adjust all slices (and periods) to the new weight*/
for_each_domain(d)
- for_each_exec_domain(d, p) {
+ for_each_vcpu(d, p) {
if (EDOM_INFO(p)->weight) {
EDOM_INFO(p)->period_orig =
EDOM_INFO(p)->period = WEIGHT_PERIOD;
@@ -1372,7 +1372,7 @@ static inline int sedf_adjust_weights(struct sched_adjdom_cmd *cmd) {
/* set or fetch domain scheduling parameters */
static int sedf_adjdom(struct domain *p, struct sched_adjdom_cmd *cmd) {
- struct exec_domain *ed;
+ struct vcpu *v;
PRINT(2,"sedf_adjdom was called, domain-id %i new period %"PRIu64" "\
"new slice %"PRIu64"\nlatency %"PRIu64" extra:%s\n",
@@ -1387,51 +1387,51 @@ static int sedf_adjdom(struct domain *p, struct sched_adjdom_cmd *cmd) {
if ((cmd->u.sedf.extratime & EXTRA_AWARE) &&
(! cmd->u.sedf.period)) {
/*weight driven domains with xtime ONLY!*/
- for_each_exec_domain(p, ed) {
- EDOM_INFO(ed)->extraweight = cmd->u.sedf.weight;
- EDOM_INFO(ed)->weight = 0;
- EDOM_INFO(ed)->slice = 0;
- EDOM_INFO(ed)->period = WEIGHT_PERIOD;
+ for_each_vcpu(p, v) {
+ EDOM_INFO(v)->extraweight = cmd->u.sedf.weight;
+ EDOM_INFO(v)->weight = 0;
+ EDOM_INFO(v)->slice = 0;
+ EDOM_INFO(v)->period = WEIGHT_PERIOD;
}
} else {
/*weight driven domains with real-time execution*/
- for_each_exec_domain(p, ed)
- EDOM_INFO(ed)->weight = cmd->u.sedf.weight;
+ for_each_vcpu(p, v)
+ EDOM_INFO(v)->weight = cmd->u.sedf.weight;
}
}
else {
/*time driven domains*/
- for_each_exec_domain(p, ed) {
+ for_each_vcpu(p, v) {
/* sanity checking! */
if(cmd->u.sedf.slice > cmd->u.sedf.period )
return -EINVAL;
- EDOM_INFO(ed)->weight = 0;
- EDOM_INFO(ed)->extraweight = 0;
- EDOM_INFO(ed)->period_orig =
- EDOM_INFO(ed)->period = cmd->u.sedf.period;
- EDOM_INFO(ed)->slice_orig =
- EDOM_INFO(ed)->slice = cmd->u.sedf.slice;
+ EDOM_INFO(v)->weight = 0;
+ EDOM_INFO(v)->extraweight = 0;
+ EDOM_INFO(v)->period_orig =
+ EDOM_INFO(v)->period = cmd->u.sedf.period;
+ EDOM_INFO(v)->slice_orig =
+ EDOM_INFO(v)->slice = cmd->u.sedf.slice;
}
}
if (sedf_adjust_weights(cmd))
return -EINVAL;
- for_each_exec_domain(p, ed) {
- EDOM_INFO(ed)->status =
- (EDOM_INFO(ed)->status &
+ for_each_vcpu(p, v) {
+ EDOM_INFO(v)->status =
+ (EDOM_INFO(v)->status &
~EXTRA_AWARE) | (cmd->u.sedf.extratime & EXTRA_AWARE);
- EDOM_INFO(ed)->latency = cmd->u.sedf.latency;
- extraq_check(ed);
+ EDOM_INFO(v)->latency = cmd->u.sedf.latency;
+ extraq_check(v);
}
}
else if ( cmd->direction == SCHED_INFO_GET )
{
- cmd->u.sedf.period = EDOM_INFO(p->exec_domain[0])->period;
- cmd->u.sedf.slice = EDOM_INFO(p->exec_domain[0])->slice;
- cmd->u.sedf.extratime = EDOM_INFO(p->exec_domain[0])->status
+ cmd->u.sedf.period = EDOM_INFO(p->vcpu[0])->period;
+ cmd->u.sedf.slice = EDOM_INFO(p->vcpu[0])->slice;
+ cmd->u.sedf.extratime = EDOM_INFO(p->vcpu[0])->status
& EXTRA_AWARE;
- cmd->u.sedf.latency = EDOM_INFO(p->exec_domain[0])->latency;
- cmd->u.sedf.weight = EDOM_INFO(p->exec_domain[0])->weight;
+ cmd->u.sedf.latency = EDOM_INFO(p->vcpu[0])->latency;
+ cmd->u.sedf.weight = EDOM_INFO(p->vcpu[0])->weight;
}
PRINT(2,"sedf_adjdom_finished\n");
return 0;
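Aside, for illustration only (not part of the patch above): SEDF keeps its runqueue sorted by absolute deadline, so the DOMAIN_COMPARER/list_insert_sort pair seen earlier always leaves the earliest deadline at the head and sedf_do_schedule() can just take the first entry. A minimal, hypothetical sketch of that sorted insertion using an array instead of the hypervisor's linked lists:

    #include <stdio.h>

    #define MAX_Q 8

    struct sedf_sk { int vcpu_id; unsigned long deadl_abs; };

    static struct sedf_sk runq[MAX_Q];
    static int runq_len;

    /* Insert so that runq[] stays ordered by ascending absolute deadline. */
    static void add_to_runqueue_sorted(struct sedf_sk e)
    {
        int i = runq_len++;
        while (i > 0 && runq[i - 1].deadl_abs > e.deadl_abs) {
            runq[i] = runq[i - 1];
            i--;
        }
        runq[i] = e;
    }

    int main(void)
    {
        add_to_runqueue_sorted((struct sedf_sk){ 1, 300 });
        add_to_runqueue_sorted((struct sedf_sk){ 2, 100 });
        add_to_runqueue_sorted((struct sedf_sk){ 3, 200 });
        /* EDF pick: the head of the queue has the earliest deadline (vcpu 2). */
        printf("next: vcpu %d (deadline %lu)\n", runq[0].vcpu_id, runq[0].deadl_abs);
        return 0;
    }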
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 7707ea03af..b98dcc6456 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -83,57 +83,57 @@ void free_domain_struct(struct domain *d)
SCHED_OP(free_task, d);
for (i = 0; i < MAX_VIRT_CPUS; i++)
- if ( d->exec_domain[i] )
- arch_free_exec_domain_struct(d->exec_domain[i]);
+ if ( d->vcpu[i] )
+ arch_free_vcpu_struct(d->vcpu[i]);
xfree(d);
}
-struct exec_domain *alloc_exec_domain_struct(
+struct vcpu *alloc_vcpu_struct(
struct domain *d, unsigned long vcpu)
{
- struct exec_domain *ed, *edc;
+ struct vcpu *v, *vc;
- ASSERT( d->exec_domain[vcpu] == NULL );
+ ASSERT( d->vcpu[vcpu] == NULL );
- if ( (ed = arch_alloc_exec_domain_struct()) == NULL )
+ if ( (v = arch_alloc_vcpu_struct()) == NULL )
return NULL;
- memset(ed, 0, sizeof(*ed));
+ memset(v, 0, sizeof(*v));
- d->exec_domain[vcpu] = ed;
- ed->domain = d;
- ed->vcpu_id = vcpu;
+ d->vcpu[vcpu] = v;
+ v->domain = d;
+ v->vcpu_id = vcpu;
- if ( SCHED_OP(alloc_task, ed) < 0 )
+ if ( SCHED_OP(alloc_task, v) < 0 )
goto out;
if ( vcpu != 0 )
{
- ed->vcpu_info = &d->shared_info->vcpu_data[ed->vcpu_id];
+ v->vcpu_info = &d->shared_info->vcpu_data[v->vcpu_id];
- for_each_exec_domain( d, edc )
+ for_each_vcpu( d, vc )
{
- if ( (edc->next_in_list == NULL) ||
- (edc->next_in_list->vcpu_id > vcpu) )
+ if ( (vc->next_in_list == NULL) ||
+ (vc->next_in_list->vcpu_id > vcpu) )
break;
}
- ed->next_in_list = edc->next_in_list;
- edc->next_in_list = ed;
+ v->next_in_list = vc->next_in_list;
+ vc->next_in_list = v;
- if (test_bit(_VCPUF_cpu_pinned, &edc->vcpu_flags)) {
- ed->processor = (edc->processor + 1) % num_online_cpus();
- set_bit(_VCPUF_cpu_pinned, &ed->vcpu_flags);
+ if (test_bit(_VCPUF_cpu_pinned, &vc->vcpu_flags)) {
+ v->processor = (vc->processor + 1) % num_online_cpus();
+ set_bit(_VCPUF_cpu_pinned, &v->vcpu_flags);
} else {
- ed->processor = (edc->processor + 1) % num_online_cpus();
+ v->processor = (vc->processor + 1) % num_online_cpus();
}
}
- return ed;
+ return v;
out:
- d->exec_domain[vcpu] = NULL;
- arch_free_exec_domain_struct(ed);
+ d->vcpu[vcpu] = NULL;
+ arch_free_vcpu_struct(v);
return NULL;
}
@@ -147,7 +147,7 @@ struct domain *alloc_domain_struct(void)
memset(d, 0, sizeof(*d));
- if ( alloc_exec_domain_struct(d, 0) == NULL )
+ if ( alloc_vcpu_struct(d, 0) == NULL )
goto out;
return d;
@@ -160,92 +160,92 @@ struct domain *alloc_domain_struct(void)
/*
* Add and remove a domain
*/
-void sched_add_domain(struct exec_domain *ed)
+void sched_add_domain(struct vcpu *v)
{
- struct domain *d = ed->domain;
+ struct domain *d = v->domain;
/* Initialise the per-domain timer. */
- init_ac_timer(&ed->timer, dom_timer_fn, ed, ed->processor);
+ init_ac_timer(&v->timer, dom_timer_fn, v, v->processor);
if ( is_idle_task(d) )
{
- schedule_data[ed->processor].curr = ed;
- schedule_data[ed->processor].idle = ed;
- set_bit(_VCPUF_running, &ed->vcpu_flags);
+ schedule_data[v->processor].curr = v;
+ schedule_data[v->processor].idle = v;
+ set_bit(_VCPUF_running, &v->vcpu_flags);
}
else
{
/* Must be unpaused by control software to start execution. */
- set_bit(_VCPUF_ctrl_pause, &ed->vcpu_flags);
+ set_bit(_VCPUF_ctrl_pause, &v->vcpu_flags);
}
- SCHED_OP(add_task, ed);
- TRACE_2D(TRC_SCHED_DOM_ADD, d->domain_id, ed->vcpu_id);
+ SCHED_OP(add_task, v);
+ TRACE_2D(TRC_SCHED_DOM_ADD, d->domain_id, v->vcpu_id);
}
-void sched_rem_domain(struct exec_domain *ed)
+void sched_rem_domain(struct vcpu *v)
{
- rem_ac_timer(&ed->timer);
- SCHED_OP(rem_task, ed);
- TRACE_2D(TRC_SCHED_DOM_REM, ed->domain->domain_id, ed->vcpu_id);
+ rem_ac_timer(&v->timer);
+ SCHED_OP(rem_task, v);
+ TRACE_2D(TRC_SCHED_DOM_REM, v->domain->domain_id, v->vcpu_id);
}
-void domain_sleep_nosync(struct exec_domain *ed)
+void domain_sleep_nosync(struct vcpu *v)
{
unsigned long flags;
- spin_lock_irqsave(&schedule_data[ed->processor].schedule_lock, flags);
- if ( likely(!domain_runnable(ed)) )
- SCHED_OP(sleep, ed);
- spin_unlock_irqrestore(&schedule_data[ed->processor].schedule_lock, flags);
+ spin_lock_irqsave(&schedule_data[v->processor].schedule_lock, flags);
+ if ( likely(!domain_runnable(v)) )
+ SCHED_OP(sleep, v);
+ spin_unlock_irqrestore(&schedule_data[v->processor].schedule_lock, flags);
- TRACE_2D(TRC_SCHED_SLEEP, ed->domain->domain_id, ed->vcpu_id);
+ TRACE_2D(TRC_SCHED_SLEEP, v->domain->domain_id, v->vcpu_id);
}
-void domain_sleep_sync(struct exec_domain *ed)
+void domain_sleep_sync(struct vcpu *v)
{
- domain_sleep_nosync(ed);
+ domain_sleep_nosync(v);
- while ( test_bit(_VCPUF_running, &ed->vcpu_flags) && !domain_runnable(ed) )
+ while ( test_bit(_VCPUF_running, &v->vcpu_flags) && !domain_runnable(v) )
cpu_relax();
- sync_lazy_execstate_cpuset(ed->domain->cpuset & (1UL << ed->processor));
+ sync_lazy_execstate_cpuset(v->domain->cpuset & (1UL << v->processor));
}
-void domain_wake(struct exec_domain *ed)
+void domain_wake(struct vcpu *v)
{
unsigned long flags;
- spin_lock_irqsave(&schedule_data[ed->processor].schedule_lock, flags);
- if ( likely(domain_runnable(ed)) )
+ spin_lock_irqsave(&schedule_data[v->processor].schedule_lock, flags);
+ if ( likely(domain_runnable(v)) )
{
- SCHED_OP(wake, ed);
+ SCHED_OP(wake, v);
#ifdef WAKE_HISTO
- ed->wokenup = NOW();
+ v->wokenup = NOW();
#endif
}
- clear_bit(_VCPUF_cpu_migrated, &ed->vcpu_flags);
- spin_unlock_irqrestore(&schedule_data[ed->processor].schedule_lock, flags);
+ clear_bit(_VCPUF_cpu_migrated, &v->vcpu_flags);
+ spin_unlock_irqrestore(&schedule_data[v->processor].schedule_lock, flags);
- TRACE_2D(TRC_SCHED_WAKE, ed->domain->domain_id, ed->vcpu_id);
+ TRACE_2D(TRC_SCHED_WAKE, v->domain->domain_id, v->vcpu_id);
}
/* Block the currently-executing domain until a pertinent event occurs. */
long do_block(void)
{
- struct exec_domain *ed = current;
+ struct vcpu *v = current;
- ed->vcpu_info->evtchn_upcall_mask = 0;
- set_bit(_VCPUF_blocked, &ed->vcpu_flags);
+ v->vcpu_info->evtchn_upcall_mask = 0;
+ set_bit(_VCPUF_blocked, &v->vcpu_flags);
/* Check for events /after/ blocking: avoids wakeup waiting race. */
- if ( event_pending(ed) )
+ if ( event_pending(v) )
{
- clear_bit(_VCPUF_blocked, &ed->vcpu_flags);
+ clear_bit(_VCPUF_blocked, &v->vcpu_flags);
}
else
{
- TRACE_2D(TRC_SCHED_BLOCK, ed->domain->domain_id, ed->vcpu_id);
+ TRACE_2D(TRC_SCHED_BLOCK, v->domain->domain_id, v->vcpu_id);
__enter_scheduler();
}
@@ -300,12 +300,12 @@ long do_sched_op(unsigned long op)
/* Per-domain one-shot-timer hypercall. */
long do_set_timer_op(s_time_t timeout)
{
- struct exec_domain *ed = current;
+ struct vcpu *v = current;
if ( timeout == 0 )
- rem_ac_timer(&ed->timer);
+ rem_ac_timer(&v->timer);
else
- set_ac_timer(&ed->timer, timeout);
+ set_ac_timer(&v->timer, timeout);
return 0;
}
@@ -331,7 +331,7 @@ long sched_ctl(struct sched_ctl_cmd *cmd)
long sched_adjdom(struct sched_adjdom_cmd *cmd)
{
struct domain *d;
- struct exec_domain *ed;
+ struct vcpu *v;
int cpu;
#if NR_CPUS <=32
unsigned long have_lock;
@@ -354,12 +354,12 @@ long sched_adjdom(struct sched_adjdom_cmd *cmd)
if ( d == NULL )
return -ESRCH;
- /* acquire locks on all CPUs on which exec_domains of this domain run */
+ /* acquire locks on all CPUs on which vcpus of this domain run */
do {
succ = 0;
__clear_cpu_bits(have_lock);
- for_each_exec_domain(d, ed) {
- cpu = ed->processor;
+ for_each_vcpu(d, v) {
+ cpu = v->processor;
if (!__get_cpu_bit(cpu, have_lock)) {
/* if we don't have a lock on this CPU: acquire it*/
if (spin_trylock(&schedule_data[cpu].schedule_lock)) {
@@ -379,9 +379,9 @@ long sched_adjdom(struct sched_adjdom_cmd *cmd)
}
}
} while (!succ);
- //spin_lock_irq(&schedule_data[d->exec_domain[0]->processor].schedule_lock);
+ //spin_lock_irq(&schedule_data[d->vcpu[0]->processor].schedule_lock);
SCHED_OP(adjdom, d, cmd);
- //spin_unlock_irq(&schedule_data[d->exec_domain[0]->processor].schedule_lock);
+ //spin_unlock_irq(&schedule_data[d->vcpu[0]->processor].schedule_lock);
for (cpu = 0; cpu < NR_CPUS; cpu++)
if (__get_cpu_bit(cpu, have_lock))
spin_unlock(&schedule_data[cpu].schedule_lock);
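
The sched_adjdom() hunk above acquires the schedule_lock of every CPU on which one of the domain's vcpus runs, using trylock, and starts over from scratch whenever any lock is contended, so two callers that meet the locks in different orders cannot deadlock. A stand-alone pthread analogue of that all-or-retry pattern (simplified types, not the hypervisor's spinlocks; illustrative only):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NCPUS 4
static pthread_mutex_t cpu_lock[NCPUS] = {
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
};

/* Take the lock of every CPU marked in needed[]; on any failed trylock,
 * release everything acquired so far and retry from the beginning. */
static void lock_needed_cpus(const bool needed[NCPUS], bool held[NCPUS])
{
    bool succ;
    do {
        succ = true;
        for (int cpu = 0; cpu < NCPUS; cpu++)
            held[cpu] = false;
        for (int cpu = 0; cpu < NCPUS; cpu++) {
            if (!needed[cpu] || held[cpu])
                continue;
            if (pthread_mutex_trylock(&cpu_lock[cpu]) == 0) {
                held[cpu] = true;
            } else {
                for (int c = 0; c < NCPUS; c++)     /* back out and retry */
                    if (held[c])
                        pthread_mutex_unlock(&cpu_lock[c]);
                succ = false;
                break;
            }
        }
    } while (!succ);
}

static void unlock_needed_cpus(const bool held[NCPUS])
{
    for (int cpu = 0; cpu < NCPUS; cpu++)
        if (held[cpu])
            pthread_mutex_unlock(&cpu_lock[cpu]);
}

int main(void)
{
    bool needed[NCPUS] = { true, false, true, false };  /* vcpus on CPUs 0 and 2 */
    bool held[NCPUS];
    lock_needed_cpus(needed, held);
    printf("adjust scheduling parameters here\n");
    unlock_needed_cpus(held);
    return 0;
}
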
@@ -399,7 +399,7 @@ long sched_adjdom(struct sched_adjdom_cmd *cmd)
*/
static void __enter_scheduler(void)
{
- struct exec_domain *prev = current, *next = NULL;
+ struct vcpu *prev = current, *next = NULL;
int cpu = prev->processor;
s_time_t now;
struct task_slice next_slice;
@@ -477,7 +477,7 @@ static void __enter_scheduler(void)
/* No locking needed -- pointer comparison is safe :-) */
int idle_cpu(int cpu)
{
- struct exec_domain *p = schedule_data[cpu].curr;
+ struct vcpu *p = schedule_data[cpu].curr;
return p == idle_task[cpu];
}
@@ -499,15 +499,15 @@ static void s_timer_fn(void *unused)
/* Periodic tick timer: send timer event to current domain */
static void t_timer_fn(void *unused)
{
- struct exec_domain *ed = current;
- unsigned int cpu = ed->processor;
+ struct vcpu *v = current;
+ unsigned int cpu = v->processor;
schedule_data[cpu].tick++;
- if ( !is_idle_task(ed->domain) )
+ if ( !is_idle_task(v->domain) )
{
- update_dom_time(ed);
- send_guest_virq(ed, VIRQ_TIMER);
+ update_dom_time(v);
+ send_guest_virq(v, VIRQ_TIMER);
}
page_scrub_schedule_work();
@@ -518,10 +518,10 @@ static void t_timer_fn(void *unused)
/* Domain timer function, sends a virtual timer interrupt to domain */
static void dom_timer_fn(void *data)
{
- struct exec_domain *ed = data;
+ struct vcpu *v = data;
- update_dom_time(ed);
- send_guest_virq(ed, VIRQ_TIMER);
+ update_dom_time(v);
+ send_guest_virq(v, VIRQ_TIMER);
}
/* Initialise the data structures. */
diff --git a/xen/drivers/char/console.c b/xen/drivers/char/console.c
index de106c149e..6f6234c8d0 100644
--- a/xen/drivers/char/console.c
+++ b/xen/drivers/char/console.c
@@ -271,7 +271,7 @@ static void __serial_rx(char c, struct cpu_user_regs *regs)
if ( (serial_rx_prod-serial_rx_cons) != SERIAL_RX_SIZE )
serial_rx_ring[SERIAL_RX_MASK(serial_rx_prod++)] = c;
/* Always notify the guest: prevents receive path from getting stuck. */
- send_guest_virq(dom0->exec_domain[0], VIRQ_CONSOLE);
+ send_guest_virq(dom0->vcpu[0], VIRQ_CONSOLE);
}
static void serial_rx(char c, struct cpu_user_regs *regs)
diff --git a/xen/include/asm-ia64/config.h b/xen/include/asm-ia64/config.h
index 34603f5418..c419adf8be 100644
--- a/xen/include/asm-ia64/config.h
+++ b/xen/include/asm-ia64/config.h
@@ -107,13 +107,13 @@ struct page;
// initial task has a different name in Xen
//#define idle0_task init_task
-#define idle0_exec_domain init_task
+#define idle0_vcpu init_task
// avoid redefining task_t in asm/thread_info.h
#define task_t struct domain
// avoid redefining task_struct in asm/current.h
-#define task_struct exec_domain
+#define task_struct vcpu
// linux/include/asm-ia64/machvec.h (linux/arch/ia64/lib/io.c)
#define platform_inb __ia64_inb
diff --git a/xen/include/asm-ia64/domain.h b/xen/include/asm-ia64/domain.h
index 25d580431e..3365e1e08c 100644
--- a/xen/include/asm-ia64/domain.h
+++ b/xen/include/asm-ia64/domain.h
@@ -8,10 +8,10 @@
#include <asm/regionreg.h>
#endif // CONFIG_VTI
-extern void arch_do_createdomain(struct exec_domain *);
+extern void arch_do_createdomain(struct vcpu *);
extern int arch_final_setup_guestos(
- struct exec_domain *, struct vcpu_guest_context *);
+ struct vcpu *, struct vcpu_guest_context *);
extern void domain_relinquish_resources(struct domain *);
@@ -55,7 +55,7 @@ struct arch_domain {
#define xen_vaend arch.xen_vaend
#define shared_info_va arch.shared_info_va
-struct arch_exec_domain {
+struct arch_vcpu {
#if 1
TR_ENTRY itrs[NITRS];
TR_ENTRY dtrs[NDTRS];
@@ -77,7 +77,7 @@ struct arch_exec_domain {
struct mm_struct *active_mm;
struct thread_struct _thread; // this must be last
#ifdef CONFIG_VTI
- void (*schedule_tail) (struct exec_domain *);
+ void (*schedule_tail) (struct vcpu *);
struct trap_bounce trap_bounce;
thash_cb_t *vtlb;
//for phycial emulation
diff --git a/xen/include/asm-ia64/vcpu.h b/xen/include/asm-ia64/vcpu.h
index bb24bad2b2..d3ef4c229b 100644
--- a/xen/include/asm-ia64/vcpu.h
+++ b/xen/include/asm-ia64/vcpu.h
@@ -10,8 +10,8 @@
typedef unsigned long UINT64;
typedef unsigned int UINT;
typedef int BOOLEAN;
-struct exec_domain;
-typedef struct exec_domain VCPU;
+struct vcpu;
+typedef struct vcpu VCPU;
// NOTE: The actual VCPU structure (struct virtualcpu) is defined in
// thread.h. Moving it to here caused a lot of files to change, so
diff --git a/xen/include/asm-ia64/vmmu.h b/xen/include/asm-ia64/vmmu.h
index 717a7e809b..cee7d89a90 100644
--- a/xen/include/asm-ia64/vmmu.h
+++ b/xen/include/asm-ia64/vmmu.h
@@ -155,7 +155,7 @@ typedef u64 *(TTAG_FN)(PTA pta, u64 va, u64 rid, u64 ps);
typedef u64 *(GET_MFN_FN)(domid_t d, u64 gpfn, u64 pages);
typedef void *(REM_NOTIFIER_FN)(struct hash_cb *hcb, thash_data_t *entry);
typedef void (RECYCLE_FN)(struct hash_cb *hc, u64 para);
-typedef rr_t (GET_RR_FN)(struct exec_domain *vcpu, u64 reg);
+typedef rr_t (GET_RR_FN)(struct vcpu *vcpu, u64 reg);
typedef thash_data_t *(FIND_OVERLAP_FN)(struct thash_cb *hcb,
u64 va, u64 ps, int rid, char cl, search_section_t s_sect);
typedef thash_data_t *(FIND_NEXT_OVL_FN)(struct thash_cb *hcb);
@@ -204,7 +204,7 @@ typedef struct thash_cb {
GET_RR_FN *get_rr_fn;
RECYCLE_FN *recycle_notifier;
thash_cch_mem_t *cch_freelist;
- struct exec_domain *vcpu;
+ struct vcpu *vcpu;
PTA pta;
/* VTLB/VHPT common information */
FIND_OVERLAP_FN *find_overlap;
@@ -306,7 +306,7 @@ extern void thash_purge_entries_ex(thash_cb_t *hcb,
u64 rid, u64 va, u64 sz,
search_section_t p_sect,
CACHE_LINE_TYPE cl);
-extern thash_cb_t *init_domain_tlb(struct exec_domain *d);
+extern thash_cb_t *init_domain_tlb(struct vcpu *d);
/*
* Purge all TCs or VHPT entries including those in Hash table.
@@ -330,8 +330,8 @@ extern thash_data_t *vtlb_lookup_ex(thash_cb_t *hcb,
extern u64 machine_ttag(PTA pta, u64 va, u64 rid, u64 ps);
extern u64 machine_thash(PTA pta, u64 va, u64 rid, u64 ps);
extern void purge_machine_tc_by_domid(domid_t domid);
-extern void machine_tlb_insert(struct exec_domain *d, thash_data_t *tlb);
-extern rr_t vmmu_get_rr(struct exec_domain *vcpu, u64 va);
+extern void machine_tlb_insert(struct vcpu *d, thash_data_t *tlb);
+extern rr_t vmmu_get_rr(struct vcpu *vcpu, u64 va);
#define VTLB_DEBUG
#ifdef VTLB_DEBUG
diff --git a/xen/include/asm-ia64/vmx.h b/xen/include/asm-ia64/vmx.h
index e7a99959ca..3610bd5cd6 100644
--- a/xen/include/asm-ia64/vmx.h
+++ b/xen/include/asm-ia64/vmx.h
@@ -29,10 +29,10 @@ extern unsigned int vmx_enabled;
extern void vmx_init_env(void);
extern void vmx_final_setup_domain(struct domain *d);
extern void vmx_init_double_mapping_stub(void);
-extern void vmx_save_state(struct exec_domain *ed);
-extern void vmx_load_state(struct exec_domain *ed);
+extern void vmx_save_state(struct vcpu *v);
+extern void vmx_load_state(struct vcpu *v);
extern vmx_insert_double_mapping(u64,u64,u64,u64,u64);
extern void vmx_purge_double_mapping(u64, u64, u64);
-extern void vmx_change_double_mapping(struct exec_domain *ed, u64 oldrr7, u64 newrr7);
+extern void vmx_change_double_mapping(struct vcpu *v, u64 oldrr7, u64 newrr7);
#endif /* _ASM_IA64_VT_H */
diff --git a/xen/include/asm-ia64/vmx_vpd.h b/xen/include/asm-ia64/vmx_vpd.h
index fea1cc21ea..78149ba31f 100644
--- a/xen/include/asm-ia64/vmx_vpd.h
+++ b/xen/include/asm-ia64/vmx_vpd.h
@@ -113,9 +113,9 @@ typedef struct vpd {
void vmx_enter_scheduler(void);
-//FIXME: Map for LID to exec_domain, Eddie
+//FIXME: Map for LID to vcpu, Eddie
#define MAX_NUM_LPS (1UL<<16)
-extern struct exec_domain *lid_edt[MAX_NUM_LPS];
+extern struct vcpu *lid_edt[MAX_NUM_LPS];
struct arch_vmx_struct {
// struct virutal_platform_def vmx_platform;
diff --git a/xen/include/asm-x86/current.h b/xen/include/asm-x86/current.h
index 6d5cd22132..7916b57ae9 100644
--- a/xen/include/asm-x86/current.h
+++ b/xen/include/asm-x86/current.h
@@ -11,12 +11,12 @@
#include <public/xen.h>
#include <asm/page.h>
-struct exec_domain;
+struct vcpu;
struct cpu_info {
struct cpu_user_regs guest_cpu_user_regs;
unsigned int processor_id;
- struct exec_domain *current_ed;
+ struct vcpu *current_ed;
};
static inline struct cpu_info *get_cpu_info(void)
diff --git a/xen/include/asm-x86/debugger.h b/xen/include/asm-x86/debugger.h
index cff37a83a8..34ff5bdddc 100644
--- a/xen/include/asm-x86/debugger.h
+++ b/xen/include/asm-x86/debugger.h
@@ -60,7 +60,7 @@ static inline int debugger_trap_fatal(
static inline int debugger_trap_entry(
unsigned int vector, struct cpu_user_regs *regs)
{
- struct exec_domain *ed = current;
+ struct vcpu *v = current;
if ( !KERNEL_MODE(ed, regs) || (ed->domain->domain_id == 0) )
return 0;
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 7c9aa59c62..397b65d031 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -66,13 +66,13 @@ struct arch_domain
} __cacheline_aligned;
-struct arch_exec_domain
+struct arch_vcpu
{
struct vcpu_guest_context guest_context;
unsigned long flags; /* TF_ */
- void (*schedule_tail) (struct exec_domain *);
+ void (*schedule_tail) (struct vcpu *);
/* Bounce information for propagating an exception to guest OS. */
struct trap_bounce trap_bounce;
diff --git a/xen/include/asm-x86/i387.h b/xen/include/asm-x86/i387.h
index a1f0feaec0..f9216605ed 100644
--- a/xen/include/asm-x86/i387.h
+++ b/xen/include/asm-x86/i387.h
@@ -15,8 +15,8 @@
#include <asm/processor.h>
extern void init_fpu(void);
-extern void save_init_fpu(struct exec_domain *tsk);
-extern void restore_fpu(struct exec_domain *tsk);
+extern void save_init_fpu(struct vcpu *tsk);
+extern void restore_fpu(struct vcpu *tsk);
#define unlazy_fpu(_tsk) do { \
if ( test_bit(_VCPUF_fpu_dirtied, &(_tsk)->vcpu_flags) ) \
@@ -29,12 +29,12 @@ extern void restore_fpu(struct exec_domain *tsk);
} while ( 0 )
/* Make domain the FPU owner */
-static inline void setup_fpu(struct exec_domain *ed)
+static inline void setup_fpu(struct vcpu *v)
{
- if ( !test_and_set_bit(_VCPUF_fpu_dirtied, &ed->vcpu_flags) )
+ if ( !test_and_set_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) )
{
- if ( test_bit(_VCPUF_fpu_initialised, &ed->vcpu_flags) )
- restore_fpu(ed);
+ if ( test_bit(_VCPUF_fpu_initialised, &v->vcpu_flags) )
+ restore_fpu(v);
else
init_fpu();
}
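
setup_fpu() above is the lazy-FPU entry point: the first FPU touch after a context switch sets _VCPUF_fpu_dirtied and either restores the vcpu's saved state or initialises a fresh context; every later touch in the same scheduling period costs nothing. A stand-alone analogue using plain booleans instead of the hypervisor's atomic bitops (illustrative sketch only, not part of this changeset):

#include <stdio.h>
#include <stdbool.h>

struct vcpu {
    bool fpu_dirtied;       /* stands in for _VCPUF_fpu_dirtied     */
    bool fpu_initialised;   /* stands in for _VCPUF_fpu_initialised */
};

static void restore_fpu(struct vcpu *v) { (void)v; puts("restore saved FPU state"); }
static void init_fpu(struct vcpu *v)    { v->fpu_initialised = true; puts("initialise fresh FPU state"); }

static void setup_fpu(struct vcpu *v)
{
    if (!v->fpu_dirtied) {              /* test_and_set_bit(_VCPUF_fpu_dirtied) */
        v->fpu_dirtied = true;
        if (v->fpu_initialised)
            restore_fpu(v);
        else
            init_fpu(v);
    }
}

int main(void)
{
    struct vcpu v = { false, false };
    setup_fpu(&v);            /* first ever touch: initialise            */
    setup_fpu(&v);            /* already dirtied: nothing to do          */
    v.fpu_dirtied = false;    /* pretend a context switch saved the FPU  */
    setup_fpu(&v);            /* first touch after the switch: restore   */
    return 0;
}
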
diff --git a/xen/include/asm-x86/ldt.h b/xen/include/asm-x86/ldt.h
index 33de175165..8288ffa118 100644
--- a/xen/include/asm-x86/ldt.h
+++ b/xen/include/asm-x86/ldt.h
@@ -4,13 +4,13 @@
#ifndef __ASSEMBLY__
-static inline void load_LDT(struct exec_domain *ed)
+static inline void load_LDT(struct vcpu *v)
{
unsigned int cpu;
struct desc_struct *desc;
unsigned long ents;
- if ( (ents = ed->arch.guest_context.ldt_ents) == 0 )
+ if ( (ents = v->arch.guest_context.ldt_ents) == 0 )
{
__asm__ __volatile__ ( "lldt %%ax" : : "a" (0) );
}
@@ -18,9 +18,9 @@ static inline void load_LDT(struct exec_domain *ed)
{
cpu = smp_processor_id();
desc = gdt_table + __LDT(cpu) - FIRST_RESERVED_GDT_ENTRY;
- desc->a = ((LDT_VIRT_START(ed)&0xffff)<<16) | (ents*8-1);
- desc->b = (LDT_VIRT_START(ed)&(0xff<<24)) | 0x8200 |
- ((LDT_VIRT_START(ed)&0xff0000)>>16);
+ desc->a = ((LDT_VIRT_START(v)&0xffff)<<16) | (ents*8-1);
+ desc->b = (LDT_VIRT_START(v)&(0xff<<24)) | 0x8200 |
+ ((LDT_VIRT_START(v)&0xff0000)>>16);
__asm__ __volatile__ ( "lldt %%ax" : : "a" (__LDT(cpu)<<3) );
}
}
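
load_LDT() above rebuilds a GDT descriptor for the per-vcpu LDT from LDT_VIRT_START(v) and the entry count. A stand-alone sketch of how those two expressions pack the base, the byte limit and the 0x8200 attribute bits (present, DPL 0, LDT system type) into the descriptor words; the base address and entry count below are hypothetical values chosen for illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t base = 0xFEC00000u;   /* hypothetical LDT_VIRT_START(v)      */
    uint32_t ents = 32;            /* hypothetical ldt_ents               */

    uint32_t a = ((base & 0xffff) << 16) | (ents * 8 - 1);        /* base[15:0] | limit      */
    uint32_t b = (base & (0xffu << 24)) | 0x8200 |                /* base[31:24] | attrs     */
                 ((base & 0xff0000) >> 16);                       /* base[23:16]             */

    printf("desc.a = %#010x\n", (unsigned)a);
    printf("desc.b = %#010x\n", (unsigned)b);
    return 0;
}
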
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 5c7f91f127..1cc4ac2b1d 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -146,7 +146,7 @@ void init_frametable(void);
int alloc_page_type(struct pfn_info *page, unsigned int type);
void free_page_type(struct pfn_info *page, unsigned int type);
-extern void invalidate_shadow_ldt(struct exec_domain *d);
+extern void invalidate_shadow_ldt(struct vcpu *d);
extern int shadow_remove_all_write_access(
struct domain *d, unsigned long gpfn, unsigned long gmfn);
extern u32 shadow_remove_all_access( struct domain *d, unsigned long gmfn);
@@ -299,7 +299,7 @@ struct ptwr_info {
/* Info about last ptwr update batch. */
unsigned int prev_nr_updates;
/* Exec domain which created writable mapping. */
- struct exec_domain *ed;
+ struct vcpu *vcpu;
};
#define PTWR_PT_ACTIVE 0
@@ -348,5 +348,5 @@ void propagate_page_fault(unsigned long addr, u16 error_code);
int update_grant_va_mapping(unsigned long va,
l1_pgentry_t _nl1e,
struct domain *d,
- struct exec_domain *ed);
+ struct vcpu *v);
#endif /* __ASM_X86_MM_H__ */
diff --git a/xen/include/asm-x86/processor.h b/xen/include/asm-x86/processor.h
index 5e53d48860..bec90dbab0 100644
--- a/xen/include/asm-x86/processor.h
+++ b/xen/include/asm-x86/processor.h
@@ -125,14 +125,14 @@
#define TBF_INTERRUPT 8
#define TBF_FAILSAFE 16
-/* 'arch_exec_domain' flags values */
+/* 'arch_vcpu' flags values */
#define _TF_kernel_mode 0
#define TF_kernel_mode (1<<_TF_kernel_mode)
#ifndef __ASSEMBLY__
struct domain;
-struct exec_domain;
+struct vcpu;
/*
* Default implementation of macro that returns current
@@ -401,7 +401,7 @@ extern struct tss_struct init_tss[NR_CPUS];
#ifdef CONFIG_X86_32
-extern void init_int80_direct_trap(struct exec_domain *ed);
+extern void init_int80_direct_trap(struct vcpu *v);
#define set_int80_direct_trap(_ed) \
(memcpy(idt_tables[(_ed)->processor] + 0x80, \
&((_ed)->arch.int80_desc), 8))
@@ -415,14 +415,14 @@ extern void init_int80_direct_trap(struct exec_domain *ed);
extern int gpf_emulate_4gb(struct cpu_user_regs *regs);
-extern void write_ptbase(struct exec_domain *ed);
+extern void write_ptbase(struct vcpu *v);
-void destroy_gdt(struct exec_domain *d);
-long set_gdt(struct exec_domain *d,
+void destroy_gdt(struct vcpu *d);
+long set_gdt(struct vcpu *d,
unsigned long *frames,
unsigned int entries);
-long set_debugreg(struct exec_domain *p, int reg, unsigned long value);
+long set_debugreg(struct vcpu *p, int reg, unsigned long value);
struct microcode_header {
unsigned int hdrver;
diff --git a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
index d9eb792786..6099a6e8a3 100644
--- a/xen/include/asm-x86/shadow.h
+++ b/xen/include/asm-x86/shadow.h
@@ -59,7 +59,7 @@
#define shadow_linear_pg_table ((l1_pgentry_t *)SH_LINEAR_PT_VIRT_START)
#define __shadow_linear_l2_table ((l2_pgentry_t *)(SH_LINEAR_PT_VIRT_START + \
(SH_LINEAR_PT_VIRT_START >> (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT))))
-#define shadow_linear_l2_table(_ed) ((_ed)->arch.shadow_vtable)
+#define shadow_linear_l2_table(_v) ((_v)->arch.shadow_vtable)
// easy access to the hl2 table (for translated but not external modes only)
#define __linear_hl2_table ((l1_pgentry_t *)(LINEAR_PT_VIRT_START + \
@@ -113,12 +113,12 @@ extern void shadow_mode_init(void);
extern int shadow_mode_control(struct domain *p, dom0_shadow_control_t *sc);
extern int shadow_fault(unsigned long va, struct cpu_user_regs *regs);
extern int shadow_mode_enable(struct domain *p, unsigned int mode);
-extern void shadow_invlpg(struct exec_domain *, unsigned long);
+extern void shadow_invlpg(struct vcpu *, unsigned long);
extern struct out_of_sync_entry *shadow_mark_mfn_out_of_sync(
- struct exec_domain *ed, unsigned long gpfn, unsigned long mfn);
-extern void free_monitor_pagetable(struct exec_domain *ed);
+ struct vcpu *v, unsigned long gpfn, unsigned long mfn);
+extern void free_monitor_pagetable(struct vcpu *v);
extern void __shadow_sync_all(struct domain *d);
-extern int __shadow_out_of_sync(struct exec_domain *ed, unsigned long va);
+extern int __shadow_out_of_sync(struct vcpu *v, unsigned long va);
extern int set_p2m_entry(
struct domain *d, unsigned long pfn, unsigned long mfn,
struct map_dom_mem_cache *l2cache,
@@ -143,12 +143,12 @@ extern void shadow_l4_normal_pt_update(struct domain *d,
#endif
extern int shadow_do_update_va_mapping(unsigned long va,
l1_pgentry_t val,
- struct exec_domain *ed);
+ struct vcpu *v);
static inline unsigned long __shadow_status(
struct domain *d, unsigned long gpfn, unsigned long stype);
-static inline void update_hl2e(struct exec_domain *ed, unsigned long va);
+static inline void update_hl2e(struct vcpu *v, unsigned long va);
extern void vmx_shadow_clear_state(struct domain *);
@@ -199,23 +199,23 @@ __shadow_sync_mfn(struct domain *d, unsigned long mfn)
}
static void inline
-__shadow_sync_va(struct exec_domain *ed, unsigned long va)
+__shadow_sync_va(struct vcpu *v, unsigned long va)
{
- struct domain *d = ed->domain;
+ struct domain *d = v->domain;
- if ( d->arch.out_of_sync && __shadow_out_of_sync(ed, va) )
+ if ( d->arch.out_of_sync && __shadow_out_of_sync(v, va) )
{
perfc_incrc(shadow_sync_va);
// XXX - could be smarter
//
- __shadow_sync_all(ed->domain);
+ __shadow_sync_all(v->domain);
}
// Also make sure the HL2 is up-to-date for this address.
//
- if ( unlikely(shadow_mode_translate(ed->domain)) )
- update_hl2e(ed, va);
+ if ( unlikely(shadow_mode_translate(v->domain)) )
+ update_hl2e(v, va);
}
static void inline
@@ -239,13 +239,13 @@ shadow_sync_all(struct domain *d)
// This probably shouldn't exist.
//
static void inline
-shadow_sync_va(struct exec_domain *ed, unsigned long gva)
+shadow_sync_va(struct vcpu *v, unsigned long gva)
{
- struct domain *d = ed->domain;
+ struct domain *d = v->domain;
if ( unlikely(shadow_mode_enabled(d)) )
{
shadow_lock(d);
- __shadow_sync_va(ed, gva);
+ __shadow_sync_va(v, gva);
shadow_unlock(d);
}
}
@@ -506,56 +506,56 @@ static inline int mark_dirty(struct domain *d, unsigned int mfn)
static inline void
__shadow_get_l2e(
- struct exec_domain *ed, unsigned long va, l2_pgentry_t *psl2e)
+ struct vcpu *v, unsigned long va, l2_pgentry_t *psl2e)
{
- ASSERT(shadow_mode_enabled(ed->domain));
+ ASSERT(shadow_mode_enabled(v->domain));
- *psl2e = ed->arch.shadow_vtable[l2_table_offset(va)];
+ *psl2e = v->arch.shadow_vtable[l2_table_offset(va)];
}
static inline void
__shadow_set_l2e(
- struct exec_domain *ed, unsigned long va, l2_pgentry_t value)
+ struct vcpu *v, unsigned long va, l2_pgentry_t value)
{
- ASSERT(shadow_mode_enabled(ed->domain));
+ ASSERT(shadow_mode_enabled(v->domain));
- ed->arch.shadow_vtable[l2_table_offset(va)] = value;
+ v->arch.shadow_vtable[l2_table_offset(va)] = value;
}
static inline void
__guest_get_l2e(
- struct exec_domain *ed, unsigned long va, l2_pgentry_t *pl2e)
+ struct vcpu *v, unsigned long va, l2_pgentry_t *pl2e)
{
- *pl2e = ed->arch.guest_vtable[l2_table_offset(va)];
+ *pl2e = v->arch.guest_vtable[l2_table_offset(va)];
}
static inline void
__guest_set_l2e(
- struct exec_domain *ed, unsigned long va, l2_pgentry_t value)
+ struct vcpu *v, unsigned long va, l2_pgentry_t value)
{
- struct domain *d = ed->domain;
+ struct domain *d = v->domain;
- ed->arch.guest_vtable[l2_table_offset(va)] = value;
+ v->arch.guest_vtable[l2_table_offset(va)] = value;
if ( unlikely(shadow_mode_translate(d)) )
- update_hl2e(ed, va);
+ update_hl2e(v, va);
if ( unlikely(shadow_mode_log_dirty(d)) )
- __mark_dirty(d, pagetable_get_pfn(ed->arch.guest_table));
+ __mark_dirty(d, pagetable_get_pfn(v->arch.guest_table));
}
static inline void
-update_hl2e(struct exec_domain *ed, unsigned long va)
+update_hl2e(struct vcpu *v, unsigned long va)
{
int index = l2_table_offset(va);
unsigned long mfn;
- l2_pgentry_t gl2e = ed->arch.guest_vtable[index];
+ l2_pgentry_t gl2e = v->arch.guest_vtable[index];
l1_pgentry_t old_hl2e, new_hl2e;
int need_flush = 0;
- ASSERT(shadow_mode_translate(ed->domain));
+ ASSERT(shadow_mode_translate(v->domain));
- old_hl2e = ed->arch.hl2_vtable[index];
+ old_hl2e = v->arch.hl2_vtable[index];
if ( (l2e_get_flags(gl2e) & _PAGE_PRESENT) &&
VALID_MFN(mfn = phys_to_machine_mapping(l2e_get_pfn(gl2e))) )
@@ -568,16 +568,16 @@ update_hl2e(struct exec_domain *ed, unsigned long va)
if ( (l1e_has_changed(old_hl2e, new_hl2e, PAGE_FLAG_MASK)) )
{
if ( (l1e_get_flags(new_hl2e) & _PAGE_PRESENT) &&
- !shadow_get_page(ed->domain, pfn_to_page(l1e_get_pfn(new_hl2e)),
- ed->domain) )
+ !shadow_get_page(v->domain, pfn_to_page(l1e_get_pfn(new_hl2e)),
+ v->domain) )
new_hl2e = l1e_empty();
if ( l1e_get_flags(old_hl2e) & _PAGE_PRESENT )
{
- shadow_put_page(ed->domain, pfn_to_page(l1e_get_pfn(old_hl2e)));
+ shadow_put_page(v->domain, pfn_to_page(l1e_get_pfn(old_hl2e)));
need_flush = 1;
}
- ed->arch.hl2_vtable[l2_table_offset(va)] = new_hl2e;
+ v->arch.hl2_vtable[l2_table_offset(va)] = new_hl2e;
if ( need_flush )
{
@@ -713,14 +713,14 @@ shadow_unpin(unsigned long smfn)
/************************************************************************/
extern void shadow_mark_va_out_of_sync(
- struct exec_domain *ed, unsigned long gpfn, unsigned long mfn,
+ struct vcpu *v, unsigned long gpfn, unsigned long mfn,
unsigned long va);
static inline int l1pte_write_fault(
- struct exec_domain *ed, l1_pgentry_t *gpte_p, l1_pgentry_t *spte_p,
+ struct vcpu *v, l1_pgentry_t *gpte_p, l1_pgentry_t *spte_p,
unsigned long va)
{
- struct domain *d = ed->domain;
+ struct domain *d = v->domain;
l1_pgentry_t gpte = *gpte_p;
l1_pgentry_t spte;
unsigned long gpfn = l1e_get_pfn(gpte);
@@ -746,7 +746,7 @@ static inline int l1pte_write_fault(
__mark_dirty(d, gmfn);
if ( mfn_is_page_table(gmfn) )
- shadow_mark_va_out_of_sync(ed, gpfn, gmfn, va);
+ shadow_mark_va_out_of_sync(v, gpfn, gmfn, va);
*gpte_p = gpte;
*spte_p = spte;
@@ -1542,11 +1542,11 @@ extern void shadow_map_l1_into_current_l2(unsigned long va);
void static inline
shadow_set_l1e(unsigned long va, l1_pgentry_t new_spte, int create_l1_shadow)
{
- struct exec_domain *ed = current;
- struct domain *d = ed->domain;
+ struct vcpu *v = current;
+ struct domain *d = v->domain;
l2_pgentry_t sl2e;
- __shadow_get_l2e(ed, va, &sl2e);
+ __shadow_get_l2e(v, va, &sl2e);
if ( !(l2e_get_flags(sl2e) & _PAGE_PRESENT) )
{
/*
@@ -1560,7 +1560,7 @@ shadow_set_l1e(unsigned long va, l1_pgentry_t new_spte, int create_l1_shadow)
}
else /* check to see if it exists; if so, link it in */
{
- l2_pgentry_t gpde = linear_l2_table(ed)[l2_table_offset(va)];
+ l2_pgentry_t gpde = linear_l2_table(v)[l2_table_offset(va)];
unsigned long gl1pfn = l2e_get_pfn(gpde);
unsigned long sl1mfn = __shadow_status(d, gl1pfn, PGT_l1_shadow);
@@ -1572,8 +1572,8 @@ shadow_set_l1e(unsigned long va, l1_pgentry_t new_spte, int create_l1_shadow)
if ( !get_shadow_ref(sl1mfn) )
BUG();
l2pde_general(d, &gpde, &sl2e, sl1mfn);
- __guest_set_l2e(ed, va, gpde);
- __shadow_set_l2e(ed, va, sl2e);
+ __guest_set_l2e(v, va, gpde);
+ __shadow_set_l2e(v, va, sl2e);
}
else
{
@@ -1632,18 +1632,18 @@ static inline l1_pgentry_t gva_to_gpte(unsigned long gva)
{
l2_pgentry_t gpde;
l1_pgentry_t gpte;
- struct exec_domain *ed = current;
+ struct vcpu *v = current;
ASSERT( shadow_mode_translate(current->domain) );
- __guest_get_l2e(ed, gva, &gpde);
+ __guest_get_l2e(v, gva, &gpde);
if ( unlikely(!(l2e_get_flags(gpde) & _PAGE_PRESENT)) )
return l1e_empty();;
// This is actually overkill - we only need to make sure the hl2
// is in-sync.
//
- shadow_sync_va(ed, gva);
+ shadow_sync_va(v, gva);
if ( unlikely(__copy_from_user(&gpte,
&linear_pg_table[gva >> PAGE_SHIFT],
@@ -1669,22 +1669,22 @@ static inline unsigned long gva_to_gpa(unsigned long gva)
/************************************************************************/
-extern void __update_pagetables(struct exec_domain *ed);
-static inline void update_pagetables(struct exec_domain *ed)
+extern void __update_pagetables(struct vcpu *v);
+static inline void update_pagetables(struct vcpu *v)
{
- struct domain *d = ed->domain;
+ struct domain *d = v->domain;
int paging_enabled;
#ifdef CONFIG_VMX
- if ( VMX_DOMAIN(ed) )
- paging_enabled = vmx_paging_enabled(ed);
+ if ( VMX_DOMAIN(v) )
+ paging_enabled = vmx_paging_enabled(v);
else
#endif
// HACK ALERT: there's currently no easy way to figure out if a domU
// has set its arch.guest_table to zero, vs not yet initialized it.
//
- paging_enabled = !!pagetable_get_paddr(ed->arch.guest_table);
+ paging_enabled = !!pagetable_get_paddr(v->arch.guest_table);
/*
* We don't call __update_pagetables() when vmx guest paging is
@@ -1695,33 +1695,33 @@ static inline void update_pagetables(struct exec_domain *ed)
if ( unlikely(shadow_mode_enabled(d)) && paging_enabled )
{
shadow_lock(d);
- __update_pagetables(ed);
+ __update_pagetables(v);
shadow_unlock(d);
}
if ( likely(!shadow_mode_external(d)) )
{
#ifdef __x86_64__
- if ( !(ed->arch.flags & TF_kernel_mode) )
- ed->arch.monitor_table = ed->arch.guest_table_user;
+ if ( !(v->arch.flags & TF_kernel_mode) )
+ v->arch.monitor_table = v->arch.guest_table_user;
else
#endif
if ( shadow_mode_enabled(d) )
- ed->arch.monitor_table = ed->arch.shadow_table;
+ v->arch.monitor_table = v->arch.shadow_table;
else
- ed->arch.monitor_table = ed->arch.guest_table;
+ v->arch.monitor_table = v->arch.guest_table;
}
}
#if SHADOW_DEBUG
-extern int _check_pagetable(struct exec_domain *ed, char *s);
-extern int _check_all_pagetables(struct exec_domain *ed, char *s);
+extern int _check_pagetable(struct vcpu *v, char *s);
+extern int _check_all_pagetables(struct vcpu *v, char *s);
-#define check_pagetable(_ed, _s) _check_pagetable(_ed, _s)
-//#define check_pagetable(_ed, _s) _check_all_pagetables(_ed, _s)
+#define check_pagetable(_v, _s) _check_pagetable(_v, _s)
+//#define check_pagetable(_v, _s) _check_all_pagetables(_v, _s)
#else
-#define check_pagetable(_ed, _s) ((void)0)
+#define check_pagetable(_v, _s) ((void)0)
#endif
#endif /* XEN_SHADOW_H */
diff --git a/xen/include/asm-x86/vmx.h b/xen/include/asm-x86/vmx.h
index a66ebb5f3f..cd1145564c 100644
--- a/xen/include/asm-x86/vmx.h
+++ b/xen/include/asm-x86/vmx.h
@@ -29,10 +29,10 @@
extern void vmx_asm_vmexit_handler(struct cpu_user_regs);
extern void vmx_asm_do_resume(void);
extern void vmx_asm_do_launch(void);
-extern void vmx_intr_assist(struct exec_domain *d);
+extern void vmx_intr_assist(struct vcpu *d);
-extern void arch_vmx_do_launch(struct exec_domain *);
-extern void arch_vmx_do_resume(struct exec_domain *);
+extern void arch_vmx_do_launch(struct vcpu *);
+extern void arch_vmx_do_resume(struct vcpu *);
extern int vmcs_size;
extern unsigned int cpu_rev;
@@ -296,7 +296,7 @@ static inline void vmx_stts()
}
/* Works only for ed == current */
-static inline int vmx_paging_enabled(struct exec_domain *ed)
+static inline int vmx_paging_enabled(struct vcpu *v)
{
unsigned long cr0;
diff --git a/xen/include/asm-x86/vmx_platform.h b/xen/include/asm-x86/vmx_platform.h
index 07fdf408cf..2382ebbc7a 100644
--- a/xen/include/asm-x86/vmx_platform.h
+++ b/xen/include/asm-x86/vmx_platform.h
@@ -86,7 +86,7 @@ struct virutal_platform_def {
extern void handle_mmio(unsigned long, unsigned long);
extern void vmx_wait_io(void);
-extern int vmx_setup_platform(struct exec_domain *, struct cpu_user_regs *);
+extern int vmx_setup_platform(struct vcpu *, struct cpu_user_regs *);
// XXX - think about this -- maybe use bit 30 of the mfn to signify an MMIO frame.
#define mmio_space(gpa) (!VALID_MFN(phys_to_machine_mapping((gpa) >> PAGE_SHIFT)))
diff --git a/xen/include/asm-x86/vmx_virpit.h b/xen/include/asm-x86/vmx_virpit.h
index 35fe69c844..1ba907cf3c 100644
--- a/xen/include/asm-x86/vmx_virpit.h
+++ b/xen/include/asm-x86/vmx_virpit.h
@@ -37,6 +37,6 @@ struct vmx_virpit_t {
} ;
/* to hook the ioreq packet to get the PIT initializaiton info */
-extern void vmx_hooks_assist(struct exec_domain *d);
+extern void vmx_hooks_assist(struct vcpu *d);
#endif /* _VMX_VIRPIT_H_ */
diff --git a/xen/include/asm-x86/vmx_vmcs.h b/xen/include/asm-x86/vmx_vmcs.h
index 7ccfac0b6b..672d9fa47d 100644
--- a/xen/include/asm-x86/vmx_vmcs.h
+++ b/xen/include/asm-x86/vmx_vmcs.h
@@ -55,8 +55,8 @@ struct arch_vmx_struct {
#define ARCH_VMX_VMCS_RESUME 2 /* Needs VMCS resume */
#define ARCH_VMX_IO_WAIT 3 /* Waiting for I/O completion */
-void vmx_do_launch(struct exec_domain *);
-void vmx_do_resume(struct exec_domain *);
+void vmx_do_launch(struct vcpu *);
+void vmx_do_resume(struct vcpu *);
struct vmcs_struct *alloc_vmcs(void);
void free_vmcs(struct vmcs_struct *);
diff --git a/xen/include/public/event_channel.h b/xen/include/public/event_channel.h
index 8d1025ff29..a1973c6ea7 100644
--- a/xen/include/public/event_channel.h
+++ b/xen/include/public/event_channel.h
@@ -141,7 +141,7 @@ typedef struct {
} PACKED interdomain; /* EVTCHNSTAT_interdomain */
u32 pirq; /* EVTCHNSTAT_pirq */ /* 12 */
u32 virq; /* EVTCHNSTAT_virq */ /* 12 */
- u32 ipi_edom; /* EVTCHNSTAT_ipi */ /* 12 */
+ u32 ipi_vcpu; /* EVTCHNSTAT_ipi */ /* 12 */
} PACKED u;
} PACKED evtchn_status_t; /* 20 bytes */
@@ -151,7 +151,7 @@ typedef struct {
#define EVTCHNOP_bind_ipi 7
typedef struct {
/* IN parameters. */
- u32 ipi_edom; /* 0 */
+ u32 ipi_vcpu; /* 0 */
/* OUT parameters. */
u32 port; /* 4 */
} PACKED evtchn_bind_ipi_t; /* 8 bytes */
diff --git a/xen/include/xen/domain.h b/xen/include/xen/domain.h
index d7f135db2e..46c2e4bb5d 100644
--- a/xen/include/xen/domain.h
+++ b/xen/include/xen/domain.h
@@ -6,16 +6,16 @@
* Arch-specifics.
*/
-struct exec_domain *arch_alloc_exec_domain_struct(void);
+struct vcpu *arch_alloc_vcpu_struct(void);
-extern void arch_free_exec_domain_struct(struct exec_domain *ed);
+extern void arch_free_vcpu_struct(struct vcpu *v);
-extern void arch_do_createdomain(struct exec_domain *ed);
+extern void arch_do_createdomain(struct vcpu *v);
-extern void arch_do_boot_vcpu(struct exec_domain *ed);
+extern void arch_do_boot_vcpu(struct vcpu *v);
extern int arch_set_info_guest(
- struct exec_domain *d, struct vcpu_guest_context *c);
+ struct vcpu *d, struct vcpu_guest_context *c);
extern void free_perdomain_pt(struct domain *d);
diff --git a/xen/include/xen/event.h b/xen/include/xen/event.h
index 2ae5d94895..a7d911771d 100644
--- a/xen/include/xen/event.h
+++ b/xen/include/xen/event.h
@@ -21,19 +21,19 @@
* may require explicit memory barriers.
*/
-static inline void evtchn_set_pending(struct exec_domain *ed, int port)
+static inline void evtchn_set_pending(struct vcpu *v, int port)
{
- struct domain *d = ed->domain;
+ struct domain *d = v->domain;
shared_info_t *s = d->shared_info;
int running;
/* These three operations must happen in strict order. */
if ( !test_and_set_bit(port, &s->evtchn_pending[0]) &&
!test_bit (port, &s->evtchn_mask[0]) &&
- !test_and_set_bit(port>>5, &ed->vcpu_info->evtchn_pending_sel) )
+ !test_and_set_bit(port>>5, &v->vcpu_info->evtchn_pending_sel) )
{
/* The VCPU pending flag must be set /after/ update to evtchn-pend. */
- set_bit(0, &ed->vcpu_info->evtchn_upcall_pending);
+ set_bit(0, &v->vcpu_info->evtchn_upcall_pending);
/*
* NB1. 'vcpu_flags' and 'processor' must be checked /after/ update of
@@ -44,10 +44,10 @@ static inline void evtchn_set_pending(struct exec_domain *ed, int port)
* NB2. We save VCPUF_running across the unblock to avoid a needless
* IPI for domains that we IPI'd to unblock.
*/
- running = test_bit(_VCPUF_running, &ed->vcpu_flags);
- exec_domain_unblock(ed);
+ running = test_bit(_VCPUF_running, &v->vcpu_flags);
+ vcpu_unblock(v);
if ( running )
- smp_send_event_check_cpu(ed->processor);
+ smp_send_event_check_cpu(v->processor);
}
}
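
evtchn_set_pending() above performs its updates in a fixed order: the per-port pending bit, then the per-vcpu selector word, then the single upcall flag, so a guest scanning top-down (upcall flag, selector, port word) always finds the port bit once it has seen the upcall flag. A stand-alone sketch with a simplified layout, not Xen's shared_info; the real code also honours evtchn_mask and uses test-and-set to suppress duplicate notifications, both omitted here:

#include <stdio.h>
#include <stdint.h>

static uint32_t evtchn_pending[32];      /* one bit per port           */
static uint32_t evtchn_pending_sel;      /* one bit per 32-port word   */
static int      evtchn_upcall_pending;   /* "something is pending"     */

static void set_pending(int port)
{
    evtchn_pending[port >> 5] |= 1u << (port & 31);   /* 1: port bit     */
    evtchn_pending_sel        |= 1u << (port >> 5);   /* 2: selector bit */
    evtchn_upcall_pending      = 1;                   /* 3: upcall flag  */
}

static void guest_scan(void)
{
    if (!evtchn_upcall_pending)
        return;
    evtchn_upcall_pending = 0;
    for (int word = 0; word < 32; word++) {
        if (!(evtchn_pending_sel & (1u << word)))
            continue;
        evtchn_pending_sel &= ~(1u << word);
        for (int bit = 0; bit < 32; bit++) {
            if (evtchn_pending[word] & (1u << bit)) {
                evtchn_pending[word] &= ~(1u << bit);
                printf("event on port %d\n", word * 32 + bit);
            }
        }
    }
}

int main(void)
{
    set_pending(3);
    set_pending(40);
    guest_scan();
    return 0;
}
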
@@ -56,12 +56,12 @@ static inline void evtchn_set_pending(struct exec_domain *ed, int port)
* @d: Domain to which virtual IRQ should be sent
* @virq: Virtual IRQ number (VIRQ_*)
*/
-static inline void send_guest_virq(struct exec_domain *ed, int virq)
+static inline void send_guest_virq(struct vcpu *v, int virq)
{
- int port = ed->virq_to_evtchn[virq];
+ int port = v->virq_to_evtchn[virq];
if ( likely(port != 0) )
- evtchn_set_pending(ed, port);
+ evtchn_set_pending(v, port);
}
/*
@@ -69,9 +69,9 @@ static inline void send_guest_virq(struct exec_domain *ed, int virq)
* @d: Domain to which physical IRQ should be sent
* @pirq: Physical IRQ number
*/
-static inline void send_guest_pirq(struct exec_domain *ed, int pirq)
+static inline void send_guest_pirq(struct vcpu *v, int pirq)
{
- evtchn_set_pending(ed, ed->domain->pirq_to_evtchn[pirq]);
+ evtchn_set_pending(v, v->domain->pirq_to_evtchn[pirq]);
}
#define event_pending(_d) \
diff --git a/xen/include/xen/irq.h b/xen/include/xen/irq.h
index f419f0c182..0bc8558ae0 100644
--- a/xen/include/xen/irq.h
+++ b/xen/include/xen/irq.h
@@ -67,9 +67,9 @@ extern hw_irq_controller no_irq_type;
extern void no_action(int cpl, void *dev_id, struct cpu_user_regs *regs);
struct domain;
-struct exec_domain;
+struct vcpu;
extern int pirq_guest_unmask(struct domain *p);
-extern int pirq_guest_bind(struct exec_domain *p, int irq, int will_share);
+extern int pirq_guest_bind(struct vcpu *p, int irq, int will_share);
extern int pirq_guest_unbind(struct domain *p, int irq);
extern int pirq_guest_bindable(int irq, int will_share);
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 47fb6452f0..58c33e8b3c 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -13,8 +13,8 @@
struct schedule_data {
spinlock_t schedule_lock; /* spinlock protecting curr */
- struct exec_domain *curr; /* current task */
- struct exec_domain *idle; /* idle task for this cpu */
+ struct vcpu *curr; /* current task */
+ struct vcpu *idle; /* idle task for this cpu */
void *sched_priv;
struct ac_timer s_timer; /* scheduling timer */
unsigned long tick; /* current periodic 'tick' */
@@ -24,7 +24,7 @@ struct schedule_data {
} __cacheline_aligned;
struct task_slice {
- struct exec_domain *task;
+ struct vcpu *task;
s_time_t time;
};
@@ -33,12 +33,12 @@ struct scheduler {
char *opt_name; /* option name for this scheduler */
unsigned int sched_id; /* ID for this scheduler */
- int (*alloc_task) (struct exec_domain *);
- void (*add_task) (struct exec_domain *);
+ int (*alloc_task) (struct vcpu *);
+ void (*add_task) (struct vcpu *);
void (*free_task) (struct domain *);
- void (*rem_task) (struct exec_domain *);
- void (*sleep) (struct exec_domain *);
- void (*wake) (struct exec_domain *);
+ void (*rem_task) (struct vcpu *);
+ void (*sleep) (struct vcpu *);
+ void (*wake) (struct vcpu *);
struct task_slice (*do_schedule) (s_time_t);
int (*control) (struct sched_ctl_cmd *);
int (*adjdom) (struct domain *,
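
The sched-if.h hunk above retypes every per-task scheduler hook to take a struct vcpu. A stand-alone analogue (heavily simplified types, not the hypervisor's) of how a scheduler instance wires vcpu-typed callbacks into such an ops structure:

#include <stdio.h>

struct vcpu { int vcpu_id; };

struct scheduler {
    char *opt_name;
    void (*add_task)(struct vcpu *);
    void (*wake)(struct vcpu *);
};

static void demo_add_task(struct vcpu *v) { printf("add vcpu %d\n", v->vcpu_id); }
static void demo_wake(struct vcpu *v)     { printf("wake vcpu %d\n", v->vcpu_id); }

static struct scheduler demo_sched = {
    .opt_name = "demo",
    .add_task = demo_add_task,
    .wake     = demo_wake,
};

int main(void)
{
    struct vcpu v = { 0 };
    demo_sched.add_task(&v);
    demo_sched.wake(&v);
    return 0;
}
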
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index a1a24ddf6e..3376f9e30e 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -33,21 +33,21 @@ typedef struct event_channel_st
} __attribute__ ((packed)) unbound; /* state == ECS_UNBOUND */
struct {
u16 remote_port;
- struct exec_domain *remote_dom;
+ struct vcpu *remote_dom;
} __attribute__ ((packed)) interdomain; /* state == ECS_INTERDOMAIN */
u16 pirq; /* state == ECS_PIRQ */
u16 virq; /* state == ECS_VIRQ */
- u32 ipi_edom; /* state == ECS_IPI */
+ u32 ipi_vcpu; /* state == ECS_IPI */
} u;
} event_channel_t;
int init_event_channels(struct domain *d);
void destroy_event_channels(struct domain *d);
-int init_exec_domain_event_channels(struct exec_domain *ed);
+int init_vcpu_event_channels(struct vcpu *v);
#define CPUMAP_RUNANYWHERE 0xFFFFFFFF
-struct exec_domain
+struct vcpu
{
int vcpu_id;
@@ -56,7 +56,7 @@ struct exec_domain
vcpu_info_t *vcpu_info;
struct domain *domain;
- struct exec_domain *next_in_list;
+ struct vcpu *next_in_list;
struct ac_timer timer; /* one-shot timer for timeout values */
unsigned long sleep_tick; /* tick at which this vcpu started sleep */
@@ -75,7 +75,7 @@ struct exec_domain
cpumap_t cpumap; /* which cpus this domain can run on */
- struct arch_exec_domain arch;
+ struct arch_vcpu arch;
};
/* Per-domain lock can be recursively acquired in fault handlers. */
@@ -127,7 +127,7 @@ struct domain
atomic_t refcnt;
- struct exec_domain *exec_domain[MAX_VIRT_CPUS];
+ struct vcpu *vcpu[MAX_VIRT_CPUS];
/* Bitmask of CPUs on which this domain is running. */
unsigned long cpuset;
@@ -155,13 +155,13 @@ struct domain_setup_info
};
extern struct domain idle0_domain;
-extern struct exec_domain idle0_exec_domain;
+extern struct vcpu idle0_vcpu;
-extern struct exec_domain *idle_task[NR_CPUS];
+extern struct vcpu *idle_task[NR_CPUS];
#define IDLE_DOMAIN_ID (0x7FFFU)
#define is_idle_task(_d) (test_bit(_DOMF_idle_domain, &(_d)->domain_flags))
-struct exec_domain *alloc_exec_domain_struct(struct domain *d,
+struct vcpu *alloc_vcpu_struct(struct domain *d,
unsigned long vcpu);
void free_domain_struct(struct domain *d);
@@ -226,7 +226,7 @@ extern void domain_crash(void);
*/
extern void domain_crash_synchronous(void) __attribute__((noreturn));
-void new_thread(struct exec_domain *d,
+void new_thread(struct vcpu *d,
unsigned long start_pc,
unsigned long start_stack,
unsigned long start_info);
@@ -234,14 +234,14 @@ void new_thread(struct exec_domain *d,
#define set_current_state(_s) do { current->state = (_s); } while (0)
void scheduler_init(void);
void schedulers_start(void);
-void sched_add_domain(struct exec_domain *);
-void sched_rem_domain(struct exec_domain *);
+void sched_add_domain(struct vcpu *);
+void sched_rem_domain(struct vcpu *);
long sched_ctl(struct sched_ctl_cmd *);
long sched_adjdom(struct sched_adjdom_cmd *);
int sched_id();
-void domain_wake(struct exec_domain *d);
-void domain_sleep_nosync(struct exec_domain *d);
-void domain_sleep_sync(struct exec_domain *d);
+void domain_wake(struct vcpu *d);
+void domain_sleep_nosync(struct vcpu *d);
+void domain_sleep_sync(struct vcpu *d);
/*
* Force loading of currently-executing domain state on the specified set
@@ -251,14 +251,14 @@ extern void sync_lazy_execstate_cpuset(unsigned long cpuset);
extern void sync_lazy_execstate_all(void);
extern int __sync_lazy_execstate(void);
-/* Called by the scheduler to switch to another exec_domain. */
+/* Called by the scheduler to switch to another vcpu. */
extern void context_switch(
- struct exec_domain *prev,
- struct exec_domain *next);
+ struct vcpu *prev,
+ struct vcpu *next);
-/* Called by the scheduler to continue running the current exec_domain. */
+/* Called by the scheduler to continue running the current vcpu. */
extern void continue_running(
- struct exec_domain *same);
+ struct vcpu *same);
int idle_cpu(int cpu); /* Is CPU 'cpu' idle right now? */
@@ -305,8 +305,8 @@ extern struct domain *domain_list;
#define for_each_domain(_d) \
for ( (_d) = domain_list; (_d) != NULL; (_d) = (_d)->next_in_list )
-#define for_each_exec_domain(_d,_ed) \
- for ( (_ed) = (_d)->exec_domain[0]; \
+#define for_each_vcpu(_d,_ed) \
+ for ( (_ed) = (_d)->vcpu[0]; \
(_ed) != NULL; \
(_ed) = (_ed)->next_in_list )
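
for_each_vcpu() replaces for_each_exec_domain() but still walks the domain's singly linked vcpu list starting at vcpu[0]. A stand-alone sketch of the renamed iteration using simplified stand-in types rather than Xen's struct definitions:

#include <stdio.h>
#include <stddef.h>

struct vcpu   { int vcpu_id; struct vcpu *next_in_list; };
struct domain { struct vcpu *vcpu[4]; };

#define for_each_vcpu(_d, _v)                 \
    for ( (_v) = (_d)->vcpu[0];               \
          (_v) != NULL;                       \
          (_v) = (_v)->next_in_list )

int main(void)
{
    struct vcpu v1 = { 1, NULL }, v0 = { 0, &v1 };
    struct domain d = { { &v0 } };
    struct vcpu *v;

    for_each_vcpu(&d, v)
        printf("vcpu %d\n", v->vcpu_id);
    return 0;
}
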
@@ -366,24 +366,24 @@ extern struct domain *domain_list;
#define _DOMF_dying 6
#define DOMF_dying (1UL<<_DOMF_dying)
-static inline int domain_runnable(struct exec_domain *ed)
+static inline int domain_runnable(struct vcpu *v)
{
- return ( (atomic_read(&ed->pausecnt) == 0) &&
- !(ed->vcpu_flags & (VCPUF_blocked|VCPUF_ctrl_pause)) &&
- !(ed->domain->domain_flags & (DOMF_shutdown|DOMF_shuttingdown)) );
+ return ( (atomic_read(&v->pausecnt) == 0) &&
+ !(v->vcpu_flags & (VCPUF_blocked|VCPUF_ctrl_pause)) &&
+ !(v->domain->domain_flags & (DOMF_shutdown|DOMF_shuttingdown)) );
}
-void exec_domain_pause(struct exec_domain *ed);
+void vcpu_pause(struct vcpu *v);
void domain_pause(struct domain *d);
-void exec_domain_unpause(struct exec_domain *ed);
+void vcpu_unpause(struct vcpu *v);
void domain_unpause(struct domain *d);
void domain_pause_by_systemcontroller(struct domain *d);
void domain_unpause_by_systemcontroller(struct domain *d);
-static inline void exec_domain_unblock(struct exec_domain *ed)
+static inline void vcpu_unblock(struct vcpu *v)
{
- if ( test_and_clear_bit(_VCPUF_blocked, &ed->vcpu_flags) )
- domain_wake(ed);
+ if ( test_and_clear_bit(_VCPUF_blocked, &v->vcpu_flags) )
+ domain_wake(v);
}
#define IS_PRIV(_d) \
diff --git a/xen/include/xen/time.h b/xen/include/xen/time.h
index 104b611212..d0091b6a10 100644
--- a/xen/include/xen/time.h
+++ b/xen/include/xen/time.h
@@ -54,7 +54,7 @@ s_time_t get_s_time(void);
#define MILLISECS(_ms) (((s_time_t)(_ms)) * 1000000ULL )
#define MICROSECS(_us) (((s_time_t)(_us)) * 1000ULL )
-extern void update_dom_time(struct exec_domain *ed);
+extern void update_dom_time(struct vcpu *v);
extern void do_settime(
unsigned long secs, unsigned long usecs, u64 system_time_base);
diff --git a/xen/include/xen/types.h b/xen/include/xen/types.h
index 6258a745cf..ab045d9387 100644
--- a/xen/include/xen/types.h
+++ b/xen/include/xen/types.h
@@ -51,6 +51,6 @@ typedef __u64 uint64_t;
struct domain;
-struct exec_domain;
+struct vcpu;
#endif /* __TYPES_H__ */