author    kfraser@localhost.localdomain <kfraser@localhost.localdomain>  2006-11-01 16:08:19 +0000
committer kfraser@localhost.localdomain <kfraser@localhost.localdomain>  2006-11-01 16:08:19 +0000
commit    3f4c0e0087ee9b695653604450d5de3af01ce99f (patch)
tree      7aac98b819f7dbd4b632e59347f86b5d73e059d6
parent    54ab7bfb1a72ed3c74e3867e265867e3f9b9c7d8 (diff)
[HVM] HVM is now a flag parameter to the domain-creation hypercall.

This cleans up HVM start-of-day in Xen and means that the HVM status of a
domain is maintained from cradle to grave.

Signed-off-by: Keir Fraser <keir@xensource.com>
-rw-r--r--  tools/libxc/xc_domain.c                  |  2
-rw-r--r--  tools/libxc/xc_hvm_build.c               | 15
-rw-r--r--  tools/libxc/xenctrl.h                    |  1
-rw-r--r--  tools/python/xen/lowlevel/xc/xc.c        | 17
-rw-r--r--  tools/python/xen/xend/XendDomainInfo.py  | 12
-rw-r--r--  xen/arch/ia64/xen/xensetup.c             |  6
-rw-r--r--  xen/arch/powerpc/mm.c                    |  6
-rw-r--r--  xen/arch/powerpc/papr/xlate.c            |  2
-rw-r--r--  xen/arch/powerpc/setup.c                 |  9
-rw-r--r--  xen/arch/x86/domain.c                    | 31
-rw-r--r--  xen/arch/x86/domctl.c                    | 12
-rw-r--r--  xen/arch/x86/hvm/hvm.c                   |  8
-rw-r--r--  xen/arch/x86/hvm/svm/svm.c               |  1
-rw-r--r--  xen/arch/x86/hvm/svm/vmcb.c              | 18
-rw-r--r--  xen/arch/x86/hvm/vmx/vmcs.c              | 20
-rw-r--r--  xen/arch/x86/hvm/vmx/vmx.c               |  2
-rw-r--r--  xen/arch/x86/mm.c                        |  2
-rw-r--r--  xen/arch/x86/mm/shadow/common.c          |  4
-rw-r--r--  xen/arch/x86/mm/shadow/multi.c           | 34
-rw-r--r--  xen/arch/x86/mm/shadow/types.h           | 10
-rw-r--r--  xen/arch/x86/oprofile/xenoprof.c         |  2
-rw-r--r--  xen/arch/x86/setup.c                     |  9
-rw-r--r--  xen/arch/x86/traps.c                     |  2
-rw-r--r--  xen/arch/x86/x86_32/domain_page.c        |  2
-rw-r--r--  xen/arch/x86/x86_32/traps.c              |  4
-rw-r--r--  xen/arch/x86/x86_64/traps.c              |  6
-rw-r--r--  xen/common/domain.c                      |  7
-rw-r--r--  xen/common/domctl.c                      | 14
-rw-r--r--  xen/include/asm-x86/hvm/support.h        |  3
-rw-r--r--  xen/include/asm-x86/processor.h          |  2
-rw-r--r--  xen/include/asm-x86/regs.h               |  2
-rw-r--r--  xen/include/asm-x86/shadow.h             |  4
-rw-r--r--  xen/include/public/arch-x86_64.h         |  6
-rw-r--r--  xen/include/public/domctl.h              |  5
-rw-r--r--  xen/include/xen/sched.h                  | 54
35 files changed, 189 insertions(+), 145 deletions(-)
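For context, here is a minimal sketch of how a toolstack caller might drive the interface after this change; the xc handle plumbing and error handling are simplified, and create_hvm_domain is a hypothetical helper, not part of the patch:

    /* Hedged sketch: create a domain that is HVM from birth. */
    #include <xenctrl.h>

    static int create_hvm_domain(int xc_handle, uint32_t ssidref,
                                 xen_domain_handle_t handle)
    {
        uint32_t domid = 0;

        /* XEN_DOMCTL_CDF_hvm_guest is the only creation flag this
         * patch defines; pass 0 for a plain PV domain. */
        if ( xc_domain_create(xc_handle, ssidref, handle,
                              XEN_DOMCTL_CDF_hvm_guest, &domid) < 0 )
            return -1;

        return (int)domid;  /* domain id chosen by Xen */
    }

From Python the same bit surfaces as the new keyword argument: xc.domain_create(ssidref=..., handle=..., hvm=1).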
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index d559bdb742..b708211185 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -12,6 +12,7 @@
int xc_domain_create(int xc_handle,
uint32_t ssidref,
xen_domain_handle_t handle,
+ uint32_t flags,
uint32_t *pdomid)
{
int err;
@@ -20,6 +21,7 @@ int xc_domain_create(int xc_handle,
domctl.cmd = XEN_DOMCTL_createdomain;
domctl.domain = (domid_t)*pdomid;
domctl.u.createdomain.ssidref = ssidref;
+ domctl.u.createdomain.flags = flags;
memcpy(domctl.u.createdomain.handle, handle, sizeof(xen_domain_handle_t));
if ( (err = do_domctl(xc_handle, &domctl)) != 0 )
return err;
diff --git a/tools/libxc/xc_hvm_build.c b/tools/libxc/xc_hvm_build.c
index 09f3d99a86..f2bd062952 100644
--- a/tools/libxc/xc_hvm_build.c
+++ b/tools/libxc/xc_hvm_build.c
@@ -261,6 +261,19 @@ static int setup_guest(int xc_handle,
goto error_out;
}
+ /* HVM domains must be put into shadow mode at the start of day. */
+ /* XXX *After* xc_get_pfn_list()!! */
+ if ( xc_shadow_control(xc_handle, dom, XEN_DOMCTL_SHADOW_OP_ENABLE,
+ NULL, 0, NULL,
+ XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT |
+ XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE |
+ XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL,
+ NULL) )
+ {
+ PERROR("Could not enable shadow paging for domain.\n");
+ goto error_out;
+ }
+
loadelfimage(image, xc_handle, dom, page_array, &dsi);
if ( (mmu = xc_init_mmu_updates(xc_handle, dom)) == NULL )
@@ -417,6 +430,7 @@ static int xc_hvm_build_internal(int xc_handle,
goto error_out;
}
+#if 0
/* HVM domains must be put into shadow mode at the start of day */
if ( xc_shadow_control(xc_handle, domid, XEN_DOMCTL_SHADOW_OP_ENABLE,
NULL, 0, NULL,
@@ -428,6 +442,7 @@ static int xc_hvm_build_internal(int xc_handle,
PERROR("Could not enable shadow paging for domain.\n");
goto error_out;
}
+#endif
memset(ctxt, 0, sizeof(*ctxt));
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index d4249936c9..ef5a2abfd1 100644
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -177,6 +177,7 @@ typedef xen_domctl_getdomaininfo_t xc_domaininfo_t;
int xc_domain_create(int xc_handle,
uint32_t ssidref,
xen_domain_handle_t handle,
+ uint32_t flags,
uint32_t *pdomid);
diff --git a/tools/python/xen/lowlevel/xc/xc.c b/tools/python/xen/lowlevel/xc/xc.c
index 5de0f06657..3fe3a474f2 100644
--- a/tools/python/xen/lowlevel/xc/xc.c
+++ b/tools/python/xen/lowlevel/xc/xc.c
@@ -65,18 +65,17 @@ static PyObject *pyxc_domain_create(XcObject *self,
PyObject *args,
PyObject *kwds)
{
- uint32_t dom = 0;
- int ret, i;
- uint32_t ssidref = 0;
+ uint32_t dom = 0, ssidref = 0, flags = 0;
+ int ret, i, hvm = 0;
PyObject *pyhandle = NULL;
xen_domain_handle_t handle = {
0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef };
- static char *kwd_list[] = { "domid", "ssidref", "handle", NULL };
+ static char *kwd_list[] = { "domid", "ssidref", "handle", "hvm", NULL };
- if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|iiO", kwd_list,
- &dom, &ssidref, &pyhandle))
+ if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|iiOi", kwd_list,
+ &dom, &ssidref, &pyhandle, &hvm))
return NULL;
if ( pyhandle != NULL )
@@ -94,7 +93,11 @@ static PyObject *pyxc_domain_create(XcObject *self,
}
}
- if ( (ret = xc_domain_create(self->xc_handle, ssidref, handle, &dom)) < 0 )
+ if ( hvm )
+ flags |= XEN_DOMCTL_CDF_hvm_guest;
+
+ if ( (ret = xc_domain_create(self->xc_handle, ssidref,
+ handle, flags, &dom)) < 0 )
return PyErr_SetFromErrno(xc_error);
return PyInt_FromLong(dom);
diff --git a/tools/python/xen/xend/XendDomainInfo.py b/tools/python/xen/xend/XendDomainInfo.py
index 73c8bb9a4d..ae163a3a60 100644
--- a/tools/python/xen/xend/XendDomainInfo.py
+++ b/tools/python/xen/xend/XendDomainInfo.py
@@ -1198,10 +1198,20 @@ class XendDomainInfo:
log.debug('XendDomainInfo.constructDomain')
+ hvm = (self._infoIsSet('image') and
+ sxp.name(self.info['image']) == "hvm")
+ if hvm:
+ info = xc.xeninfo()
+ if not 'hvm' in info['xen_caps']:
+ raise VmError("HVM guest support is unavailable: is VT/AMD-V "
+ "supported by your CPU and enabled in your "
+ "BIOS?")
+
self.domid = xc.domain_create(
domid = 0,
ssidref = security.get_security_info(self.info, 'ssidref'),
- handle = uuid.fromString(self.info['uuid']))
+ handle = uuid.fromString(self.info['uuid']),
+ hvm = int(hvm))
if self.domid < 0:
raise VmError('Creating domain failed: name=%s' %
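The xend change above probes xen_caps before attempting HVM creation. A C caller could make the equivalent probe through xc_version(); this is a hedged sketch using the XENVER_capabilities query, with hvm_capable as a hypothetical helper:

    #include <string.h>
    #include <xenctrl.h>
    #include <xen/version.h>

    static int hvm_capable(int xc_handle)
    {
        xen_capabilities_info_t caps;  /* fixed-size char buffer */

        if ( xc_version(xc_handle, XENVER_capabilities, &caps) != 0 )
            return 0;

        /* e.g. "xen-3.0-x86_64 hvm-3.0-x86_32" on VT/AMD-V hardware */
        return strstr(caps, "hvm") != NULL;
    }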
diff --git a/xen/arch/ia64/xen/xensetup.c b/xen/arch/ia64/xen/xensetup.c
index 94e65cfd97..76f28e0d15 100644
--- a/xen/arch/ia64/xen/xensetup.c
+++ b/xen/arch/ia64/xen/xensetup.c
@@ -422,7 +422,7 @@ void start_kernel(void)
scheduler_init();
idle_vcpu[0] = (struct vcpu*) ia64_r13;
- idle_domain = domain_create(IDLE_DOMAIN_ID);
+ idle_domain = domain_create(IDLE_DOMAIN_ID, 0);
if ( (idle_domain == NULL) || (alloc_vcpu(idle_domain, 0, 0) == NULL) )
BUG();
@@ -502,11 +502,11 @@ printk("num_online_cpus=%d, max_cpus=%d\n",num_online_cpus(),max_cpus);
expose_p2m_init();
/* Create initial domain 0. */
- dom0 = domain_create(0);
+ dom0 = domain_create(0, 0);
if ( (dom0 == NULL) || (alloc_vcpu(dom0, 0, 0) == NULL) )
panic("Error creating domain 0\n");
- set_bit(_DOMF_privileged, &dom0->domain_flags);
+ dom0->is_privileged = 1;
/*
* We're going to setup domain0 using the module(s) that we stashed safely
diff --git a/xen/arch/powerpc/mm.c b/xen/arch/powerpc/mm.c
index f00b6f8ea9..1df18669d3 100644
--- a/xen/arch/powerpc/mm.c
+++ b/xen/arch/powerpc/mm.c
@@ -316,8 +316,7 @@ ulong pfn2mfn(struct domain *d, ulong pfn, int *type)
int t = PFN_TYPE_NONE;
/* quick tests first */
- if (test_bit(_DOMF_privileged, &d->domain_flags) &&
- cpu_io_mfn(pfn)) {
+ if (d->is_privileged && cpu_io_mfn(pfn)) {
t = PFN_TYPE_IO;
mfn = pfn;
} else {
@@ -341,8 +340,7 @@ ulong pfn2mfn(struct domain *d, ulong pfn, int *type)
if (t == PFN_TYPE_NONE) {
/* This hack allows dom0 to map all memory, necessary to
* initialize domU state. */
- if (test_bit(_DOMF_privileged, &d->domain_flags) &&
- mfn_valid(pfn)) {
+ if (d->is_privileged && mfn_valid(pfn)) {
struct page_info *pg;
/* page better be allocated to some domain but not the caller */
diff --git a/xen/arch/powerpc/papr/xlate.c b/xen/arch/powerpc/papr/xlate.c
index 01a0e89664..7ea5a39d5f 100644
--- a/xen/arch/powerpc/papr/xlate.c
+++ b/xen/arch/powerpc/papr/xlate.c
@@ -174,7 +174,7 @@ static void h_enter(struct cpu_user_regs *regs)
if (mtype == PFN_TYPE_IO) {
/* only a privileged dom can access outside IO space */
- if ( !test_bit(_DOMF_privileged, &d->domain_flags) ) {
+ if ( !d->is_privileged ) {
regs->gprs[3] = H_Privilege;
printk("%s: unprivileged access to physical page: 0x%lx\n",
__func__, pfn);
diff --git a/xen/arch/powerpc/setup.c b/xen/arch/powerpc/setup.c
index 83c0aca028..5c839f698e 100644
--- a/xen/arch/powerpc/setup.c
+++ b/xen/arch/powerpc/setup.c
@@ -157,7 +157,7 @@ static void __init start_of_day(void)
scheduler_init();
/* create idle domain */
- idle_domain = domain_create(IDLE_DOMAIN_ID);
+ idle_domain = domain_create(IDLE_DOMAIN_ID, 0);
if ((idle_domain == NULL) || (alloc_vcpu(idle_domain, 0, 0) == NULL))
BUG();
set_current(idle_domain->vcpu[0]);
@@ -342,7 +342,7 @@ static void __init __start_xen(multiboot_info_t *mbi)
start_of_day();
/* Create initial domain 0. */
- dom0 = domain_create(0);
+ dom0 = domain_create(0, 0);
if (dom0 == NULL)
panic("Error creating domain 0\n");
dom0->max_pages = ~0U;
@@ -355,8 +355,9 @@ static void __init __start_xen(multiboot_info_t *mbi)
* need to make sure Dom0's vVCPU 0 is pinned to the CPU */
dom0->vcpu[0]->cpu_affinity = cpumask_of_cpu(0);
- set_bit(_DOMF_privileged, &dom0->domain_flags);
- /* post-create hooks sets security label */
+ dom0->is_privileged = 1;
+
+ /* Post-create hook sets security label. */
acm_post_domain0_create(dom0->domain_id);
cmdline = (char *)(mod[0].string ? __va((ulong)mod[0].string) : NULL);
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index c1b60a3c00..d8a8f111bc 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -157,6 +157,13 @@ int arch_domain_create(struct domain *d)
int vcpuid, pdpt_order;
int i;
+ if ( is_hvm_domain(d) && !hvm_enabled )
+ {
+ gdprintk(XENLOG_WARNING, "Attempt to create a HVM guest "
+ "on a non-VT/AMDV platform.\n");
+ return -EINVAL;
+ }
+
pdpt_order = get_order_from_bytes(PDPT_L1_ENTRIES * sizeof(l1_pgentry_t));
d->arch.mm_perdomain_pt = alloc_xenheap_pages(pdpt_order);
if ( d->arch.mm_perdomain_pt == NULL )
@@ -258,7 +265,11 @@ int arch_set_info_guest(
unsigned long cr3_pfn = INVALID_MFN;
int i, rc;
- if ( !(c->flags & VGCF_HVM_GUEST) )
+ if ( !!(c->flags & VGCF_hvm_guest) != is_hvm_vcpu(v) )
+ return -EINVAL;
+ c->flags &= ~VGCF_hvm_guest;
+
+ if ( !is_hvm_vcpu(v) )
{
fixup_guest_stack_selector(c->user_regs.ss);
fixup_guest_stack_selector(c->kernel_ss);
@@ -272,15 +283,13 @@ int arch_set_info_guest(
for ( i = 0; i < 256; i++ )
fixup_guest_code_selector(c->trap_ctxt[i].cs);
}
- else if ( !hvm_enabled )
- return -EINVAL;
clear_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
- if ( c->flags & VGCF_I387_VALID )
+ if ( c->flags & VGCF_i387_valid )
set_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
v->arch.flags &= ~TF_kernel_mode;
- if ( (c->flags & VGCF_IN_KERNEL) || (c->flags & VGCF_HVM_GUEST) )
+ if ( (c->flags & VGCF_in_kernel) || is_hvm_vcpu(v)/*???*/ )
v->arch.flags |= TF_kernel_mode;
memcpy(&v->arch.guest_context, c, sizeof(*c));
@@ -291,7 +300,7 @@ int arch_set_info_guest(
init_int80_direct_trap(v);
- if ( !(c->flags & VGCF_HVM_GUEST) )
+ if ( !is_hvm_vcpu(v) )
{
/* IOPL privileges are virtualised. */
v->arch.iopl = (v->arch.guest_context.user_regs.eflags >> 12) & 3;
@@ -316,7 +325,7 @@ int arch_set_info_guest(
if ( v->vcpu_id == 0 )
d->vm_assist = c->vm_assist;
- if ( !(c->flags & VGCF_HVM_GUEST) )
+ if ( !is_hvm_vcpu(v) )
{
cr3_pfn = gmfn_to_mfn(d, xen_cr3_to_pfn(c->ctrlreg[3]));
v->arch.guest_table = pagetable_from_pfn(cr3_pfn);
@@ -325,7 +334,7 @@ int arch_set_info_guest(
if ( (rc = (int)set_gdt(v, c->gdt_frames, c->gdt_ents)) != 0 )
return rc;
- if ( c->flags & VGCF_HVM_GUEST )
+ if ( is_hvm_vcpu(v) )
{
v->arch.guest_table = pagetable_null();
@@ -745,7 +754,7 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
/* Re-enable interrupts before restoring state which may fault. */
local_irq_enable();
- if ( !hvm_guest(next) )
+ if ( !is_hvm_vcpu(next) )
{
load_LDT(next);
load_segments(next);
@@ -835,7 +844,7 @@ unsigned long hypercall_create_continuation(
#if defined(__i386__)
regs->eax = op;
- if ( supervisor_mode_kernel || hvm_guest(current) )
+ if ( supervisor_mode_kernel || is_hvm_vcpu(current) )
regs->eip &= ~31; /* re-execute entire hypercall entry stub */
else
regs->eip -= 2; /* re-execute 'int 0x82' */
@@ -972,7 +981,7 @@ void domain_relinquish_resources(struct domain *d)
#endif
}
- if ( d->vcpu[0] && hvm_guest(d->vcpu[0]) )
+ if ( is_hvm_domain(d) )
hvm_relinquish_guest_resources(d);
/* Tear down shadow mode stuff. */
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index ede75b31ea..f900650729 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -224,7 +224,7 @@ long arch_do_domctl(
spin_lock(&d->page_alloc_lock);
- if ( hvm_guest(d->vcpu[0]) && shadow_mode_translate(d) )
+ if ( is_hvm_domain(d) && shadow_mode_translate(d) )
{
/* HVM domain: scan P2M to get guaranteed physmap order. */
for ( i = 0, gmfn = 0;
@@ -321,7 +321,7 @@ void arch_getdomaininfo_ctxt(
{
memcpy(c, &v->arch.guest_context, sizeof(*c));
- if ( hvm_guest(v) )
+ if ( is_hvm_vcpu(v) )
{
hvm_store_cpu_guest_regs(v, &c->user_regs, c->ctrlreg);
}
@@ -334,11 +334,11 @@ void arch_getdomaininfo_ctxt(
c->flags = 0;
if ( test_bit(_VCPUF_fpu_initialised, &v->vcpu_flags) )
- c->flags |= VGCF_I387_VALID;
+ c->flags |= VGCF_i387_valid;
if ( guest_kernel_mode(v, &v->arch.guest_context.user_regs) )
- c->flags |= VGCF_IN_KERNEL;
- if ( hvm_guest(v) )
- c->flags |= VGCF_HVM_GUEST;
+ c->flags |= VGCF_in_kernel;
+ if ( is_hvm_vcpu(v) )
+ c->flags |= VGCF_hvm_guest;
c->ctrlreg[3] = xen_pfn_to_cr3(pagetable_get_pfn(v->arch.guest_table));
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index dc82e253e8..fcff7ce94a 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -260,12 +260,12 @@ void hvm_release_assist_channel(struct vcpu *v)
}
-void hvm_setup_platform(struct domain* d)
+void hvm_setup_platform(struct domain *d)
{
struct hvm_domain *platform;
- struct vcpu *v=current;
+ struct vcpu *v = current;
- if ( !hvm_guest(v) || (v->vcpu_id != 0) )
+ if ( !is_hvm_domain(d) || (v->vcpu_id != 0) )
return;
hvm_zap_iommu_pages(d);
@@ -635,7 +635,7 @@ int hvm_bringup_ap(int vcpuid, int trampoline_vector)
struct vcpu_guest_context *ctxt;
int rc = 0;
- BUG_ON(!hvm_guest(bsp));
+ BUG_ON(!is_hvm_domain(d));
if ( bsp->vcpu_id != 0 )
{
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 634900f195..1ed87e3812 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -562,7 +562,6 @@ static void svm_init_ap_context(struct vcpu_guest_context *ctxt,
*/
ctxt->user_regs.eip = 0x0;
ctxt->user_regs.cs = (trampoline_vector << 8);
- ctxt->flags = VGCF_HVM_GUEST;
}
static void svm_init_hypercall_page(struct domain *d, void *hypercall_page)
diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c
index 209eca0407..356130bd11 100644
--- a/xen/arch/x86/hvm/svm/vmcb.c
+++ b/xen/arch/x86/hvm/svm/vmcb.c
@@ -482,20 +482,14 @@ static void vmcb_dump(unsigned char ch)
struct vcpu *v;
printk("*********** VMCB Areas **************\n");
- for_each_domain(d) {
+ for_each_domain ( d )
+ {
+ if ( !is_hvm_domain(d) )
+ continue;
printk("\n>>> Domain %d <<<\n", d->domain_id);
- for_each_vcpu(d, v) {
-
- /*
- * Presumably, if a domain is not an HVM guest,
- * the very first CPU will not pass this test
- */
- if (!hvm_guest(v)) {
- printk("\t\tNot HVM guest\n");
- break;
- }
+ for_each_vcpu ( d, v )
+ {
printk("\tVCPU %d\n", v->vcpu_id);
-
svm_dump_vmcb("key_handler", v->arch.hvm_svm.vmcb);
}
}
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 9080b21cd6..1b70b23ab1 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -218,7 +218,7 @@ void vmx_vmcs_exit(struct vcpu *v)
/* Don't confuse arch_vmx_do_resume (for @v or @current!) */
vmx_clear_vmcs(v);
- if ( hvm_guest(current) )
+ if ( is_hvm_vcpu(current) )
vmx_load_vmcs(current);
spin_unlock(&v->arch.hvm_vmx.vmcs_lock);
@@ -709,20 +709,14 @@ static void vmcs_dump(unsigned char ch)
struct vcpu *v;
printk("*********** VMCS Areas **************\n");
- for_each_domain(d) {
+ for_each_domain ( d )
+ {
+ if ( !is_hvm_domain(d) )
+ continue;
printk("\n>>> Domain %d <<<\n", d->domain_id);
- for_each_vcpu(d, v) {
-
- /*
- * Presumably, if a domain is not an HVM guest,
- * the very first CPU will not pass this test
- */
- if (!hvm_guest(v)) {
- printk("\t\tNot HVM guest\n");
- break;
- }
+ for_each_vcpu ( d, v )
+ {
printk("\tVCPU %d\n", v->vcpu_id);
-
vmx_vmcs_enter(v);
vmcs_dump_vcpu();
vmx_vmcs_exit(v);
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 7994441b3c..7676547829 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -672,8 +672,6 @@ static void vmx_init_ap_context(struct vcpu_guest_context *ctxt,
ctxt->user_regs.edx = vcpuid;
ctxt->user_regs.ebx = trampoline_vector;
- ctxt->flags = VGCF_HVM_GUEST;
-
/* Virtual IDT is empty at start-of-day. */
for ( i = 0; i < 256; i++ )
{
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index e109e13397..58009872ca 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1715,7 +1715,7 @@ int new_guest_cr3(unsigned long mfn)
int okay;
unsigned long old_base_mfn;
- if ( hvm_guest(v) && !hvm_paging_enabled(v) )
+ if ( is_hvm_domain(d) && !hvm_paging_enabled(v) )
domain_crash_synchronous();
if ( shadow_mode_refcounts(d) )
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index de9e9dffa7..2e48164ca7 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -2286,7 +2286,7 @@ void sh_update_paging_modes(struct vcpu *v)
//
shadow_detach_old_tables(v);
- if ( !hvm_guest(v) )
+ if ( !is_hvm_domain(d) )
{
///
/// PV guest
@@ -2394,7 +2394,7 @@ void sh_update_paging_modes(struct vcpu *v)
SHADOW_PRINTK("new paging mode: d=%u v=%u pe=%d g=%u s=%u "
"(was g=%u s=%u)\n",
d->domain_id, v->vcpu_id,
- hvm_guest(v) ? !!hvm_paging_enabled(v) : 1,
+ is_hvm_domain(d) ? !!hvm_paging_enabled(v) : 1,
v->arch.shadow.mode->guest_levels,
v->arch.shadow.mode->shadow_levels,
old_mode ? old_mode->guest_levels : 0,
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index cea72d0ef4..a5f254af98 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -202,14 +202,14 @@ guest_supports_superpages(struct vcpu *v)
{
/* The _PAGE_PSE bit must be honoured in HVM guests, whenever
* CR4.PSE is set or the guest is in PAE or long mode */
- return (hvm_guest(v) && (GUEST_PAGING_LEVELS != 2
+ return (is_hvm_vcpu(v) && (GUEST_PAGING_LEVELS != 2
|| (hvm_get_guest_ctrl_reg(v, 4) & X86_CR4_PSE)));
}
static inline int
guest_supports_nx(struct vcpu *v)
{
- if ( !hvm_guest(v) )
+ if ( !is_hvm_vcpu(v) )
return cpu_has_nx;
// XXX - fix this!
@@ -769,7 +769,7 @@ _sh_propagate(struct vcpu *v,
// PV guests in 64-bit mode use two different page tables for user vs
// supervisor permissions, making the guest's _PAGE_USER bit irrelevant.
// It is always shadowed as present...
- if ( (GUEST_PAGING_LEVELS == 4) && !hvm_guest(v) )
+ if ( (GUEST_PAGING_LEVELS == 4) && !is_hvm_domain(d) )
{
sflags |= _PAGE_USER;
}
@@ -2293,7 +2293,7 @@ static int validate_gl1e(struct vcpu *v, void *new_ge, mfn_t sl1mfn, void *se)
gfn = guest_l1e_get_gfn(*new_gl1e);
gmfn = vcpu_gfn_to_mfn(v, gfn);
- mmio = (hvm_guest(v) && shadow_vcpu_mode_translate(v) && !valid_mfn(gmfn));
+ mmio = (is_hvm_vcpu(v) && shadow_vcpu_mode_translate(v) && !valid_mfn(gmfn));
l1e_propagate_from_guest(v, new_gl1e, _mfn(INVALID_MFN), gmfn, &new_sl1e,
ft_prefetch, mmio);
@@ -2523,7 +2523,7 @@ static void sh_prefetch(struct vcpu *v, walk_t *gw,
/* Look at the gfn that the l1e is pointing at */
gfn = guest_l1e_get_gfn(gl1e);
gmfn = vcpu_gfn_to_mfn(v, gfn);
- mmio = ( hvm_guest(v)
+ mmio = ( is_hvm_vcpu(v)
&& shadow_vcpu_mode_translate(v)
&& mmio_space(gfn_to_paddr(gfn)) );
@@ -2585,7 +2585,8 @@ static int sh_page_fault(struct vcpu *v,
{
if ( sh_l1e_is_gnp(sl1e) )
{
- if ( likely(!hvm_guest(v) || shadow_vcpu_mode_translate(v)) )
+ if ( likely(!is_hvm_domain(d) ||
+ shadow_vcpu_mode_translate(v)) )
{
/* Not-present in a guest PT: pass to the guest as
* a not-present fault (by flipping two bits). */
@@ -2647,7 +2648,7 @@ static int sh_page_fault(struct vcpu *v,
//
if ( unlikely(!(guest_l1e_get_flags(gw.eff_l1e) & _PAGE_PRESENT)) )
{
- if ( hvm_guest(v) && !shadow_vcpu_mode_translate(v) )
+ if ( is_hvm_domain(d) && !shadow_vcpu_mode_translate(v) )
{
/* Not present in p2m map, means this is mmio */
gpa = va;
@@ -2704,9 +2705,9 @@ static int sh_page_fault(struct vcpu *v,
/* What mfn is the guest trying to access? */
gfn = guest_l1e_get_gfn(gw.eff_l1e);
gmfn = vcpu_gfn_to_mfn(v, gfn);
- mmio = ( hvm_guest(v)
- && shadow_vcpu_mode_translate(v)
- && mmio_space(gfn_to_paddr(gfn)) );
+ mmio = (is_hvm_domain(d)
+ && shadow_vcpu_mode_translate(v)
+ && mmio_space(gfn_to_paddr(gfn)));
if ( !mmio && !valid_mfn(gmfn) )
{
@@ -2775,14 +2776,15 @@ static int sh_page_fault(struct vcpu *v,
emulate:
/* Take the register set we were called with */
emul_regs = *regs;
- if ( hvm_guest(v) )
+ if ( is_hvm_domain(d) )
{
/* Add the guest's segment selectors, rip, rsp. rflags */
hvm_store_cpu_guest_regs(v, &emul_regs, NULL);
}
emul_ctxt.regs = &emul_regs;
emul_ctxt.cr2 = va;
- emul_ctxt.mode = hvm_guest(v) ? hvm_guest_x86_mode(v) : X86EMUL_MODE_HOST;
+ emul_ctxt.mode = (is_hvm_domain(d) ?
+ hvm_guest_x86_mode(v) : X86EMUL_MODE_HOST);
SHADOW_PRINTK("emulate: eip=%#lx\n", emul_regs.eip);
@@ -2813,7 +2815,7 @@ static int sh_page_fault(struct vcpu *v,
goto not_a_shadow_fault;
/* Emulator has changed the user registers: write back */
- if ( hvm_guest(v) )
+ if ( is_hvm_domain(d) )
{
/* Write back the guest's segment selectors, rip, rsp. rflags */
hvm_load_cpu_guest_regs(v, &emul_regs);
@@ -3317,7 +3319,7 @@ sh_update_cr3(struct vcpu *v)
#ifndef NDEBUG
/* Double-check that the HVM code has sent us a sane guest_table */
- if ( hvm_guest(v) )
+ if ( is_hvm_domain(d) )
{
gfn_t gfn;
@@ -3492,7 +3494,7 @@ sh_update_cr3(struct vcpu *v)
///
if ( shadow_mode_external(d) )
{
- ASSERT(hvm_guest(v));
+ ASSERT(is_hvm_domain(d));
#if SHADOW_PAGING_LEVELS == 3
/* 2-on-3 or 3-on-3: Use the PAE shadow l3 table we just fabricated */
v->arch.hvm_vcpu.hw_cr3 = virt_to_maddr(&v->arch.shadow.l3table);
@@ -3890,7 +3892,7 @@ static char * sh_audit_flags(struct vcpu *v, int level,
{
if ( (sflags & _PAGE_PRESENT) && !(gflags & _PAGE_PRESENT) )
return "shadow is present but guest is not present";
- if ( (sflags & _PAGE_GLOBAL) && !hvm_guest(v) )
+ if ( (sflags & _PAGE_GLOBAL) && !is_hvm_vcpu(v) )
return "global bit set in PV shadow";
if ( (level == 1 || (level == 2 && (gflags & _PAGE_PSE)))
&& ((sflags & _PAGE_DIRTY) && !(gflags & _PAGE_DIRTY)) )
diff --git a/xen/arch/x86/mm/shadow/types.h b/xen/arch/x86/mm/shadow/types.h
index 1bb12bd214..2dd2908d69 100644
--- a/xen/arch/x86/mm/shadow/types.h
+++ b/xen/arch/x86/mm/shadow/types.h
@@ -205,13 +205,13 @@ static inline shadow_l4e_t shadow_l4e_from_mfn(mfn_t mfn, u32 flags)
__sh_linear_l1_table; \
})
-// XXX -- these should not be conditional on hvm_guest(v), but rather on
+// XXX -- these should not be conditional on is_hvm_vcpu(v), but rather on
// shadow_mode_external(d)...
//
#define sh_linear_l2_table(v) ({ \
ASSERT(current == (v)); \
((shadow_l2e_t *) \
- (hvm_guest(v) ? __linear_l1_table : __sh_linear_l1_table) + \
+ (is_hvm_vcpu(v) ? __linear_l1_table : __sh_linear_l1_table) + \
shadow_l1_linear_offset(SH_LINEAR_PT_VIRT_START)); \
})
@@ -219,7 +219,7 @@ static inline shadow_l4e_t shadow_l4e_from_mfn(mfn_t mfn, u32 flags)
#define sh_linear_l3_table(v) ({ \
ASSERT(current == (v)); \
((shadow_l3e_t *) \
- (hvm_guest(v) ? __linear_l2_table : __sh_linear_l2_table) + \
+ (is_hvm_vcpu(v) ? __linear_l2_table : __sh_linear_l2_table) + \
shadow_l2_linear_offset(SH_LINEAR_PT_VIRT_START)); \
})
@@ -228,7 +228,7 @@ static inline shadow_l4e_t shadow_l4e_from_mfn(mfn_t mfn, u32 flags)
#define sh_linear_l4_table(v) ({ \
ASSERT(current == (v)); \
((l4_pgentry_t *) \
- (hvm_guest(v) ? __linear_l3_table : __sh_linear_l3_table) + \
+ (is_hvm_vcpu(v) ? __linear_l3_table : __sh_linear_l3_table) + \
shadow_l3_linear_offset(SH_LINEAR_PT_VIRT_START)); \
})
#endif
@@ -585,7 +585,7 @@ accumulate_guest_flags(struct vcpu *v, walk_t *gw)
// In 64-bit PV guests, the _PAGE_USER bit is implied in all guest
// entries (since even the guest kernel runs in ring 3).
//
- if ( (GUEST_PAGING_LEVELS == 4) && !hvm_guest(v) )
+ if ( (GUEST_PAGING_LEVELS == 4) && !is_hvm_vcpu(v) )
accumulated_flags |= _PAGE_USER;
return accumulated_flags;
diff --git a/xen/arch/x86/oprofile/xenoprof.c b/xen/arch/x86/oprofile/xenoprof.c
index 27c2a90297..288e7b2f74 100644
--- a/xen/arch/x86/oprofile/xenoprof.c
+++ b/xen/arch/x86/oprofile/xenoprof.c
@@ -701,7 +701,7 @@ int xenoprofile_get_mode(struct vcpu *v, struct cpu_user_regs * const regs)
if ( !guest_mode(regs) )
return 2;
- if ( hvm_guest(v) )
+ if ( is_hvm_vcpu(v) )
return ((regs->cs & 3) != 3);
return guest_kernel_mode(v, regs);
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 15c42b133c..1e47e7c3e0 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -249,7 +249,7 @@ static void __init init_idle_domain(void)
/* Domain creation requires that scheduler structures are initialised. */
scheduler_init();
- idle_domain = domain_create(IDLE_DOMAIN_ID);
+ idle_domain = domain_create(IDLE_DOMAIN_ID, 0);
if ( (idle_domain == NULL) || (alloc_vcpu(idle_domain, 0, 0) == NULL) )
BUG();
@@ -640,12 +640,13 @@ void __init __start_xen(multiboot_info_t *mbi)
acm_init(_policy_start, _policy_len);
/* Create initial domain 0. */
- dom0 = domain_create(0);
+ dom0 = domain_create(0, 0);
if ( (dom0 == NULL) || (alloc_vcpu(dom0, 0, 0) == NULL) )
panic("Error creating domain 0\n");
- set_bit(_DOMF_privileged, &dom0->domain_flags);
- /* post-create hooks sets security label */
+ dom0->is_privileged = 1;
+
+ /* Post-create hook sets security label. */
acm_post_domain0_create(dom0->domain_id);
/* Grab the DOM0 command line. */
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index b8efa919e1..4b14bb5028 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -134,7 +134,7 @@ static void show_guest_stack(struct cpu_user_regs *regs)
int i;
unsigned long *stack, addr;
- if ( hvm_guest(current) )
+ if ( is_hvm_vcpu(current) )
return;
if ( vm86_mode(regs) )
diff --git a/xen/arch/x86/x86_32/domain_page.c b/xen/arch/x86/x86_32/domain_page.c
index b720003fc0..2b760ce704 100644
--- a/xen/arch/x86/x86_32/domain_page.c
+++ b/xen/arch/x86/x86_32/domain_page.c
@@ -29,7 +29,7 @@ static inline struct vcpu *mapcache_current_vcpu(void)
* then it means we are running on the idle domain's page table and must
* therefore use its mapcache.
*/
- if ( unlikely(!pagetable_get_pfn(v->arch.guest_table)) && !hvm_guest(v) )
+ if ( unlikely(!pagetable_get_pfn(v->arch.guest_table)) && !is_hvm_vcpu(v) )
{
/* If we really are idling, perform lazy context switch now. */
if ( (v = idle_vcpu[smp_processor_id()]) == current )
diff --git a/xen/arch/x86/x86_32/traps.c b/xen/arch/x86/x86_32/traps.c
index 90e799a78c..c83f1774d4 100644
--- a/xen/arch/x86/x86_32/traps.c
+++ b/xen/arch/x86/x86_32/traps.c
@@ -45,7 +45,7 @@ void show_registers(struct cpu_user_regs *regs)
unsigned long fault_crs[8];
const char *context;
- if ( hvm_guest(current) && guest_mode(regs) )
+ if ( is_hvm_vcpu(current) && guest_mode(regs) )
{
context = "hvm";
hvm_store_cpu_guest_regs(current, &fault_regs, fault_crs);
@@ -515,7 +515,7 @@ static void hypercall_page_initialise_ring1_kernel(void *hypercall_page)
void hypercall_page_initialise(struct domain *d, void *hypercall_page)
{
- if ( hvm_guest(d->vcpu[0]) )
+ if ( is_hvm_domain(d) )
hvm_hypercall_page_initialise(d, hypercall_page);
else if ( supervisor_mode_kernel )
hypercall_page_initialise_ring0_kernel(hypercall_page);
diff --git a/xen/arch/x86/x86_64/traps.c b/xen/arch/x86/x86_64/traps.c
index 0f1a822eb7..b527c51d99 100644
--- a/xen/arch/x86/x86_64/traps.c
+++ b/xen/arch/x86/x86_64/traps.c
@@ -42,7 +42,7 @@ void show_registers(struct cpu_user_regs *regs)
unsigned long fault_crs[8];
const char *context;
- if ( hvm_guest(current) && guest_mode(regs) )
+ if ( is_hvm_vcpu(current) && guest_mode(regs) )
{
context = "hvm";
hvm_store_cpu_guest_regs(current, &fault_regs, fault_crs);
@@ -229,7 +229,7 @@ unsigned long do_iret(void)
regs->rsp = iret_saved.rsp;
regs->ss = iret_saved.ss | 3; /* force guest privilege */
- if ( !(iret_saved.flags & VGCF_IN_SYSCALL) )
+ if ( !(iret_saved.flags & VGCF_in_syscall) )
{
regs->entry_vector = 0;
regs->r11 = iret_saved.r11;
@@ -500,7 +500,7 @@ static void hypercall_page_initialise_ring3_kernel(void *hypercall_page)
void hypercall_page_initialise(struct domain *d, void *hypercall_page)
{
- if ( hvm_guest(d->vcpu[0]) )
+ if ( is_hvm_domain(d) )
hvm_hypercall_page_initialise(d, hypercall_page);
else
hypercall_page_initialise_ring3_kernel(hypercall_page);
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 2d25a7053b..91ee08b743 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -114,7 +114,7 @@ struct vcpu *alloc_idle_vcpu(unsigned int cpu_id)
unsigned int vcpu_id = cpu_id % MAX_VIRT_CPUS;
d = (vcpu_id == 0) ?
- domain_create(IDLE_DOMAIN_ID) :
+ domain_create(IDLE_DOMAIN_ID, 0) :
idle_vcpu[cpu_id - vcpu_id]->domain;
BUG_ON(d == NULL);
@@ -124,13 +124,16 @@ struct vcpu *alloc_idle_vcpu(unsigned int cpu_id)
return v;
}
-struct domain *domain_create(domid_t domid)
+struct domain *domain_create(domid_t domid, unsigned int domcr_flags)
{
struct domain *d, **pd;
if ( (d = alloc_domain(domid)) == NULL )
return NULL;
+ if ( domcr_flags & DOMCRF_hvm )
+ d->is_hvm = 1;
+
rangeset_domain_initialise(d);
if ( !is_idle_domain(d) )
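This hunk is where the "cradle to grave" half of the commit message lands: d->is_hvm is assigned exactly once, inside domain_create(), and nothing clears it afterwards. A hedged illustration of the resulting invariant (domid is an assumption):

    struct domain *d = domain_create(domid, DOMCRF_hvm);
    if ( d != NULL )
        /* Holds from creation until domain_destroy(); there is no
         * later "switch this domain to HVM" path any more. */
        ASSERT(is_hvm_domain(d));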
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index ecb77cd85d..666b241594 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -241,12 +241,10 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
struct domain *d;
domid_t dom;
static domid_t rover = 0;
+ unsigned int domcr_flags;
- /*
- * Running the domain 0 kernel in ring 0 is not compatible
- * with multiple guests.
- */
- if ( supervisor_mode_kernel )
+ if ( supervisor_mode_kernel ||
+ (op->u.createdomain.flags & ~XEN_DOMCTL_CDF_hvm_guest) )
return -EINVAL;
dom = op->domain;
@@ -273,8 +271,12 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
rover = dom;
}
+ domcr_flags = 0;
+ if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_hvm_guest )
+ domcr_flags |= DOMCRF_hvm;
+
ret = -ENOMEM;
- if ( (d = domain_create(dom)) == NULL )
+ if ( (d = domain_create(dom, domcr_flags)) == NULL )
break;
memcpy(d->handle, op->u.createdomain.handle,
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index c7149c1894..ff586eb6b2 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -32,7 +32,8 @@
#define HVM_DEBUG 1
#endif
-#define hvm_guest(v) ((v)->arch.guest_context.flags & VGCF_HVM_GUEST)
+#define is_hvm_domain(d) ((d)->is_hvm)
+#define is_hvm_vcpu(v) (is_hvm_domain(v->domain))
static inline shared_iopage_t *get_sp(struct domain *d)
{
diff --git a/xen/include/asm-x86/processor.h b/xen/include/asm-x86/processor.h
index 09ecfab041..7a18e0d3d7 100644
--- a/xen/include/asm-x86/processor.h
+++ b/xen/include/asm-x86/processor.h
@@ -107,7 +107,7 @@
#define TRAP_deferred_nmi 31
/* Set for entry via SYSCALL. Informs return code to use SYSRETQ not IRETQ. */
-/* NB. Same as VGCF_IN_SYSCALL. No bits in common with any other TRAP_ defn. */
+/* NB. Same as VGCF_in_syscall. No bits in common with any other TRAP_ defn. */
#define TRAP_syscall 256
/*
diff --git a/xen/include/asm-x86/regs.h b/xen/include/asm-x86/regs.h
index 8ba10c4722..48f8b86c85 100644
--- a/xen/include/asm-x86/regs.h
+++ b/xen/include/asm-x86/regs.h
@@ -39,7 +39,7 @@ enum EFLAGS {
/* If a guest frame, it must have guest privs (unless HVM guest). */ \
/* We permit CS==0 which can come from an uninitialised trap entry. */ \
ASSERT((diff != 0) || vm86_mode(r) || ((r->cs&3) >= GUEST_KERNEL_RPL) || \
- (r->cs == 0) || hvm_guest(current)); \
+ (r->cs == 0) || is_hvm_vcpu(current)); \
/* If not a guest frame, it must be a hypervisor frame. */ \
ASSERT((diff == 0) || (!vm86_mode(r) && (r->cs == __HYPERVISOR_CS))); \
/* Return TRUE if it's a guest frame. */ \
diff --git a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
index 9e3bcb4caf..1cf2e5041d 100644
--- a/xen/include/asm-x86/shadow.h
+++ b/xen/include/asm-x86/shadow.h
@@ -64,7 +64,7 @@
#define shadow_mode_external(_d) ((_d)->arch.shadow.mode & SHM2_external)
/* Xen traps & emulates all reads of all page table pages:
- *not yet supported
+ * not yet supported
*/
#define shadow_mode_trap_reads(_d) ({ (void)(_d); 0; })
@@ -77,7 +77,7 @@
#ifdef __x86_64__
#define pv_32bit_guest(_v) 0 // not yet supported
#else
-#define pv_32bit_guest(_v) !hvm_guest(v)
+#define pv_32bit_guest(_v) !is_hvm_vcpu(v)
#endif
/* The shadow lock.
diff --git a/xen/include/public/arch-x86_64.h b/xen/include/public/arch-x86_64.h
index 2891cfa29e..d5706944d6 100644
--- a/xen/include/public/arch-x86_64.h
+++ b/xen/include/public/arch-x86_64.h
@@ -152,7 +152,7 @@ typedef unsigned long xen_ulong_t;
* directly with
* orb $3,1*8(%rsp)
* iretq
- * If flags contains VGCF_IN_SYSCALL:
+ * If flags contains VGCF_in_syscall:
* Restore RAX, RIP, RFLAGS, RSP.
* Discard R11, RCX, CS, SS.
* Otherwise:
@@ -160,7 +160,9 @@ typedef unsigned long xen_ulong_t;
* All other registers are saved on hypercall entry and restored to user.
*/
/* Guest exited in SYSCALL context? Return to guest with SYSRET? */
-#define VGCF_IN_SYSCALL (1<<8)
+#define _VGCF_in_syscall 8
+#define VGCF_in_syscall (1<<_VGCF_in_syscall)
+#define VGCF_IN_SYSCALL VGCF_in_syscall
struct iret_context {
/* Top of stack (%rsp at point of hypercall). */
uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 6ad8918757..9dfefc2d9d 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -16,7 +16,7 @@
#include "xen.h"
-#define XEN_DOMCTL_INTERFACE_VERSION 0x00000003
+#define XEN_DOMCTL_INTERFACE_VERSION 0x00000004
struct xenctl_cpumap {
XEN_GUEST_HANDLE(uint8_t) bitmap;
@@ -32,6 +32,9 @@ struct xen_domctl_createdomain {
/* IN parameters */
uint32_t ssidref;
xen_domain_handle_t handle;
+#define _XEN_DOMCTL_CDF_hvm_guest 0
+#define XEN_DOMCTL_CDF_hvm_guest (1U<<_XEN_DOMCTL_CDF_hvm_guest)
+ uint32_t flags;
};
typedef struct xen_domctl_createdomain xen_domctl_createdomain_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_createdomain_t);
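Because xen_domctl_createdomain grows a field, the old layout no longer matches, hence the interface-version bump to 0x00000004 above. A hedged sketch of the raw payload a flags-aware libxc now assembles (hypercall buffer locking and the do_domctl() call are elided; ssidref and handle are assumptions):

    xen_domctl_t domctl;

    memset(&domctl, 0, sizeof(domctl));
    domctl.cmd = XEN_DOMCTL_createdomain;
    domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;  /* 0x4 */
    domctl.domain = 0;                        /* 0 => Xen picks an id */
    domctl.u.createdomain.ssidref = ssidref;
    domctl.u.createdomain.flags   = XEN_DOMCTL_CDF_hvm_guest;
    memcpy(domctl.u.createdomain.handle, handle,
           sizeof(xen_domain_handle_t));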
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 93abe4e17f..ae283e05b9 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -144,6 +144,12 @@ struct domain
unsigned long domain_flags;
+ /* Boolean: Is this an HVM guest? */
+ char is_hvm;
+
+ /* Boolean: Is this guest fully privileged (aka dom0)? */
+ char is_privileged;
+
spinlock_t pause_lock;
unsigned int pause_count;
@@ -237,26 +243,30 @@ static inline void get_knownalive_domain(struct domain *d)
ASSERT(!(atomic_read(&d->refcnt) & DOMAIN_DESTROYED));
}
-extern struct domain *domain_create(domid_t domid);
-extern int construct_dom0(
+struct domain *domain_create(domid_t domid, unsigned int domcr_flags);
+ /* DOMCRF_hvm: Create an HVM domain, as opposed to a PV domain. */
+#define _DOMCRF_hvm 0
+#define DOMCRF_hvm (1U<<_DOMCRF_hvm)
+
+int construct_dom0(
struct domain *d,
unsigned long image_start, unsigned long image_len,
unsigned long initrd_start, unsigned long initrd_len,
char *cmdline);
-extern int set_info_guest(struct domain *d, xen_domctl_vcpucontext_t *);
+int set_info_guest(struct domain *d, xen_domctl_vcpucontext_t *);
struct domain *find_domain_by_id(domid_t dom);
-extern void domain_destroy(struct domain *d);
-extern void domain_kill(struct domain *d);
-extern void domain_shutdown(struct domain *d, u8 reason);
-extern void domain_pause_for_debugger(void);
+void domain_destroy(struct domain *d);
+void domain_kill(struct domain *d);
+void domain_shutdown(struct domain *d, u8 reason);
+void domain_pause_for_debugger(void);
/*
* Mark specified domain as crashed. This function always returns, even if the
* caller is the specified domain. The domain is not synchronously descheduled
* from any processor.
*/
-extern void __domain_crash(struct domain *d);
+void __domain_crash(struct domain *d);
#define domain_crash(d) do { \
printk("domain_crash called from %s:%d\n", __FILE__, __LINE__); \
__domain_crash(d); \
@@ -266,7 +276,7 @@ extern void __domain_crash(struct domain *d);
* Mark current domain as crashed and synchronously deschedule from the local
* processor. This function never returns.
*/
-extern void __domain_crash_synchronous(void) __attribute__((noreturn));
+void __domain_crash_synchronous(void) __attribute__((noreturn));
#define domain_crash_synchronous() do { \
printk("domain_crash_sync called from %s:%d\n", __FILE__, __LINE__); \
__domain_crash_synchronous(); \
@@ -293,7 +303,7 @@ void vcpu_sleep_sync(struct vcpu *d);
* this call will ensure that all its state is committed to memory and that
* no CPU is using critical state (e.g., page tables) belonging to the VCPU.
*/
-extern void sync_vcpu_execstate(struct vcpu *v);
+void sync_vcpu_execstate(struct vcpu *v);
/*
* Called by the scheduler to switch to another VCPU. This function must
@@ -302,7 +312,7 @@ extern void sync_vcpu_execstate(struct vcpu *v);
* implementing lazy context switching, it suffices to ensure that invoking
* sync_vcpu_execstate() will switch and commit @prev's state.
*/
-extern void context_switch(
+void context_switch(
struct vcpu *prev,
struct vcpu *next);
@@ -312,10 +322,10 @@ extern void context_switch(
* saved to memory. Alternatively, if implementing lazy context switching,
* ensure that invoking sync_vcpu_execstate() will switch and commit @prev.
*/
-extern void context_saved(struct vcpu *prev);
+void context_saved(struct vcpu *prev);
/* Called by the scheduler to continue running the current VCPU. */
-extern void continue_running(
+void continue_running(
struct vcpu *same);
void startup_cpu_idle_loop(void);
@@ -396,26 +406,23 @@ extern struct domain *domain_list;
/*
* Per-domain flags (domain_flags).
*/
- /* Is this domain privileged? */
-#define _DOMF_privileged 0
-#define DOMF_privileged (1UL<<_DOMF_privileged)
/* Guest shut itself down for some reason. */
-#define _DOMF_shutdown 1
+#define _DOMF_shutdown 0
#define DOMF_shutdown (1UL<<_DOMF_shutdown)
/* Death rattle. */
-#define _DOMF_dying 2
+#define _DOMF_dying 1
#define DOMF_dying (1UL<<_DOMF_dying)
/* Domain is paused by controller software. */
-#define _DOMF_ctrl_pause 3
+#define _DOMF_ctrl_pause 2
#define DOMF_ctrl_pause (1UL<<_DOMF_ctrl_pause)
/* Domain is being debugged by controller software. */
-#define _DOMF_debugging 4
+#define _DOMF_debugging 3
#define DOMF_debugging (1UL<<_DOMF_debugging)
/* Are any VCPUs polling event channels (SCHEDOP_poll)? */
-#define _DOMF_polling 5
+#define _DOMF_polling 4
#define DOMF_polling (1UL<<_DOMF_polling)
/* Domain is paused by the hypervisor? */
-#define _DOMF_paused 6
+#define _DOMF_paused 5
#define DOMF_paused (1UL<<_DOMF_paused)
static inline int vcpu_runnable(struct vcpu *v)
@@ -450,8 +457,7 @@ static inline void vcpu_unblock(struct vcpu *v)
vcpu_wake(v);
}
-#define IS_PRIV(_d) \
- (test_bit(_DOMF_privileged, &(_d)->domain_flags))
+#define IS_PRIV(_d) ((_d)->is_privileged)
#define VM_ASSIST(_d,_t) (test_bit((_t), &(_d)->vm_assist))