aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorkfraser@localhost.localdomain <kfraser@localhost.localdomain>2006-11-01 18:37:23 +0000
committerkfraser@localhost.localdomain <kfraser@localhost.localdomain>2006-11-01 18:37:23 +0000
commit8082f8f7840a679655053ecfd95f64c64bb5c661 (patch)
tree317cac4c0985f30034f4071af997faaef3fe9afa
parent8bdc427fa2dc6e53881d80bd57638f56f8ac39ad (diff)
downloadxen-8082f8f7840a679655053ecfd95f64c64bb5c661.tar.gz
xen-8082f8f7840a679655053ecfd95f64c64bb5c661.tar.bz2
xen-8082f8f7840a679655053ecfd95f64c64bb5c661.zip
[HVM] Move shadow initialisation into domain-creation hypercall.
Allocate HVM guest memory in the libxc builder function rather than in xend. Clean up fallout from these changes. Todo: Fix ia64. Move PV builder to same model (it should allocate the memory rather than xend doing so -- then it can avoid using xc_get_pfn_list()). Signed-off-by: Keir Fraser <keir@xensource.com>
-rw-r--r--tools/ioemu/vl.c9
-rw-r--r--tools/libxc/xc_hvm_build.c109
-rw-r--r--tools/python/xen/xend/XendDomainInfo.py8
-rw-r--r--tools/python/xen/xend/image.py14
-rw-r--r--xen/arch/x86/domain.c36
-rw-r--r--xen/arch/x86/domctl.c45
-rw-r--r--xen/arch/x86/mm/shadow/common.c4
-rw-r--r--xen/include/asm-x86/shadow.h3
8 files changed, 64 insertions, 164 deletions
diff --git a/tools/ioemu/vl.c b/tools/ioemu/vl.c
index fafb325835..c6798e5c92 100644
--- a/tools/ioemu/vl.c
+++ b/tools/ioemu/vl.c
@@ -6420,15 +6420,14 @@ int main(int argc, char **argv)
}
#if defined(__i386__) || defined(__x86_64__)
- if (xc_get_pfn_list(xc_handle, domid, page_array, nr_pages) != nr_pages) {
+ for ( i = 0; i < tmp_nr_pages; i++)
+ page_array[i] = i;
+ if (xc_domain_translate_gpfn_list(xc_handle, domid, tmp_nr_pages,
+ page_array, page_array)) {
fprintf(logfile, "xc_get_pfn_list returned error %d\n", errno);
exit(-1);
}
- if (ram_size > HVM_BELOW_4G_RAM_END)
- for (i = 0; i < nr_pages - (HVM_BELOW_4G_RAM_END >> PAGE_SHIFT); i++)
- page_array[tmp_nr_pages - 1 - i] = page_array[nr_pages - 1 - i];
-
phys_ram_base = xc_map_foreign_batch(xc_handle, domid,
PROT_READ|PROT_WRITE, page_array,
tmp_nr_pages);
diff --git a/tools/libxc/xc_hvm_build.c b/tools/libxc/xc_hvm_build.c
index f2bd062952..52e935d3e0 100644
--- a/tools/libxc/xc_hvm_build.c
+++ b/tools/libxc/xc_hvm_build.c
@@ -196,7 +196,6 @@ static int set_hvm_info(int xc_handle, uint32_t dom,
static int setup_guest(int xc_handle,
uint32_t dom, int memsize,
char *image, unsigned long image_size,
- unsigned long nr_pages,
vcpu_guest_context_t *ctxt,
unsigned long shared_info_frame,
unsigned int vcpus,
@@ -207,18 +206,13 @@ static int setup_guest(int xc_handle,
unsigned long *store_mfn)
{
xen_pfn_t *page_array = NULL;
- unsigned long count, i;
- unsigned long long ptr;
- xc_mmu_t *mmu = NULL;
-
+ unsigned long i, nr_pages = (unsigned long)memsize << (20 - PAGE_SHIFT);
+ unsigned long shared_page_nr;
shared_info_t *shared_info;
void *e820_page;
-
struct domain_setup_info dsi;
uint64_t v_end;
- unsigned long shared_page_nr;
-
memset(&dsi, 0, sizeof(struct domain_setup_info));
if ( (parseelfimage(image, image_size, &dsi)) != 0 )
@@ -230,7 +224,6 @@ static int setup_guest(int xc_handle,
goto error_out;
}
- /* memsize is in megabytes */
v_end = (unsigned long long)memsize << 20;
IPRINTF("VIRTUAL MEMORY ARRANGEMENT:\n"
@@ -255,53 +248,27 @@ static int setup_guest(int xc_handle,
goto error_out;
}
- if ( xc_get_pfn_list(xc_handle, dom, page_array, nr_pages) != nr_pages )
+ for ( i = 0; i < nr_pages; i++ )
+ page_array[i] = i;
+ for ( i = HVM_BELOW_4G_RAM_END >> PAGE_SHIFT; i < nr_pages; i++ )
+ page_array[i] += HVM_BELOW_4G_MMIO_LENGTH >> PAGE_SHIFT;
+
+ if ( xc_domain_memory_populate_physmap(xc_handle, dom, nr_pages,
+ 0, 0, page_array) )
{
- PERROR("Could not get the page frame list.\n");
+ PERROR("Could not allocate memory for HVM guest.\n");
goto error_out;
}
- /* HVM domains must be put into shadow mode at the start of day. */
- /* XXX *After* xc_get_pfn_list()!! */
- if ( xc_shadow_control(xc_handle, dom, XEN_DOMCTL_SHADOW_OP_ENABLE,
- NULL, 0, NULL,
- XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT |
- XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE |
- XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL,
- NULL) )
+ if ( xc_domain_translate_gpfn_list(xc_handle, dom, nr_pages,
+ page_array, page_array) )
{
- PERROR("Could not enable shadow paging for domain.\n");
+ PERROR("Could not translate addresses of HVM guest.\n");
goto error_out;
- }
+ }
loadelfimage(image, xc_handle, dom, page_array, &dsi);
- if ( (mmu = xc_init_mmu_updates(xc_handle, dom)) == NULL )
- goto error_out;
-
- /* Write the machine->phys table entries. */
- for ( count = 0; count < nr_pages; count++ )
- {
- unsigned long gpfn_count_skip;
-
- ptr = (unsigned long long)page_array[count] << PAGE_SHIFT;
-
- gpfn_count_skip = 0;
-
- /*
- * physical address space from HVM_BELOW_4G_RAM_END to 4G is reserved
- * for PCI devices MMIO. So if HVM has more than HVM_BELOW_4G_RAM_END
- * RAM, memory beyond HVM_BELOW_4G_RAM_END will go to 4G above.
- */
- if ( count >= (HVM_BELOW_4G_RAM_END >> PAGE_SHIFT) )
- gpfn_count_skip = HVM_BELOW_4G_MMIO_LENGTH >> PAGE_SHIFT;
-
- if ( xc_add_mmu_update(xc_handle, mmu,
- ptr | MMU_MACHPHYS_UPDATE,
- count + gpfn_count_skip) )
- goto error_out;
- }
-
if ( set_hvm_info(xc_handle, dom, page_array, vcpus, acpi) )
{
ERROR("Couldn't set hvm info for HVM guest.\n");
@@ -352,22 +319,13 @@ static int setup_guest(int xc_handle,
if ( xc_clear_domain_page(xc_handle, dom, *store_mfn) )
goto error_out;
- /* Send the page update requests down to the hypervisor. */
- if ( xc_finish_mmu_updates(xc_handle, mmu) )
- goto error_out;
-
- free(mmu);
free(page_array);
- /*
- * Initial register values:
- */
ctxt->user_regs.eip = dsi.v_kernentry;
return 0;
error_out:
- free(mmu);
free(page_array);
return -1;
}
@@ -387,8 +345,6 @@ static int xc_hvm_build_internal(int xc_handle,
struct xen_domctl launch_domctl, domctl;
int rc, i;
vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt;
- unsigned long nr_pages;
- xen_capabilities_info_t xen_caps;
if ( (image == NULL) || (image_size == 0) )
{
@@ -396,25 +352,6 @@ static int xc_hvm_build_internal(int xc_handle,
goto error_out;
}
- if ( (rc = xc_version(xc_handle, XENVER_capabilities, &xen_caps)) != 0 )
- {
- PERROR("Failed to get xen version info");
- goto error_out;
- }
-
- if ( !strstr(xen_caps, "hvm") )
- {
- PERROR("CPU doesn't support HVM extensions or "
- "the extensions are not enabled");
- goto error_out;
- }
-
- if ( (nr_pages = xc_get_tot_pages(xc_handle, domid)) < 0 )
- {
- PERROR("Could not find total pages for domain");
- goto error_out;
- }
-
if ( lock_pages(&st_ctxt, sizeof(st_ctxt) ) )
{
PERROR("%s: ctxt mlock failed", __func__);
@@ -430,24 +367,10 @@ static int xc_hvm_build_internal(int xc_handle,
goto error_out;
}
-#if 0
- /* HVM domains must be put into shadow mode at the start of day */
- if ( xc_shadow_control(xc_handle, domid, XEN_DOMCTL_SHADOW_OP_ENABLE,
- NULL, 0, NULL,
- XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT |
- XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE |
- XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL,
- NULL) )
- {
- PERROR("Could not enable shadow paging for domain.\n");
- goto error_out;
- }
-#endif
-
memset(ctxt, 0, sizeof(*ctxt));
-
ctxt->flags = VGCF_HVM_GUEST;
- if ( setup_guest(xc_handle, domid, memsize, image, image_size, nr_pages,
+
+ if ( setup_guest(xc_handle, domid, memsize, image, image_size,
ctxt, domctl.u.getdomaininfo.shared_info_frame,
vcpus, pae, acpi, apic, store_evtchn, store_mfn) < 0)
{
diff --git a/tools/python/xen/xend/XendDomainInfo.py b/tools/python/xen/xend/XendDomainInfo.py
index ae163a3a60..5d9a9a98be 100644
--- a/tools/python/xen/xend/XendDomainInfo.py
+++ b/tools/python/xen/xend/XendDomainInfo.py
@@ -1295,9 +1295,11 @@ class XendDomainInfo:
shadow_cur = xc.shadow_mem_control(self.domid, shadow / 1024)
self.info['shadow_memory'] = shadow_cur
- # initial memory reservation
- xc.domain_memory_increase_reservation(self.domid, reservation, 0,
- 0)
+ # Initial memory reservation
+ if not (self._infoIsSet('image') and
+ sxp.name(self.info['image']) == "hvm"):
+ xc.domain_memory_increase_reservation(
+ self.domid, reservation, 0, 0)
self._createChannels()
diff --git a/tools/python/xen/xend/image.py b/tools/python/xen/xend/image.py
index 0d07fbe928..cd380e02e1 100644
--- a/tools/python/xen/xend/image.py
+++ b/tools/python/xen/xend/image.py
@@ -478,22 +478,12 @@ class X86_HVM_ImageHandler(HVMImageHandler):
def getRequiredAvailableMemory(self, mem_kb):
# Add 8 MiB overhead for QEMU's video RAM.
- return self.getRequiredInitialReservation(mem_kb) + 8192
+ return mem_kb + 8192
def getRequiredInitialReservation(self, mem_kb):
- page_kb = 4
- # This was derived emperically:
- # 2.4 MB overhead per 1024 MB RAM
- # + 4 to avoid low-memory condition
- extra_mb = (2.4/1024) * (mem_kb/1024.0) + 4;
- extra_pages = int( math.ceil( extra_mb*1024 / page_kb ))
- return mem_kb + extra_pages * page_kb
+ return mem_kb
def getRequiredShadowMemory(self, shadow_mem_kb, maxmem_kb):
- # The given value is the configured value -- we need to include the
- # overhead due to getRequiredInitialReservation.
- maxmem_kb = self.getRequiredInitialReservation(maxmem_kb)
-
# 256 pages (1MB) per vcpu,
# plus 1 page per MiB of RAM for the P2M map,
# plus 1 page per MiB of RAM to shadow the resident processes.
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index d8a8f111bc..4b8a124270 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -155,19 +155,12 @@ int arch_domain_create(struct domain *d)
{
l1_pgentry_t gdt_l1e;
int vcpuid, pdpt_order;
- int i;
-
- if ( is_hvm_domain(d) && !hvm_enabled )
- {
- gdprintk(XENLOG_WARNING, "Attempt to create a HVM guest "
- "on a non-VT/AMDV platform.\n");
- return -EINVAL;
- }
+ int i, rc = -ENOMEM;
pdpt_order = get_order_from_bytes(PDPT_L1_ENTRIES * sizeof(l1_pgentry_t));
d->arch.mm_perdomain_pt = alloc_xenheap_pages(pdpt_order);
if ( d->arch.mm_perdomain_pt == NULL )
- goto fail_nomem;
+ goto fail;
memset(d->arch.mm_perdomain_pt, 0, PAGE_SIZE << pdpt_order);
/*
@@ -192,7 +185,7 @@ int arch_domain_create(struct domain *d)
d->arch.mm_perdomain_l3 = alloc_xenheap_page();
if ( (d->arch.mm_perdomain_l2 == NULL) ||
(d->arch.mm_perdomain_l3 == NULL) )
- goto fail_nomem;
+ goto fail;
memset(d->arch.mm_perdomain_l2, 0, PAGE_SIZE);
for ( i = 0; i < (1 << pdpt_order); i++ )
@@ -219,26 +212,41 @@ int arch_domain_create(struct domain *d)
d->arch.ioport_caps =
rangeset_new(d, "I/O Ports", RANGESETF_prettyprint_hex);
if ( d->arch.ioport_caps == NULL )
- goto fail_nomem;
+ goto fail;
if ( (d->shared_info = alloc_xenheap_page()) == NULL )
- goto fail_nomem;
+ goto fail;
memset(d->shared_info, 0, PAGE_SIZE);
share_xen_page_with_guest(
virt_to_page(d->shared_info), d, XENSHARE_writable);
}
+ if ( is_hvm_domain(d) )
+ {
+ if ( !hvm_enabled )
+ {
+ gdprintk(XENLOG_WARNING, "Attempt to create a HVM guest "
+ "on a non-VT/AMDV platform.\n");
+ rc = -EINVAL;
+ goto fail;
+ }
+
+ rc = shadow_enable(d, SHM2_refcounts|SHM2_translate|SHM2_external);
+ if ( rc != 0 )
+ goto fail;
+ }
+
return 0;
- fail_nomem:
+ fail:
free_xenheap_page(d->shared_info);
#ifdef __x86_64__
free_xenheap_page(d->arch.mm_perdomain_l2);
free_xenheap_page(d->arch.mm_perdomain_l3);
#endif
free_xenheap_pages(d->arch.mm_perdomain_pt, pdpt_order);
- return -ENOMEM;
+ return rc;
}
void arch_domain_destroy(struct domain *d)
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index f900650729..362365dd2f 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -224,45 +224,18 @@ long arch_do_domctl(
spin_lock(&d->page_alloc_lock);
- if ( is_hvm_domain(d) && shadow_mode_translate(d) )
+ list_ent = d->page_list.next;
+ for ( i = 0; (i < max_pfns) && (list_ent != &d->page_list); i++ )
{
- /* HVM domain: scan P2M to get guaranteed physmap order. */
- for ( i = 0, gmfn = 0;
- (i < max_pfns) && (i < d->tot_pages);
- i++, gmfn++ )
+ mfn = page_to_mfn(list_entry(
+ list_ent, struct page_info, list));
+ if ( copy_to_guest_offset(domctl->u.getmemlist.buffer,
+ i, &mfn, 1) )
{
- if ( unlikely(i == (HVM_BELOW_4G_MMIO_START>>PAGE_SHIFT)) )
- {
- /* skip MMIO range */
- gmfn += HVM_BELOW_4G_MMIO_LENGTH >> PAGE_SHIFT;
- }
- mfn = gmfn_to_mfn(d, gmfn);
- if ( copy_to_guest_offset(domctl->u.getmemlist.buffer,
- i, &mfn, 1) )
- {
- ret = -EFAULT;
- break;
- }
- }
- }
- else
- {
- /* Other guests: return in order of ownership list. */
- list_ent = d->page_list.next;
- for ( i = 0;
- (i < max_pfns) && (list_ent != &d->page_list);
- i++ )
- {
- mfn = page_to_mfn(list_entry(
- list_ent, struct page_info, list));
- if ( copy_to_guest_offset(domctl->u.getmemlist.buffer,
- i, &mfn, 1) )
- {
- ret = -EFAULT;
- break;
- }
- list_ent = mfn_to_page(mfn)->list.next;
+ ret = -EFAULT;
+ break;
}
+ list_ent = mfn_to_page(mfn)->list.next;
}
spin_unlock(&d->page_alloc_lock);
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 2e48164ca7..aa217975ed 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -2461,7 +2461,7 @@ static void sh_new_mode(struct domain *d, u32 new_mode)
sh_update_paging_modes(v);
}
-static int shadow_enable(struct domain *d, u32 mode)
+int shadow_enable(struct domain *d, u32 mode)
/* Turn on "permanent" shadow features: external, translate, refcount.
* Can only be called once on a domain, and these features cannot be
* disabled.
@@ -3092,6 +3092,8 @@ int shadow_domctl(struct domain *d,
if ( shadow_mode_log_dirty(d) )
if ( (rc = shadow_log_dirty_disable(d)) != 0 )
return rc;
+ if ( is_hvm_domain(d) )
+ return -EINVAL;
if ( d->arch.shadow.mode & SHM2_enable )
if ( (rc = shadow_test_disable(d)) != 0 )
return rc;
diff --git a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
index 1cf2e5041d..292ed43916 100644
--- a/xen/include/asm-x86/shadow.h
+++ b/xen/include/asm-x86/shadow.h
@@ -313,6 +313,9 @@ static inline int shadow_guest_paging_levels(struct vcpu *v)
/**************************************************************************/
/* Entry points into the shadow code */
+/* Enable arbitrary shadow mode. */
+int shadow_enable(struct domain *d, u32 mode);
+
/* Turning on shadow test mode */
int shadow_test_enable(struct domain *d);