diff options
author | Tim Deegan <tim@xen.org> | 2012-03-08 16:40:05 +0000 |
---|---|---|
committer | Tim Deegan <tim@xen.org> | 2012-03-08 16:40:05 +0000 |
commit | 770d1e858de56ba8f2e0d7e45c08f48d599528e5 (patch) | |
tree | fcae677231c44b4c20c0816f41cbd51074a5412e | |
parent | 7a3de767373545388a9fded238b3450c5b21066d (diff) | |
download | xen-770d1e858de56ba8f2e0d7e45c08f48d599528e5.tar.gz xen-770d1e858de56ba8f2e0d7e45c08f48d599528e5.tar.bz2 xen-770d1e858de56ba8f2e0d7e45c08f48d599528e5.zip |
Use a reserved pfn in the guest address space to store mem event rings
This solves a long-standing issue in which the pages backing these rings were
pages belonging to dom0 user-space processes. Thus, if the process would die
unexpectedly, Xen would keep posting events to a page now belonging to some
other process.
We update all API-consumers in tree (xenpaging and xen-access).
This is an API/ABI change, so please speak up if it breaks your assumptions.
The patch touches tools, hypervisor x86/hvm bits, and hypervisor x86/mm bits.
Signed-off-by: Andres Lagar-Cavilla <andres@lagarcavilla.org>
Acked-by: Tim Deegan <tim@xen.org>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
Committed-by: Tim Deegan <tim@xen.org>
-rw-r--r-- | tools/libxc/xc_domain_restore.c | 42 | ||||
-rw-r--r-- | tools/libxc/xc_domain_save.c | 36 | ||||
-rw-r--r-- | tools/libxc/xc_hvm_build.c | 21 | ||||
-rw-r--r-- | tools/libxc/xc_mem_access.c | 6 | ||||
-rw-r--r-- | tools/libxc/xc_mem_event.c | 3 | ||||
-rw-r--r-- | tools/libxc/xc_mem_paging.c | 6 | ||||
-rw-r--r-- | tools/libxc/xenctrl.h | 8 | ||||
-rw-r--r-- | tools/libxc/xg_save_restore.h | 4 | ||||
-rw-r--r-- | tools/tests/xen-access/xen-access.c | 83 | ||||
-rw-r--r-- | tools/xenpaging/xenpaging.c | 52 | ||||
-rw-r--r-- | xen/arch/x86/mm/mem_event.c | 50 | ||||
-rw-r--r-- | xen/include/public/domctl.h | 1 | ||||
-rw-r--r-- | xen/include/public/hvm/params.h | 7 | ||||
-rw-r--r-- | xen/include/xen/sched.h | 1 |
14 files changed, 214 insertions, 106 deletions
diff --git a/tools/libxc/xc_domain_restore.c b/tools/libxc/xc_domain_restore.c index 06bea86718..3e4d5183a5 100644 --- a/tools/libxc/xc_domain_restore.c +++ b/tools/libxc/xc_domain_restore.c @@ -677,6 +677,9 @@ typedef struct { int max_vcpu_id; uint64_t vcpumap; uint64_t identpt; + uint64_t paging_ring_pfn; + uint64_t access_ring_pfn; + uint64_t sharing_ring_pfn; uint64_t vm86_tss; uint64_t console_pfn; uint64_t acpi_ioport_location; @@ -750,6 +753,39 @@ static int pagebuf_get_one(xc_interface *xch, struct restore_ctx *ctx, // DPRINTF("EPT identity map address: %llx\n", buf->identpt); return pagebuf_get_one(xch, ctx, buf, fd, dom); + case XC_SAVE_ID_HVM_PAGING_RING_PFN: + /* Skip padding 4 bytes then read the paging ring location. */ + if ( RDEXACT(fd, &buf->paging_ring_pfn, sizeof(uint32_t)) || + RDEXACT(fd, &buf->paging_ring_pfn, sizeof(uint64_t)) ) + { + PERROR("error read the paging ring pfn"); + return -1; + } + // DPRINTF("paging ring pfn address: %llx\n", buf->paging_ring_pfn); + return pagebuf_get_one(xch, ctx, buf, fd, dom); + + case XC_SAVE_ID_HVM_ACCESS_RING_PFN: + /* Skip padding 4 bytes then read the mem access ring location. */ + if ( RDEXACT(fd, &buf->access_ring_pfn, sizeof(uint32_t)) || + RDEXACT(fd, &buf->access_ring_pfn, sizeof(uint64_t)) ) + { + PERROR("error read the access ring pfn"); + return -1; + } + // DPRINTF("access ring pfn address: %llx\n", buf->access_ring_pfn); + return pagebuf_get_one(xch, ctx, buf, fd, dom); + + case XC_SAVE_ID_HVM_SHARING_RING_PFN: + /* Skip padding 4 bytes then read the sharing ring location. */ + if ( RDEXACT(fd, &buf->sharing_ring_pfn, sizeof(uint32_t)) || + RDEXACT(fd, &buf->sharing_ring_pfn, sizeof(uint64_t)) ) + { + PERROR("error read the sharing ring pfn"); + return -1; + } + // DPRINTF("sharing ring pfn address: %llx\n", buf->sharing_ring_pfn); + return pagebuf_get_one(xch, ctx, buf, fd, dom); + case XC_SAVE_ID_HVM_VM86_TSS: /* Skip padding 4 bytes then read the vm86 TSS location. 
*/ if ( RDEXACT(fd, &buf->vm86_tss, sizeof(uint32_t)) || @@ -1460,6 +1496,12 @@ int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom, /* should this be deferred? does it change? */ if ( pagebuf.identpt ) xc_set_hvm_param(xch, dom, HVM_PARAM_IDENT_PT, pagebuf.identpt); + if ( pagebuf.paging_ring_pfn ) + xc_set_hvm_param(xch, dom, HVM_PARAM_PAGING_RING_PFN, pagebuf.paging_ring_pfn); + if ( pagebuf.access_ring_pfn ) + xc_set_hvm_param(xch, dom, HVM_PARAM_ACCESS_RING_PFN, pagebuf.access_ring_pfn); + if ( pagebuf.sharing_ring_pfn ) + xc_set_hvm_param(xch, dom, HVM_PARAM_SHARING_RING_PFN, pagebuf.sharing_ring_pfn); if ( pagebuf.vm86_tss ) xc_set_hvm_param(xch, dom, HVM_PARAM_VM86_TSS, pagebuf.vm86_tss); if ( pagebuf.console_pfn ) diff --git a/tools/libxc/xc_domain_save.c b/tools/libxc/xc_domain_save.c index 9fa39b2f1c..a9216ddf71 100644 --- a/tools/libxc/xc_domain_save.c +++ b/tools/libxc/xc_domain_save.c @@ -1639,6 +1639,42 @@ int xc_domain_save(xc_interface *xch, int io_fd, uint32_t dom, uint32_t max_iter goto out; } + chunk.id = XC_SAVE_ID_HVM_PAGING_RING_PFN; + chunk.data = 0; + xc_get_hvm_param(xch, dom, HVM_PARAM_PAGING_RING_PFN, + (unsigned long *)&chunk.data); + + if ( (chunk.data != 0) && + wrexact(io_fd, &chunk, sizeof(chunk)) ) + { + PERROR("Error when writing the paging ring pfn for guest"); + goto out; + } + + chunk.id = XC_SAVE_ID_HVM_ACCESS_RING_PFN; + chunk.data = 0; + xc_get_hvm_param(xch, dom, HVM_PARAM_ACCESS_RING_PFN, + (unsigned long *)&chunk.data); + + if ( (chunk.data != 0) && + wrexact(io_fd, &chunk, sizeof(chunk)) ) + { + PERROR("Error when writing the access ring pfn for guest"); + goto out; + } + + chunk.id = XC_SAVE_ID_HVM_SHARING_RING_PFN; + chunk.data = 0; + xc_get_hvm_param(xch, dom, HVM_PARAM_SHARING_RING_PFN, + (unsigned long *)&chunk.data); + + if ( (chunk.data != 0) && + wrexact(io_fd, &chunk, sizeof(chunk)) ) + { + PERROR("Error when writing the sharing ring pfn for guest"); + goto out; + } + chunk.id = 
XC_SAVE_ID_HVM_VM86_TSS; chunk.data = 0; xc_get_hvm_param(xch, dom, HVM_PARAM_VM86_TSS, diff --git a/tools/libxc/xc_hvm_build.c b/tools/libxc/xc_hvm_build.c index 780b23f5b2..696c012291 100644 --- a/tools/libxc/xc_hvm_build.c +++ b/tools/libxc/xc_hvm_build.c @@ -38,12 +38,15 @@ #define SUPERPAGE_1GB_SHIFT 18 #define SUPERPAGE_1GB_NR_PFNS (1UL << SUPERPAGE_1GB_SHIFT) -#define SPECIALPAGE_BUFIOREQ 0 -#define SPECIALPAGE_XENSTORE 1 -#define SPECIALPAGE_IOREQ 2 -#define SPECIALPAGE_IDENT_PT 3 -#define SPECIALPAGE_CONSOLE 4 -#define NR_SPECIAL_PAGES 5 +#define SPECIALPAGE_PAGING 0 +#define SPECIALPAGE_ACCESS 1 +#define SPECIALPAGE_SHARING 2 +#define SPECIALPAGE_BUFIOREQ 3 +#define SPECIALPAGE_XENSTORE 4 +#define SPECIALPAGE_IOREQ 5 +#define SPECIALPAGE_IDENT_PT 6 +#define SPECIALPAGE_CONSOLE 7 +#define NR_SPECIAL_PAGES 8 #define special_pfn(x) (0xff000u - NR_SPECIAL_PAGES + (x)) static void build_hvm_info(void *hvm_info_page, uint64_t mem_size, @@ -356,6 +359,12 @@ static int setup_guest(xc_interface *xch, special_pfn(SPECIALPAGE_IOREQ)); xc_set_hvm_param(xch, dom, HVM_PARAM_CONSOLE_PFN, special_pfn(SPECIALPAGE_CONSOLE)); + xc_set_hvm_param(xch, dom, HVM_PARAM_PAGING_RING_PFN, + special_pfn(SPECIALPAGE_PAGING)); + xc_set_hvm_param(xch, dom, HVM_PARAM_ACCESS_RING_PFN, + special_pfn(SPECIALPAGE_ACCESS)); + xc_set_hvm_param(xch, dom, HVM_PARAM_SHARING_RING_PFN, + special_pfn(SPECIALPAGE_SHARING)); /* * Identity-map page table is required for running with CR0.PG=0 when diff --git a/tools/libxc/xc_mem_access.c b/tools/libxc/xc_mem_access.c index a3786025f5..01fcf41a40 100644 --- a/tools/libxc/xc_mem_access.c +++ b/tools/libxc/xc_mem_access.c @@ -25,7 +25,7 @@ int xc_mem_access_enable(xc_interface *xch, domid_t domain_id, - uint32_t *port, void *ring_page) + uint32_t *port) { if ( !port ) { @@ -36,7 +36,7 @@ int xc_mem_access_enable(xc_interface *xch, domid_t domain_id, return xc_mem_event_control(xch, domain_id, XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE, 
XEN_DOMCTL_MEM_EVENT_OP_ACCESS, - port, ring_page); + port); } int xc_mem_access_disable(xc_interface *xch, domid_t domain_id) @@ -44,7 +44,7 @@ int xc_mem_access_disable(xc_interface *xch, domid_t domain_id) return xc_mem_event_control(xch, domain_id, XEN_DOMCTL_MEM_EVENT_OP_ACCESS_DISABLE, XEN_DOMCTL_MEM_EVENT_OP_ACCESS, - NULL, NULL); + NULL); } int xc_mem_access_resume(xc_interface *xch, domid_t domain_id, unsigned long gfn) diff --git a/tools/libxc/xc_mem_event.c b/tools/libxc/xc_mem_event.c index fcca47c764..d43a0af33e 100644 --- a/tools/libxc/xc_mem_event.c +++ b/tools/libxc/xc_mem_event.c @@ -24,7 +24,7 @@ #include "xc_private.h" int xc_mem_event_control(xc_interface *xch, domid_t domain_id, unsigned int op, - unsigned int mode, uint32_t *port, void *ring_page) + unsigned int mode, uint32_t *port) { DECLARE_DOMCTL; int rc; @@ -33,7 +33,6 @@ int xc_mem_event_control(xc_interface *xch, domid_t domain_id, unsigned int op, domctl.domain = domain_id; domctl.u.mem_event_op.op = op; domctl.u.mem_event_op.mode = mode; - domctl.u.mem_event_op.ring_addr = (unsigned long) ring_page; rc = do_domctl(xch, &domctl); if ( !rc && port ) diff --git a/tools/libxc/xc_mem_paging.c b/tools/libxc/xc_mem_paging.c index f8eaa870b3..ab8c5ba5c6 100644 --- a/tools/libxc/xc_mem_paging.c +++ b/tools/libxc/xc_mem_paging.c @@ -25,7 +25,7 @@ int xc_mem_paging_enable(xc_interface *xch, domid_t domain_id, - uint32_t *port, void *ring_page) + uint32_t *port) { if ( !port ) { @@ -36,7 +36,7 @@ int xc_mem_paging_enable(xc_interface *xch, domid_t domain_id, return xc_mem_event_control(xch, domain_id, XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE, XEN_DOMCTL_MEM_EVENT_OP_PAGING, - port, ring_page); + port); } int xc_mem_paging_disable(xc_interface *xch, domid_t domain_id) @@ -44,7 +44,7 @@ int xc_mem_paging_disable(xc_interface *xch, domid_t domain_id) return xc_mem_event_control(xch, domain_id, XEN_DOMCTL_MEM_EVENT_OP_PAGING_DISABLE, XEN_DOMCTL_MEM_EVENT_OP_PAGING, - NULL, NULL); + NULL); } int 
xc_mem_paging_nominate(xc_interface *xch, domid_t domain_id, unsigned long gfn) diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h index 823d47a2ac..b4dd9e80e0 100644 --- a/tools/libxc/xenctrl.h +++ b/tools/libxc/xenctrl.h @@ -1892,13 +1892,12 @@ int xc_tmem_restore_extra(xc_interface *xch, int dom, int fd); * mem_event operations */ int xc_mem_event_control(xc_interface *xch, domid_t domain_id, unsigned int op, - unsigned int mode, uint32_t *port, void *ring_page); + unsigned int mode, uint32_t *port); int xc_mem_event_memop(xc_interface *xch, domid_t domain_id, unsigned int op, unsigned int mode, uint64_t gfn, void *buffer); -int xc_mem_paging_enable(xc_interface *xch, domid_t domain_id, - uint32_t *port, void *ring_page); +int xc_mem_paging_enable(xc_interface *xch, domid_t domain_id, uint32_t *port); int xc_mem_paging_disable(xc_interface *xch, domid_t domain_id); int xc_mem_paging_nominate(xc_interface *xch, domid_t domain_id, unsigned long gfn); @@ -1907,8 +1906,7 @@ int xc_mem_paging_prep(xc_interface *xch, domid_t domain_id, unsigned long gfn); int xc_mem_paging_load(xc_interface *xch, domid_t domain_id, unsigned long gfn, void *buffer); -int xc_mem_access_enable(xc_interface *xch, domid_t domain_id, - uint32_t *port, void *ring_page); +int xc_mem_access_enable(xc_interface *xch, domid_t domain_id, uint32_t *port); int xc_mem_access_disable(xc_interface *xch, domid_t domain_id); int xc_mem_access_resume(xc_interface *xch, domid_t domain_id, unsigned long gfn); diff --git a/tools/libxc/xg_save_restore.h b/tools/libxc/xg_save_restore.h index 6286b6814a..89f3504911 100644 --- a/tools/libxc/xg_save_restore.h +++ b/tools/libxc/xg_save_restore.h @@ -254,6 +254,10 @@ #define XC_SAVE_ID_COMPRESSED_DATA -12 /* Marker to indicate arrival of compressed data */ #define XC_SAVE_ID_ENABLE_COMPRESSION -13 /* Marker to enable compression logic at receiver side */ #define XC_SAVE_ID_HVM_GENERATION_ID_ADDR -14 +/* Markers for the pfn's hosting these mem event rings 
*/ +#define XC_SAVE_ID_HVM_PAGING_RING_PFN -15 +#define XC_SAVE_ID_HVM_ACCESS_RING_PFN -16 +#define XC_SAVE_ID_HVM_SHARING_RING_PFN -17 /* ** We process save/restore/migrate in batches of pages; the below diff --git a/tools/tests/xen-access/xen-access.c b/tools/tests/xen-access/xen-access.c index d297529e42..fce886432a 100644 --- a/tools/tests/xen-access/xen-access.c +++ b/tools/tests/xen-access/xen-access.c @@ -166,36 +166,13 @@ int xc_wait_for_event_or_timeout(xc_interface *xch, xc_evtchn *xce, unsigned lon err: return -errno; } - -static void *init_page(void) -{ - void *buffer; - int ret; - - /* Allocated page memory */ - ret = posix_memalign(&buffer, PAGE_SIZE, PAGE_SIZE); - if ( ret != 0 ) - goto out_alloc; - - /* Lock buffer in memory so it can't be paged out */ - ret = mlock(buffer, PAGE_SIZE); - if ( ret != 0 ) - goto out_lock; - - return buffer; - - munlock(buffer, PAGE_SIZE); - out_lock: - free(buffer); - out_alloc: - return NULL; -} xenaccess_t *xenaccess_init(xc_interface **xch_r, domid_t domain_id) { xenaccess_t *xenaccess; xc_interface *xch; int rc; + unsigned long ring_pfn, mmap_pfn; xch = xc_interface_open(NULL, NULL, 0); if ( !xch ) @@ -214,28 +191,42 @@ xenaccess_t *xenaccess_init(xc_interface **xch_r, domid_t domain_id) /* Set domain id */ xenaccess->mem_event.domain_id = domain_id; - /* Initialise ring page */ - xenaccess->mem_event.ring_page = init_page(); - if ( xenaccess->mem_event.ring_page == NULL ) - { - ERROR("Error initialising ring page"); - goto err; - } - - - /* Initialise ring */ - SHARED_RING_INIT((mem_event_sring_t *)xenaccess->mem_event.ring_page); - BACK_RING_INIT(&xenaccess->mem_event.back_ring, - (mem_event_sring_t *)xenaccess->mem_event.ring_page, - PAGE_SIZE); - /* Initialise lock */ mem_event_ring_lock_init(&xenaccess->mem_event); + /* Map the ring page */ + xc_get_hvm_param(xch, xenaccess->mem_event.domain_id, + HVM_PARAM_ACCESS_RING_PFN, &ring_pfn); + mmap_pfn = ring_pfn; + xenaccess->mem_event.ring_page = + 
xc_map_foreign_batch(xch, xenaccess->mem_event.domain_id, + PROT_READ | PROT_WRITE, &mmap_pfn, 1); + if ( mmap_pfn & XEN_DOMCTL_PFINFO_XTAB ) + { + /* Map failed, populate ring page */ + rc = xc_domain_populate_physmap_exact(xenaccess->xc_handle, + xenaccess->mem_event.domain_id, + 1, 0, 0, &ring_pfn); + if ( rc != 0 ) + { + PERROR("Failed to populate ring gfn\n"); + goto err; + } + + mmap_pfn = ring_pfn; + xenaccess->mem_event.ring_page = + xc_map_foreign_batch(xch, xenaccess->mem_event.domain_id, + PROT_READ | PROT_WRITE, &mmap_pfn, 1); + if ( mmap_pfn & XEN_DOMCTL_PFINFO_XTAB ) + { + PERROR("Could not map the ring page\n"); + goto err; + } + } + /* Initialise Xen */ rc = xc_mem_access_enable(xenaccess->xc_handle, xenaccess->mem_event.domain_id, - &xenaccess->mem_event.evtchn_port, - xenaccess->mem_event.ring_page); + &xenaccess->mem_event.evtchn_port); if ( rc != 0 ) { switch ( errno ) { @@ -272,6 +263,12 @@ xenaccess_t *xenaccess_init(xc_interface **xch_r, domid_t domain_id) xenaccess->mem_event.port = rc; + /* Initialise ring */ + SHARED_RING_INIT((mem_event_sring_t *)xenaccess->mem_event.ring_page); + BACK_RING_INIT(&xenaccess->mem_event.back_ring, + (mem_event_sring_t *)xenaccess->mem_event.ring_page, + PAGE_SIZE); + /* Get platform info */ xenaccess->platform_info = malloc(sizeof(xc_platform_info_t)); if ( xenaccess->platform_info == NULL ) @@ -316,8 +313,7 @@ xenaccess_t *xenaccess_init(xc_interface **xch_r, domid_t domain_id) { if ( xenaccess->mem_event.ring_page ) { - munlock(xenaccess->mem_event.ring_page, PAGE_SIZE); - free(xenaccess->mem_event.ring_page); + munmap(xenaccess->mem_event.ring_page, PAGE_SIZE); } free(xenaccess->platform_info); @@ -337,6 +333,7 @@ int xenaccess_teardown(xc_interface *xch, xenaccess_t *xenaccess) return 0; /* Tear down domain xenaccess in Xen */ + munmap(xenaccess->mem_event.ring_page, PAGE_SIZE); rc = xc_mem_access_disable(xenaccess->xc_handle, xenaccess->mem_event.domain_id); if ( rc != 0 ) { diff --git 
a/tools/xenpaging/xenpaging.c b/tools/xenpaging/xenpaging.c index 06ccdd9960..b9ba00fa11 100644 --- a/tools/xenpaging/xenpaging.c +++ b/tools/xenpaging/xenpaging.c @@ -281,6 +281,7 @@ static struct xenpaging *xenpaging_init(int argc, char *argv[]) xentoollog_logger *dbg = NULL; char *p; int rc; + unsigned long ring_pfn, mmap_pfn; /* Allocate memory */ paging = calloc(1, sizeof(struct xenpaging)); @@ -337,24 +338,39 @@ static struct xenpaging *xenpaging_init(int argc, char *argv[]) goto err; } - /* Initialise ring page */ - paging->mem_event.ring_page = init_page(); - if ( paging->mem_event.ring_page == NULL ) + /* Map the ring page */ + xc_get_hvm_param(xch, paging->mem_event.domain_id, + HVM_PARAM_PAGING_RING_PFN, &ring_pfn); + mmap_pfn = ring_pfn; + paging->mem_event.ring_page = + xc_map_foreign_batch(xch, paging->mem_event.domain_id, + PROT_READ | PROT_WRITE, &mmap_pfn, 1); + if ( mmap_pfn & XEN_DOMCTL_PFINFO_XTAB ) { - PERROR("Error initialising ring page"); - goto err; - } + /* Map failed, populate ring page */ + rc = xc_domain_populate_physmap_exact(paging->xc_handle, + paging->mem_event.domain_id, + 1, 0, 0, &ring_pfn); + if ( rc != 0 ) + { + PERROR("Failed to populate ring gfn\n"); + goto err; + } - /* Initialise ring */ - SHARED_RING_INIT((mem_event_sring_t *)paging->mem_event.ring_page); - BACK_RING_INIT(&paging->mem_event.back_ring, - (mem_event_sring_t *)paging->mem_event.ring_page, - PAGE_SIZE); + mmap_pfn = ring_pfn; + paging->mem_event.ring_page = + xc_map_foreign_batch(xch, paging->mem_event.domain_id, + PROT_READ | PROT_WRITE, &mmap_pfn, 1); + if ( mmap_pfn & XEN_DOMCTL_PFINFO_XTAB ) + { + PERROR("Could not map the ring page\n"); + goto err; + } + } /* Initialise Xen */ rc = xc_mem_paging_enable(xch, paging->mem_event.domain_id, - &paging->mem_event.evtchn_port, - paging->mem_event.ring_page); + &paging->mem_event.evtchn_port); if ( rc != 0 ) { switch ( errno ) { @@ -394,6 +410,12 @@ static struct xenpaging *xenpaging_init(int argc, char *argv[]) 
paging->mem_event.port = rc; + /* Initialise ring */ + SHARED_RING_INIT((mem_event_sring_t *)paging->mem_event.ring_page); + BACK_RING_INIT(&paging->mem_event.back_ring, + (mem_event_sring_t *)paging->mem_event.ring_page, + PAGE_SIZE); + /* Get max_pages from guest if not provided via cmdline */ if ( !paging->max_pages ) { @@ -469,8 +491,7 @@ static struct xenpaging *xenpaging_init(int argc, char *argv[]) if ( paging->mem_event.ring_page ) { - munlock(paging->mem_event.ring_page, PAGE_SIZE); - free(paging->mem_event.ring_page); + munmap(paging->mem_event.ring_page, PAGE_SIZE); } free(dom_path); @@ -495,6 +516,7 @@ static void xenpaging_teardown(struct xenpaging *paging) paging->xc_handle = NULL; /* Tear down domain paging in Xen */ + munmap(paging->mem_event.ring_page, PAGE_SIZE); rc = xc_mem_paging_disable(xch, paging->mem_event.domain_id); if ( rc != 0 ) { diff --git a/xen/arch/x86/mm/mem_event.c b/xen/arch/x86/mm/mem_event.c index 9a8b26b07d..b6dde8d0de 100644 --- a/xen/arch/x86/mm/mem_event.c +++ b/xen/arch/x86/mm/mem_event.c @@ -44,16 +44,11 @@ static int mem_event_enable( xen_domctl_mem_event_op_t *mec, struct mem_event_domain *med, int pause_flag, + int param, xen_event_channel_notification_t notification_fn) { int rc; - struct domain *dom_mem_event = current->domain; - struct vcpu *v = current; - unsigned long ring_addr = mec->ring_addr; - l1_pgentry_t l1e; - unsigned long ring_gfn = 0; /* gcc ... */ - p2m_type_t p2mt; - mfn_t ring_mfn; + unsigned long ring_gfn = d->arch.hvm_domain.params[param]; /* Only one helper at a time. If the helper crashed, * the ring is in an undefined state and so is the guest. 
@@ -61,22 +56,18 @@ static int mem_event_enable( if ( med->ring_page ) return -EBUSY; - /* Get MFN of ring page */ - guest_get_eff_l1e(v, ring_addr, &l1e); - ring_gfn = l1e_get_pfn(l1e); - ring_mfn = get_gfn(dom_mem_event, ring_gfn, &p2mt); - - if ( unlikely(!mfn_valid(mfn_x(ring_mfn))) ) - { - put_gfn(dom_mem_event, ring_gfn); - return -EINVAL; - } + /* The parameter defaults to zero, and it should be + * set to something */ + if ( ring_gfn == 0 ) + return -ENOSYS; mem_event_ring_lock_init(med); + mem_event_ring_lock(med); - /* Map ring page */ - med->ring_page = map_domain_page(mfn_x(ring_mfn)); - put_gfn(dom_mem_event, ring_gfn); + rc = prepare_ring_for_helper(d, ring_gfn, &med->ring_pg_struct, + &med->ring_page); + if ( rc < 0 ) + goto err; /* Set the number of currently blocked vCPUs to 0. */ med->blocked = 0; @@ -101,11 +92,13 @@ static int mem_event_enable( /* Initialize the last-chance wait queue. */ init_waitqueue_head(&med->wq); + mem_event_ring_unlock(med); return 0; err: - unmap_domain_page(med->ring_page); - med->ring_page = NULL; + destroy_ring_for_helper(&med->ring_page, + med->ring_pg_struct); + mem_event_ring_unlock(med); return rc; } @@ -221,9 +214,6 @@ static int mem_event_disable(struct domain *d, struct mem_event_domain *med) /* Free domU's event channel and leave the other one unbound */ free_xen_event_channel(d->vcpu[0], med->xen_port); - - unmap_domain_page(med->ring_page); - med->ring_page = NULL; /* Unblock all vCPUs */ for_each_vcpu ( d, v ) @@ -235,6 +225,8 @@ static int mem_event_disable(struct domain *d, struct mem_event_domain *med) } } + destroy_ring_for_helper(&med->ring_page, + med->ring_pg_struct); mem_event_ring_unlock(med); } @@ -548,7 +540,9 @@ int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec, if ( p2m->pod.entry_count ) break; - rc = mem_event_enable(d, mec, med, _VPF_mem_paging, mem_paging_notification); + rc = mem_event_enable(d, mec, med, _VPF_mem_paging, + HVM_PARAM_PAGING_RING_PFN, + 
mem_paging_notification); } break; @@ -584,7 +578,9 @@ int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec, if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ) break; - rc = mem_event_enable(d, mec, med, _VPF_mem_access, mem_access_notification); + rc = mem_event_enable(d, mec, med, _VPF_mem_access, + HVM_PARAM_ACCESS_RING_PFN, + mem_access_notification); } break; diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h index 941acc3590..a3ed8d76be 100644 --- a/xen/include/public/domctl.h +++ b/xen/include/public/domctl.h @@ -748,7 +748,6 @@ struct xen_domctl_mem_event_op { uint32_t mode; /* XEN_DOMCTL_MEM_EVENT_OP_* */ uint32_t port; /* OUT: event channel for ring */ - uint64_aligned_t ring_addr; /* IN: Virtual address of ring page */ }; typedef struct xen_domctl_mem_event_op xen_domctl_mem_event_op_t; DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_event_op_t); diff --git a/xen/include/public/hvm/params.h b/xen/include/public/hvm/params.h index 6699788eae..55c1b578f7 100644 --- a/xen/include/public/hvm/params.h +++ b/xen/include/public/hvm/params.h @@ -142,6 +142,11 @@ /* Boolean: Enable nestedhvm (hvm only) */ #define HVM_PARAM_NESTEDHVM 24 -#define HVM_NR_PARAMS 27 +/* Params for the mem event rings */ +#define HVM_PARAM_PAGING_RING_PFN 27 +#define HVM_PARAM_ACCESS_RING_PFN 28 +#define HVM_PARAM_SHARING_RING_PFN 29 + +#define HVM_NR_PARAMS 30 #endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */ diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h index 8c5931368d..65219cf988 100644 --- a/xen/include/xen/sched.h +++ b/xen/include/xen/sched.h @@ -192,6 +192,7 @@ struct mem_event_domain unsigned char target_producers; /* shared ring page */ void *ring_page; + struct page_info *ring_pg_struct; /* front-end ring */ mem_event_front_ring_t front_ring; /* event channel port (vcpu0 only) */ |