diff options
author | Ian Campbell <ian.campbell@citrix.com> | 2010-10-18 16:54:42 +0100 |
---|---|---|
committer | Ian Campbell <ian.campbell@citrix.com> | 2010-10-18 16:54:42 +0100 |
commit | 2674a296982598b97ffdd90fa407e4cdd1df3e13 (patch) | |
tree | ed8d08336c8bc9ea7a82ba3975d5074e1766b185 /tools/libxc/xc_domain.c | |
parent | f5c808b428244f42b2310ddf173c624d9a2a842c (diff) | |
download | xen-2674a296982598b97ffdd90fa407e4cdd1df3e13.tar.gz xen-2674a296982598b97ffdd90fa407e4cdd1df3e13.tar.bz2 xen-2674a296982598b97ffdd90fa407e4cdd1df3e13.zip |
libxc: wrappers for XENMEM {increase,decrease}_reservation and populate_physmap
Currently the wrappers for these hypercalls swallow partial success
and return failure to the caller.
In order to use these functions more widely, instead of open-coding
uses of XENMEM_* and xc_memory_op, add variants which return the actual
hypercall result.
Therefore add the following functions:
xc_domain_increase_reservation
xc_domain_decrease_reservation
xc_domain_populate_physmap
and implement the existing semantics using these new functions as
xc_domain_increase_reservation_exact
xc_domain_decrease_reservation_exact
xc_domain_populate_physmap_exact
replacing the existing xc_domain_memory_* functions.
Use these new functions to replace all open-coded uses of
XENMEM_increase_reservation, XENMEM_decrease_reservation and
XENMEM_populate_physmap.
Also rename xc_domain_memory_*_pod_target to xc_domain_*_pod_target
for consistency.
Temporarily add a compatibility macro for
xc_domain_memory_populate_physmap to allow time for qemu to catch up.
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Signed-off-by: Ian Jackson <ian.jackson@eu.citrix.com>
Diffstat (limited to 'tools/libxc/xc_domain.c')
-rw-r--r-- | tools/libxc/xc_domain.c | 150 |
1 files changed, 98 insertions, 52 deletions
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c index cca8165a63..18a346d960 100644 --- a/tools/libxc/xc_domain.c +++ b/tools/libxc/xc_domain.c @@ -579,12 +579,12 @@ int xc_domain_get_tsc_info(xc_interface *xch, } -int xc_domain_memory_increase_reservation(xc_interface *xch, - uint32_t domid, - unsigned long nr_extents, - unsigned int extent_order, - unsigned int mem_flags, - xen_pfn_t *extent_start) +int xc_domain_increase_reservation(xc_interface *xch, + uint32_t domid, + unsigned long nr_extents, + unsigned int extent_order, + unsigned int mem_flags, + xen_pfn_t *extent_start) { int err; struct xen_memory_reservation reservation = { @@ -598,6 +598,22 @@ int xc_domain_memory_increase_reservation(xc_interface *xch, set_xen_guest_handle(reservation.extent_start, extent_start); err = xc_memory_op(xch, XENMEM_increase_reservation, &reservation); + + return err; +} + +int xc_domain_increase_reservation_exact(xc_interface *xch, + uint32_t domid, + unsigned long nr_extents, + unsigned int extent_order, + unsigned int mem_flags, + xen_pfn_t *extent_start) +{ + int err; + + err = xc_domain_increase_reservation(xch, domid, nr_extents, + extent_order, mem_flags, extent_start); + if ( err == nr_extents ) return 0; @@ -613,11 +629,11 @@ int xc_domain_memory_increase_reservation(xc_interface *xch, return err; } -int xc_domain_memory_decrease_reservation(xc_interface *xch, - uint32_t domid, - unsigned long nr_extents, - unsigned int extent_order, - xen_pfn_t *extent_start) +int xc_domain_decrease_reservation(xc_interface *xch, + uint32_t domid, + unsigned long nr_extents, + unsigned int extent_order, + xen_pfn_t *extent_start) { int err; struct xen_memory_reservation reservation = { @@ -637,6 +653,21 @@ int xc_domain_memory_decrease_reservation(xc_interface *xch, } err = xc_memory_op(xch, XENMEM_decrease_reservation, &reservation); + + return err; +} + +int xc_domain_decrease_reservation_exact(xc_interface *xch, + uint32_t domid, + unsigned long nr_extents, + 
unsigned int extent_order, + xen_pfn_t *extent_start) +{ + int err; + + err = xc_domain_decrease_reservation(xch, domid, nr_extents, + extent_order, extent_start); + if ( err == nr_extents ) return 0; @@ -651,12 +682,12 @@ int xc_domain_memory_decrease_reservation(xc_interface *xch, return err; } -int xc_domain_memory_populate_physmap(xc_interface *xch, - uint32_t domid, - unsigned long nr_extents, - unsigned int extent_order, - unsigned int mem_flags, - xen_pfn_t *extent_start) +int xc_domain_populate_physmap(xc_interface *xch, + uint32_t domid, + unsigned long nr_extents, + unsigned int extent_order, + unsigned int mem_flags, + xen_pfn_t *extent_start) { int err; struct xen_memory_reservation reservation = { @@ -668,6 +699,21 @@ int xc_domain_memory_populate_physmap(xc_interface *xch, set_xen_guest_handle(reservation.extent_start, extent_start); err = xc_memory_op(xch, XENMEM_populate_physmap, &reservation); + + return err; +} + +int xc_domain_populate_physmap_exact(xc_interface *xch, + uint32_t domid, + unsigned long nr_extents, + unsigned int extent_order, + unsigned int mem_flags, + xen_pfn_t *extent_start) +{ + int err; + + err = xc_domain_populate_physmap(xch, domid, nr_extents, + extent_order, mem_flags, extent_start); if ( err == nr_extents ) return 0; @@ -682,13 +728,13 @@ int xc_domain_memory_populate_physmap(xc_interface *xch, return err; } -static int xc_domain_memory_pod_target(xc_interface *xch, - int op, - uint32_t domid, - uint64_t target_pages, - uint64_t *tot_pages, - uint64_t *pod_cache_pages, - uint64_t *pod_entries) +static int xc_domain_pod_target(xc_interface *xch, + int op, + uint32_t domid, + uint64_t target_pages, + uint64_t *tot_pages, + uint64_t *pod_cache_pages, + uint64_t *pod_entries) { int err; @@ -701,7 +747,7 @@ static int xc_domain_memory_pod_target(xc_interface *xch, if ( err < 0 ) { - DPRINTF("Failed %s_memory_target dom %d\n", + DPRINTF("Failed %s_pod_target dom %d\n", (op==XENMEM_set_pod_target)?"set":"get", domid); errno = 
-err; @@ -719,37 +765,37 @@ static int xc_domain_memory_pod_target(xc_interface *xch, return err; } - - -int xc_domain_memory_set_pod_target(xc_interface *xch, - uint32_t domid, - uint64_t target_pages, - uint64_t *tot_pages, - uint64_t *pod_cache_pages, - uint64_t *pod_entries) + + +int xc_domain_set_pod_target(xc_interface *xch, + uint32_t domid, + uint64_t target_pages, + uint64_t *tot_pages, + uint64_t *pod_cache_pages, + uint64_t *pod_entries) { - return xc_domain_memory_pod_target(xch, - XENMEM_set_pod_target, - domid, - target_pages, - tot_pages, - pod_cache_pages, - pod_entries); + return xc_domain_pod_target(xch, + XENMEM_set_pod_target, + domid, + target_pages, + tot_pages, + pod_cache_pages, + pod_entries); } -int xc_domain_memory_get_pod_target(xc_interface *xch, - uint32_t domid, - uint64_t *tot_pages, - uint64_t *pod_cache_pages, - uint64_t *pod_entries) +int xc_domain_get_pod_target(xc_interface *xch, + uint32_t domid, + uint64_t *tot_pages, + uint64_t *pod_cache_pages, + uint64_t *pod_entries) { - return xc_domain_memory_pod_target(xch, - XENMEM_get_pod_target, - domid, - -1, - tot_pages, - pod_cache_pages, - pod_entries); + return xc_domain_pod_target(xch, + XENMEM_get_pod_target, + domid, + -1, + tot_pages, + pod_cache_pages, + pod_entries); } int xc_domain_max_vcpus(xc_interface *xch, uint32_t domid, unsigned int max) |