diff options
author | Ian Campbell <ian.campbell@citrix.com> | 2010-10-18 17:35:59 +0100 |
---|---|---|
committer | Ian Campbell <ian.campbell@citrix.com> | 2010-10-18 17:35:59 +0100 |
commit | 35e135f116d77823e960671db82ca667f125d617 (patch) | |
tree | 991c3aa7f22952398f8f9799e9432e888e2ca9ec /tools/libxc/xc_private.c | |
parent | 932b3645d2c3d0d105c2dc0a3a8191bad606c48f (diff) | |
download | xen-35e135f116d77823e960671db82ca667f125d617.tar.gz xen-35e135f116d77823e960671db82ca667f125d617.tar.bz2 xen-35e135f116d77823e960671db82ca667f125d617.zip |
libxc: make xc_memory_op library private
Now that all XENMEM_* callers go via an op-specific function, make
xc_memory_op private to libxc (and rename it to do_memory_op for
consistency with other private functions).
Also change the interface to take a size parameter so that
do_memory_op knows how much memory to lock for the top-level argument,
removing some of the introspection.
[ Compatibility note: this breaks qemu-xen before 706bc8c588cb ]
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Signed-off-by: Ian Jackson <ian.jackson@eu.citrix.com>
Diffstat (limited to 'tools/libxc/xc_private.c')
-rw-r--r-- | tools/libxc/xc_private.c | 51 |
1 file changed, 12 insertions, 39 deletions
diff --git a/tools/libxc/xc_private.c b/tools/libxc/xc_private.c index 0cbfc867f6..8ce66c5440 100644 --- a/tools/libxc/xc_private.c +++ b/tools/libxc/xc_private.c @@ -421,9 +421,7 @@ int xc_flush_mmu_updates(xc_interface *xch, struct xc_mmu *mmu) return flush_mmu_updates(xch, mmu); } -int xc_memory_op(xc_interface *xch, - int cmd, - void *arg) +int do_memory_op(xc_interface *xch, int cmd, void *arg, size_t len) { DECLARE_HYPERCALL; struct xen_memory_reservation *reservation = arg; @@ -435,16 +433,17 @@ int xc_memory_op(xc_interface *xch, hypercall.arg[0] = (unsigned long)cmd; hypercall.arg[1] = (unsigned long)arg; + if ( len && lock_pages(xch, arg, len) != 0 ) + { + PERROR("Could not lock memory for XENMEM hypercall"); + goto out1; + } + switch ( cmd ) { case XENMEM_increase_reservation: case XENMEM_decrease_reservation: case XENMEM_populate_physmap: - if ( lock_pages(xch, reservation, sizeof(*reservation)) != 0 ) - { - PERROR("Could not lock"); - goto out1; - } get_xen_guest_handle(extent_start, reservation->extent_start); if ( (extent_start != NULL) && (lock_pages(xch, extent_start, @@ -456,11 +455,6 @@ int xc_memory_op(xc_interface *xch, } break; case XENMEM_machphys_mfn_list: - if ( lock_pages(xch, xmml, sizeof(*xmml)) != 0 ) - { - PERROR("Could not lock"); - goto out1; - } get_xen_guest_handle(extent_start, xmml->extent_start); if ( lock_pages(xch, extent_start, xmml->max_extents * sizeof(xen_pfn_t)) != 0 ) @@ -471,61 +465,40 @@ int xc_memory_op(xc_interface *xch, } break; case XENMEM_add_to_physmap: - if ( lock_pages(xch, arg, sizeof(struct xen_add_to_physmap)) ) - { - PERROR("Could not lock"); - goto out1; - } - break; case XENMEM_current_reservation: case XENMEM_maximum_reservation: case XENMEM_maximum_gpfn: - if ( lock_pages(xch, arg, sizeof(domid_t)) ) - { - PERROR("Could not lock"); - goto out1; - } - break; case XENMEM_set_pod_target: case XENMEM_get_pod_target: - if ( lock_pages(xch, arg, sizeof(struct xen_pod_target)) ) - { - PERROR("Could not lock"); 
- goto out1; - } break; } ret = do_xen_hypercall(xch, &hypercall); + if ( len ) + unlock_pages(xch, arg, len); + switch ( cmd ) { case XENMEM_increase_reservation: case XENMEM_decrease_reservation: case XENMEM_populate_physmap: - unlock_pages(xch, reservation, sizeof(*reservation)); get_xen_guest_handle(extent_start, reservation->extent_start); if ( extent_start != NULL ) unlock_pages(xch, extent_start, reservation->nr_extents * sizeof(xen_pfn_t)); break; case XENMEM_machphys_mfn_list: - unlock_pages(xch, xmml, sizeof(*xmml)); get_xen_guest_handle(extent_start, xmml->extent_start); unlock_pages(xch, extent_start, xmml->max_extents * sizeof(xen_pfn_t)); break; case XENMEM_add_to_physmap: - unlock_pages(xch, arg, sizeof(struct xen_add_to_physmap)); - break; case XENMEM_current_reservation: case XENMEM_maximum_reservation: case XENMEM_maximum_gpfn: - unlock_pages(xch, arg, sizeof(domid_t)); - break; case XENMEM_set_pod_target: case XENMEM_get_pod_target: - unlock_pages(xch, arg, sizeof(struct xen_pod_target)); break; } @@ -535,7 +508,7 @@ int xc_memory_op(xc_interface *xch, long xc_maximum_ram_page(xc_interface *xch) { - return xc_memory_op(xch, XENMEM_maximum_ram_page, NULL); + return do_memory_op(xch, XENMEM_maximum_ram_page, NULL, 0); } long long xc_domain_get_cpu_usage( xc_interface *xch, domid_t domid, int vcpu ) @@ -562,7 +535,7 @@ int xc_machphys_mfn_list(xc_interface *xch, .max_extents = max_extents, }; set_xen_guest_handle(xmml.extent_start, extent_start); - rc = xc_memory_op(xch, XENMEM_machphys_mfn_list, &xmml); + rc = do_memory_op(xch, XENMEM_machphys_mfn_list, &xmml, sizeof(xmml)); if (rc || xmml.nr_extents != max_extents) return -1; return 0; |