diff options
author | Keir Fraser <keir.fraser@citrix.com> | 2010-01-22 10:59:03 +0000 |
---|---|---|
committer | Keir Fraser <keir.fraser@citrix.com> | 2010-01-22 10:59:03 +0000 |
commit | d6aaa9ee0f8ba5d2d8ff1187b05ed9becee0b40c (patch) | |
tree | 3b69d194167117b66f08f86321756f58703a44e7 /tools/libxc/xc_private.c | |
parent | 379e63ed3da8f6d874d9bc5d6fa05a85afb60238 (diff) | |
download | xen-d6aaa9ee0f8ba5d2d8ff1187b05ed9becee0b40c.tar.gz xen-d6aaa9ee0f8ba5d2d8ff1187b05ed9becee0b40c.tar.bz2 xen-d6aaa9ee0f8ba5d2d8ff1187b05ed9becee0b40c.zip |
libxc: New hcall_buf_{prep,release} pre-mlock interface
Allow certain performance-critical hypercall wrappers to register data
buffers via a new interface which allows them to be 'bounced' into a
pre-mlock'ed page-sized per-thread data area. This saves the cost of
mlock/munlock on every such hypercall, which can be very expensive on
modern kernels.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
Diffstat (limited to 'tools/libxc/xc_private.c')
-rw-r--r-- | tools/libxc/xc_private.c | 135 |
1 file changed, 123 insertions, 12 deletions
diff --git a/tools/libxc/xc_private.c b/tools/libxc/xc_private.c index 7bde4bbe04..aaa1a39d87 100644 --- a/tools/libxc/xc_private.c +++ b/tools/libxc/xc_private.c @@ -8,6 +8,9 @@ #include "xc_private.h" #include "xg_private.h" #include <stdarg.h> +#include <stdlib.h> +#include <malloc.h> +#include <unistd.h> #include <pthread.h> static pthread_key_t last_error_pkey; @@ -126,28 +129,120 @@ void xc_set_error(int code, const char *fmt, ...) } } +#ifdef __sun__ + +int lock_pages(void *addr, size_t len) { return 0; } +void unlock_pages(void *addr, size_t len) { } + +int hcall_buf_prep(void **addr, size_t len) { return 0; } +void hcall_buf_release(void **addr, size_t len) { } + +#else /* !__sun__ */ + int lock_pages(void *addr, size_t len) { - int e = 0; -#ifndef __sun__ + int e; void *laddr = (void *)((unsigned long)addr & PAGE_MASK); size_t llen = (len + ((unsigned long)addr - (unsigned long)laddr) + PAGE_SIZE - 1) & PAGE_MASK; e = mlock(laddr, llen); -#endif return e; } void unlock_pages(void *addr, size_t len) { -#ifndef __sun__ void *laddr = (void *)((unsigned long)addr & PAGE_MASK); size_t llen = (len + ((unsigned long)addr - (unsigned long)laddr) + PAGE_SIZE - 1) & PAGE_MASK; safe_munlock(laddr, llen); -#endif } +static pthread_key_t hcall_buf_pkey; +static pthread_once_t hcall_buf_pkey_once = PTHREAD_ONCE_INIT; +struct hcall_buf { + void *buf; + void *oldbuf; +}; + +static void _xc_clean_hcall_buf(void *m) +{ + struct hcall_buf *hcall_buf = m; + + if ( hcall_buf ) + { + if ( hcall_buf->buf ) + { + unlock_pages(hcall_buf->buf, PAGE_SIZE); + free(hcall_buf->buf); + } + + free(hcall_buf); + } + + pthread_setspecific(hcall_buf_pkey, NULL); +} + +static void _xc_init_hcall_buf(void) +{ + pthread_key_create(&hcall_buf_pkey, _xc_clean_hcall_buf); +} + +int hcall_buf_prep(void **addr, size_t len) +{ + struct hcall_buf *hcall_buf; + + pthread_once(&hcall_buf_pkey_once, _xc_init_hcall_buf); + + hcall_buf = pthread_getspecific(hcall_buf_pkey); + if ( !hcall_buf ) + { + 
hcall_buf = calloc(1, sizeof(*hcall_buf)); + if ( !hcall_buf ) + goto out; + pthread_setspecific(hcall_buf_pkey, hcall_buf); + } + + if ( !hcall_buf->buf ) + { + hcall_buf->buf = xc_memalign(PAGE_SIZE, PAGE_SIZE); + if ( !hcall_buf->buf || lock_pages(hcall_buf->buf, PAGE_SIZE) ) + { + free(hcall_buf->buf); + hcall_buf->buf = NULL; + goto out; + } + } + + if ( (len < PAGE_SIZE) && !hcall_buf->oldbuf ) + { + memcpy(hcall_buf->buf, *addr, len); + hcall_buf->oldbuf = *addr; + *addr = hcall_buf->buf; + return 0; + } + + out: + return lock_pages(*addr, len); +} + +void hcall_buf_release(void **addr, size_t len) +{ + struct hcall_buf *hcall_buf = pthread_getspecific(hcall_buf_pkey); + + if ( hcall_buf && (hcall_buf->buf == *addr) ) + { + memcpy(hcall_buf->oldbuf, *addr, len); + *addr = hcall_buf->oldbuf; + hcall_buf->oldbuf = NULL; + } + else + { + unlock_pages(*addr, len); + } +} + +#endif + /* NB: arr must be locked */ int xc_get_pfn_type_batch(int xc_handle, uint32_t dom, unsigned int num, xen_pfn_t *arr) @@ -169,21 +264,21 @@ int xc_mmuext_op( DECLARE_HYPERCALL; long ret = -EINVAL; + if ( hcall_buf_prep((void **)&op, nr_ops*sizeof(*op)) != 0 ) + { + PERROR("Could not lock memory for Xen hypercall"); + goto out1; + } + hypercall.op = __HYPERVISOR_mmuext_op; hypercall.arg[0] = (unsigned long)op; hypercall.arg[1] = (unsigned long)nr_ops; hypercall.arg[2] = (unsigned long)0; hypercall.arg[3] = (unsigned long)dom; - if ( lock_pages(op, nr_ops*sizeof(*op)) != 0 ) - { - PERROR("Could not lock memory for Xen hypercall"); - goto out1; - } - ret = do_xen_hypercall(xc_handle, &hypercall); - unlock_pages(op, nr_ops*sizeof(*op)); + hcall_buf_release((void **)&op, nr_ops*sizeof(*op)); out1: return ret; @@ -656,6 +751,22 @@ int xc_ffs64(uint64_t x) return l ? xc_ffs32(l) : h ? 
xc_ffs32(h) + 32 : 0; } +void *xc_memalign(size_t alignment, size_t size) +{ +#if defined(_POSIX_C_SOURCE) && !defined(__sun__) + int ret; + void *ptr; + ret = posix_memalign(&ptr, alignment, size); + if (ret != 0) + return NULL; + return ptr; +#elif defined(__NetBSD__) || defined(__OpenBSD__) + return valloc(size); +#else + return memalign(alignment, size); +#endif +} + /* * Local variables: * mode: C |