author    | Olaf Hering <olaf@aepfle.de> | 2011-06-10 10:47:03 +0200
committer | Olaf Hering <olaf@aepfle.de> | 2011-06-10 10:47:03 +0200
commit    | aa1355f971287932e2ba09dfb04a6122ecc3951f (patch)
tree      | b84ec9861d1b3409fed90ebaa7e6b8d857af4a3b /tools/xenpaging
parent    | 9c1ebbba309d04e15c8bc768843127c2c8b84c5f (diff)
download  | xen-aa1355f971287932e2ba09dfb04a6122ecc3951f.tar.gz, xen-aa1355f971287932e2ba09dfb04a6122ecc3951f.tar.bz2, xen-aa1355f971287932e2ba09dfb04a6122ecc3951f.zip
tools: merge several bitop functions into xc_bitops.h
Bitmaps are used in save/restore, xenpaging and blktap2. Merge the code into a
private xc_bitops.h file. All users are single-threaded, so locking is not an
issue. The array of bits is handled as volatile because the x86 save/restore
code passes the bitmap to the hypervisor, which in turn modifies the bitmap.
blktap2 uses a private bitmap. There was a possible overflow in its
bitmap_size() function: the remainder of the division was not considered.
ia64 save/restore uses a bitmap to send the number of vcpus to the host.
x86 save/restore uses a bitmap to track dirty pages. This bitmap is shared with
the hypervisor. An unused function count_bits() was removed and a new
bitmap_size() function is now used.
xenpaging uses 3 private bitmaps to track the gfns that are in paged-out
state. It had a copy of the Linux bitops.h, which is now obsolete. Also, the
BITS_PER_LONG macro was hardcoded to 64, which made it impossible to run
32-bit tools on a 64-bit host. Whether this works at all has yet to be tested.
Signed-off-by: Olaf Hering <olaf@aepfle.de>
Committed-by: Ian Jackson <ian.jackson.citrix.com>
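
For orientation, here is a minimal sketch of the bitmap helpers that the new callers below rely on. The real xc_bitops.h lives outside tools/xenpaging and is therefore not part of this diffstat, so the sketch is reconstructed from the call sites and the description above (volatile array, no locking, BITS_PER_LONG derived from the build instead of hardcoded, bitmap_size() rounding up so the remainder is no longer dropped); everything beyond the names bitmap_alloc(), bitmap_size() and set_bit() is an assumption, not the header's actual contents.

```c
/* Sketch only -- not the actual xc_bitops.h introduced by this commit. */
#include <stdlib.h>

#define BITS_PER_LONG (sizeof(unsigned long) * 8)
#define BITS_TO_LONGS(bits) \
    (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Bytes needed to back a bitmap of nr_bits bits, rounded up to whole longs. */
static inline unsigned long bitmap_size(unsigned long nr_bits)
{
    return BITS_TO_LONGS(nr_bits) * sizeof(unsigned long);
}

/* Zeroed bitmap large enough for nr_bits bits; NULL on allocation failure. */
static inline unsigned long *bitmap_alloc(unsigned long nr_bits)
{
    return calloc(BITS_TO_LONGS(nr_bits), sizeof(unsigned long));
}

/* Plain, non-atomic bit operations; all users are single-threaded. */
static inline void set_bit(int nr, volatile unsigned long *addr)
{
    addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static inline int test_bit(int nr, const volatile unsigned long *addr)
{
    return (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}
```

With helpers of this shape, a caller such as policy_init() simply does bitmap = bitmap_alloc(paging->domain_info->max_pages) and checks for NULL, which is exactly the pattern the hunks below switch to.
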
Diffstat (limited to 'tools/xenpaging')
-rw-r--r-- | tools/xenpaging/bitops.h         | 448
-rw-r--r-- | tools/xenpaging/policy_default.c |  20
-rw-r--r-- | tools/xenpaging/xc.c             |  14
-rw-r--r-- | tools/xenpaging/xc.h             |   2
-rw-r--r-- | tools/xenpaging/xenpaging.c      |   9
-rw-r--r-- | tools/xenpaging/xenpaging.h      |   1
6 files changed, 10 insertions, 484 deletions
diff --git a/tools/xenpaging/bitops.h b/tools/xenpaging/bitops.h
deleted file mode 100644
index aa5b8a7178..0000000000
--- a/tools/xenpaging/bitops.h
+++ /dev/null
@@ -1,448 +0,0 @@
-#ifndef _X86_BITOPS_H
-#define _X86_BITOPS_H
-
-/*
- * Copyright 1992, Linus Torvalds.
- */
-
-//#include <xen/config.h>
-
-#ifdef CONFIG_SMP
-#define LOCK_PREFIX "lock ; "
-#else
-#define LOCK_PREFIX ""
-#endif
-
-/*
- * We specify the memory operand as both input and output because the memory
- * operand is both read from and written to. Since the operand is in fact a
- * word array, we also specify "memory" in the clobbers list to indicate that
- * words other than the one directly addressed by the memory operand may be
- * modified. We don't use "+m" because the gcc manual says that it should be
- * used only when the constraint allows the operand to reside in a register.
- */
-
-#define ADDR (*(volatile long *) addr)
-#define CONST_ADDR (*(const volatile long *) addr)
-
-extern void __bitop_bad_size(void);
-#define bitop_bad_size(addr) (sizeof(*(addr)) < 4)
-
-/**
- * set_bit - Atomically set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * This function is atomic and may not be reordered. See __set_bit()
- * if you do not require the atomic guarantees.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static inline void set_bit(int nr, volatile void *addr)
-{
-    asm volatile (
-        LOCK_PREFIX
-        "btsl %1,%0"
-        : "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
-}
-#define set_bit(nr, addr) ({                            \
-    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
-    set_bit(nr, addr);                                  \
-})
-
-/**
- * __set_bit - Set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * Unlike set_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static inline void __set_bit(int nr, volatile void *addr)
-{
-    asm volatile (
-        "btsl %1,%0"
-        : "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
-}
-#define __set_bit(nr, addr) ({                          \
-    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
-    __set_bit(nr, addr);                                \
-})
-
-/**
- * clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and may not be reordered. However, it does
- * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
- * in order to ensure changes are visible on other processors.
- */
-static inline void clear_bit(int nr, volatile void *addr)
-{
-    asm volatile (
-        LOCK_PREFIX
-        "btrl %1,%0"
-        : "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
-}
-#define clear_bit(nr, addr) ({                          \
-    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
-    clear_bit(nr, addr);                                \
-})
-
-/**
- * __clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * Unlike clear_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static inline void __clear_bit(int nr, volatile void *addr)
-{
-    asm volatile (
-        "btrl %1,%0"
-        : "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
-}
-#define __clear_bit(nr, addr) ({                        \
-    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
-    __clear_bit(nr, addr);                              \
-})
-
-#define smp_mb__before_clear_bit() ((void)0)
-#define smp_mb__after_clear_bit() ((void)0)
-
-/**
- * __change_bit - Toggle a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * Unlike change_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static inline void __change_bit(int nr, volatile void *addr)
-{
-    asm volatile (
-        "btcl %1,%0"
-        : "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
-}
-#define __change_bit(nr, addr) ({                       \
-    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
-    __change_bit(nr, addr);                             \
-})
-
-/**
- * change_bit - Toggle a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * change_bit() is atomic and may not be reordered.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static inline void change_bit(int nr, volatile void *addr)
-{
-    asm volatile (
-        LOCK_PREFIX
-        "btcl %1,%0"
-        : "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
-}
-#define change_bit(nr, addr) ({                         \
-    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
-    change_bit(nr, addr);                               \
-})
-
-/**
- * test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static inline int test_and_set_bit(int nr, volatile void *addr)
-{
-    int oldbit;
-
-    asm volatile (
-        LOCK_PREFIX
-        "btsl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
-    return oldbit;
-}
-#define test_and_set_bit(nr, addr) ({                   \
-    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
-    test_and_set_bit(nr, addr);                         \
-})
-
-/**
- * __test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static inline int __test_and_set_bit(int nr, volatile void *addr)
-{
-    int oldbit;
-
-    asm volatile (
-        "btsl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
-    return oldbit;
-}
-#define __test_and_set_bit(nr, addr) ({                 \
-    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
-    __test_and_set_bit(nr, addr);                       \
-})
-
-/**
- * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static inline int test_and_clear_bit(int nr, volatile void *addr)
-{
-    int oldbit;
-
-    asm volatile (
-        LOCK_PREFIX
-        "btrl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
-    return oldbit;
-}
-#define test_and_clear_bit(nr, addr) ({                 \
-    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
-    test_and_clear_bit(nr, addr);                       \
-})
-
-/**
- * __test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static inline int __test_and_clear_bit(int nr, volatile void *addr)
-{
-    int oldbit;
-
-    asm volatile (
-        "btrl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
-    return oldbit;
-}
-#define __test_and_clear_bit(nr, addr) ({               \
-    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
-    __test_and_clear_bit(nr, addr);                     \
-})
-
-/* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr, volatile void *addr)
-{
-    int oldbit;
-
-    asm volatile (
-        "btcl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
-    return oldbit;
-}
-#define __test_and_change_bit(nr, addr) ({              \
-    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
-    __test_and_change_bit(nr, addr);                    \
-})
-
-/**
- * test_and_change_bit - Change a bit and return its new value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static inline int test_and_change_bit(int nr, volatile void *addr)
-{
-    int oldbit;
-
-    asm volatile (
-        LOCK_PREFIX
-        "btcl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
-    return oldbit;
-}
-#define test_and_change_bit(nr, addr) ({                \
-    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
-    test_and_change_bit(nr, addr);                      \
-})
-
-static inline int constant_test_bit(int nr, const volatile void *addr)
-{
-    return ((1U << (nr & 31)) &
-            (((const volatile unsigned int *)addr)[nr >> 5])) != 0;
-}
-
-static inline int variable_test_bit(int nr, const volatile void *addr)
-{
-    int oldbit;
-
-    asm volatile (
-        "btl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit)
-        : "m" (CONST_ADDR), "Ir" (nr) : "memory" );
-    return oldbit;
-}
-
-#define test_bit(nr, addr) ({                           \
-    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
-    (__builtin_constant_p(nr) ?                         \
-     constant_test_bit((nr),(addr)) :                   \
-     variable_test_bit((nr),(addr)));                   \
-})
-
-extern unsigned int __find_first_bit(
-    const unsigned long *addr, unsigned int size);
-extern unsigned int __find_next_bit(
-    const unsigned long *addr, unsigned int size, unsigned int offset);
-extern unsigned int __find_first_zero_bit(
-    const unsigned long *addr, unsigned int size);
-extern unsigned int __find_next_zero_bit(
-    const unsigned long *addr, unsigned int size, unsigned int offset);
-
-static inline unsigned int __scanbit(unsigned long val, unsigned long max)
-{
-    asm ( "bsf %1,%0 ; cmovz %2,%0" : "=&r" (val) : "r" (val), "r" (max) );
-    return (unsigned int)val;
-}
-
-/**
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
- *
- * Returns the bit-number of the first set bit, not the number of the byte
- * containing a bit.
- */
-#define find_first_bit(addr,size) \
-((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ?               \
-  (__scanbit(*(const unsigned long *)addr, size)) :                     \
-  __find_first_bit(addr,size)))
-
-/**
- * find_next_bit - find the first set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-#define find_next_bit(addr,size,off) \
-((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ?               \
-  ((off) + (__scanbit((*(const unsigned long *)addr) >> (off), size))) : \
-  __find_next_bit(addr,size,off)))
-
-/**
- * find_first_zero_bit - find the first zero bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
- *
- * Returns the bit-number of the first zero bit, not the number of the byte
- * containing a bit.
- */
-#define find_first_zero_bit(addr,size) \
-((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ?               \
-  (__scanbit(~*(const unsigned long *)addr, size)) :                    \
-  __find_first_zero_bit(addr,size)))
-
-/**
- * find_next_zero_bit - find the first zero bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-#define find_next_zero_bit(addr,size,off) \
-((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ?               \
-  ((off)+(__scanbit(~(((*(const unsigned long *)addr)) >> (off)), size))) : \
-  __find_next_zero_bit(addr,size,off)))
-
-
-/**
- * find_first_set_bit - find the first set bit in @word
- * @word: the word to search
- *
- * Returns the bit-number of the first set bit. The input must *not* be zero.
- */
-static inline unsigned int find_first_set_bit(unsigned long word)
-{
-    asm ( "bsf %1,%0" : "=r" (word) : "r" (word) );
-    return (unsigned int)word;
-}
-
-/**
- * ffs - find first bit set
- * @x: the word to search
- *
- * This is defined the same way as the libc and compiler builtin ffs routines.
- */
-#if 0
-static inline int ffs(unsigned long x)
-{
-    long r;
-
-    asm ( "bsf %1,%0\n\t"
-          "jnz 1f\n\t"
-          "mov $-1,%0\n"
-          "1:" : "=r" (r) : "rm" (x));
-    return (int)r+1;
-}
-#endif
-
-/**
- * fls - find last bit set
- * @x: the word to search
- *
- * This is defined the same way as ffs.
- */
-static inline int fls(unsigned long x)
-{
-    long r;
-
-    asm ( "bsr %1,%0\n\t"
-          "jnz 1f\n\t"
-          "mov $-1,%0\n"
-          "1:" : "=r" (r) : "rm" (x));
-    return (int)r+1;
-}
-
-/**
- * hweightN - returns the hamming weight of a N-bit word
- * @x: the word to weigh
- *
- * The Hamming Weight of a number is the total number of bits set in it.
- */
-#define hweight64(x) generic_hweight64(x)
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
-
-#endif /* _X86_BITOPS_H */
diff --git a/tools/xenpaging/policy_default.c b/tools/xenpaging/policy_default.c
index a53f5560f6..55a0ae8470 100644
--- a/tools/xenpaging/policy_default.c
+++ b/tools/xenpaging/policy_default.c
@@ -21,8 +21,7 @@
  */
 
 
-#include "bitops.h"
-#include "xc.h"
+#include "xc_bitops.h"
 
 #include "policy.h"
 
@@ -35,26 +34,23 @@ static unsigned int mru_size;
 static unsigned long *bitmap;
 static unsigned long *unconsumed;
 static unsigned long current_gfn;
-static unsigned long bitmap_size;
 static unsigned long max_pages;
 
 int policy_init(xenpaging_t *paging)
 {
     int i;
-    int rc;
+    int rc = -ENOMEM;
 
     /* Allocate bitmap for pages not to page out */
-    rc = alloc_bitmap(&bitmap, paging->bitmap_size);
-    if ( rc != 0 )
+    bitmap = bitmap_alloc(paging->domain_info->max_pages);
+    if ( !bitmap )
         goto out;
 
     /* Allocate bitmap to track unusable pages */
-    rc = alloc_bitmap(&unconsumed, paging->bitmap_size);
-    if ( rc != 0 )
+    unconsumed = bitmap_alloc(paging->domain_info->max_pages);
+    if ( !unconsumed )
         goto out;
 
-    /* record bitmap_size */
-    bitmap_size = paging->bitmap_size;
     max_pages = paging->domain_info->max_pages;
 
     /* Initialise MRU list of paged in pages */
@@ -65,10 +61,7 @@ int policy_init(xenpaging_t *paging)
 
     mru = malloc(sizeof(*mru) * mru_size);
     if ( mru == NULL )
-    {
-        rc = -ENOMEM;
         goto out;
-    }
 
     for ( i = 0; i < mru_size; i++ )
         mru[i] = INVALID_MFN;
@@ -76,6 +69,7 @@
     /* Don't page out page 0 */
     set_bit(0, bitmap);
 
+    rc = 0;
  out:
     return rc;
 }
diff --git a/tools/xenpaging/xc.c b/tools/xenpaging/xc.c
index d1dcb6847f..f72e4e6e82 100644
--- a/tools/xenpaging/xc.c
+++ b/tools/xenpaging/xc.c
@@ -31,20 +31,6 @@
 
 #include "xc.h"
 
-int alloc_bitmap(unsigned long **bitmap, unsigned long bitmap_size)
-{
-    if ( *bitmap == NULL )
-    {
-        *bitmap = calloc(bitmap_size / BITS_PER_LONG, sizeof(unsigned long));
-
-        if ( *bitmap == NULL )
-            return -ENOMEM;
-    }
-
-    memset(*bitmap, 0, bitmap_size / 8);
-
-    return 0;
-}
 
 int xc_mem_paging_flush_ioemu_cache(domid_t domain_id)
 {
diff --git a/tools/xenpaging/xc.h b/tools/xenpaging/xc.h
index 41cf310c47..27181cd309 100644
--- a/tools/xenpaging/xc.h
+++ b/tools/xenpaging/xc.h
@@ -39,7 +39,6 @@
 
 #endif
 
-#define BITS_PER_LONG 64
 
 typedef struct xc_platform_info {
 
@@ -50,7 +49,6 @@ typedef struct xc_platform_info {
 
 } xc_platform_info_t;
 
-int alloc_bitmap(unsigned long **bitmap, unsigned long bitmap_size);
 int xc_mem_paging_flush_ioemu_cache(domid_t domain_id);
 
 int xc_wait_for_event(xc_interface *xch, xc_evtchn *xce);
diff --git a/tools/xenpaging/xenpaging.c b/tools/xenpaging/xenpaging.c
index ec10b36923..d83448f8f5 100644
--- a/tools/xenpaging/xenpaging.c
+++ b/tools/xenpaging/xenpaging.c
@@ -31,7 +31,7 @@
 
 #include <xen/mem_event.h>
 
-#include "bitops.h"
+#include "xc_bitops.h"
 #include "file_ops.h"
 #include "xc.h"
 
@@ -200,11 +200,8 @@
     }
 
     /* Allocate bitmap for tracking pages that have been paged out */
-    paging->bitmap_size = (paging->domain_info->max_pages + BITS_PER_LONG) &
-                          ~(BITS_PER_LONG - 1);
-
-    rc = alloc_bitmap(&paging->bitmap, paging->bitmap_size);
-    if ( rc != 0 )
+    paging->bitmap = bitmap_alloc(paging->domain_info->max_pages);
+    if ( !paging->bitmap )
     {
         ERROR("Error allocating bitmap");
         goto err;
diff --git a/tools/xenpaging/xenpaging.h b/tools/xenpaging/xenpaging.h
index d521b9ece1..855966772a 100644
--- a/tools/xenpaging/xenpaging.h
+++ b/tools/xenpaging/xenpaging.h
@@ -40,7 +40,6 @@ typedef struct xenpaging {
     xc_platform_info_t *platform_info;
     xc_domaininfo_t *domain_info;
 
-    unsigned long bitmap_size;
     unsigned long *bitmap;
 
     mem_event_t mem_event;
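
A closing note on the xenpaging.c and xc.c hunks above: the removed code is also what made a 32-bit build unsafe. xc.h hardcoded BITS_PER_LONG to 64, while alloc_bitmap() allocated bitmap_size / BITS_PER_LONG elements of sizeof(unsigned long) and then zeroed bitmap_size / 8 bytes; with a 4-byte long, that touches twice as many bytes as were allocated. The toy program below (the domain size is made up) merely reproduces that arithmetic as an illustration; it is not code from the tree.

```c
/* Illustration of the old sizing arithmetic with BITS_PER_LONG hardcoded
 * to 64.  The max_pages value is hypothetical. */
#include <stdio.h>

int main(void)
{
    unsigned long max_pages = 262144;  /* e.g. a 1 GiB guest with 4 KiB pages */
    /* Old rounding from xenpaging_init() */
    unsigned long bitmap_size = (max_pages + 64) & ~(64UL - 1);

    unsigned long alloc_64bit = bitmap_size / 64 * 8;  /* calloc() bytes, 8-byte long */
    unsigned long alloc_32bit = bitmap_size / 64 * 4;  /* calloc() bytes, 4-byte long */
    unsigned long zeroed      = bitmap_size / 8;       /* bytes touched by memset() */

    printf("allocated: %lu (64-bit) / %lu (32-bit), zeroed: %lu\n",
           alloc_64bit, alloc_32bit, zeroed);
    return 0;
}
```

The new bitmap_alloc(max_pages) sizes the allocation from the build's own unsigned long width, so the same code comes out correct for both 32-bit and 64-bit tools.
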