diff options
author | Keir Fraser <keir.fraser@citrix.com> | 2009-08-10 13:27:54 +0100 |
---|---|---|
committer | Keir Fraser <keir.fraser@citrix.com> | 2009-08-10 13:27:54 +0100 |
commit | 81d70f1a53806178596e48a8f81f870a5f9c74e0 (patch) | |
tree | 52943c0142ba3cb9eafc2362a6d9aaf3a1adb0c1 /xen/include/xen/tmem_xen.h | |
parent | 37541c41651ee9b29ffeda3870cb3ba704d7e756 (diff) | |
download | xen-81d70f1a53806178596e48a8f81f870a5f9c74e0.tar.gz xen-81d70f1a53806178596e48a8f81f870a5f9c74e0.tar.bz2 xen-81d70f1a53806178596e48a8f81f870a5f9c74e0.zip |
tmem: expose freeable memory
Expose tmem "freeable" memory for use by management tools.
Management tools looking for a machine with available
memory often look at free_memory to determine if there
is enough physical memory to house a new or migrating
guest. Since tmem absorbs much or all free memory,
and since "ephemeral" tmem memory can be synchronously
freed, management tools need more data -- not only how
much memory is "free" but also how much memory is
"freeable" by tmem if tmem is told (via an already
existing tmem hypercall) to relinquish freeable memory.
This patch provides that extra piece of data (in MB).
Signed-off-by: Dan Magenheimer <dan.magenheimer@oracle.com>
Diffstat (limited to 'xen/include/xen/tmem_xen.h')
-rw-r--r-- | xen/include/xen/tmem_xen.h | 26 |
1 file changed, 25 insertions, 1 deletion
diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h index 8970327563..787e1467ff 100644 --- a/xen/include/xen/tmem_xen.h +++ b/xen/include/xen/tmem_xen.h @@ -35,6 +35,7 @@ extern unsigned int tmh_mempool_maxalloc; extern struct page_list_head tmh_page_list; extern spinlock_t tmh_page_list_lock; extern unsigned long tmh_page_list_pages; +extern atomic_t freeable_page_count; extern spinlock_t tmem_lock; extern spinlock_t tmem_spinlock; @@ -102,7 +103,7 @@ static inline unsigned long tmh_avail_pages(void) } /* - * Ephemeral memory allocation for persistent data + * Memory allocation for persistent data */ static inline bool_t domain_fully_allocated(struct domain *d) @@ -228,6 +229,8 @@ static inline struct page_info *tmh_alloc_page(void *pool, int no_heap) if ( pi == NULL && !no_heap ) pi = alloc_domheap_pages(0,0,MEMF_tmem); ASSERT((pi == NULL) || IS_VALID_PAGE(pi)); + if ( pi != NULL ) + atomic_inc(&freeable_page_count); return pi; } @@ -235,6 +238,7 @@ static inline void tmh_free_page(struct page_info *pi) { ASSERT(IS_VALID_PAGE(pi)); tmh_page_list_put(pi); + atomic_dec(&freeable_page_count); } static inline unsigned int tmem_subpage_maxsize(void) @@ -242,6 +246,26 @@ static inline unsigned int tmem_subpage_maxsize(void) return tmh_mempool_maxalloc; } +static inline unsigned long tmh_freeable_mb(void) +{ + return (tmh_avail_pages() + _atomic_read(freeable_page_count)) >> + (20 - PAGE_SHIFT); +} + +/* + * Memory allocation for "infrastructure" data + */ + +static inline void *tmh_alloc_infra(size_t size, size_t align) +{ + return _xmalloc(size,align); +} + +static inline void tmh_free_infra(void *p) +{ + return xfree(p); +} + #define tmh_lock_all opt_tmem_lock #define tmh_flush_dups opt_tmem_flush_dups #define tmh_called_from_tmem(_memflags) (_memflags & MEMF_tmem) |