path: root/xen/include/xen/mm.h
author     kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>  2005-06-09 15:25:29 +0000
committer  kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>  2005-06-09 15:25:29 +0000
commit     7607ba2da73d75631f7c5c9f6bb5495f78002ea8 (patch)
tree       318bf59c4ee0798966c2edef7ea5f487a9b52589 /xen/include/xen/mm.h
parent     34446ff07c752abb53e912fc6ea0b6899613c83f (diff)
bitkeeper revision 1.1699.1.1 (42a85f6955KSFCuD5KSRtCwU-dzakQ)
Clean up the page allocator interface a little. In particular, physical addresses are now passed as physaddr_t rather than unsigned long (required for 32-bit PAE mode).

Signed-off-by: Keir Fraser <keir@xensource.com>
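For background, the point of the wider type is that on 32-bit PAE builds a machine address can exceed 4GiB and so no longer fits in an unsigned long. A minimal illustrative sketch (the real definition lives in the architecture headers pulled in via xen/types.h, not in this patch; the CONFIG_X86_PAE guard shown here is an assumption):

/* Illustrative only: under 32-bit PAE a physical address no longer
 * fits in the native word, so "unsigned long" would truncate it.   */
#if defined(CONFIG_X86_PAE)
typedef unsigned long long physaddr_t;  /* 64-bit physical address  */
#else
typedef unsigned long physaddr_t;       /* native machine word      */
#endif

/* Boot-time ranges are now passed using the wider type: */
void init_boot_pages(physaddr_t ps, physaddr_t pe);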
Diffstat (limited to 'xen/include/xen/mm.h')
-rw-r--r--  xen/include/xen/mm.h  21
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 4e7f570643..1919b5e9e7 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -3,6 +3,7 @@
#define __XEN_MM_H__
#include <xen/config.h>
+#include <xen/types.h>
#include <xen/list.h>
#include <xen/spinlock.h>
@@ -10,9 +11,9 @@ struct domain;
struct pfn_info;
/* Boot-time allocator. Turns into generic allocator after bootstrap. */
-unsigned long init_boot_allocator(unsigned long bitmap_start);
-void init_boot_pages(unsigned long ps, unsigned long pe);
-unsigned long alloc_boot_pages(unsigned long size, unsigned long align);
+physaddr_t init_boot_allocator(physaddr_t bitmap_start);
+void init_boot_pages(physaddr_t ps, physaddr_t pe);
+unsigned long alloc_boot_pages(unsigned long nr_pfns, unsigned long pfn_align);
void end_boot_allocator(void);
/* Generic allocator. These functions are *not* interrupt-safe. */
@@ -24,19 +25,19 @@ void free_heap_pages(
void scrub_heap_pages(void);
/* Xen suballocator. These functions are interrupt-safe. */
-void init_xenheap_pages(unsigned long ps, unsigned long pe);
-unsigned long alloc_xenheap_pages(unsigned int order);
-void free_xenheap_pages(unsigned long p, unsigned int order);
+void init_xenheap_pages(physaddr_t ps, physaddr_t pe);
+void *alloc_xenheap_pages(unsigned int order);
+void free_xenheap_pages(void *v, unsigned int order);
#define alloc_xenheap_page() (alloc_xenheap_pages(0))
-#define free_xenheap_page(_p) (free_xenheap_pages(_p,0))
+#define free_xenheap_page(v) (free_xenheap_pages(v,0))
/* Domain suballocator. These functions are *not* interrupt-safe.*/
-void init_domheap_pages(unsigned long ps, unsigned long pe);
+void init_domheap_pages(physaddr_t ps, physaddr_t pe);
struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order);
void free_domheap_pages(struct pfn_info *pg, unsigned int order);
unsigned long avail_domheap_pages(void);
-#define alloc_domheap_page(_d) (alloc_domheap_pages(_d,0))
-#define free_domheap_page(_p) (free_domheap_pages(_p,0))
+#define alloc_domheap_page(d) (alloc_domheap_pages(d,0))
+#define free_domheap_page(p) (free_domheap_pages(p,0))
/* Automatic page scrubbing for dead domains. */
extern struct list_head page_scrub_list;
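
Two of the revised interfaces also change how callers use them: alloc_xenheap_pages() now returns a pointer instead of an address that had to be cast, and alloc_boot_pages() is counted in page frames rather than bytes. A usage sketch follows (hypothetical caller, not part of this patch; the return value of alloc_boot_pages() is taken here to be the first page frame number of the allocation):

/* Hypothetical caller of the xenheap interface after this change:
 * the allocation comes back as a pointer, usable without a cast.   */
void *v = alloc_xenheap_pages(2);            /* order 2 => 4 contiguous pages */
if ( v != NULL )
{
    memset(v, 0, 4 << PAGE_SHIFT);           /* zero all four pages           */
    free_xenheap_pages(v, 2);
}

/* Boot allocation is now expressed in page frames, not bytes: 16 pages,
 * aligned to a 16-page boundary (the old interface took byte counts).  */
unsigned long pfn = alloc_boot_pages(16, 16);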