 xen/arch/x86/acpi.c           |   4
 xen/arch/x86/apic.c           |   2
 xen/arch/x86/domain.c         |   4
 xen/arch/x86/irq.c            |   4
 xen/arch/x86/mpparse.c        |   6
 xen/arch/x86/pci-pc.c         |   8
 xen/arch/x86/pdb-stub.c       |   4
 xen/arch/x86/shadow.c         |  12
 xen/arch/x86/smpboot.c        |   4
 xen/arch/x86/x86_32/mm.c      |   6
 xen/common/ac_timer.c         |   6
 xen/common/dom0_ops.c         |   4
 xen/common/domain.c           |   6
 xen/common/event_channel.c    |   8
 xen/common/kernel.c           |   8
 xen/common/page_alloc.c       |   6
 xen/common/physdev.c          |   6
 xen/common/resource.c         |   8
 xen/common/sched_atropos.c    |  10
 xen/common/sched_bvt.c        |  12
 xen/common/sched_fair_bvt.c   |  12
 xen/common/sched_rrobin.c     |  10
 xen/common/schedule.c         |   8
 xen/common/slab.c             | 298
 xen/common/trace.c            |   2
 xen/drivers/char/console.c    |   4
 xen/drivers/pci/pci.c         |  10
 xen/drivers/pci/setup-bus.c   |   2
 xen/drivers/pci/setup-res.c   |   4
 xen/include/asm-x86/domain.h  |   2
 xen/include/asm-x86/io.h      |   2
 xen/include/asm-x86/shadow.h  |   2
 xen/include/asm-x86/types.h   |   2
 xen/include/xen/mm.h          |  10
 xen/include/xen/pci.h         |   2
 xen/include/xen/slab.h        |  30
 36 files changed, 263 insertions(+), 265 deletions(-)
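
This changeset is a mechanical rename of the Linux-derived allocator entry points to
Xen-specific names: kmalloc()/kfree() become xmalloc()/xfree(), the kmem_cache_* slab
interface becomes xmem_cache_*, and get_free_page()/__get_free_pages()/free_page()/
free_pages() become alloc_xenheap_page()/alloc_xenheap_pages()/free_xenheap_page()/
free_xenheap_pages(). The sketch below is only an illustration of the renamed interface
as a caller sees it, assuming the declarations live in <xen/slab.h> and <xen/mm.h>
(both touched by this patch); struct widget, widget_cache, widget_setup() and
widget_demo() are hypothetical names, not code from the tree.

    #include <xen/slab.h>   /* xmalloc, xfree, xmem_cache_*                   */
    #include <xen/mm.h>     /* alloc_xenheap_page(s), free_xenheap_page(s)    */

    struct widget { int id; };          /* hypothetical fixed-size object     */

    static xmem_cache_t *widget_cache;

    static int widget_setup(void)
    {
        /* Named slab cache for fixed-size objects (was kmem_cache_create). */
        widget_cache = xmem_cache_create("widget_cache", sizeof(struct widget),
                                         0, SLAB_HWCACHE_ALIGN, NULL, NULL);
        return (widget_cache == NULL) ? -1 : 0;
    }

    static void widget_demo(void)
    {
        struct widget *w  = xmem_cache_alloc(widget_cache); /* was kmem_cache_alloc */
        void          *b  = xmalloc(64);                    /* was kmalloc          */
        unsigned long  pg = alloc_xenheap_page();           /* was get_free_page    */

        if ( w != NULL )
            xmem_cache_free(widget_cache, w);               /* was kmem_cache_free  */
        if ( b != NULL )
            xfree(b);                                       /* was kfree            */
        if ( pg != 0 )
            free_xenheap_page(pg);                          /* was free_page        */
    }
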
diff --git a/xen/arch/x86/acpi.c b/xen/arch/x86/acpi.c
index 16e79d00c4..8fda8e371b 100644
--- a/xen/arch/x86/acpi.c
+++ b/xen/arch/x86/acpi.c
@@ -578,7 +578,7 @@ static void acpi_create_identity_pmd (void)
pgd_t *pgd;
int i;
- ptep = (pte_t*)__get_free_page();
+ ptep = (pte_t*)alloc_xenheap_page();
/* fill page with low mapping */
for (i = 0; i < PTRS_PER_PTE; i++)
@@ -607,7 +607,7 @@ static void acpi_restore_pmd (void)
{
set_pmd(pmd, saved_pmd);
local_flush_tlb();
- free_page((unsigned long)ptep);
+ free_xenheap_page((unsigned long)ptep);
}
/**
diff --git a/xen/arch/x86/apic.c b/xen/arch/x86/apic.c
index 64974f670d..feb4c53547 100644
--- a/xen/arch/x86/apic.c
+++ b/xen/arch/x86/apic.c
@@ -445,7 +445,7 @@ void __init init_apic_mappings(void)
* simulate the local APIC and another one for the IO-APIC.
*/
if (!smp_found_config && detect_init_APIC()) {
- apic_phys = get_free_page();
+ apic_phys = alloc_xenheap_page();
apic_phys = __pa(apic_phys);
} else
apic_phys = mp_lapic_addr;
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index e58c74d164..eb0590f9d2 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -212,13 +212,13 @@ void machine_halt(void)
void arch_do_createdomain(struct domain *d)
{
- d->shared_info = (void *)get_free_page();
+ d->shared_info = (void *)alloc_xenheap_page();
memset(d->shared_info, 0, PAGE_SIZE);
SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
machine_to_phys_mapping[virt_to_phys(d->shared_info) >>
PAGE_SHIFT] = 0x80000000UL; /* debug */
- d->mm.perdomain_pt = (l1_pgentry_t *)get_free_page();
+ d->mm.perdomain_pt = (l1_pgentry_t *)alloc_xenheap_page();
memset(d->mm.perdomain_pt, 0, PAGE_SIZE);
machine_to_phys_mapping[virt_to_phys(d->mm.perdomain_pt) >>
PAGE_SHIFT] = 0x0fffdeadUL; /* debug */
diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index af0b0de1fb..7835061a3a 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -258,7 +258,7 @@ int pirq_guest_bind(struct domain *p, int irq, int will_share)
goto out;
}
- action = kmalloc(sizeof(irq_guest_action_t));
+ action = xmalloc(sizeof(irq_guest_action_t));
if ( (desc->action = (struct irqaction *)action) == NULL )
{
DPRINTK("Cannot bind IRQ %d to guest. Out of memory.\n", irq);
@@ -320,7 +320,7 @@ int pirq_guest_unbind(struct domain *p, int irq)
if ( action->nr_guests == 1 )
{
desc->action = NULL;
- kfree(action);
+ xfree(action);
desc->depth = 1;
desc->status |= IRQ_DISABLED;
desc->status &= ~IRQ_GUEST;
diff --git a/xen/arch/x86/mpparse.c b/xen/arch/x86/mpparse.c
index d10e4ce9dd..c371424e8e 100644
--- a/xen/arch/x86/mpparse.c
+++ b/xen/arch/x86/mpparse.c
@@ -509,7 +509,7 @@ static int __init smp_read_mpc(struct mp_config_table *mpc)
count = (max_mp_busses * sizeof(int)) * 4;
count += (max_irq_sources * sizeof(struct mpc_config_intsrc));
- bus_data = (void *)__get_free_pages(get_order(count));
+ bus_data = (void *)alloc_xenheap_pages(get_order(count));
if (!bus_data) {
printk(KERN_ERR "SMP mptable: out of memory!\n");
return 0;
@@ -694,7 +694,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
} *bus_data;
- bus_data = (void *)__get_free_pages(get_order(sizeof(*bus_data)));
+ bus_data = (void *)alloc_xenheap_pages(get_order(sizeof(*bus_data)));
if (!bus_data)
panic("SMP mptable: out of memory!\n");
mp_bus_id_to_type = bus_data->mp_bus_id_to_type;
@@ -1171,7 +1171,7 @@ void __init mp_config_acpi_legacy_irqs (void)
count = (MAX_MP_BUSSES * sizeof(int)) * 4;
count += (MAX_IRQ_SOURCES * sizeof(int)) * 4;
- bus_data = (void *)__get_free_pages(get_order(count));
+ bus_data = (void *)alloc_xenheap_pages(get_order(count));
if (!bus_data) {
panic("Fatal: can't allocate bus memory for ACPI legacy IRQ!");
}
diff --git a/xen/arch/x86/pci-pc.c b/xen/arch/x86/pci-pc.c
index 575c3312f2..261606bf91 100644
--- a/xen/arch/x86/pci-pc.c
+++ b/xen/arch/x86/pci-pc.c
@@ -1003,7 +1003,7 @@ struct irq_routing_table * __devinit pcibios_get_irq_routing_table(void)
if (!pci_bios_present)
return NULL;
- page = __get_free_page();
+ page = alloc_xenheap_page();
if (!page)
return NULL;
opt.table = (struct irq_info *) page;
@@ -1030,7 +1030,7 @@ struct irq_routing_table * __devinit pcibios_get_irq_routing_table(void)
if (ret & 0xff00)
printk(KERN_ERR "PCI: Error %02x when fetching IRQ routing table.\n", (ret >> 8) & 0xff);
else if (opt.size) {
- rt = kmalloc(sizeof(struct irq_routing_table) + opt.size);
+ rt = xmalloc(sizeof(struct irq_routing_table) + opt.size);
if (rt) {
memset(rt, 0, sizeof(struct irq_routing_table));
rt->size = opt.size + sizeof(struct irq_routing_table);
@@ -1039,7 +1039,7 @@ struct irq_routing_table * __devinit pcibios_get_irq_routing_table(void)
printk(KERN_INFO "PCI: Using BIOS Interrupt Routing Table\n");
}
}
- free_page(page);
+ free_xenheap_page(page);
return rt;
}
@@ -1109,7 +1109,7 @@ static void __devinit pcibios_fixup_ghosts(struct pci_bus *b)
if (d->devfn >= mirror) {
list_del(&d->global_list);
list_del(&d->bus_list);
- kfree(d);
+ xfree(d);
} else
ln = ln->next;
}
diff --git a/xen/arch/x86/pdb-stub.c b/xen/arch/x86/pdb-stub.c
index 51cb898dc5..17fdc03d0f 100644
--- a/xen/arch/x86/pdb-stub.c
+++ b/xen/arch/x86/pdb-stub.c
@@ -836,7 +836,7 @@ struct pdb_breakpoint breakpoints;
void pdb_bkpt_add (unsigned long cr3, unsigned long address)
{
- struct pdb_breakpoint *bkpt = kmalloc(sizeof(*bkpt));
+ struct pdb_breakpoint *bkpt = xmalloc(sizeof(*bkpt));
bkpt->cr3 = cr3;
bkpt->address = address;
list_add(&bkpt->list, &breakpoints.list);
@@ -877,7 +877,7 @@ int pdb_bkpt_remove (unsigned long cr3, unsigned long address)
if ( bkpt->cr3 == cr3 && bkpt->address == address )
{
list_del(&bkpt->list);
- kfree(bkpt);
+ xfree(bkpt);
return 0;
}
}
diff --git a/xen/arch/x86/shadow.c b/xen/arch/x86/shadow.c
index a611f93c42..e8da050918 100644
--- a/xen/arch/x86/shadow.c
+++ b/xen/arch/x86/shadow.c
@@ -244,7 +244,7 @@ int shadow_mode_enable( struct domain *p, unsigned int mode )
m->shadow_mode = mode;
// allocate hashtable
- m->shadow_ht = kmalloc(shadow_ht_buckets *
+ m->shadow_ht = xmalloc(shadow_ht_buckets *
sizeof(struct shadow_status));
if( m->shadow_ht == NULL )
goto nomem;
@@ -252,7 +252,7 @@ int shadow_mode_enable( struct domain *p, unsigned int mode )
memset(m->shadow_ht, 0, shadow_ht_buckets * sizeof(struct shadow_status));
// allocate space for first lot of extra nodes
- m->shadow_ht_extras = kmalloc(sizeof(void*) +
+ m->shadow_ht_extras = xmalloc(sizeof(void*) +
(shadow_ht_extra_size *
sizeof(struct shadow_status)));
if( m->shadow_ht_extras == NULL )
@@ -278,7 +278,7 @@ int shadow_mode_enable( struct domain *p, unsigned int mode )
{
m->shadow_dirty_bitmap_size = (p->max_pages+63)&(~63);
m->shadow_dirty_bitmap =
- kmalloc( m->shadow_dirty_bitmap_size/8);
+ xmalloc( m->shadow_dirty_bitmap_size/8);
if( m->shadow_dirty_bitmap == NULL )
{
m->shadow_dirty_bitmap_size = 0;
@@ -313,20 +313,20 @@ void __shadow_mode_disable(struct domain *d)
struct shadow_status * this = next;
m->shadow_extras_count--;
next = *((struct shadow_status **)(&next[shadow_ht_extra_size]));
- kfree(this);
+ xfree(this);
}
SH_LOG("freed extras, now %d", m->shadow_extras_count);
if ( m->shadow_dirty_bitmap )
{
- kfree( m->shadow_dirty_bitmap );
+ xfree( m->shadow_dirty_bitmap );
m->shadow_dirty_bitmap = 0;
m->shadow_dirty_bitmap_size = 0;
}
// free the hashtable itself
- kfree( &m->shadow_ht[0] );
+ xfree( &m->shadow_ht[0] );
}
static int shadow_mode_table_op(struct domain *d,
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index ef7a39df89..bd9c0951ec 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -406,7 +406,7 @@ void __init start_secondary(void)
* At this point, boot CPU has fully initialised the IDT. It is
* now safe to make ourselves a private copy.
*/
- idt_tables[cpu] = kmalloc(IDT_ENTRIES*8);
+ idt_tables[cpu] = xmalloc(IDT_ENTRIES*8);
memcpy(idt_tables[cpu], idt_table, IDT_ENTRIES*8);
*(unsigned short *)(&idt_load[0]) = (IDT_ENTRIES*8)-1;
*(unsigned long *)(&idt_load[2]) = (unsigned long)idt_tables[cpu];
@@ -671,7 +671,7 @@ static void __init do_boot_cpu (int apicid)
/* So we see what's up. */
printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
- stack = __pa(__get_free_pages(1));
+ stack = __pa(alloc_xenheap_pages(1));
stack_start.esp = stack + STACK_SIZE - STACK_RESERVED;
/* Debug build: detect stack overflow by setting up a guard page. */
diff --git a/xen/arch/x86/x86_32/mm.c b/xen/arch/x86/x86_32/mm.c
index a77ad89e2b..61a8554e05 100644
--- a/xen/arch/x86/x86_32/mm.c
+++ b/xen/arch/x86/x86_32/mm.c
@@ -70,7 +70,7 @@ static void __init fixrange_init(unsigned long start,
{
if ( l2_pgentry_val(*l2e) != 0 )
continue;
- page = (unsigned long)get_free_page();
+ page = (unsigned long)alloc_xenheap_page();
clear_page(page);
*l2e = mk_l2_pgentry(__pa(page) | __PAGE_HYPERVISOR);
vaddr += 1 << L2_PAGETABLE_SHIFT;
@@ -97,7 +97,7 @@ void __init paging_init(void)
fixrange_init(addr, 0, idle_pg_table);
/* Create page table for ioremap(). */
- ioremap_pt = (void *)get_free_page();
+ ioremap_pt = (void *)alloc_xenheap_page();
clear_page(ioremap_pt);
idle_pg_table[IOREMAP_VIRT_START >> L2_PAGETABLE_SHIFT] =
mk_l2_pgentry(__pa(ioremap_pt) | __PAGE_HYPERVISOR);
@@ -109,7 +109,7 @@ void __init paging_init(void)
~_PAGE_RW);
/* Set up mapping cache for domain pages. */
- mapcache = (unsigned long *)get_free_page();
+ mapcache = (unsigned long *)alloc_xenheap_page();
clear_page(mapcache);
idle_pg_table[MAPCACHE_VIRT_START >> L2_PAGETABLE_SHIFT] =
mk_l2_pgentry(__pa(mapcache) | __PAGE_HYPERVISOR);
diff --git a/xen/common/ac_timer.c b/xen/common/ac_timer.c
index f28a783400..658fea3457 100644
--- a/xen/common/ac_timer.c
+++ b/xen/common/ac_timer.c
@@ -130,13 +130,13 @@ static int add_entry(struct ac_timer **heap, struct ac_timer *t)
if ( unlikely(sz == GET_HEAP_LIMIT(heap)) )
{
int i, limit = (GET_HEAP_LIMIT(heap)+1) << 1;
- struct ac_timer **new_heap = kmalloc(limit*sizeof(struct ac_timer *));
+ struct ac_timer **new_heap = xmalloc(limit*sizeof(struct ac_timer *));
if ( new_heap == NULL ) BUG();
memcpy(new_heap, heap, (limit>>1)*sizeof(struct ac_timer *));
for ( i = 0; i < smp_num_cpus; i++ )
if ( ac_timers[i].heap == heap )
ac_timers[i].heap = new_heap;
- kfree(heap);
+ xfree(heap);
heap = new_heap;
SET_HEAP_LIMIT(heap, limit-1);
}
@@ -278,7 +278,7 @@ void __init ac_timer_init(void)
for ( i = 0; i < smp_num_cpus; i++ )
{
- ac_timers[i].heap = kmalloc(
+ ac_timers[i].heap = xmalloc(
(DEFAULT_HEAP_LIMIT+1) * sizeof(struct ac_timer *));
if ( ac_timers[i].heap == NULL ) BUG();
SET_HEAP_SIZE(ac_timers[i].heap, 0);
diff --git a/xen/common/dom0_ops.c b/xen/common/dom0_ops.c
index c1f5486908..946cfb106a 100644
--- a/xen/common/dom0_ops.c
+++ b/xen/common/dom0_ops.c
@@ -392,7 +392,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
if ( op->u.getdomaininfo.ctxt != NULL )
{
- if ( (c = kmalloc(sizeof(*c))) == NULL )
+ if ( (c = xmalloc(sizeof(*c))) == NULL )
{
ret = -ENOMEM;
put_domain(d);
@@ -411,7 +411,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
ret = -EINVAL;
if ( c != NULL )
- kfree(c);
+ xfree(c);
}
if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 7082f07bed..111210c5e7 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -365,7 +365,7 @@ void domain_destruct(struct domain *d)
destroy_event_channels(d);
free_perdomain_pt(d);
- free_page((unsigned long)d->shared_info);
+ free_xenheap_page((unsigned long)d->shared_info);
free_domain_struct(d);
}
@@ -381,7 +381,7 @@ int final_setup_guestos(struct domain *p, dom0_builddomain_t *builddomain)
int rc = 0;
full_execution_context_t *c;
- if ( (c = kmalloc(sizeof(*c))) == NULL )
+ if ( (c = xmalloc(sizeof(*c))) == NULL )
return -ENOMEM;
if ( test_bit(DF_CONSTRUCTED, &p->flags) )
@@ -405,6 +405,6 @@ int final_setup_guestos(struct domain *p, dom0_builddomain_t *builddomain)
out:
if ( c != NULL )
- kfree(c);
+ xfree(c);
return rc;
}
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index 159c06119a..c8a2560201 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -48,7 +48,7 @@ static int get_free_port(struct domain *d)
max *= 2;
- chn = kmalloc(max * sizeof(event_channel_t));
+ chn = xmalloc(max * sizeof(event_channel_t));
if ( unlikely(chn == NULL) )
return -ENOMEM;
@@ -57,7 +57,7 @@ static int get_free_port(struct domain *d)
if ( d->event_channel != NULL )
{
memcpy(chn, d->event_channel, (max/2) * sizeof(event_channel_t));
- kfree(d->event_channel);
+ xfree(d->event_channel);
}
d->event_channel = chn;
@@ -477,7 +477,7 @@ long do_event_channel_op(evtchn_op_t *uop)
int init_event_channels(struct domain *d)
{
spin_lock_init(&d->event_channel_lock);
- d->event_channel = kmalloc(INIT_EVENT_CHANNELS * sizeof(event_channel_t));
+ d->event_channel = xmalloc(INIT_EVENT_CHANNELS * sizeof(event_channel_t));
if ( unlikely(d->event_channel == NULL) )
return -ENOMEM;
d->max_event_channel = INIT_EVENT_CHANNELS;
@@ -495,6 +495,6 @@ void destroy_event_channels(struct domain *d)
{
for ( i = 0; i < d->max_event_channel; i++ )
(void)__evtchn_close(d, i);
- kfree(d->event_channel);
+ xfree(d->event_channel);
}
}
diff --git a/xen/common/kernel.c b/xen/common/kernel.c
index 7facb69cac..489269f862 100644
--- a/xen/common/kernel.c
+++ b/xen/common/kernel.c
@@ -28,7 +28,7 @@
unsigned long xenheap_phys_end;
-kmem_cache_t *domain_struct_cachep;
+xmem_cache_t *domain_struct_cachep;
struct e820entry {
unsigned long addr_lo, addr_hi; /* start of memory segment */
@@ -268,10 +268,10 @@ void cmain(multiboot_info_t *mbi)
init_page_allocator(__pa(heap_start), xenheap_phys_end);
/* Initialise the slab allocator. */
- kmem_cache_init();
- kmem_cache_sizes_init(max_page);
+ xmem_cache_init();
+ xmem_cache_sizes_init(max_page);
- domain_struct_cachep = kmem_cache_create(
+ domain_struct_cachep = xmem_cache_create(
"domain_cache", sizeof(struct domain),
0, SLAB_HWCACHE_ALIGN, NULL, NULL);
if ( domain_struct_cachep == NULL )
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 113d2f5fba..3dc3940f38 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -263,7 +263,7 @@ void __init init_page_allocator(unsigned long min, unsigned long max)
/* Allocate 2^@order contiguous pages. Returns a VIRTUAL address. */
-unsigned long __get_free_pages(int order)
+unsigned long alloc_xenheap_pages(int order)
{
int i, attempts = 0;
chunk_head_t *alloc_ch, *spare_ch;
@@ -321,7 +321,7 @@ retry:
if ( attempts++ < 8 )
{
- kmem_cache_reap();
+ xmem_cache_reap();
goto retry;
}
@@ -333,7 +333,7 @@ retry:
/* Free 2^@order pages at VIRTUAL address @p. */
-void __free_pages(unsigned long p, int order)
+void free_xenheap_pages(unsigned long p, int order)
{
unsigned long size = 1 << (order + PAGE_SHIFT);
chunk_head_t *ch;
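
The renamed xenheap page interface is order-based: alloc_xenheap_pages(order) returns
the VIRTUAL address of 2^order contiguous pages (retrying via xmem_cache_reap() a few
times before failing), and free_xenheap_pages() must be handed back the same order.
A minimal sketch of that calling pattern, assuming the declarations are in <xen/mm.h>
(touched by this patch) and using get_order() the same way as the converted mpparse.c
and trace.c callers; demo_buffer() and nbytes are hypothetical:

    #include <xen/mm.h>      /* alloc_xenheap_pages(), free_xenheap_pages()   */

    static void demo_buffer(void)
    {
        unsigned long nbytes = 3 * PAGE_SIZE + 128;        /* hypothetical size     */
        int order            = get_order(nbytes);          /* round up to 2^order pages */
        unsigned long buf    = alloc_xenheap_pages(order); /* virtual address       */

        if ( buf == 0 )
            return;                                        /* allocation failed     */

        /* ... use the buffer via (void *)buf ... */

        free_xenheap_pages(buf, order);                    /* same order on free    */
    }
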
diff --git a/xen/common/physdev.c b/xen/common/physdev.c
index 2cbfd9ec35..a3852f4850 100644
--- a/xen/common/physdev.c
+++ b/xen/common/physdev.c
@@ -98,7 +98,7 @@ static void add_dev_to_task(struct domain *p,
return;
}
- if ( (pdev = kmalloc(sizeof(phys_dev_t))) == NULL )
+ if ( (pdev = xmalloc(sizeof(phys_dev_t))) == NULL )
{
INFO("Error allocating pdev structure.\n");
return;
@@ -171,7 +171,7 @@ int physdev_pci_access_modify(
if ( p->io_bitmap == NULL )
{
- if ( (p->io_bitmap = kmalloc(IO_BITMAP_BYTES)) == NULL )
+ if ( (p->io_bitmap = xmalloc(IO_BITMAP_BYTES)) == NULL )
{
rc = -ENOMEM;
goto out;
@@ -737,7 +737,7 @@ void physdev_init_dom0(struct domain *p)
/* Skip bridges and other peculiarities for now. */
if ( dev->hdr_type != PCI_HEADER_TYPE_NORMAL )
continue;
- pdev = kmalloc(sizeof(phys_dev_t));
+ pdev = xmalloc(sizeof(phys_dev_t));
pdev->dev = dev;
pdev->flags = ACC_WRITE;
pdev->state = 0;
diff --git a/xen/common/resource.c b/xen/common/resource.c
index 26855806a5..8df57772a6 100644
--- a/xen/common/resource.c
+++ b/xen/common/resource.c
@@ -220,7 +220,7 @@ int allocate_resource(struct resource *root, struct resource *new,
*/
struct resource * __request_region(struct resource *parent, unsigned long start, unsigned long n, const char *name)
{
- struct resource *res = kmalloc(sizeof(*res));
+ struct resource *res = xmalloc(sizeof(*res));
if (res) {
memset(res, 0, sizeof(*res));
@@ -244,7 +244,7 @@ struct resource * __request_region(struct resource *parent, unsigned long start,
}
/* Uhhuh, that didn't work out.. */
- kfree(res);
+ xfree(res);
res = NULL;
break;
}
@@ -262,7 +262,7 @@ int __check_region(struct resource *parent, unsigned long start, unsigned long n
return -EBUSY;
release_resource(res);
- kfree(res);
+ xfree(res);
return 0;
}
@@ -287,7 +287,7 @@ void __release_region(struct resource *parent, unsigned long start, unsigned lon
if (res->start != start || res->end != end)
break;
*p = res->sibling;
- kfree(res);
+ xfree(res);
return;
}
p = &res->sibling;
diff --git a/xen/common/sched_atropos.c b/xen/common/sched_atropos.c
index 35a91e5b33..53e4519b9f 100644
--- a/xen/common/sched_atropos.c
+++ b/xen/common/sched_atropos.c
@@ -75,7 +75,7 @@ struct at_cpu_info
/* SLAB cache for struct at_dom_info objects */
-static kmem_cache_t *dom_info_cache;
+static xmem_cache_t *dom_info_cache;
/** calculate the length of a linked list */
@@ -528,14 +528,14 @@ static int at_init_scheduler()
for ( i = 0; i < NR_CPUS; i++ )
{
- schedule_data[i].sched_priv = kmalloc(sizeof(struct at_cpu_info));
+ schedule_data[i].sched_priv = xmalloc(sizeof(struct at_cpu_info));
if ( schedule_data[i].sched_priv == NULL )
return -1;
WAITQ(i)->next = WAITQ(i);
WAITQ(i)->prev = WAITQ(i);
}
- dom_info_cache = kmem_cache_create("Atropos dom info",
+ dom_info_cache = xmem_cache_create("Atropos dom info",
sizeof(struct at_dom_info),
0, 0, NULL, NULL);
@@ -591,7 +591,7 @@ static int at_alloc_task(struct domain *p)
{
ASSERT(p != NULL);
- p->sched_priv = kmem_cache_alloc(dom_info_cache);
+ p->sched_priv = xmem_cache_alloc(dom_info_cache);
if( p->sched_priv == NULL )
return -1;
@@ -604,7 +604,7 @@ static int at_alloc_task(struct domain *p)
/* free memory associated with a task */
static void at_free_task(struct domain *p)
{
- kmem_cache_free( dom_info_cache, DOM_INFO(p) );
+ xmem_cache_free( dom_info_cache, DOM_INFO(p) );
}
diff --git a/xen/common/sched_bvt.c b/xen/common/sched_bvt.c
index d7e3ec5daa..14f9a3017c 100644
--- a/xen/common/sched_bvt.c
+++ b/xen/common/sched_bvt.c
@@ -62,7 +62,7 @@ struct bvt_cpu_info
static s32 ctx_allow = (s32)MILLISECS(5); /* context switch allowance */
/* SLAB cache for struct bvt_dom_info objects */
-static kmem_cache_t *dom_info_cache;
+static xmem_cache_t *dom_info_cache;
/*
* Calculate the effective virtual time for a domain. Take into account
@@ -102,7 +102,7 @@ static void __calc_evt(struct bvt_dom_info *inf)
*/
int bvt_alloc_task(struct domain *p)
{
- p->sched_priv = kmem_cache_alloc(dom_info_cache);
+ p->sched_priv = xmem_cache_alloc(dom_info_cache);
if ( p->sched_priv == NULL )
return -1;
@@ -164,7 +164,7 @@ int bvt_init_idle_task(struct domain *p)
void bvt_free_task(struct domain *p)
{
ASSERT( p->sched_priv != NULL );
- kmem_cache_free( dom_info_cache, p->sched_priv );
+ xmem_cache_free( dom_info_cache, p->sched_priv );
}
@@ -437,7 +437,7 @@ static void bvt_dump_cpu_state(int i)
this functions makes sure that the run_list
is initialised properly. The new domain needs
NOT to appear as to be on the runqueue */
-static void cache_constructor(void *arg1, kmem_cache_t *arg2, unsigned long arg3)
+static void cache_constructor(void *arg1, xmem_cache_t *arg2, unsigned long arg3)
{
struct bvt_dom_info *dom_inf = (struct bvt_dom_info*)arg1;
dom_inf->run_list.next = NULL;
@@ -451,7 +451,7 @@ int bvt_init_scheduler()
for ( i = 0; i < NR_CPUS; i++ )
{
- schedule_data[i].sched_priv = kmalloc(sizeof(struct bvt_cpu_info));
+ schedule_data[i].sched_priv = xmalloc(sizeof(struct bvt_cpu_info));
INIT_LIST_HEAD(RUNQUEUE(i));
if ( schedule_data[i].sched_priv == NULL )
@@ -463,7 +463,7 @@ int bvt_init_scheduler()
CPU_SVT(i) = 0; /* XXX do I really need to do this? */
}
- dom_info_cache = kmem_cache_create("BVT dom info",
+ dom_info_cache = xmem_cache_create("BVT dom info",
sizeof(struct bvt_dom_info),
0, 0, cache_constructor, NULL);
diff --git a/xen/common/sched_fair_bvt.c b/xen/common/sched_fair_bvt.c
index 46115b6495..6442f8489e 100644
--- a/xen/common/sched_fair_bvt.c
+++ b/xen/common/sched_fair_bvt.c
@@ -74,7 +74,7 @@ static s32 ctx_allow = (s32)MILLISECS(5); /* context switch allowance */
static s32 max_vtb = (s32)MILLISECS(5);
/* SLAB cache for struct fbvt_dom_info objects */
-static kmem_cache_t *dom_info_cache;
+static xmem_cache_t *dom_info_cache;
/*
* Calculate the effective virtual time for a domain. Take into account
@@ -114,7 +114,7 @@ static void __calc_evt(struct fbvt_dom_info *inf)
*/
int fbvt_alloc_task(struct domain *p)
{
- p->sched_priv = kmem_cache_alloc(dom_info_cache);
+ p->sched_priv = xmem_cache_alloc(dom_info_cache);
if ( p->sched_priv == NULL )
return -1;
@@ -178,7 +178,7 @@ int fbvt_init_idle_task(struct domain *p)
void fbvt_free_task(struct domain *p)
{
ASSERT( p->sched_priv != NULL );
- kmem_cache_free( dom_info_cache, p->sched_priv );
+ xmem_cache_free( dom_info_cache, p->sched_priv );
}
/*
@@ -503,7 +503,7 @@ static void fbvt_dump_cpu_state(int i)
this functions makes sure that the run_list
is initialised properly. The new domain needs
NOT to appear as to be on the runqueue */
-static void cache_constructor(void *arg1, kmem_cache_t *arg2, unsigned long arg3)
+static void cache_constructor(void *arg1, xmem_cache_t *arg2, unsigned long arg3)
{
struct fbvt_dom_info *dom_inf = (struct fbvt_dom_info*)arg1;
dom_inf->run_list.next = NULL;
@@ -519,7 +519,7 @@ int fbvt_init_scheduler()
for ( i = 0; i < NR_CPUS; i++ )
{
- schedule_data[i].sched_priv = kmalloc(sizeof(struct fbvt_cpu_info));
+ schedule_data[i].sched_priv = xmalloc(sizeof(struct fbvt_cpu_info));
INIT_LIST_HEAD(RUNQUEUE(i));
if ( schedule_data[i].sched_priv == NULL )
{
@@ -530,7 +530,7 @@ int fbvt_init_scheduler()
CPU_SVT(i) = 0; /* XXX do I really need to do this? */
}
- dom_info_cache = kmem_cache_create("FBVT dom info",
+ dom_info_cache = xmem_cache_create("FBVT dom info",
sizeof(struct fbvt_dom_info),
0, 0, cache_constructor, NULL);
diff --git a/xen/common/sched_rrobin.c b/xen/common/sched_rrobin.c
index 678eb2ed0a..8ddd828314 100644
--- a/xen/common/sched_rrobin.c
+++ b/xen/common/sched_rrobin.c
@@ -31,11 +31,11 @@ struct rrobin_dom_info
static void rr_dump_cpu_state(int cpu);
/* SLAB cache for struct rrobin_dom_info objects */
-static kmem_cache_t *dom_info_cache;
+static xmem_cache_t *dom_info_cache;
/* Ensures proper initialisation of the dom_info */
-static void cache_constructor(void *arg1, kmem_cache_t *arg2, unsigned long arg3)
+static void cache_constructor(void *arg1, xmem_cache_t *arg2, unsigned long arg3)
{
struct rrobin_dom_info *dom_inf = (struct rrobin_dom_info*)arg1;
dom_inf->run_list.next = NULL;
@@ -51,7 +51,7 @@ static int rr_init_scheduler()
for ( i = 0; i < NR_CPUS; i++ )
INIT_LIST_HEAD(RUNQUEUE(i));
- dom_info_cache = kmem_cache_create("FBVT dom info",
+ dom_info_cache = xmem_cache_create("FBVT dom info",
sizeof(struct rrobin_dom_info),
0, 0, cache_constructor, NULL);
@@ -66,7 +66,7 @@ static int rr_init_scheduler()
/* Allocates memory for per domain private scheduling data*/
static int rr_alloc_task(struct domain *d)
{
- d->sched_priv = kmem_cache_alloc(dom_info_cache);
+ d->sched_priv = xmem_cache_alloc(dom_info_cache);
if ( d->sched_priv == NULL )
return -1;
@@ -85,7 +85,7 @@ static void rr_add_task(struct domain *p)
static void rr_free_task(struct domain *p)
{
ASSERT( p->sched_priv != NULL );
- kmem_cache_free( dom_info_cache, p->sched_priv );
+ xmem_cache_free( dom_info_cache, p->sched_priv );
}
/* Initialises idle task */
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 0bfabf4627..96a80e0638 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -96,26 +96,26 @@ static struct ac_timer t_timer[NR_CPUS];
*/
static struct ac_timer fallback_timer[NR_CPUS];
-extern kmem_cache_t *domain_struct_cachep;
+extern xmem_cache_t *domain_struct_cachep;
void free_domain_struct(struct domain *d)
{
SCHED_OP(free_task, d);
- kmem_cache_free(domain_struct_cachep, d);
+ xmem_cache_free(domain_struct_cachep, d);
}
struct domain *alloc_domain_struct(void)
{
struct domain *d;
- if ( (d = kmem_cache_alloc(domain_struct_cachep)) == NULL )
+ if ( (d = xmem_cache_alloc(domain_struct_cachep)) == NULL )
return NULL;
memset(d, 0, sizeof(*d));
if ( SCHED_OP(alloc_task, d) < 0 )
{
- kmem_cache_free(domain_struct_cachep, d);
+ xmem_cache_free(domain_struct_cachep, d);
return NULL;
}
diff --git a/xen/common/slab.c b/xen/common/slab.c
index ab97bc5c33..172b7ee117 100644
--- a/xen/common/slab.c
+++ b/xen/common/slab.c
@@ -3,7 +3,7 @@
* Written by Mark Hemment, 1996/97.
* (markhe@nextd.demon.co.uk)
*
- * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
+ * xmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
*
* Major cleanup, different bufctl logic, per-cpu arrays
* (c) 2000 Manfred Spraul
@@ -31,8 +31,8 @@
* If partial slabs exist, then new allocations come from these slabs,
* otherwise from empty slabs or new slabs are allocated.
*
- * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
- * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
+ * xmem_cache_destroy() CAN CRASH if you try to allocate from the cache
+ * during xmem_cache_destroy(). The caller must prevent concurrent allocs.
*
* On SMP systems, each cache has a short per-cpu head array, most allocs
* and frees go into that array, and if that array overflows, then 1/2
@@ -43,7 +43,7 @@
*
* SMP synchronization:
* constructors and destructors are called without any locking.
- * Several members in kmem_cache_t and slab_t never change, they
+ * Several members in xmem_cache_t and slab_t never change, they
* are accessed without any locking.
* The per-cpu arrays are never accessed from the wrong cpu, no locking.
* The non-constant members are protected with a per-cache irq spinlock.
@@ -61,7 +61,7 @@
#include <xen/sched.h>
/*
- * DEBUG - 1 for kmem_cache_create() to honour; SLAB_DEBUG_INITIAL,
+ * DEBUG - 1 for xmem_cache_create() to honour; SLAB_DEBUG_INITIAL,
* SLAB_RED_ZONE & SLAB_POISON.
* 0 for faster, smaller code (especially in the critical paths).
*
@@ -81,7 +81,7 @@
#endif
/*
- * Parameters for kmem_cache_reap
+ * Parameters for xmem_cache_reap
*/
#define REAP_SCANLEN 10
#define REAP_PERFECT 10
@@ -89,7 +89,7 @@
/* Shouldn't this be in a header file somewhere? */
#define BYTES_PER_WORD sizeof(void *)
-/* Legal flag mask for kmem_cache_create(). */
+/* Legal flag mask for xmem_cache_create(). */
#if DEBUG
#define CREATE_MASK (SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
SLAB_POISON | SLAB_HWCACHE_ALIGN | \
@@ -99,7 +99,7 @@
#endif
/*
- * kmem_bufctl_t:
+ * xmem_bufctl_t:
*
* Bufctl's are used for linking objs within a slab
* linked offsets.
@@ -117,12 +117,12 @@
* is less than 512 (PAGE_SIZE<<3), but greater than 256.
*/
-#define BUFCTL_END (((kmem_bufctl_t)(~0U))-0)
-#define BUFCTL_FREE (((kmem_bufctl_t)(~0U))-1)
-#define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-2)
+#define BUFCTL_END (((xmem_bufctl_t)(~0U))-0)
+#define BUFCTL_FREE (((xmem_bufctl_t)(~0U))-1)
+#define SLAB_LIMIT (((xmem_bufctl_t)(~0U))-2)
/* Max number of objs-per-slab for caches which use off-slab slabs.
- * Needed to avoid a possible looping condition in kmem_cache_grow().
+ * Needed to avoid a possible looping condition in xmem_cache_grow().
*/
static unsigned long offslab_limit;
@@ -138,11 +138,11 @@ typedef struct slab_s {
unsigned long colouroff;
void *s_mem; /* including colour offset */
unsigned int inuse; /* num of objs active in slab */
- kmem_bufctl_t free;
+ xmem_bufctl_t free;
} slab_t;
#define slab_bufctl(slabp) \
- ((kmem_bufctl_t *)(((slab_t*)slabp)+1))
+ ((xmem_bufctl_t *)(((slab_t*)slabp)+1))
/*
* cpucache_t
@@ -161,14 +161,14 @@ typedef struct cpucache_s {
#define cc_data(cachep) \
((cachep)->cpudata[smp_processor_id()])
/*
- * kmem_cache_t
+ * xmem_cache_t
*
* manages a cache.
*/
#define CACHE_NAMELEN 20 /* max name length for a slab cache */
-struct kmem_cache_s {
+struct xmem_cache_s {
/* 1) each alloc & free */
/* full, partial first, then free */
struct list_head slabs_full;
@@ -188,15 +188,15 @@ struct kmem_cache_s {
size_t colour; /* cache colouring range */
unsigned int colour_off; /* colour offset */
unsigned int colour_next; /* cache colouring */
- kmem_cache_t *slabp_cache;
+ xmem_cache_t *slabp_cache;
unsigned int growing;
unsigned int dflags; /* dynamic flags */
/* constructor func */
- void (*ctor)(void *, kmem_cache_t *, unsigned long);
+ void (*ctor)(void *, xmem_cache_t *, unsigned long);
/* de-constructor func */
- void (*dtor)(void *, kmem_cache_t *, unsigned long);
+ void (*dtor)(void *, xmem_cache_t *, unsigned long);
unsigned long failures;
@@ -297,17 +297,17 @@ static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;
/* Macros for storing/retrieving the cachep and or slab from the
* global 'mem_map'. These are used to find the slab an obj belongs to.
- * With kfree(), these are used to find the cache which an obj belongs to.
+ * With xfree(), these are used to find the cache which an obj belongs to.
*/
#define SET_PAGE_CACHE(pg,x) ((pg)->list.next = (struct list_head *)(x))
-#define GET_PAGE_CACHE(pg) ((kmem_cache_t *)(pg)->list.next)
+#define GET_PAGE_CACHE(pg) ((xmem_cache_t *)(pg)->list.next)
#define SET_PAGE_SLAB(pg,x) ((pg)->list.prev = (struct list_head *)(x))
#define GET_PAGE_SLAB(pg) ((slab_t *)(pg)->list.prev)
/* Size description struct for general caches. */
typedef struct cache_sizes {
size_t cs_size;
- kmem_cache_t *cs_cachep;
+ xmem_cache_t *cs_cachep;
} cache_sizes_t;
static cache_sizes_t cache_sizes[] = {
@@ -325,15 +325,15 @@ static cache_sizes_t cache_sizes[] = {
};
/* internal cache of cache description objs */
-static kmem_cache_t cache_cache = {
+static xmem_cache_t cache_cache = {
slabs_full: LIST_HEAD_INIT(cache_cache.slabs_full),
slabs_partial: LIST_HEAD_INIT(cache_cache.slabs_partial),
slabs_free: LIST_HEAD_INIT(cache_cache.slabs_free),
- objsize: sizeof(kmem_cache_t),
+ objsize: sizeof(xmem_cache_t),
flags: SLAB_NO_REAP,
spinlock: SPIN_LOCK_UNLOCKED,
colour_off: L1_CACHE_BYTES,
- name: "kmem_cache"
+ name: "xmem_cache"
};
/* Guard access to the cache-chain. */
@@ -344,7 +344,7 @@ static spinlock_t cache_chain_sem;
#define up(_m) spin_unlock_irqrestore(_m,spin_flags)
/* Place maintainer for reaping. */
-static kmem_cache_t *clock_searchp = &cache_cache;
+static xmem_cache_t *clock_searchp = &cache_cache;
#define cache_chain (cache_cache.next)
@@ -355,12 +355,12 @@ static kmem_cache_t *clock_searchp = &cache_cache;
*/
static int g_cpucache_up;
-static void enable_cpucache (kmem_cache_t *cachep);
+static void enable_cpucache (xmem_cache_t *cachep);
static void enable_all_cpucaches (void);
#endif
/* Cal the num objs, wastage, and bytes left over for a given slab size. */
-static void kmem_cache_estimate (unsigned long gfporder, size_t size,
+static void xmem_cache_estimate (unsigned long gfporder, size_t size,
int flags, size_t *left_over, unsigned int *num)
{
int i;
@@ -370,7 +370,7 @@ static void kmem_cache_estimate (unsigned long gfporder, size_t size,
if (!(flags & CFLGS_OFF_SLAB)) {
base = sizeof(slab_t);
- extra = sizeof(kmem_bufctl_t);
+ extra = sizeof(xmem_bufctl_t);
}
i = 0;
while (i*size + L1_CACHE_ALIGN(base+i*extra) <= wastage)
@@ -388,14 +388,14 @@ static void kmem_cache_estimate (unsigned long gfporder, size_t size,
}
/* Initialisation - setup the `cache' cache. */
-void __init kmem_cache_init(void)
+void __init xmem_cache_init(void)
{
size_t left_over;
init_MUTEX(&cache_chain_sem);
INIT_LIST_HEAD(&cache_chain);
- kmem_cache_estimate(0, cache_cache.objsize, 0,
+ xmem_cache_estimate(0, cache_cache.objsize, 0,
&left_over, &cache_cache.num);
if (!cache_cache.num)
BUG();
@@ -408,7 +408,7 @@ void __init kmem_cache_init(void)
/* Initialisation - setup remaining internal and general caches.
* Called after the gfp() functions have been enabled, and before smp_init().
*/
-void __init kmem_cache_sizes_init(unsigned long num_physpages)
+void __init xmem_cache_sizes_init(unsigned long num_physpages)
{
cache_sizes_t *sizes = cache_sizes;
char name[20];
@@ -426,7 +426,7 @@ void __init kmem_cache_sizes_init(unsigned long num_physpages)
* allow tighter packing of the smaller caches. */
sprintf(name,"size-%Zd",sizes->cs_size);
if (!(sizes->cs_cachep =
- kmem_cache_create(name, sizes->cs_size,
+ xmem_cache_create(name, sizes->cs_size,
0, SLAB_HWCACHE_ALIGN, NULL, NULL))) {
BUG();
}
@@ -440,7 +440,7 @@ void __init kmem_cache_sizes_init(unsigned long num_physpages)
} while (sizes->cs_size);
}
-int __init kmem_cpucache_init(void)
+int __init xmem_cpucache_init(void)
{
#ifdef CONFIG_SMP
g_cpucache_up = 1;
@@ -449,15 +449,15 @@ int __init kmem_cpucache_init(void)
return 0;
}
-/*__initcall(kmem_cpucache_init);*/
+/*__initcall(xmem_cpucache_init);*/
/* Interface to system's page allocator. No need to hold the cache-lock.
*/
-static inline void *kmem_getpages(kmem_cache_t *cachep)
+static inline void *xmem_getpages(xmem_cache_t *cachep)
{
void *addr;
- addr = (void*) __get_free_pages(cachep->gfporder);
+ addr = (void*) alloc_xenheap_pages(cachep->gfporder);
/* Assume that now we have the pages no one else can legally
* messes with the 'struct page's.
* However vm_scan() might try to test the structure to see if
@@ -468,12 +468,12 @@ static inline void *kmem_getpages(kmem_cache_t *cachep)
}
/* Interface to system's page release. */
-static inline void kmem_freepages (kmem_cache_t *cachep, void *addr)
+static inline void xmem_freepages (xmem_cache_t *cachep, void *addr)
{
unsigned long i = (1<<cachep->gfporder);
struct pfn_info *page = virt_to_page(addr);
- /* free_pages() does not clear the type bit - we do that.
+ /* free_xenheap_pages() does not clear the type bit - we do that.
* The pages have been unlinked from their cache-slab,
* but their 'struct page's might be accessed in
* vm_scan(). Shouldn't be a worry.
@@ -483,11 +483,11 @@ static inline void kmem_freepages (kmem_cache_t *cachep, void *addr)
page++;
}
- free_pages((unsigned long)addr, cachep->gfporder);
+ free_xenheap_pages((unsigned long)addr, cachep->gfporder);
}
#if DEBUG
-static inline void kmem_poison_obj (kmem_cache_t *cachep, void *addr)
+static inline void xmem_poison_obj (xmem_cache_t *cachep, void *addr)
{
int size = cachep->objsize;
if (cachep->flags & SLAB_RED_ZONE) {
@@ -498,7 +498,7 @@ static inline void kmem_poison_obj (kmem_cache_t *cachep, void *addr)
*(unsigned char *)(addr+size-1) = POISON_END;
}
-static inline int kmem_check_poison_obj (kmem_cache_t *cachep, void *addr)
+static inline int xmem_check_poison_obj (xmem_cache_t *cachep, void *addr)
{
int size = cachep->objsize;
void *end;
@@ -517,7 +517,7 @@ static inline int kmem_check_poison_obj (kmem_cache_t *cachep, void *addr)
* Before calling the slab must have been unlinked from the cache.
* The cache-lock is not held/needed.
*/
-static void kmem_slab_destroy (kmem_cache_t *cachep, slab_t *slabp)
+static void xmem_slab_destroy (xmem_cache_t *cachep, slab_t *slabp)
{
if (cachep->dtor
#if DEBUG
@@ -544,19 +544,19 @@ static void kmem_slab_destroy (kmem_cache_t *cachep, slab_t *slabp)
objp -= BYTES_PER_WORD;
}
if ((cachep->flags & SLAB_POISON) &&
- kmem_check_poison_obj(cachep, objp))
+ xmem_check_poison_obj(cachep, objp))
BUG();
#endif
}
}
- kmem_freepages(cachep, slabp->s_mem-slabp->colouroff);
+ xmem_freepages(cachep, slabp->s_mem-slabp->colouroff);
if (OFF_SLAB(cachep))
- kmem_cache_free(cachep->slabp_cache, slabp);
+ xmem_cache_free(cachep->slabp_cache, slabp);
}
/**
- * kmem_cache_create - Create a cache.
+ * xmem_cache_create - Create a cache.
* @name: A string which is used in /proc/slabinfo to identify this cache.
* @size: The size of objects to be created in this cache.
* @offset: The offset to use within the page.
@@ -583,15 +583,15 @@ static void kmem_slab_destroy (kmem_cache_t *cachep, slab_t *slabp)
* cacheline. This can be beneficial if you're counting cycles as closely
* as davem.
*/
-kmem_cache_t *
-kmem_cache_create (const char *name, size_t size, size_t offset,
+xmem_cache_t *
+xmem_cache_create (const char *name, size_t size, size_t offset,
unsigned long flags,
- void (*ctor)(void*, kmem_cache_t *, unsigned long),
- void (*dtor)(void*, kmem_cache_t *, unsigned long))
+ void (*ctor)(void*, xmem_cache_t *, unsigned long),
+ void (*dtor)(void*, xmem_cache_t *, unsigned long))
{
- const char *func_nm = KERN_ERR "kmem_create: ";
+ const char *func_nm = KERN_ERR "xmem_create: ";
size_t left_over, align, slab_size;
- kmem_cache_t *cachep = NULL;
+ xmem_cache_t *cachep = NULL;
unsigned long spin_flags;
/*
@@ -639,10 +639,10 @@ kmem_cache_create (const char *name, size_t size, size_t offset,
BUG();
/* Get cache's description obj. */
- cachep = (kmem_cache_t *)kmem_cache_alloc(&cache_cache);
+ cachep = (xmem_cache_t *)xmem_cache_alloc(&cache_cache);
if (!cachep)
goto opps;
- memset(cachep, 0, sizeof(kmem_cache_t));
+ memset(cachep, 0, sizeof(xmem_cache_t));
/* Check that size is in terms of words. This is needed to avoid
* unaligned accesses for some archs when redzoning is used, and makes
@@ -693,7 +693,7 @@ kmem_cache_create (const char *name, size_t size, size_t offset,
do {
unsigned int break_flag = 0;
cal_wastage:
- kmem_cache_estimate(cachep->gfporder, size, flags,
+ xmem_cache_estimate(cachep->gfporder, size, flags,
&left_over, &cachep->num);
if (break_flag)
break;
@@ -722,12 +722,12 @@ kmem_cache_create (const char *name, size_t size, size_t offset,
} while (1);
if (!cachep->num) {
- printk("kmem_cache_create: couldn't create cache %s.\n", name);
- kmem_cache_free(&cache_cache, cachep);
+ printk("xmem_cache_create: couldn't create cache %s.\n", name);
+ xmem_cache_free(&cache_cache, cachep);
cachep = NULL;
goto opps;
}
- slab_size = L1_CACHE_ALIGN(cachep->num*sizeof(kmem_bufctl_t) +
+ slab_size = L1_CACHE_ALIGN(cachep->num*sizeof(xmem_bufctl_t) +
sizeof(slab_t));
/*
@@ -759,7 +759,7 @@ kmem_cache_create (const char *name, size_t size, size_t offset,
INIT_LIST_HEAD(&cachep->slabs_free);
if (flags & CFLGS_OFF_SLAB)
- cachep->slabp_cache = kmem_find_general_cachep(slab_size);
+ cachep->slabp_cache = xmem_find_general_cachep(slab_size);
cachep->ctor = ctor;
cachep->dtor = dtor;
/* Copy name over so we don't have problems with unloaded modules */
@@ -775,7 +775,7 @@ kmem_cache_create (const char *name, size_t size, size_t offset,
struct list_head *p;
list_for_each(p, &cache_chain) {
- kmem_cache_t *pc = list_entry(p, kmem_cache_t, next);
+ xmem_cache_t *pc = list_entry(p, xmem_cache_t, next);
/* The name field is constant - no lock needed. */
if (!strcmp(pc->name, name))
@@ -795,10 +795,10 @@ kmem_cache_create (const char *name, size_t size, size_t offset,
#if DEBUG
/*
- * This check if the kmem_cache_t pointer is chained in the cache_cache
+ * This check if the xmem_cache_t pointer is chained in the cache_cache
* list. -arca
*/
-static int is_chained_kmem_cache(kmem_cache_t * cachep)
+static int is_chained_xmem_cache(xmem_cache_t * cachep)
{
struct list_head *p;
int ret = 0;
@@ -817,7 +817,7 @@ static int is_chained_kmem_cache(kmem_cache_t * cachep)
return ret;
}
#else
-#define is_chained_kmem_cache(x) 1
+#define is_chained_xmem_cache(x) 1
#endif
#ifdef CONFIG_SMP
@@ -835,7 +835,7 @@ static void smp_call_function_all_cpus(void (*func) (void *arg), void *arg)
}
typedef struct ccupdate_struct_s
{
- kmem_cache_t *cachep;
+ xmem_cache_t *cachep;
cpucache_t *new[NR_CPUS];
} ccupdate_struct_t;
@@ -848,9 +848,9 @@ static void do_ccupdate_local(void *info)
new->new[smp_processor_id()] = old;
}
-static void free_block (kmem_cache_t* cachep, void** objpp, int len);
+static void free_block (xmem_cache_t* cachep, void** objpp, int len);
-static void drain_cpu_caches(kmem_cache_t *cachep)
+static void drain_cpu_caches(xmem_cache_t *cachep)
{
ccupdate_struct_t new;
int i;
@@ -880,7 +880,7 @@ static void drain_cpu_caches(kmem_cache_t *cachep)
#define drain_cpu_caches(cachep) do { } while (0)
#endif
-static int __kmem_cache_shrink(kmem_cache_t *cachep)
+static int __xmem_cache_shrink(xmem_cache_t *cachep)
{
slab_t *slabp;
int ret;
@@ -905,7 +905,7 @@ static int __kmem_cache_shrink(kmem_cache_t *cachep)
list_del(&slabp->list);
spin_unlock_irq(&cachep->spinlock);
- kmem_slab_destroy(cachep, slabp);
+ xmem_slab_destroy(cachep, slabp);
spin_lock_irq(&cachep->spinlock);
}
ret = (!list_empty(&cachep->slabs_full) ||
@@ -915,25 +915,25 @@ static int __kmem_cache_shrink(kmem_cache_t *cachep)
}
/**
- * kmem_cache_shrink - Shrink a cache.
+ * xmem_cache_shrink - Shrink a cache.
* @cachep: The cache to shrink.
*
* Releases as many slabs as possible for a cache.
* To help debugging, a zero exit status indicates all slabs were released.
*/
-int kmem_cache_shrink(kmem_cache_t *cachep)
+int xmem_cache_shrink(xmem_cache_t *cachep)
{
- if (!cachep || !is_chained_kmem_cache(cachep))
+ if (!cachep || !is_chained_xmem_cache(cachep))
BUG();
- return __kmem_cache_shrink(cachep);
+ return __xmem_cache_shrink(cachep);
}
/**
- * kmem_cache_destroy - delete a cache
+ * xmem_cache_destroy - delete a cache
* @cachep: the cache to destroy
*
- * Remove a kmem_cache_t object from the slab cache.
+ * Remove a xmem_cache_t object from the slab cache.
* Returns 0 on success.
*
* It is expected this function will be called by a module when it is
@@ -942,9 +942,9 @@ int kmem_cache_shrink(kmem_cache_t *cachep)
* module doesn't have persistent in-kernel storage across loads and unloads.
*
* The caller must guarantee that noone will allocate memory from the cache
- * during the kmem_cache_destroy().
+ * during the xmem_cache_destroy().
*/
-int kmem_cache_destroy (kmem_cache_t * cachep)
+int xmem_cache_destroy (xmem_cache_t * cachep)
{
unsigned long spin_flags;
@@ -956,12 +956,12 @@ int kmem_cache_destroy (kmem_cache_t * cachep)
/* the chain is never empty, cache_cache is never destroyed */
if (clock_searchp == cachep)
clock_searchp = list_entry(cachep->next.next,
- kmem_cache_t, next);
+ xmem_cache_t, next);
list_del(&cachep->next);
up(&cache_chain_sem);
- if (__kmem_cache_shrink(cachep)) {
- printk(KERN_ERR "kmem_cache_destroy: Can't free all objects %p\n",
+ if (__xmem_cache_shrink(cachep)) {
+ printk(KERN_ERR "xmem_cache_destroy: Can't free all objects %p\n",
cachep);
down(&cache_chain_sem);
list_add(&cachep->next,&cache_chain);
@@ -972,16 +972,16 @@ int kmem_cache_destroy (kmem_cache_t * cachep)
{
int i;
for (i = 0; i < NR_CPUS; i++)
- kfree(cachep->cpudata[i]);
+ xfree(cachep->cpudata[i]);
}
#endif
- kmem_cache_free(&cache_cache, cachep);
+ xmem_cache_free(&cache_cache, cachep);
return 0;
}
/* Get the memory for a slab management obj. */
-static inline slab_t *kmem_cache_slabmgmt(kmem_cache_t *cachep,
+static inline slab_t *xmem_cache_slabmgmt(xmem_cache_t *cachep,
void *objp, int colour_off,
int local_flags)
{
@@ -989,7 +989,7 @@ static inline slab_t *kmem_cache_slabmgmt(kmem_cache_t *cachep,
if (OFF_SLAB(cachep)) {
/* Slab management obj is off-slab. */
- slabp = kmem_cache_alloc(cachep->slabp_cache);
+ slabp = xmem_cache_alloc(cachep->slabp_cache);
if (!slabp)
return NULL;
} else {
@@ -999,7 +999,7 @@ static inline slab_t *kmem_cache_slabmgmt(kmem_cache_t *cachep,
*/
slabp = objp+colour_off;
colour_off += L1_CACHE_ALIGN(cachep->num *
- sizeof(kmem_bufctl_t) + sizeof(slab_t));
+ sizeof(xmem_bufctl_t) + sizeof(slab_t));
}
slabp->inuse = 0;
slabp->colouroff = colour_off;
@@ -1008,7 +1008,7 @@ static inline slab_t *kmem_cache_slabmgmt(kmem_cache_t *cachep,
return slabp;
}
-static inline void kmem_cache_init_objs(kmem_cache_t *cachep,
+static inline void xmem_cache_init_objs(xmem_cache_t *cachep,
slab_t *slabp,
unsigned long ctor_flags)
{
@@ -1037,7 +1037,7 @@ static inline void kmem_cache_init_objs(kmem_cache_t *cachep,
objp -= BYTES_PER_WORD;
if (cachep->flags & SLAB_POISON)
/* need to poison the objs */
- kmem_poison_obj(cachep, objp);
+ xmem_poison_obj(cachep, objp);
if (cachep->flags & SLAB_RED_ZONE) {
if (*((unsigned long*)(objp)) != RED_MAGIC1)
BUG();
@@ -1054,9 +1054,9 @@ static inline void kmem_cache_init_objs(kmem_cache_t *cachep,
/*
* Grow (by 1) the number of slabs within a cache. This is called by
- * kmem_cache_alloc() when there are no active objs left in a cache.
+ * xmem_cache_alloc() when there are no active objs left in a cache.
*/
-static int kmem_cache_grow(kmem_cache_t * cachep)
+static int xmem_cache_grow(xmem_cache_t * cachep)
{
slab_t *slabp;
struct pfn_info *page; unsigned int i;
@@ -1086,16 +1086,16 @@ static int kmem_cache_grow(kmem_cache_t * cachep)
* held, but the incrementing c_growing prevents this
* cache from being reaped or shrunk.
* Note: The cache could be selected in for reaping in
- * kmem_cache_reap(), but when the final test is made the
+ * xmem_cache_reap(), but when the final test is made the
* growing value will be seen.
*/
/* Get mem for the objs. */
- if (!(objp = kmem_getpages(cachep)))
+ if (!(objp = xmem_getpages(cachep)))
goto failed;
/* Get slab management. */
- if (!(slabp = kmem_cache_slabmgmt(cachep, objp, offset, 0)))
+ if (!(slabp = xmem_cache_slabmgmt(cachep, objp, offset, 0)))
goto opps1;
/* Nasty!!!!!! I hope this is OK. */
@@ -1108,7 +1108,7 @@ static int kmem_cache_grow(kmem_cache_t * cachep)
page++;
} while (--i);
- kmem_cache_init_objs(cachep, slabp, ctor_flags);
+ xmem_cache_init_objs(cachep, slabp, ctor_flags);
spin_lock_irqsave(&cachep->spinlock, save_flags);
cachep->growing--;
@@ -1121,7 +1121,7 @@ static int kmem_cache_grow(kmem_cache_t * cachep)
spin_unlock_irqrestore(&cachep->spinlock, save_flags);
return 1;
opps1:
- kmem_freepages(cachep, objp);
+ xmem_freepages(cachep, objp);
failed:
spin_lock_irqsave(&cachep->spinlock, save_flags);
cachep->growing--;
@@ -1137,7 +1137,7 @@ static int kmem_cache_grow(kmem_cache_t * cachep)
*/
#if DEBUG
-static int kmem_extra_free_checks (kmem_cache_t * cachep,
+static int xmem_extra_free_checks (xmem_cache_t * cachep,
slab_t *slabp, void * objp)
{
int i;
@@ -1157,7 +1157,7 @@ static int kmem_extra_free_checks (kmem_cache_t * cachep,
}
#endif
-static inline void * kmem_cache_alloc_one_tail (kmem_cache_t *cachep,
+static inline void * xmem_cache_alloc_one_tail (xmem_cache_t *cachep,
slab_t *slabp)
{
void *objp;
@@ -1177,7 +1177,7 @@ static inline void * kmem_cache_alloc_one_tail (kmem_cache_t *cachep,
}
#if DEBUG
if (cachep->flags & SLAB_POISON)
- if (kmem_check_poison_obj(cachep, objp))
+ if (xmem_check_poison_obj(cachep, objp))
BUG();
if (cachep->flags & SLAB_RED_ZONE) {
/* Set alloc red-zone, and check old one. */
@@ -1198,7 +1198,7 @@ static inline void * kmem_cache_alloc_one_tail (kmem_cache_t *cachep,
* caller must guarantee synchronization
* #define for the goto optimization 8-)
*/
-#define kmem_cache_alloc_one(cachep) \
+#define xmem_cache_alloc_one(cachep) \
({ \
struct list_head * slabs_partial, * entry; \
slab_t *slabp; \
@@ -1216,11 +1216,11 @@ static inline void * kmem_cache_alloc_one_tail (kmem_cache_t *cachep,
} \
\
slabp = list_entry(entry, slab_t, list); \
- kmem_cache_alloc_one_tail(cachep, slabp); \
+ xmem_cache_alloc_one_tail(cachep, slabp); \
})
#ifdef CONFIG_SMP
-void* kmem_cache_alloc_batch(kmem_cache_t* cachep)
+void* xmem_cache_alloc_batch(xmem_cache_t* cachep)
{
int batchcount = cachep->batchcount;
cpucache_t* cc = cc_data(cachep);
@@ -1244,7 +1244,7 @@ void* kmem_cache_alloc_batch(kmem_cache_t* cachep)
slabp = list_entry(entry, slab_t, list);
cc_entry(cc)[cc->avail++] =
- kmem_cache_alloc_one_tail(cachep, slabp);
+ xmem_cache_alloc_one_tail(cachep, slabp);
}
spin_unlock(&cachep->spinlock);
@@ -1254,7 +1254,7 @@ void* kmem_cache_alloc_batch(kmem_cache_t* cachep)
}
#endif
-static inline void *__kmem_cache_alloc(kmem_cache_t *cachep)
+static inline void *__xmem_cache_alloc(xmem_cache_t *cachep)
{
unsigned long flags;
void* objp;
@@ -1271,18 +1271,18 @@ static inline void *__kmem_cache_alloc(kmem_cache_t *cachep)
objp = cc_entry(cc)[--cc->avail];
} else {
STATS_INC_ALLOCMISS(cachep);
- objp = kmem_cache_alloc_batch(cachep);
+ objp = xmem_cache_alloc_batch(cachep);
if (!objp)
goto alloc_new_slab_nolock;
}
} else {
spin_lock(&cachep->spinlock);
- objp = kmem_cache_alloc_one(cachep);
+ objp = xmem_cache_alloc_one(cachep);
spin_unlock(&cachep->spinlock);
}
}
#else
- objp = kmem_cache_alloc_one(cachep);
+ objp = xmem_cache_alloc_one(cachep);
#endif
local_irq_restore(flags);
return objp;
@@ -1292,7 +1292,7 @@ static inline void *__kmem_cache_alloc(kmem_cache_t *cachep)
alloc_new_slab_nolock:
#endif
local_irq_restore(flags);
- if (kmem_cache_grow(cachep))
+ if (xmem_cache_grow(cachep))
/* Someone may have stolen our objs. Doesn't matter, we'll
* just come back here again.
*/
@@ -1310,7 +1310,7 @@ static inline void *__kmem_cache_alloc(kmem_cache_t *cachep)
# define CHECK_NR(pg) \
do { \
if (!VALID_PAGE(pg)) { \
- printk(KERN_ERR "kfree: out of range ptr %lxh.\n", \
+ printk(KERN_ERR "xfree: out of range ptr %lxh.\n", \
(unsigned long)objp); \
BUG(); \
} \
@@ -1319,7 +1319,7 @@ static inline void *__kmem_cache_alloc(kmem_cache_t *cachep)
do { \
CHECK_NR(page); \
if (!PageSlab(page)) { \
- printk(KERN_ERR "kfree: bad ptr %lxh.\n", \
+ printk(KERN_ERR "xfree: bad ptr %lxh.\n", \
(unsigned long)objp); \
BUG(); \
} \
@@ -1329,7 +1329,7 @@ static inline void *__kmem_cache_alloc(kmem_cache_t *cachep)
# define CHECK_PAGE(pg) do { } while (0)
#endif
-static inline void kmem_cache_free_one(kmem_cache_t *cachep, void *objp)
+static inline void xmem_cache_free_one(xmem_cache_t *cachep, void *objp)
{
slab_t* slabp;
@@ -1361,8 +1361,8 @@ static inline void kmem_cache_free_one(kmem_cache_t *cachep, void *objp)
BUG();
}
if (cachep->flags & SLAB_POISON)
- kmem_poison_obj(cachep, objp);
- if (kmem_extra_free_checks(cachep, slabp, objp))
+ xmem_poison_obj(cachep, objp);
+ if (xmem_extra_free_checks(cachep, slabp, objp))
return;
#endif
{
@@ -1389,14 +1389,14 @@ static inline void kmem_cache_free_one(kmem_cache_t *cachep, void *objp)
}
#ifdef CONFIG_SMP
-static inline void __free_block (kmem_cache_t* cachep,
+static inline void __free_block (xmem_cache_t* cachep,
void** objpp, int len)
{
for ( ; len > 0; len--, objpp++)
- kmem_cache_free_one(cachep, *objpp);
+ xmem_cache_free_one(cachep, *objpp);
}
-static void free_block (kmem_cache_t* cachep, void** objpp, int len)
+static void free_block (xmem_cache_t* cachep, void** objpp, int len)
{
spin_lock(&cachep->spinlock);
__free_block(cachep, objpp, len);
@@ -1405,10 +1405,10 @@ static void free_block (kmem_cache_t* cachep, void** objpp, int len)
#endif
/*
- * __kmem_cache_free
+ * __xmem_cache_free
* called with disabled ints
*/
-static inline void __kmem_cache_free (kmem_cache_t *cachep, void* objp)
+static inline void __xmem_cache_free (xmem_cache_t *cachep, void* objp)
{
#ifdef CONFIG_SMP
cpucache_t *cc = cc_data(cachep);
@@ -1432,47 +1432,47 @@ static inline void __kmem_cache_free (kmem_cache_t *cachep, void* objp)
free_block(cachep, &objp, 1);
}
#else
- kmem_cache_free_one(cachep, objp);
+ xmem_cache_free_one(cachep, objp);
#endif
}
/**
- * kmem_cache_alloc - Allocate an object
+ * xmem_cache_alloc - Allocate an object
* @cachep: The cache to allocate from.
*
* Allocate an object from this cache. The flags are only relevant
* if the cache has no available objects.
*/
-void *kmem_cache_alloc(kmem_cache_t *cachep)
+void *xmem_cache_alloc(xmem_cache_t *cachep)
{
- return __kmem_cache_alloc(cachep);
+ return __xmem_cache_alloc(cachep);
}
/**
- * kmalloc - allocate memory
+ * xmalloc - allocate memory
* @size: how many bytes of memory are required.
*/
-void *kmalloc(size_t size)
+void *xmalloc(size_t size)
{
cache_sizes_t *csizep = cache_sizes;
for (; csizep->cs_size; csizep++) {
if (size > csizep->cs_size)
continue;
- return __kmem_cache_alloc(csizep->cs_cachep);
+ return __xmem_cache_alloc(csizep->cs_cachep);
}
return NULL;
}
/**
- * kmem_cache_free - Deallocate an object
+ * xmem_cache_free - Deallocate an object
* @cachep: The cache the allocation was from.
* @objp: The previously allocated object.
*
* Free an object which was previously allocated from this
* cache.
*/
-void kmem_cache_free (kmem_cache_t *cachep, void *objp)
+void xmem_cache_free (xmem_cache_t *cachep, void *objp)
{
unsigned long flags;
#if DEBUG
@@ -1482,20 +1482,20 @@ void kmem_cache_free (kmem_cache_t *cachep, void *objp)
#endif
local_irq_save(flags);
- __kmem_cache_free(cachep, objp);
+ __xmem_cache_free(cachep, objp);
local_irq_restore(flags);
}
/**
- * kfree - free previously allocated memory
- * @objp: pointer returned by kmalloc.
+ * xfree - free previously allocated memory
+ * @objp: pointer returned by xmalloc.
*
- * Don't free memory not originally allocated by kmalloc()
+ * Don't free memory not originally allocated by xmalloc()
* or you will run into trouble.
*/
-void kfree (const void *objp)
+void xfree (const void *objp)
{
- kmem_cache_t *c;
+ xmem_cache_t *c;
unsigned long flags;
if (!objp)
@@ -1503,11 +1503,11 @@ void kfree (const void *objp)
local_irq_save(flags);
CHECK_PAGE(virt_to_page(objp));
c = GET_PAGE_CACHE(virt_to_page(objp));
- __kmem_cache_free(c, (void*)objp);
+ __xmem_cache_free(c, (void*)objp);
local_irq_restore(flags);
}
-kmem_cache_t *kmem_find_general_cachep(size_t size)
+xmem_cache_t *xmem_find_general_cachep(size_t size)
{
cache_sizes_t *csizep = cache_sizes;
@@ -1526,7 +1526,7 @@ kmem_cache_t *kmem_find_general_cachep(size_t size)
#ifdef CONFIG_SMP
/* called with cache_chain_sem acquired. */
-static int kmem_tune_cpucache (kmem_cache_t* cachep, int limit, int batchcount)
+static int xmem_tune_cpucache (xmem_cache_t* cachep, int limit, int batchcount)
{
ccupdate_struct_t new;
int i;
@@ -1548,7 +1548,7 @@ static int kmem_tune_cpucache (kmem_cache_t* cachep, int limit, int batchcount)
for (i = 0; i< smp_num_cpus; i++) {
cpucache_t* ccnew;
- ccnew = kmalloc(sizeof(void*)*limit+sizeof(cpucache_t));
+ ccnew = xmalloc(sizeof(void*)*limit+sizeof(cpucache_t));
if (!ccnew)
goto oom;
ccnew->limit = limit;
@@ -1570,16 +1570,16 @@ static int kmem_tune_cpucache (kmem_cache_t* cachep, int limit, int batchcount)
local_irq_disable();
free_block(cachep, cc_entry(ccold), ccold->avail);
local_irq_enable();
- kfree(ccold);
+ xfree(ccold);
}
return 0;
oom:
for (i--; i >= 0; i--)
- kfree(new.new[cpu_logical_map(i)]);
+ xfree(new.new[cpu_logical_map(i)]);
return -ENOMEM;
}
-static void enable_cpucache (kmem_cache_t *cachep)
+static void enable_cpucache (xmem_cache_t *cachep)
{
int err;
int limit;
@@ -1594,7 +1594,7 @@ static void enable_cpucache (kmem_cache_t *cachep)
else
limit = 252;
- err = kmem_tune_cpucache(cachep, limit, limit/2);
+ err = xmem_tune_cpucache(cachep, limit, limit/2);
if (err)
printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
cachep->name, -err);
@@ -1609,7 +1609,7 @@ static void enable_all_cpucaches (void)
p = &cache_cache.next;
do {
- kmem_cache_t* cachep = list_entry(p, kmem_cache_t, next);
+ xmem_cache_t* cachep = list_entry(p, xmem_cache_t, next);
enable_cpucache(cachep);
p = cachep->next.next;
@@ -1620,13 +1620,13 @@ static void enable_all_cpucaches (void)
#endif
/**
- * kmem_cache_reap - Reclaim memory from caches.
+ * xmem_cache_reap - Reclaim memory from caches.
*/
-int kmem_cache_reap(void)
+int xmem_cache_reap(void)
{
slab_t *slabp;
- kmem_cache_t *searchp;
- kmem_cache_t *best_cachep;
+ xmem_cache_t *searchp;
+ xmem_cache_t *best_cachep;
unsigned int best_pages;
unsigned int best_len;
unsigned int scan;
@@ -1693,14 +1693,14 @@ int kmem_cache_reap(void)
best_pages = pages;
if (pages >= REAP_PERFECT) {
clock_searchp = list_entry(searchp->next.next,
- kmem_cache_t,next);
+ xmem_cache_t,next);
goto perfect;
}
}
next_unlock:
spin_unlock_irq(&searchp->spinlock);
next:
- searchp = list_entry(searchp->next.next,kmem_cache_t,next);
+ searchp = list_entry(searchp->next.next,xmem_cache_t,next);
} while (--scan && searchp != clock_searchp);
clock_searchp = searchp;
@@ -1733,7 +1733,7 @@ int kmem_cache_reap(void)
* cache.
*/
spin_unlock_irq(&best_cachep->spinlock);
- kmem_slab_destroy(best_cachep, slabp);
+ xmem_slab_destroy(best_cachep, slabp);
spin_lock_irq(&best_cachep->spinlock);
}
spin_unlock_irq(&best_cachep->spinlock);
@@ -1762,14 +1762,14 @@ void dump_slabinfo()
down(&cache_chain_sem);
p = &cache_cache.next;
do {
- kmem_cache_t *cachep;
+ xmem_cache_t *cachep;
struct list_head *q;
slab_t *slabp;
unsigned long active_objs;
unsigned long num_objs;
unsigned long active_slabs = 0;
unsigned long num_slabs;
- cachep = list_entry(p, kmem_cache_t, next);
+ cachep = list_entry(p, xmem_cache_t, next);
spin_lock_irq(&cachep->spinlock);
active_objs = 0;
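
The slab.c hunks above only rename the allocator entry points; the pairing rules are unchanged. Below is a minimal usage sketch of the renamed interfaces (struct foo and foo_cache are made-up names, and the header set is abbreviated): objects taken from a named cache go back to that same cache via xmem_cache_free(), while xfree() locates the owning general cache itself through GET_PAGE_CACHE(), as the xfree() hunk shows.

    /* Hypothetical usage sketch of the renamed slab interfaces. */
    #include <xen/slab.h>

    struct foo { int state; };
    static xmem_cache_t *foo_cache;

    static void foo_demo(void)
    {
        struct foo *f;
        char *buf;

        /* Named cache: create once, then alloc/free against it. */
        foo_cache = xmem_cache_create("foo_cache", sizeof(struct foo),
                                      0, 0, NULL, NULL);
        if ( foo_cache == NULL )
            return;

        f = xmem_cache_alloc(foo_cache);
        if ( f != NULL )
            xmem_cache_free(foo_cache, f);  /* back to the same cache */

        /* General-purpose path: xfree() finds the cache from the page. */
        buf = xmalloc(64);
        if ( buf != NULL )
            xfree(buf);
    }
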
diff --git a/xen/common/trace.c b/xen/common/trace.c
index 6d37ceeec1..f7952886e0 100644
--- a/xen/common/trace.c
+++ b/xen/common/trace.c
@@ -59,7 +59,7 @@ void init_trace_bufs(void)
nr_pages = smp_num_cpus * opt_tbuf_size;
order = get_order(nr_pages * PAGE_SIZE);
- if ( (rawbuf = (char *)__get_free_pages(order)) == NULL )
+ if ( (rawbuf = (char *)alloc_xenheap_pages(order)) == NULL )
{
printk("Xen trace buffers: memory allocation failed\n");
return;
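
For multi-page allocations the order-based xenheap interface replaces __get_free_pages() one-for-one, as in the trace-buffer hunk above. A sketch under the same assumptions as that code (get_order() and the cast-to-pointer idiom; alloc_buffer is a hypothetical helper name):

    /* Hypothetical helper mirroring the trace-buffer allocation above. */
    #include <xen/mm.h>

    static char *alloc_buffer(unsigned long nbytes, int *order_out)
    {
        int   order = get_order(nbytes);
        char *buf   = (char *)alloc_xenheap_pages(order);

        if ( buf == NULL )
            return NULL;
        *order_out = order;    /* caller needs the order again to free */
        return buf;
    }

    /* Release later with: free_xenheap_pages((unsigned long)buf, order); */
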
diff --git a/xen/drivers/char/console.c b/xen/drivers/char/console.c
index b1887fa1fb..15219b67c7 100644
--- a/xen/drivers/char/console.c
+++ b/xen/drivers/char/console.c
@@ -303,7 +303,7 @@ long do_console_io(int cmd, int count, char *buffer)
case CONSOLEIO_write:
if ( count > (PAGE_SIZE-1) )
count = PAGE_SIZE-1;
- if ( (kbuf = (char *)get_free_page()) == NULL )
+ if ( (kbuf = (char *)alloc_xenheap_page()) == NULL )
return -ENOMEM;
kbuf[count] = '\0';
rc = count;
@@ -311,7 +311,7 @@ long do_console_io(int cmd, int count, char *buffer)
rc = -EFAULT;
else
serial_puts(sercon_handle, kbuf);
- free_page((unsigned long)kbuf);
+ free_xenheap_page((unsigned long)kbuf);
break;
case CONSOLEIO_read:
rc = 0;
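
The single-page pair works the same way; the CONSOLEIO_write path above is the canonical pattern: bound the copy to one page, allocate, use, free. A trimmed sketch (the copy-in and serial-output steps are elided, and the function name is made up):

    /* Sketch of the single-page bounce-buffer pattern used above. */
    static long console_write_sketch(void)
    {
        char *kbuf = (char *)alloc_xenheap_page();

        if ( kbuf == NULL )
            return -1;
        /* ... copy at most PAGE_SIZE-1 bytes into kbuf and emit it ... */
        free_xenheap_page((unsigned long)kbuf);
        return 0;
    }
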
diff --git a/xen/drivers/pci/pci.c b/xen/drivers/pci/pci.c
index f8697d1337..4ae22d07e0 100644
--- a/xen/drivers/pci/pci.c
+++ b/xen/drivers/pci/pci.c
@@ -1126,7 +1126,7 @@ static struct pci_bus * __devinit pci_alloc_bus(void)
{
struct pci_bus *b;
- b = kmalloc(sizeof(*b));
+ b = xmalloc(sizeof(*b));
if (b) {
memset(b, 0, sizeof(*b));
INIT_LIST_HEAD(&b->children);
@@ -1351,7 +1351,7 @@ struct pci_dev * __devinit pci_scan_device(struct pci_dev *temp)
if (l == 0xffffffff || l == 0x00000000 || l == 0x0000ffff || l == 0xffff0000)
return NULL;
- dev = kmalloc(sizeof(*dev));
+ dev = xmalloc(sizeof(*dev));
if (!dev)
return NULL;
@@ -1363,7 +1363,7 @@ struct pci_dev * __devinit pci_scan_device(struct pci_dev *temp)
set this higher, assuming the system even supports it. */
dev->dma_mask = 0xffffffff;
if (pci_setup_device(dev) < 0) {
- kfree(dev);
+ xfree(dev);
dev = NULL;
}
return dev;
@@ -1431,7 +1431,7 @@ unsigned int __devinit pci_do_scan_bus(struct pci_bus *bus)
max = bus->secondary;
/* Create a device template */
- dev0 = kmalloc(sizeof(struct pci_dev));
+ dev0 = xmalloc(sizeof(struct pci_dev));
if(!dev0) {
panic("Out of memory scanning PCI bus!\n");
}
@@ -1444,7 +1444,7 @@ unsigned int __devinit pci_do_scan_bus(struct pci_bus *bus)
dev0->devfn = devfn;
pci_scan_slot(dev0);
}
- kfree(dev0);
+ xfree(dev0);
/*
* After performing arch-dependent fixup of the bus, look behind
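
The PCI changes are mechanical substitutions of xmalloc()/xfree() for kmalloc()/kfree(); the Xen variants take only a size (no GFP flags) and return NULL on failure, so the error paths keep their shape. A sketch of the allocate/initialise/back-out idiom from pci_scan_device(), with a hypothetical struct widget and an abbreviated header set:

    /* Hypothetical allocate/initialise/back-out idiom, as in pci_scan_device(). */
    #include <xen/slab.h>

    struct widget { int id; };

    static struct widget *widget_create(int id)
    {
        struct widget *w = xmalloc(sizeof(*w));

        if ( w == NULL )
            return NULL;
        memset(w, 0, sizeof(*w));
        w->id = id;
        if ( id < 0 )            /* stand-in for a setup step that can fail */
        {
            xfree(w);
            return NULL;
        }
        return w;
    }
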
diff --git a/xen/drivers/pci/setup-bus.c b/xen/drivers/pci/setup-bus.c
index 4719c08181..0819c99b59 100644
--- a/xen/drivers/pci/setup-bus.c
+++ b/xen/drivers/pci/setup-bus.c
@@ -74,7 +74,7 @@ pbus_assign_resources_sorted(struct pci_bus *bus)
pci_assign_resource(list->dev, idx);
tmp = list;
list = list->next;
- kfree(tmp);
+ xfree(tmp);
}
return found_vga;
diff --git a/xen/drivers/pci/setup-res.c b/xen/drivers/pci/setup-res.c
index 3486f7a1ed..3435b2ac9c 100644
--- a/xen/drivers/pci/setup-res.c
+++ b/xen/drivers/pci/setup-res.c
@@ -171,10 +171,10 @@ pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
ln->res->start;
}
if (r_align > align) {
- tmp = kmalloc(sizeof(*tmp));
+ tmp = xmalloc(sizeof(*tmp));
if (!tmp)
panic("pdev_sort_resources(): "
- "kmalloc() failed!\n");
+ "xmalloc() failed!\n");
tmp->next = ln;
tmp->res = r;
tmp->dev = dev;
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index e299f616b0..061c205438 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -9,7 +9,7 @@ extern void arch_final_setup_guestos(
static inline void free_perdomain_pt(struct domain *d)
{
- free_page((unsigned long)d->mm.perdomain_pt);
+ free_xenheap_page((unsigned long)d->mm.perdomain_pt);
}
extern void domain_relinquish_memory(struct domain *d);
diff --git a/xen/include/asm-x86/io.h b/xen/include/asm-x86/io.h
index c88648aa76..2d92fc9234 100644
--- a/xen/include/asm-x86/io.h
+++ b/xen/include/asm-x86/io.h
@@ -12,7 +12,7 @@
*
* The returned physical address is the physical (CPU) mapping for
* the memory address given. It is only valid to use this function on
- * addresses directly mapped or allocated via kmalloc.
+ * addresses directly mapped or allocated via xmalloc.
*
* This function does not give bus mappings for DMA transfers. In
* almost all conceivable cases a device driver should not be using
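
The io.h comment keeps its original caveat with only the allocator name updated: virt_to_phys() is meaningful only for directly-mapped addresses, which after this rename means xenheap pages and xmalloc()'d buffers. A small illustration (buf and the wrapper function are hypothetical):

    /* Illustration only: virt_to_phys() on a directly-mapped allocation. */
    static void phys_lookup_sketch(void)
    {
        char *buf = xmalloc(256);

        if ( buf != NULL )
        {
            unsigned long maddr = virt_to_phys(buf); /* valid: direct-mapped */
            (void)maddr;  /* not valid for non-direct-mapped addresses */
            xfree(buf);
        }
    }
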
diff --git a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
index 494a301220..9bce81f1eb 100644
--- a/xen/include/asm-x86/shadow.h
+++ b/xen/include/asm-x86/shadow.h
@@ -524,7 +524,7 @@ static inline void set_shadow_status( struct mm_struct *m,
SH_LOG("allocate more shadow hashtable blocks");
// we need to allocate more space
- extra = kmalloc(sizeof(void*) + (shadow_ht_extra_size *
+ extra = xmalloc(sizeof(void*) + (shadow_ht_extra_size *
sizeof(struct shadow_status)));
if( ! extra ) BUG(); // should be more graceful here....
diff --git a/xen/include/asm-x86/types.h b/xen/include/asm-x86/types.h
index 3f1f0a6c6a..9bb1f6ec85 100644
--- a/xen/include/asm-x86/types.h
+++ b/xen/include/asm-x86/types.h
@@ -56,6 +56,6 @@ typedef unsigned long size_t;
typedef unsigned long dma_addr_t;
typedef u64 dma64_addr_t;
-typedef unsigned short kmem_bufctl_t;
+typedef unsigned short xmem_bufctl_t;
#endif
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 0ad445b32b..409e5c4bd1 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -4,12 +4,10 @@
/* page_alloc.c */
void init_page_allocator(unsigned long min, unsigned long max);
-unsigned long __get_free_pages(int order);
-void __free_pages(unsigned long p, int order);
-#define get_free_page() (__get_free_pages(0))
-#define __get_free_page() (__get_free_pages(0))
-#define free_pages(_p,_o) (__free_pages(_p,_o))
-#define free_page(_p) (__free_pages(_p,0))
+unsigned long alloc_xenheap_pages(int order);
+void free_xenheap_pages(unsigned long p, int order);
+#define alloc_xenheap_page() (alloc_xenheap_pages(0))
+#define free_xenheap_page(_p) (free_xenheap_pages(_p,0))
#include <asm/mm.h>
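
Note that the new xenheap interface, like the __get_free_pages() interface it replaces, traffics in unsigned long virtual addresses rather than pointers, which is why callers throughout this patch cast on both allocation and free. The single-page macros are simply order-0 shorthands; spelled out (sketch only, function name made up):

    static void xenheap_page_sketch(void)
    {
        void *p = (void *)alloc_xenheap_page();    /* == alloc_xenheap_pages(0) */

        if ( p != NULL )
            free_xenheap_page((unsigned long)p);   /* == free_xenheap_pages((unsigned long)p, 0) */
    }
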
diff --git a/xen/include/xen/pci.h b/xen/include/xen/pci.h
index 90fce051b1..c81cd0d57d 100644
--- a/xen/include/xen/pci.h
+++ b/xen/include/xen/pci.h
@@ -667,7 +667,7 @@ unsigned int pci_do_scan_bus(struct pci_bus *bus);
struct pci_bus * pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr);
#if 0
-/* kmem_cache style wrapper around pci_alloc_consistent() */
+/* xmem_cache style wrapper around pci_alloc_consistent() */
struct pci_pool *pci_pool_create (const char *name, struct pci_dev *dev,
size_t size, size_t align, size_t allocation, int flags);
void pci_pool_destroy (struct pci_pool *pool);
diff --git a/xen/include/xen/slab.h b/xen/include/xen/slab.h
index 7df322e12e..81f43e8b47 100644
--- a/xen/include/xen/slab.h
+++ b/xen/include/xen/slab.h
@@ -6,12 +6,12 @@
#ifndef __SLAB_H__
#define __SLAB_H__
-typedef struct kmem_cache_s kmem_cache_t;
+typedef struct xmem_cache_s xmem_cache_t;
#include <xen/mm.h>
#include <xen/cache.h>
-/* Flags to pass to kmem_cache_create(). */
+/* Flags to pass to xmem_cache_create(). */
/* NB. The first 3 are only valid when built with SLAB_DEBUG_SUPPORT. */
#define SLAB_DEBUG_INITIAL 0x00000200UL /* Call constructor */
#define SLAB_RED_ZONE 0x00000400UL /* Red zone objs in a cache */
@@ -24,23 +24,23 @@ typedef struct kmem_cache_s kmem_cache_t;
#define SLAB_CTOR_ATOMIC 0x002UL /* tell cons. it can't sleep */
#define SLAB_CTOR_VERIFY 0x004UL /* tell cons. it's a verify call */
-extern void kmem_cache_init(void);
-extern void kmem_cache_sizes_init(unsigned long);
+extern void xmem_cache_init(void);
+extern void xmem_cache_sizes_init(unsigned long);
-extern kmem_cache_t *kmem_find_general_cachep(size_t);
-extern kmem_cache_t *kmem_cache_create(
+extern xmem_cache_t *xmem_find_general_cachep(size_t);
+extern xmem_cache_t *xmem_cache_create(
const char *, size_t, size_t, unsigned long,
- void (*)(void *, kmem_cache_t *, unsigned long),
- void (*)(void *, kmem_cache_t *, unsigned long));
-extern int kmem_cache_destroy(kmem_cache_t *);
-extern int kmem_cache_shrink(kmem_cache_t *);
-extern void *kmem_cache_alloc(kmem_cache_t *);
-extern void kmem_cache_free(kmem_cache_t *, void *);
+ void (*)(void *, xmem_cache_t *, unsigned long),
+ void (*)(void *, xmem_cache_t *, unsigned long));
+extern int xmem_cache_destroy(xmem_cache_t *);
+extern int xmem_cache_shrink(xmem_cache_t *);
+extern void *xmem_cache_alloc(xmem_cache_t *);
+extern void xmem_cache_free(xmem_cache_t *, void *);
-extern void *kmalloc(size_t);
-extern void kfree(const void *);
+extern void *xmalloc(size_t);
+extern void xfree(const void *);
-extern int kmem_cache_reap(void);
+extern int xmem_cache_reap(void);
extern void dump_slabinfo();
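
The constructor/destructor signature declared above is unchanged apart from the type rename, so existing callback code only needs its xmem_cache_t parameter type updated. A minimal sketch of a cache registered with a constructor matching that signature (struct conn, conn_ctor and conn_cache are hypothetical):

    /* Hypothetical cache whose constructor matches the declared signature. */
    #include <xen/slab.h>

    struct conn { int refcnt; };

    static void conn_ctor(void *obj, xmem_cache_t *cachep, unsigned long flags)
    {
        struct conn *c = obj;
        c->refcnt = 0;               /* objects start life zero-referenced */
    }

    static xmem_cache_t *conn_cache;

    static void conn_cache_init(void)
    {
        conn_cache = xmem_cache_create("conn_cache", sizeof(struct conn),
                                       0, 0, conn_ctor, NULL);
    }
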