diff options
author | kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk> | 2005-08-17 16:53:30 +0000 |
---|---|---|
committer | kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk> | 2005-08-17 16:53:30 +0000 |
commit | 81cf88dc9c8db10fe5ab4495c917be4933730a2a (patch) | |
tree | 3e4e3c3acc00cfd136fb1eba23adaa3eb0c3f0d5 | |
parent | 88f95d50cfb1332776cebe47a929a889b8d82759 (diff) | |
download | xen-81cf88dc9c8db10fe5ab4495c917be4933730a2a.tar.gz xen-81cf88dc9c8db10fe5ab4495c917be4933730a2a.tar.bz2 xen-81cf88dc9c8db10fe5ab4495c917be4933730a2a.zip |
Fix the skbuff allocator for multi-page buffers.
pci-dma.c still needs fixing to recognise contiguous
multi-page buffers.
Signed-off-by: Keir Fraser <keir@xensource.com>
-rw-r--r-- | linux-2.6-xen-sparse/arch/xen/kernel/skbuff.c | 82 |
1 file changed, 66 insertions(+), 16 deletions(-)
diff --git a/linux-2.6-xen-sparse/arch/xen/kernel/skbuff.c b/linux-2.6-xen-sparse/arch/xen/kernel/skbuff.c index 90274fd599..a7fe1519f6 100644 --- a/linux-2.6-xen-sparse/arch/xen/kernel/skbuff.c +++ b/linux-2.6-xen-sparse/arch/xen/kernel/skbuff.c @@ -5,8 +5,6 @@ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/slab.h> -#include <linux/string.h> -#include <linux/errno.h> #include <linux/netdevice.h> #include <linux/inetdevice.h> #include <linux/etherdevice.h> @@ -14,34 +12,86 @@ #include <linux/init.h> #include <asm/io.h> #include <asm/page.h> - -EXPORT_SYMBOL(__dev_alloc_skb); +#include <asm-xen/hypervisor.h> /* Referenced in netback.c. */ /*static*/ kmem_cache_t *skbuff_cachep; -/* Size must be cacheline-aligned (alloc_skb uses SKB_DATA_ALIGN). */ -#define XEN_SKB_SIZE \ - ((PAGE_SIZE - sizeof(struct skb_shared_info)) & ~(SMP_CACHE_BYTES - 1)) +#define MAX_SKBUFF_ORDER 2 +static kmem_cache_t *skbuff_order_cachep[MAX_SKBUFF_ORDER + 1]; struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask) { - struct sk_buff *skb; - skb = alloc_skb_from_cache(skbuff_cachep, length + 16, gfp_mask); - if ( likely(skb != NULL) ) - skb_reserve(skb, 16); - return skb; + struct sk_buff *skb; + int order; + + length = SKB_DATA_ALIGN(length + 16); + order = get_order(length + sizeof(struct skb_shared_info)); + if (order > MAX_SKBUFF_ORDER) { + printk(KERN_ALERT "Attempt to allocate order %d skbuff. 
" + "Increase MAX_SKBUFF_ORDER.\n", order); + return NULL; + } + + skb = alloc_skb_from_cache( + skbuff_order_cachep[order], length, gfp_mask); + if (skb != NULL) + skb_reserve(skb, 16); + + return skb; } static void skbuff_ctor(void *buf, kmem_cache_t *cachep, unsigned long unused) { - scrub_pages(buf, 1); + int order = 0; + + while (skbuff_order_cachep[order] != cachep) + order++; + + if (order != 0) + xen_create_contiguous_region((unsigned long)buf, order); + + scrub_pages(buf, 1 << order); +} + +static void skbuff_dtor(void *buf, kmem_cache_t *cachep, unsigned long unused) +{ + int order = 0; + + while (skbuff_order_cachep[order] != cachep) + order++; + + if (order != 0) + xen_destroy_contiguous_region((unsigned long)buf, order); } static int __init skbuff_init(void) { - skbuff_cachep = kmem_cache_create( - "xen-skb", PAGE_SIZE, PAGE_SIZE, 0, skbuff_ctor, NULL); - return 0; + static char name[MAX_SKBUFF_ORDER + 1][20]; + unsigned long size; + int order; + + for (order = 0; order <= MAX_SKBUFF_ORDER; order++) { + size = PAGE_SIZE << order; + sprintf(name[order], "xen-skb-%lu", size); + skbuff_order_cachep[order] = kmem_cache_create( + name[order], size, size, 0, skbuff_ctor, skbuff_dtor); + } + + skbuff_cachep = skbuff_order_cachep[0]; + + return 0; } __initcall(skbuff_init); + +EXPORT_SYMBOL(__dev_alloc_skb); + +/* + * Local variables: + * c-file-style: "linux" + * indent-tabs-mode: t + * c-indent-level: 8 + * c-basic-offset: 8 + * tab-width: 8 + * End: + */ |