about summary refs log tree commit diff stats
path: root/xenolinux-2.4.16-sparse
diff options
context:
space:
mode:
author akw27@boulderdash.cl.cam.ac.uk <akw27@boulderdash.cl.cam.ac.uk> 2003-02-06 16:31:29 +0000
committer akw27@boulderdash.cl.cam.ac.uk <akw27@boulderdash.cl.cam.ac.uk> 2003-02-06 16:31:29 +0000
commit cb4878ee12aa24be7a1bb8c1132cc8b6e55c0c44 (patch)
tree e5245ef099d3aa46a273f5f36ee143cdc5a283da /xenolinux-2.4.16-sparse
parent 07b29e865c02423dea3508cabc7ba462fcb5db43 (diff)
download xen-cb4878ee12aa24be7a1bb8c1132cc8b6e55c0c44.tar.gz
xen-cb4878ee12aa24be7a1bb8c1132cc8b6e55c0c44.tar.bz2
xen-cb4878ee12aa24be7a1bb8c1132cc8b6e55c0c44.zip
bitkeeper revision 1.22.1.9 (3e428de1nQwf1QUfwsp1nTiJP4ByhQ)
Zero-copy receive path now works over discontiguous memory with no guest-side pool.
Diffstat (limited to 'xenolinux-2.4.16-sparse')
-rw-r--r-- xenolinux-2.4.16-sparse/arch/xeno/drivers/network/network.c 65
-rw-r--r-- xenolinux-2.4.16-sparse/include/asm-xeno/io.h 11
-rw-r--r-- xenolinux-2.4.16-sparse/include/linux/skbuff.h 5
-rw-r--r-- xenolinux-2.4.16-sparse/net/core/skbuff.c 37
4 files changed, 86 insertions, 32 deletions
diff --git a/xenolinux-2.4.16-sparse/arch/xeno/drivers/network/network.c b/xenolinux-2.4.16-sparse/arch/xeno/drivers/network/network.c
index 9c40ad0066..64f33c10ff 100644
--- a/xenolinux-2.4.16-sparse/arch/xeno/drivers/network/network.c
+++ b/xenolinux-2.4.16-sparse/arch/xeno/drivers/network/network.c
@@ -21,6 +21,7 @@
#include <linux/skbuff.h>
#include <linux/init.h>
+#include <asm/io.h>
#include <net/sock.h>
#define NET_TX_IRQ _EVENT_NET_TX
@@ -48,6 +49,8 @@ static void cleanup_module(void);
static struct list_head dev_list;
+static unsigned int net_countx;
+
/*
* RX RING: RX_IDX <= rx_cons <= rx_prod
* TX RING: TX_IDX <= tx_cons <= tx_prod
@@ -72,7 +75,7 @@ struct net_private
static int network_open(struct net_device *dev)
{
struct net_private *np = dev->priv;
- int error;
+ int error = 0;
char *rxlabel, *txlabel;
// This is inevitably not the right way to allocate a couple of static strings.
@@ -179,13 +182,59 @@ static void network_tx_buf_gc(struct net_device *dev)
spin_unlock_irqrestore(&np->tx_lock, flags);
}
+inline unsigned long get_ppte(unsigned long addr)
+{
+ unsigned long ppte = 0xdeadbeef;
+ pgd_t *pgd; pmd_t *pmd; pte_t *ptep;
+ pgd = pgd_offset_k(addr);
+
+ if (pgd_none(*pgd) || pgd_bad(*pgd)) BUG();
+
+ pmd = pmd_offset(pgd, addr);
+ if (pmd_none(*pmd)) BUG();
+ if (pmd_bad(*pmd)) BUG();
+
+ ptep = pte_offset(pmd, addr);
+ ppte = (unsigned long)phys_to_machine(virt_to_phys(ptep));
+
+ return ppte;
+}
+/*
+static void validate_free_list(void)
+{
+ unsigned long addr, ppfn, mpfn, mpfn2, flags;
+ struct list_head *i;
+ struct net_page_info *np;
+
+ printk(KERN_ALERT "Walking free pages:\n");
+
+ spin_lock_irqsave(&net_page_list_lock, flags);
+
+ list_for_each(i, &net_page_list)
+ {
+ np = list_entry(i, struct net_page_info, list);
+ addr = np->virt_addr;
+ ppfn = virt_to_phys(addr) >> PAGE_SHIFT;
+ mpfn = get_ppte(addr);
+ mpfn2 = phys_to_machine_mapping[ppfn];
+ mpfn = (*(unsigned long *)phys_to_virt(machine_to_phys(mpfn))) >> PAGE_SHIFT;
+ if (mpfn != mpfn2) printk(KERN_ALERT "mpfn %lu != %lu\n", mpfn, mpfn2);
+
+ if (machine_to_phys_mapping[mpfn] != ppfn) printk(KERN_ALERT "ppfn %lu != %lu\n", machine_to_phys_mapping[mpfn], ppfn);
+ }
+
+ spin_unlock_irqrestore(&net_page_list_lock, flags);
+
+}
+*/
static void network_alloc_rx_buffers(struct net_device *dev)
{
unsigned int i;
struct net_private *np = dev->priv;
struct sk_buff *skb;
unsigned int end = RX_RING_ADD(np->rx_idx, RX_MAX_ENTRIES);
+
for ( i = np->net_ring->rx_prod; i != end; i = RX_RING_INC(i) )
{
@@ -194,8 +243,9 @@ static void network_alloc_rx_buffers(struct net_device *dev)
skb->dev = dev;
//skb_reserve(skb, 2); /* word align the IP header */
np->rx_skb_ring[i] = skb;
- np->net_ring->rx_ring[i].addr = (unsigned long)skb->net_page->ppte; //data;
+ np->net_ring->rx_ring[i].addr = get_ppte(skb->head);
np->net_ring->rx_ring[i].size = RX_BUF_SIZE - 16; /* arbitrary */
+//printk(KERN_ALERT "[%p]\n", phys_to_machine(virt_to_phys(skb->page_ptr)));
}
np->net_ring->rx_prod = i;
@@ -273,6 +323,8 @@ static void network_rx_int(int irq, void *dev_id, struct pt_regs *ptregs)
struct net_private *np = dev->priv;
struct sk_buff *skb;
+ /*if (net_countx++ % 100 == 0) validate_free_list();*/
+
again:
for ( i = np->rx_idx; i != np->net_ring->rx_cons; i = RX_RING_INC(i) )
{
@@ -283,7 +335,14 @@ static void network_rx_int(int irq, void *dev_id, struct pt_regs *ptregs)
continue;
}
skb = np->rx_skb_ring[i];
-
+
+//printk(KERN_ALERT "[%u]: ptmm[%lx] old:(%lx) new:(%lx)\n", i , virt_to_phys(skb->head) >> PAGE_SHIFT, phys_to_machine_mapping[virt_to_phys(skb->head) >> PAGE_SHIFT], (*(unsigned long *)phys_to_virt(machine_to_phys(np->net_ring->rx_ring[i].addr))) >> PAGE_SHIFT);
+
+ phys_to_machine_mapping[virt_to_phys(skb->head) >> PAGE_SHIFT] =
+ (*(unsigned long *)phys_to_virt(
+ machine_to_phys(np->net_ring->rx_ring[i].addr))
+ ) >> PAGE_SHIFT;
+
skb_put(skb, np->net_ring->rx_ring[i].size);
skb->protocol = eth_type_trans(skb, dev);
np->stats.rx_packets++;
diff --git a/xenolinux-2.4.16-sparse/include/asm-xeno/io.h b/xenolinux-2.4.16-sparse/include/asm-xeno/io.h
index 0f097342ba..1afc7e4d68 100644
--- a/xenolinux-2.4.16-sparse/include/asm-xeno/io.h
+++ b/xenolinux-2.4.16-sparse/include/asm-xeno/io.h
@@ -78,17 +78,6 @@ static inline void * phys_to_virt(unsigned long address)
* These are equally trivial.
*/
-static inline unsigned long virt_to_mach(volatile void * address)
-{
- return __pa(address) + (unsigned long) start_info.phys_base;
-}
-
-static inline void *mach_to_virt(unsigned long address)
-{
- return __va(address) - (unsigned long) start_info.phys_base;
-}
-
-
/*
* Change "struct page" to physical address.
*/
diff --git a/xenolinux-2.4.16-sparse/include/linux/skbuff.h b/xenolinux-2.4.16-sparse/include/linux/skbuff.h
index 2d0a94dce2..54963090f6 100644
--- a/xenolinux-2.4.16-sparse/include/linux/skbuff.h
+++ b/xenolinux-2.4.16-sparse/include/linux/skbuff.h
@@ -43,7 +43,8 @@
#define SKB_ZERO_COPY 1
#define NUM_NET_PAGES 9 // about 1Meg of buffers. (2^9)
-struct net_page_info {
+
+/*struct net_page_info {
struct list_head list;
unsigned long virt_addr;
unsigned long ppte;
@@ -54,7 +55,7 @@ extern struct net_page_info *net_page_table;
extern struct list_head net_page_list;
extern spinlock_t net_page_list_lock;
extern unsigned int net_pages;
-
+*/
/* End zero copy additions */
#define HAVE_ALLOC_SKB /* For the drivers to know */
diff --git a/xenolinux-2.4.16-sparse/net/core/skbuff.c b/xenolinux-2.4.16-sparse/net/core/skbuff.c
index ec76f00de0..45332f1ffd 100644
--- a/xenolinux-2.4.16-sparse/net/core/skbuff.c
+++ b/xenolinux-2.4.16-sparse/net/core/skbuff.c
@@ -64,12 +64,13 @@
#include <asm/system.h>
/* zc globals: */
+/*
char *net_page_chunk;
struct net_page_info *net_page_table;
struct list_head net_page_list;
spinlock_t net_page_list_lock = SPIN_LOCK_UNLOCKED;
unsigned int net_pages;
-
+*/
int sysctl_hot_list_len = 128;
@@ -229,7 +230,7 @@ nohead:
}
/* begin zc code additions: */
-
+/*
void init_net_pages(unsigned long order_pages)
{
int i;
@@ -248,16 +249,16 @@ void init_net_pages(unsigned long order_pages)
np->virt_addr = (unsigned long)net_page_chunk + (i * PAGE_SIZE);
// now fill the pte pointer:
- np->ppte = 0xdeadbeef;
- pgd = pgd_offset_k(np->virt_addr);
- if (pgd_none(*pgd) || pgd_bad(*pgd)) BUG();
+ //np->ppte = 0xdeadbeef;
+ //pgd = pgd_offset_k(np->virt_addr);
+ //if (pgd_none(*pgd) || pgd_bad(*pgd)) BUG();
- if (pmd_none(*pmd)) BUG();
- if (pmd_bad(*pmd)) BUG();
-
- ptep = pte_offset(pmd, np->virt_addr);
- np->ppte = (unsigned long)virt_to_mach(ptep);
+ //if (pmd_none(*pmd)) BUG();
+ //if (pmd_bad(*pmd)) BUG();
+ //ptep = pte_offset(pmd, np->virt_addr);
+ //np->ppte = phys_to_machine(virt_to_phys(ptep));
+
list_add_tail(&np->list, &net_page_list);
}
net_pages = nr_pages;
@@ -267,6 +268,7 @@ void init_net_pages(unsigned long order_pages)
struct net_page_info *get_net_page(void)
{
+
struct list_head *list_ptr;
struct net_page_info *np;
unsigned long flags;
@@ -301,7 +303,7 @@ void free_net_page(struct net_page_info *np)
spin_unlock_irqrestore(&net_page_list_lock, flags);
}
-
+*/
struct sk_buff *alloc_zc_skb(unsigned int size,int gfp_mask)
{
struct sk_buff *skb;
@@ -332,12 +334,13 @@ struct sk_buff *alloc_zc_skb(unsigned int size,int gfp_mask)
printk("alloc_zc_skb called with unruly size.\n");
size = PAGE_SIZE;
}
- skb->net_page = get_net_page();
+ /*skb->net_page = get_net_page();
if (skb->net_page == NULL)
{
goto nodata;
}
- data = (u8 *)skb->net_page->virt_addr;
+ data = (u8 *)skb->net_page->virt_addr;*/
+ data = (char *)__get_free_page(gfp_mask);
if (data == NULL)
goto nodata;
/* XXX: does not include slab overhead */
@@ -443,7 +446,9 @@ static void skb_release_data(struct sk_buff *skb)
{
kfree(skb->head);
} else {// SKB_ZERO_COPY
- free_net_page(skb->net_page);
+ //free_net_page(skb->net_page);
+//printk(KERN_ALERT "<%p>\n", phys_to_machine(virt_to_phys(skb->head)));
+ free_page((void *)skb->head);
}
}
@@ -559,7 +564,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask)
C(tc_index);
#endif
C(skb_type);
- C(net_page);
+ //C(net_page);
atomic_inc(&(skb_shinfo(skb)->dataref));
skb->cloned = 1;
#ifdef CONFIG_NETFILTER
@@ -1361,7 +1366,7 @@ void __init skb_init(void)
if (!skbuff_head_cache)
panic("cannot create skbuff cache");
- init_net_pages(NUM_NET_PAGES);
+ //init_net_pages(NUM_NET_PAGES);
for (i=0; i<NR_CPUS; i++)
skb_queue_head_init(&skb_head_pool[i].list);