aboutsummaryrefslogtreecommitdiffstats
path: root/xenolinux-2.4.21-pre4-sparse
diff options
context:
space:
mode:
authorkaf24@labyrinth.cl.cam.ac.uk <kaf24@labyrinth.cl.cam.ac.uk>2003-03-10 13:44:34 +0000
committerkaf24@labyrinth.cl.cam.ac.uk <kaf24@labyrinth.cl.cam.ac.uk>2003-03-10 13:44:34 +0000
commit696f066a077467c05fe73a9efcb4e7db72f81adc (patch)
tree48a4f9ca5a56413ec8fdc81d50676a52f20aabce /xenolinux-2.4.21-pre4-sparse
parentfe98a95d91fb5a6a45c93c927ddb7499f8d8a8c7 (diff)
downloadxen-696f066a077467c05fe73a9efcb4e7db72f81adc.tar.gz
xen-696f066a077467c05fe73a9efcb4e7db72f81adc.tar.bz2
xen-696f066a077467c05fe73a9efcb4e7db72f81adc.zip
bitkeeper revision 1.119 (3e6c96c2opjGKDx0oCP831RfhP4uDw)
hypervisor.c, network.c: Added locking to page-table update code in Xenolinux. Network driver now flushes the update queue before pushing new rx buffers.
Diffstat (limited to 'xenolinux-2.4.21-pre4-sparse')
-rw-r--r--xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/network/network.c5
-rw-r--r--xenolinux-2.4.21-pre4-sparse/arch/xeno/mm/hypervisor.c41
2 files changed, 45 insertions, 1 deletions
diff --git a/xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/network/network.c b/xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/network/network.c
index 7ef9ce4ef8..7e85bf5abe 100644
--- a/xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/network/network.c
+++ b/xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/network/network.c
@@ -240,6 +240,11 @@ static void network_alloc_rx_buffers(struct net_device *dev)
np->net_ring->rx_event = RX_RING_INC(np->rx_idx);
+ /*
+ * We may have allocated buffers which have entries outstanding in
+ * the page update queue -- make sure we flush those first!
+ */
+ flush_page_update_queue();
HYPERVISOR_net_update();
}
diff --git a/xenolinux-2.4.21-pre4-sparse/arch/xeno/mm/hypervisor.c b/xenolinux-2.4.21-pre4-sparse/arch/xeno/mm/hypervisor.c
index b051684aa2..044c531d35 100644
--- a/xenolinux-2.4.21-pre4-sparse/arch/xeno/mm/hypervisor.c
+++ b/xenolinux-2.4.21-pre4-sparse/arch/xeno/mm/hypervisor.c
@@ -12,6 +12,14 @@
#include <asm/page.h>
#include <asm/pgtable.h>
+/*
+ * This suffices to protect us if we ever move to SMP domains.
+ * Further, it protects us against interrupts. At the very least, this is
+ * required for the network driver which flushes the update queue before
+ * pushing new receive buffers.
+ */
+static spinlock_t update_lock = SPIN_LOCK_UNLOCKED;
+
#define QUEUE_SIZE 2048
static page_update_request_t update_queue[QUEUE_SIZE];
unsigned int pt_update_queue_idx = 0;
@@ -79,7 +87,9 @@ unsigned long pt_baseptr;
void _flush_page_update_queue(void)
{
- if ( idx == 0 ) return;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
+ if ( idx == 0 ) goto out;
#if PT_UPDATE_DEBUG > 1
printk("Flushing %d entries from pt update queue\n", idx);
#endif
@@ -88,6 +98,8 @@ void _flush_page_update_queue(void)
#endif
HYPERVISOR_pt_update(update_queue, idx);
idx = 0;
+ out:
+ spin_unlock_irqrestore(&update_lock, flags);
}
static void increment_index(void)
@@ -97,72 +109,99 @@ static void increment_index(void)
void queue_l1_entry_update(unsigned long ptr, unsigned long val)
{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
#if PT_UPDATE_DEBUG > 0
DEBUG_disallow_pt_read(ptr);
#endif
update_queue[idx].ptr = phys_to_machine(ptr);
update_queue[idx].val = val;
increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
}
void queue_l2_entry_update(unsigned long ptr, unsigned long val)
{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
update_queue[idx].ptr = phys_to_machine(ptr);
update_queue[idx].val = val;
increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
}
void queue_pt_switch(unsigned long ptr)
{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
update_queue[idx].ptr = phys_to_machine(ptr);
update_queue[idx].ptr |= PGREQ_EXTENDED_COMMAND;
update_queue[idx].val = PGEXT_NEW_BASEPTR;
increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
}
void queue_tlb_flush(void)
{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
update_queue[idx].ptr = PGREQ_EXTENDED_COMMAND;
update_queue[idx].val = PGEXT_TLB_FLUSH;
increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
}
void queue_invlpg(unsigned long ptr)
{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
update_queue[idx].ptr = PGREQ_EXTENDED_COMMAND;
update_queue[idx].val = ptr & PAGE_MASK;
update_queue[idx].val |= PGEXT_INVLPG;
increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
}
void queue_pgd_pin(unsigned long ptr)
{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
update_queue[idx].ptr = phys_to_machine(ptr);
update_queue[idx].ptr |= PGREQ_EXTENDED_COMMAND;
update_queue[idx].val = PGEXT_PIN_L2_TABLE;
increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
}
void queue_pgd_unpin(unsigned long ptr)
{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
update_queue[idx].ptr = phys_to_machine(ptr);
update_queue[idx].ptr |= PGREQ_EXTENDED_COMMAND;
update_queue[idx].val = PGEXT_UNPIN_TABLE;
increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
}
void queue_pte_pin(unsigned long ptr)
{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
update_queue[idx].ptr = phys_to_machine(ptr);
update_queue[idx].ptr |= PGREQ_EXTENDED_COMMAND;
update_queue[idx].val = PGEXT_PIN_L1_TABLE;
increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
}
void queue_pte_unpin(unsigned long ptr)
{
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
update_queue[idx].ptr = phys_to_machine(ptr);
update_queue[idx].ptr |= PGREQ_EXTENDED_COMMAND;
update_queue[idx].val = PGEXT_UNPIN_TABLE;
increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
}