From 1f41c8f857594c33942f4bd2b72ec417f77e7ed9 Mon Sep 17 00:00:00 2001
From: "kaf24@labyrinth.cl.cam.ac.uk" <kaf24@labyrinth.cl.cam.ac.uk>
Date: Tue, 11 Mar 2003 00:37:28 +0000
Subject: bitkeeper revision 1.122 (3e6d2fc8CULc6D1QN5JO9MAu4FbLdw)

system.h, hypervisor.c:
  Reverted to locking for page table update synchronisation. Shouldn't be
  noticeably slower, and the stronger guarantees may be useful in future.
---
 .../arch/xeno/mm/hypervisor.c | 182 ++++++++++-----------
 .../include/asm-xeno/system.h |   4 +-
 2 files changed, 86 insertions(+), 100 deletions(-)

diff --git a/xenolinux-2.4.21-pre4-sparse/arch/xeno/mm/hypervisor.c b/xenolinux-2.4.21-pre4-sparse/arch/xeno/mm/hypervisor.c
index 674d099d6b..135ed5c066 100644
--- a/xenolinux-2.4.21-pre4-sparse/arch/xeno/mm/hypervisor.c
+++ b/xenolinux-2.4.21-pre4-sparse/arch/xeno/mm/hypervisor.c
@@ -13,26 +13,17 @@
 #include
 
 /*
- * A note on atomicity of these operations. We assume that queue_xxx
- * operations never occur in an asynchronous (eg. interrupt) context.
- * Therefore they do not need to be synchronised w.r.t. each other.
- * However, flush_update_queue may be called from an interrupt context
- * (eg. this is done in the network driver).
- *
- * We use lock-free techniques to synchronise on the queue index. If a
- * queue_xxx operation finds this index changes while it runs, it will
- * fail and retry.
- *
- * Flush operations must synchronize with themselves. They do this by
- * atomically updating the index to zero on entry. This effectively locks
- * out any other asynchronous calls to a flush operation.
- *
- * Debug routines synchronise by disabling interrupts. It's easier that way.
+ * This suffices to protect us if we ever move to SMP domains.
+ * Further, it protects us against interrupts. At the very least, this is
+ * required for the network driver which flushes the update queue before
+ * pushing new receive buffers.
  */
+static spinlock_t update_lock = SPIN_LOCK_UNLOCKED;
 
 #define QUEUE_SIZE 2048
 static page_update_request_t update_queue[QUEUE_SIZE];
-volatile unsigned int pt_update_queue_idx = 0;
+unsigned int pt_update_queue_idx = 0;
+#define idx pt_update_queue_idx
 
 #if PT_UPDATE_DEBUG > 0
 page_update_debug_t update_debug_queue[QUEUE_SIZE] = {{0}};
@@ -42,11 +33,7 @@ static void DEBUG_allow_pt_reads(void)
 {
     pte_t *pte;
     page_update_request_t update;
-    unsigned int idx;
-    unsigned long flags;
     int i;
-    local_irq_save(flags);
-    idx = pt_update_queue_idx;
     for ( i = idx-1; i >= 0; i-- )
     {
         pte = update_debug_queue[i].ptep;
@@ -56,17 +43,13 @@ static void DEBUG_allow_pt_reads(void)
         update.val = update_debug_queue[i].pteval;
         HYPERVISOR_pt_update(&update, 1);
     }
-    local_irq_restore(flags);
 }
 
 static void DEBUG_disallow_pt_read(unsigned long pa)
 {
     pte_t *pte;
     pmd_t *pmd;
     pgd_t *pgd;
-    unsigned long pteval, flags;
-    unsigned int idx;
-    local_irq_save(flags);
-    idx = pt_update_queue_idx;
+    unsigned long pteval;
     /*
      * We may fault because of an already outstanding update.
      * That's okay -- it'll get fixed up in the fault handler.
@@ -82,7 +65,6 @@ static void DEBUG_disallow_pt_read(unsigned long pa)
     HYPERVISOR_pt_update(&update, 1);
     update_debug_queue[idx].ptep = pte;
     update_debug_queue[idx].pteval = pteval;
-    local_irq_restore(flags);
 }
 #endif
 
@@ -103,10 +85,8 @@ static void DEBUG_disallow_pt_read(unsigned long pa)
  */
 unsigned long pt_baseptr;
 
-void _flush_page_update_queue(void)
+static inline void __flush_page_update_queue(void)
 {
-    unsigned int idx = xchg(&pt_update_queue_idx, 0);
-    if ( idx == 0 ) return;
 #if PT_UPDATE_DEBUG > 1
     printk("Flushing %d entries from pt update queue\n", idx);
 #endif
@@ -114,112 +94,118 @@ void _flush_page_update_queue(void)
     DEBUG_allow_pt_reads();
 #endif
     HYPERVISOR_pt_update(update_queue, idx);
+    idx = 0;
+}
+
+void _flush_page_update_queue(void)
+{
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    if ( idx != 0 ) __flush_page_update_queue();
+    spin_unlock_irqrestore(&update_lock, flags);
+}
+
+static inline void increment_index(void)
+{
+    idx++;
+    if ( unlikely(idx == QUEUE_SIZE) ) __flush_page_update_queue();
 }
 
 void queue_l1_entry_update(unsigned long ptr, unsigned long val)
 {
-    unsigned int idx;
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
 #if PT_UPDATE_DEBUG > 0
     DEBUG_disallow_pt_read(ptr);
 #endif
-    do {
-        idx = pt_update_queue_idx;
-        update_queue[idx].ptr = phys_to_machine(ptr);
-        update_queue[idx].val = val;
-    } while ( cmpxchg(&pt_update_queue_idx, idx, idx+1) != idx );
-    if ( idx == (QUEUE_SIZE-1) ) _flush_page_update_queue();
+    update_queue[idx].ptr = phys_to_machine(ptr);
+    update_queue[idx].val = val;
+    increment_index();
+    spin_unlock_irqrestore(&update_lock, flags);
 }
 
 void queue_l2_entry_update(unsigned long ptr, unsigned long val)
 {
-    unsigned int idx;
-    do {
-        idx = pt_update_queue_idx;
-        update_queue[idx].ptr = phys_to_machine(ptr);
-        update_queue[idx].val = val;
-    } while ( cmpxchg(&pt_update_queue_idx, idx, idx+1) != idx );
-    if ( idx == (QUEUE_SIZE-1) ) _flush_page_update_queue();
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr = phys_to_machine(ptr);
+    update_queue[idx].val = val;
+    increment_index();
+    spin_unlock_irqrestore(&update_lock, flags);
 }
 
 void queue_pt_switch(unsigned long ptr)
 {
-    unsigned int idx;
-    do {
-        idx = pt_update_queue_idx;
-        update_queue[idx].ptr = phys_to_machine(ptr);
-        update_queue[idx].ptr |= PGREQ_EXTENDED_COMMAND;
-        update_queue[idx].val = PGEXT_NEW_BASEPTR;
-    } while ( cmpxchg(&pt_update_queue_idx, idx, idx+1) != idx );
-    if ( idx == (QUEUE_SIZE-1) ) _flush_page_update_queue();
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr = phys_to_machine(ptr);
+    update_queue[idx].ptr |= PGREQ_EXTENDED_COMMAND;
+    update_queue[idx].val = PGEXT_NEW_BASEPTR;
+    increment_index();
+    spin_unlock_irqrestore(&update_lock, flags);
 }
 
 void queue_tlb_flush(void)
 {
-    unsigned int idx;
-    do {
-        idx = pt_update_queue_idx;
-        update_queue[idx].ptr = PGREQ_EXTENDED_COMMAND;
-        update_queue[idx].val = PGEXT_TLB_FLUSH;
-    } while ( cmpxchg(&pt_update_queue_idx, idx, idx+1) != idx );
-    if ( idx == (QUEUE_SIZE-1) ) _flush_page_update_queue();
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr = PGREQ_EXTENDED_COMMAND;
+    update_queue[idx].val = PGEXT_TLB_FLUSH;
+    increment_index();
+    spin_unlock_irqrestore(&update_lock, flags);
 }
 
 void queue_invlpg(unsigned long ptr)
 {
-    unsigned int idx;
-    do {
-        idx = pt_update_queue_idx;
-        update_queue[idx].ptr = PGREQ_EXTENDED_COMMAND;
-        update_queue[idx].val = ptr & PAGE_MASK;
-        update_queue[idx].val |= PGEXT_INVLPG;
-    } while ( cmpxchg(&pt_update_queue_idx, idx, idx+1) != idx );
-    if ( idx == (QUEUE_SIZE-1) ) _flush_page_update_queue();
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr = PGREQ_EXTENDED_COMMAND;
+    update_queue[idx].val = ptr & PAGE_MASK;
+    update_queue[idx].val |= PGEXT_INVLPG;
+    increment_index();
+    spin_unlock_irqrestore(&update_lock, flags);
 }
 
 void queue_pgd_pin(unsigned long ptr)
 {
-    unsigned int idx;
-    do {
-        idx = pt_update_queue_idx;
-        update_queue[idx].ptr = phys_to_machine(ptr);
-        update_queue[idx].ptr |= PGREQ_EXTENDED_COMMAND;
-        update_queue[idx].val = PGEXT_PIN_L2_TABLE;
-    } while ( cmpxchg(&pt_update_queue_idx, idx, idx+1) != idx );
-    if ( idx == (QUEUE_SIZE-1) ) _flush_page_update_queue();
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr = phys_to_machine(ptr);
+    update_queue[idx].ptr |= PGREQ_EXTENDED_COMMAND;
+    update_queue[idx].val = PGEXT_PIN_L2_TABLE;
+    increment_index();
+    spin_unlock_irqrestore(&update_lock, flags);
 }
 
 void queue_pgd_unpin(unsigned long ptr)
 {
-    unsigned int idx;
-    do {
-        idx = pt_update_queue_idx;
-        update_queue[idx].ptr = phys_to_machine(ptr);
-        update_queue[idx].ptr |= PGREQ_EXTENDED_COMMAND;
-        update_queue[idx].val = PGEXT_UNPIN_TABLE;
-    } while ( cmpxchg(&pt_update_queue_idx, idx, idx+1) != idx );
-    if ( idx == (QUEUE_SIZE-1) ) _flush_page_update_queue();
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr = phys_to_machine(ptr);
+    update_queue[idx].ptr |= PGREQ_EXTENDED_COMMAND;
+    update_queue[idx].val = PGEXT_UNPIN_TABLE;
+    increment_index();
+    spin_unlock_irqrestore(&update_lock, flags);
 }
 
 void queue_pte_pin(unsigned long ptr)
 {
-    unsigned int idx;
-    do {
-        idx = pt_update_queue_idx;
-        update_queue[idx].ptr = phys_to_machine(ptr);
-        update_queue[idx].ptr |= PGREQ_EXTENDED_COMMAND;
-        update_queue[idx].val = PGEXT_PIN_L1_TABLE;
-    } while ( cmpxchg(&pt_update_queue_idx, idx, idx+1) != idx );
-    if ( idx == (QUEUE_SIZE-1) ) _flush_page_update_queue();
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr = phys_to_machine(ptr);
+    update_queue[idx].ptr |= PGREQ_EXTENDED_COMMAND;
+    update_queue[idx].val = PGEXT_PIN_L1_TABLE;
+    increment_index();
+    spin_unlock_irqrestore(&update_lock, flags);
 }
 
 void queue_pte_unpin(unsigned long ptr)
 {
-    unsigned int idx;
-    do {
-        idx = pt_update_queue_idx;
-        update_queue[idx].ptr = phys_to_machine(ptr);
-        update_queue[idx].ptr |= PGREQ_EXTENDED_COMMAND;
-        update_queue[idx].val = PGEXT_UNPIN_TABLE;
-    } while ( cmpxchg(&pt_update_queue_idx, idx, idx+1) != idx );
-    if ( idx == (QUEUE_SIZE-1) ) _flush_page_update_queue();
+    unsigned long flags;
+    spin_lock_irqsave(&update_lock, flags);
+    update_queue[idx].ptr = phys_to_machine(ptr);
+    update_queue[idx].ptr |= PGREQ_EXTENDED_COMMAND;
+    update_queue[idx].val = PGEXT_UNPIN_TABLE;
+    increment_index();
+    spin_unlock_irqrestore(&update_lock, flags);
 }
diff --git a/xenolinux-2.4.21-pre4-sparse/include/asm-xeno/system.h b/xenolinux-2.4.21-pre4-sparse/include/asm-xeno/system.h
index 05ff47c6f0..e318716a66 100644
--- a/xenolinux-2.4.21-pre4-sparse/include/asm-xeno/system.h
+++ b/xenolinux-2.4.21-pre4-sparse/include/asm-xeno/system.h
@@ -319,7 +319,7 @@ do {                                                    \
     shared_info_t *_shared = HYPERVISOR_shared_info;            \
     _shared->events_enable = (x);                               \
     barrier();                                                  \
-    if ( _shared->events && (x) ) do_hypervisor_callback(NULL); \
+    if ( unlikely(_shared->events) && (x) ) do_hypervisor_callback(NULL); \
 } while (0)
 #define __cli() (HYPERVISOR_shared_info->events_enable = 0); barrier()
 #define __sti()                                                 \
@@ -327,7 +327,7 @@ do {                                                    \
     shared_info_t *_shared = HYPERVISOR_shared_info;            \
     _shared->events_enable = 1;                                 \
     barrier();                                                  \
-    if ( _shared->events ) do_hypervisor_callback(NULL);        \
+    if ( unlikely(_shared->events) ) do_hypervisor_callback(NULL); \
 } while (0)
 
 #define safe_halt() ((void)0)
--
cgit v1.2.3
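
The pattern this patch adopts, a single spinlock serialising both queue appends
and queue flushes, can be sketched in isolation. Below is a minimal user-space
analogue, assuming a pthread mutex in place of spin_lock_irqsave() and a printf
in place of the HYPERVISOR_pt_update() hypercall; the names req_t, enqueue()
and flush_locked() are illustrative and do not appear in the patch itself.

/*
 * Minimal user-space sketch of the locked update-queue pattern above.
 * Build with: cc -o queue queue.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>

#define QUEUE_SIZE 8                 /* small, so a full-queue flush is easy to see */

typedef struct { unsigned long ptr, val; } req_t;

static req_t queue[QUEUE_SIZE];
static unsigned int idx;             /* next free slot; guarded by 'lock' */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Must be called with 'lock' held -- mirrors __flush_page_update_queue(). */
static void flush_locked(void)
{
    printf("flushing %u queued requests\n", idx);   /* hypercall stand-in */
    idx = 0;
}

/* Mirrors increment_index(): flush eagerly when the queue fills. */
static void increment_index_locked(void)
{
    if ( ++idx == QUEUE_SIZE )
        flush_locked();
}

/* Mirrors queue_l1_entry_update(): the whole append is one critical section. */
static void enqueue(unsigned long ptr, unsigned long val)
{
    pthread_mutex_lock(&lock);
    queue[idx].ptr = ptr;
    queue[idx].val = val;
    increment_index_locked();
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    unsigned long i;
    for ( i = 0; i < 20; i++ )
        enqueue(i, i * 2);

    /* Mirrors _flush_page_update_queue(): push out any stragglers. */
    pthread_mutex_lock(&lock);
    if ( idx != 0 )
        flush_locked();
    pthread_mutex_unlock(&lock);
    return 0;
}

Holding the lock across the index read, the slot write and the increment makes
the append a single atomic unit with respect to any concurrent flush, which is
presumably the "stronger guarantees" the commit message refers to: the old
lock-free version had to retry whenever the cmpxchg observed the index change
underneath it, while here the invariant is enforced directly by the lock.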