aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorkaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>2004-03-25 15:16:57 +0000
committerkaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>2004-03-25 15:16:57 +0000
commitb47f9dc7a6076d22ec127b931798f5daae658a07 (patch)
tree1c17fa2e7438da09ab36854a8c0f99062902e443
parent96b1a7ea3f0cd281bbd655ff3e648028301c0eec (diff)
downloadxen-b47f9dc7a6076d22ec127b931798f5daae658a07.tar.gz
xen-b47f9dc7a6076d22ec127b931798f5daae658a07.tar.bz2
xen-b47f9dc7a6076d22ec127b931798f5daae658a07.zip
bitkeeper revision 1.825.3.1 (4062f7e9e4Hjc12XFoN-wZ-bm0GL4w)
synch_bitops.h: new file. system.h, evtchn.h, evtchn.c, entry.S, console.c: Fix races in event-channel status checks and updates.
-rw-r--r--.rootkeys1
-rw-r--r--xen/arch/i386/entry.S2
-rw-r--r--xenolinux-2.4.25-sparse/arch/xen/drivers/console/console.c7
-rw-r--r--xenolinux-2.4.25-sparse/arch/xen/kernel/entry.S12
-rw-r--r--xenolinux-2.4.25-sparse/arch/xen/kernel/evtchn.c5
-rw-r--r--xenolinux-2.4.25-sparse/include/asm-xen/evtchn.h36
-rw-r--r--xenolinux-2.4.25-sparse/include/asm-xen/synch_bitops.h83
-rw-r--r--xenolinux-2.4.25-sparse/include/asm-xen/system.h51
8 files changed, 140 insertions, 57 deletions
diff --git a/.rootkeys b/.rootkeys
index e6eaf701f0..2caa01cd04 100644
--- a/.rootkeys
+++ b/.rootkeys
@@ -684,6 +684,7 @@
3e5a4e68uJz-xI0IBVMD7xRLQKJDFg xenolinux-2.4.25-sparse/include/asm-xen/segment.h
3e5a4e68Nfdh6QcOKUTGCaYkf2LmYA xenolinux-2.4.25-sparse/include/asm-xen/smp.h
3fa8e3f0kBLeE4To2vpdi3cpJbIkbQ xenolinux-2.4.25-sparse/include/asm-xen/suspend.h
+4062f7e2PzFOUGT0PaE7A0VprTU3JQ xenolinux-2.4.25-sparse/include/asm-xen/synch_bitops.h
3e5a4e68mTr0zcp9SXDbnd-XLrrfxw xenolinux-2.4.25-sparse/include/asm-xen/system.h
3f1056a9L_kqHcFheV00KbKBzv9j5w xenolinux-2.4.25-sparse/include/asm-xen/vga.h
3f689063nhrIRsMMZjZxMFk7iEINqQ xenolinux-2.4.25-sparse/include/asm-xen/xen_proc.h
diff --git a/xen/arch/i386/entry.S b/xen/arch/i386/entry.S
index af7c7fa6f5..8db1f20074 100644
--- a/xen/arch/i386/entry.S
+++ b/xen/arch/i386/entry.S
@@ -373,7 +373,7 @@ test_all_events:
andl UPCALL_PENDING(%eax),%ecx # ECX = pending & ~mask
andl $1,%ecx # Is bit 0 pending and not masked?
jz restore_all_guest
- orl %ecx,UPCALL_MASK(%eax) # Upcalls are masked during delivery
+ lock btsl $0,UPCALL_MASK(%eax) # Upcalls are masked during delivery
/*process_guest_events:*/
movzwl PROCESSOR(%ebx),%edx
shl $4,%edx # sizeof(guest_trap_bounce) == 16
diff --git a/xenolinux-2.4.25-sparse/arch/xen/drivers/console/console.c b/xenolinux-2.4.25-sparse/arch/xen/drivers/console/console.c
index c55cd01464..8b76e8ab4e 100644
--- a/xenolinux-2.4.25-sparse/arch/xen/drivers/console/console.c
+++ b/xenolinux-2.4.25-sparse/arch/xen/drivers/console/console.c
@@ -144,12 +144,7 @@ void xen_console_init(void)
register_console(&kcons_info);
- /*
- * XXX This prevents a bogus 'VIRQ_ERROR' when interrupts are enabled
- * for the first time. This works because by this point all important
- * VIRQs (eg. timer) have been properly bound.
- */
- clear_bit(0, &HYPERVISOR_shared_info->evtchn_pending[0]);
+ evtchn_clear_error_virq();
}
diff --git a/xenolinux-2.4.25-sparse/arch/xen/kernel/entry.S b/xenolinux-2.4.25-sparse/arch/xen/kernel/entry.S
index 5f8dcae2fe..22ec1f1b46 100644
--- a/xenolinux-2.4.25-sparse/arch/xen/kernel/entry.S
+++ b/xenolinux-2.4.25-sparse/arch/xen/kernel/entry.S
@@ -210,14 +210,14 @@ ENTRY(system_call)
movl %eax,EAX(%esp) # save the return value
ENTRY(ret_from_sys_call)
movl SYMBOL_NAME(HYPERVISOR_shared_info),%esi
- btsl $0,evtchn_upcall_mask(%esi) # make tests atomic
+ lock btsl $0,evtchn_upcall_mask(%esi) # make tests atomic
ret_syscall_tests:
cmpl $0,need_resched(%ebx)
jne reschedule
cmpl $0,sigpending(%ebx)
je safesti # ensure need_resched updates are seen
signal_return:
- btrl $0,evtchn_upcall_mask(%esi) # reenable event callbacks
+ lock btrl $0,evtchn_upcall_mask(%esi) # reenable event callbacks
movl %esp,%eax
xorl %edx,%edx
call SYMBOL_NAME(do_signal)
@@ -254,7 +254,7 @@ ret_from_exception:
ALIGN
reschedule:
- btrl $0,evtchn_upcall_mask(%esi) # reenable event callbacks
+ lock btrl $0,evtchn_upcall_mask(%esi) # reenable event callbacks
call SYMBOL_NAME(schedule) # test
jmp ret_from_sys_call
@@ -317,12 +317,12 @@ ENTRY(hypervisor_callback)
movb CS(%esp),%cl
test $2,%cl # slow return to ring 2 or 3
jne ret_syscall_tests
-safesti:btrl $0,evtchn_upcall_mask(%esi) # reenable event callbacks
+safesti:lock btrl $0,evtchn_upcall_mask(%esi) # reenable event callbacks
scrit: /**** START OF CRITICAL REGION ****/
testb $1,evtchn_upcall_pending(%esi)
jnz 14f # process more events if necessary...
RESTORE_ALL
-14: btsl $0,evtchn_upcall_mask(%esi)
+14: lock btsl $0,evtchn_upcall_mask(%esi)
jmp 11b
ecrit: /**** END OF CRITICAL REGION ****/
# [How we do the fixup]. We want to merge the current stack frame with the
@@ -364,7 +364,7 @@ critical_fixup_table:
.byte 0x20 # pop %es
.byte 0x24,0x24,0x24 # add $4,%esp
.byte 0x28 # iret
- .byte 0x00,0x00,0x00,0x00,0x00 # btsl $0,4(%esi)
+ .byte 0x00,0x00,0x00,0x00,0x00,0x00 # lock btsl $0,4(%esi)
.byte 0x00,0x00 # jmp 11b
# Hypervisor uses this for application faults while it executes.
diff --git a/xenolinux-2.4.25-sparse/arch/xen/kernel/evtchn.c b/xenolinux-2.4.25-sparse/arch/xen/kernel/evtchn.c
index d312bf0d4f..266867fc74 100644
--- a/xenolinux-2.4.25-sparse/arch/xen/kernel/evtchn.c
+++ b/xenolinux-2.4.25-sparse/arch/xen/kernel/evtchn.c
@@ -14,6 +14,7 @@
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/ptrace.h>
+#include <asm/synch_bitops.h>
#include <asm/hypervisor.h>
#include <asm/hypervisor-ifs/event_channel.h>
@@ -84,7 +85,7 @@ static void evtchn_handle_exceptions(shared_info_t *s, struct pt_regs *regs)
{
printk(KERN_ALERT "Error on IRQ line %d!\n",
dynirq + DYNIRQ_BASE);
- clear_bit(port, &s->evtchn_exception[0]);
+ synch_clear_bit(port, &s->evtchn_exception[0]);
}
else
evtchn_device_upcall(port, 1);
@@ -99,7 +100,7 @@ void evtchn_do_upcall(struct pt_regs *regs)
local_irq_save(flags);
- while ( test_and_clear_bit(0, &s->evtchn_upcall_pending) )
+ while ( synch_test_and_clear_bit(0, &s->evtchn_upcall_pending) )
{
if ( s->evtchn_pending_sel != 0 )
evtchn_handle_normal(s, regs);
diff --git a/xenolinux-2.4.25-sparse/include/asm-xen/evtchn.h b/xenolinux-2.4.25-sparse/include/asm-xen/evtchn.h
index 2aea319dd5..fd52b97009 100644
--- a/xenolinux-2.4.25-sparse/include/asm-xen/evtchn.h
+++ b/xenolinux-2.4.25-sparse/include/asm-xen/evtchn.h
@@ -13,6 +13,7 @@
#include <linux/config.h>
#include <asm/hypervisor.h>
#include <asm/ptrace.h>
+#include <asm/synch_bitops.h>
/*
* LOW-LEVEL DEFINITIONS
@@ -27,21 +28,15 @@ void evtchn_device_upcall(int port, int exception);
static inline void mask_evtchn(int port)
{
shared_info_t *s = HYPERVISOR_shared_info;
- set_bit(port, &s->evtchn_mask[0]);
+ synch_set_bit(port, &s->evtchn_mask[0]);
}
-/*
- * I haven't thought too much about the synchronisation in here against
- * other CPUs, but all the bit-update operations are reorder barriers on
- * x86 so reordering concerns aren't a problem for now. Some mb() calls
- * would be required on weaker architectures I think. -- KAF (24/3/2004)
- */
static inline void unmask_evtchn(int port)
{
shared_info_t *s = HYPERVISOR_shared_info;
int need_upcall = 0;
- clear_bit(port, &s->evtchn_mask[0]);
+ synch_clear_bit(port, &s->evtchn_mask[0]);
/*
* The following is basically the equivalent of 'hw_resend_irq'. Just like
@@ -49,34 +44,43 @@ static inline void unmask_evtchn(int port)
*/
/* Asserted a standard notification? */
- if ( test_bit (port, &s->evtchn_pending[0]) &&
- !test_and_set_bit(port>>5, &s->evtchn_pending_sel) )
+ if ( synch_test_bit (port, &s->evtchn_pending[0]) &&
+ !synch_test_and_set_bit(port>>5, &s->evtchn_pending_sel) )
need_upcall = 1;
/* Asserted an exceptional notification? */
- if ( test_bit (port, &s->evtchn_exception[0]) &&
- !test_and_set_bit(port>>5, &s->evtchn_exception_sel) )
+ if ( synch_test_bit (port, &s->evtchn_exception[0]) &&
+ !synch_test_and_set_bit(port>>5, &s->evtchn_exception_sel) )
need_upcall = 1;
/* If asserted either type of notification, check the master flags. */
if ( need_upcall &&
- !test_and_set_bit(0, &s->evtchn_upcall_pending) &&
- !test_bit (0, &s->evtchn_upcall_mask) )
+ !synch_test_and_set_bit(0, &s->evtchn_upcall_pending) &&
+ !synch_test_bit (0, &s->evtchn_upcall_mask) )
evtchn_do_upcall(NULL);
}
static inline void clear_evtchn(int port)
{
shared_info_t *s = HYPERVISOR_shared_info;
- clear_bit(port, &s->evtchn_pending[0]);
+ synch_clear_bit(port, &s->evtchn_pending[0]);
}
static inline void clear_evtchn_exception(int port)
{
shared_info_t *s = HYPERVISOR_shared_info;
- clear_bit(port, &s->evtchn_exception[0]);
+ synch_clear_bit(port, &s->evtchn_exception[0]);
}
+static inline void evtchn_clear_error_virq(void)
+{
+ /*
+ * XXX This prevents a bogus 'VIRQ_ERROR' when interrupts are enabled
+ * for the first time. This works because by this point all important
+ * VIRQs (eg. timer) have been properly bound.
+ */
+ synch_clear_bit(0, &HYPERVISOR_shared_info->evtchn_pending[0]);
+}
/*
* CHARACTER-DEVICE DEFINITIONS
diff --git a/xenolinux-2.4.25-sparse/include/asm-xen/synch_bitops.h b/xenolinux-2.4.25-sparse/include/asm-xen/synch_bitops.h
new file mode 100644
index 0000000000..8093de0ac9
--- /dev/null
+++ b/xenolinux-2.4.25-sparse/include/asm-xen/synch_bitops.h
@@ -0,0 +1,83 @@
+#ifndef __XEN_SYNCH_BITOPS_H__
+#define __XEN_SYNCH_BITOPS_H__
+
+/*
+ * Copyright 1992, Linus Torvalds.
+ * Heavily modified to provide guaranteed strong synchronisation
+ * when communicating with Xen or other guest OSes running on other CPUs.
+ */
+
+#include <linux/config.h>
+
+#define ADDR (*(volatile long *) addr)
+
+static __inline__ void synch_set_bit(int nr, volatile void * addr)
+{
+ __asm__ __volatile__ (
+ "lock btsl %1,%0"
+ : "=m" (ADDR) : "Ir" (nr) : "memory" );
+}
+
+static __inline__ void synch_clear_bit(int nr, volatile void * addr)
+{
+ __asm__ __volatile__ (
+ "lock btrl %1,%0"
+ : "=m" (ADDR) : "Ir" (nr) : "memory" );
+}
+
+static __inline__ void synch_change_bit(int nr, volatile void * addr)
+{
+ __asm__ __volatile__ (
+ "lock btcl %1,%0"
+ : "=m" (ADDR) : "Ir" (nr) : "memory" );
+}
+
+static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+ __asm__ __volatile__ (
+ "lock btsl %2,%1\n\tsbbl %0,%0"
+ : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
+ return oldbit;
+}
+
+static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+ __asm__ __volatile__ (
+ "lock btrl %2,%1\n\tsbbl %0,%0"
+ : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
+ return oldbit;
+}
+
+static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__ (
+ "lock btcl %2,%1\n\tsbbl %0,%0"
+ : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
+ return oldbit;
+}
+
+static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
+{
+ return ((1UL << (nr & 31)) &
+ (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
+}
+
+static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+ __asm__ __volatile__ (
+ "btl %2,%1\n\tsbbl %0,%0"
+ : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
+ return oldbit;
+}
+
+#define synch_test_bit(nr,addr) \
+(__builtin_constant_p(nr) ? \
+ synch_const_test_bit((nr),(addr)) : \
+ synch_var_test_bit((nr),(addr)))
+
+#endif /* __XEN_SYNCH_BITOPS_H__ */
diff --git a/xenolinux-2.4.25-sparse/include/asm-xen/system.h b/xenolinux-2.4.25-sparse/include/asm-xen/system.h
index 2c1194a781..8237063f59 100644
--- a/xenolinux-2.4.25-sparse/include/asm-xen/system.h
+++ b/xenolinux-2.4.25-sparse/include/asm-xen/system.h
@@ -4,9 +4,10 @@
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/bitops.h>
+#include <asm/synch_bitops.h>
#include <asm/segment.h>
#include <asm/hypervisor.h>
-#include <linux/bitops.h> /* for LOCK_PREFIX */
#include <asm/evtchn.h>
#ifdef __KERNEL__
@@ -250,19 +251,19 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
unsigned long prev;
switch (size) {
case 1:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
+ __asm__ __volatile__("lock cmpxchgb %b1,%2"
: "=a"(prev)
: "q"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
case 2:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
+ __asm__ __volatile__("lock cmpxchgw %w1,%2"
: "=a"(prev)
: "q"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
case 4:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
+ __asm__ __volatile__("lock cmpxchgl %1,%2"
: "=a"(prev)
: "q"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
@@ -320,49 +321,47 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+#define safe_halt() ((void)0)
+
/*
- * NB. ALl the following routines are SMP-safe on x86, even where they look
- * possibly racy. For example, we must ensure that we clear the mask bit and
- * /then/ check teh pending bit. But this will happen because the bit-update
- * operations are ordering barriers.
- *
- * For this reason also, many uses of 'barrier' here are rather anal. But
- * they do no harm.
+ * Note the use of synch_*_bit() operations in the following. These operations
+ * ensure correct serialisation of checks and updates w.r.t. Xen executing on
+ * a different CPU.
*/
#define __cli() \
do { \
- set_bit(0, &HYPERVISOR_shared_info->evtchn_upcall_mask); \
- barrier(); \
+ synch_set_bit(0, &HYPERVISOR_shared_info->evtchn_upcall_mask); \
} while (0)
#define __sti() \
do { \
shared_info_t *_shared = HYPERVISOR_shared_info; \
- clear_bit(0, &_shared->evtchn_upcall_mask); \
- barrier(); \
- if ( unlikely(test_bit(0, &_shared->evtchn_upcall_pending)) ) \
+ synch_clear_bit(0, &_shared->evtchn_upcall_mask); \
+ if ( unlikely(synch_test_bit(0, &_shared->evtchn_upcall_pending)) ) \
evtchn_do_upcall(NULL); \
} while (0)
#define __save_flags(x) \
do { \
- (x) = test_bit(0, &HYPERVISOR_shared_info->evtchn_upcall_mask); \
- barrier(); \
+ (x) = synch_test_bit(0, &HYPERVISOR_shared_info->evtchn_upcall_mask); \
} while (0)
-#define __restore_flags(x) do { if (x) __cli(); else __sti(); } while (0)
-
-#define safe_halt() ((void)0)
+#define __restore_flags(x) do { if (x) __cli(); else __sti(); } while (0)
-#define __save_and_cli(x) do { __save_flags(x); __cli(); } while(0);
-#define __save_and_sti(x) do { __save_flags(x); __sti(); } while(0);
+#define __save_and_cli(x) \
+do { \
+ (x) = synch_test_and_set_bit( \
+ 0, &HYPERVISOR_shared_info->evtchn_upcall_mask); \
+} while (0)
-#define local_irq_save(x) \
+#define __save_and_sti(x) \
do { \
- (x) = test_and_set_bit(0, &HYPERVISOR_shared_info->evtchn_upcall_mask); \
- barrier(); \
+ (x) = synch_test_and_clear_bit( \
+ 0, &HYPERVISOR_shared_info->evtchn_upcall_mask); \
} while (0)
+
+#define local_irq_save(x) __save_and_cli(x)
#define local_irq_restore(x) __restore_flags(x)
#define local_irq_disable() __cli()
#define local_irq_enable() __sti()