author    Jan Beulich <jbeulich@suse.com>    2013-03-04 10:20:57 +0100
committer Jan Beulich <jbeulich@suse.com>    2013-03-04 10:20:57 +0100
commit    da46114c1beccf3c5745ce4dbebe6d44bfbd8132 (patch)
tree      73b9c476fe178a70c15929c16fe81f86c193ab59 /xen/arch/x86/x86_64
parent    d463b005bbd6475ed930a302821efe239e1b2cf9 (diff)
x86: don't rely on __softirq_pending to be the first field in irq_cpustat_t
This is even more so as the field doesn't have a comment to that effect
in the structure definition.

Once modifying the respective assembly code, also convert the IRQSTAT_shift
users to do a 32-bit shift only (as we won't support 48M CPUs any time soon)
and use "cmpl" instead of "testl" when checking the field (both reducing
code size).

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Keir Fraser <keir@xen.org>
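The mechanism behind the added OFFSET() line is the usual asm-offsets trick: a dummy C function forces the compiler to emit the numeric values of sizeof/offsetof expressions, and the build turns those into assembler constants such as IRQSTAT_shift and IRQSTAT_softirq_pending for entry.S to use. A minimal, self-contained sketch of that idea follows; the macro bodies, the literal shift count and the simplified irq_cpustat_t layout are illustrative assumptions, not the exact Xen definitions.

/* Sketch of the asm-offsets mechanism (assumed details, for illustration):
 * the "i" constraint makes the compiler print each constant into its
 * assembly output, from which the build generates a header of #defines. */
#include <stddef.h>

#define DEFINE(_sym, _val) \
    __asm__ __volatile__ ( "\n->" #_sym " %0 " #_val : : "i" (_val) )
#define OFFSET(_sym, _str, _mem) DEFINE(_sym, offsetof(_str, _mem))
#define BLANK() __asm__ __volatile__ ( "\n->" : : )

/* Simplified stand-in for Xen's irq_cpustat_t; only the field used by the
 * entry code is shown, and the 128-byte alignment is an assumption. */
typedef struct {
    unsigned int __softirq_pending;
    /* ... further per-CPU IRQ statistics ... */
} __attribute__((__aligned__(128))) irq_cpustat_t;

void __dummy__(void)
{
    /* Per-CPU stride of irq_stat[], expressed as a shift count (7 for a
     * 128-byte structure; the real code uses LOG_2(sizeof(irq_cpustat_t))). */
    DEFINE(IRQSTAT_shift, 7);
    /* The new constant: the field's offset, instead of assuming it is 0. */
    OFFSET(IRQSTAT_softirq_pending, irq_cpustat_t, __softirq_pending);
    BLANK();
}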
Diffstat (limited to 'xen/arch/x86/x86_64')
-rw-r--r--  xen/arch/x86/x86_64/asm-offsets.c  | 1 +
-rw-r--r--  xen/arch/x86/x86_64/compat/entry.S | 8 ++++----
-rw-r--r--  xen/arch/x86/x86_64/entry.S        | 6 +++---
3 files changed, 8 insertions(+), 7 deletions(-)
diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c
index 6dc832c01c..b0098b35a4 100644
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -156,6 +156,7 @@ void __dummy__(void)
 #endif
 
     DEFINE(IRQSTAT_shift, LOG_2(sizeof(irq_cpustat_t)));
+    OFFSET(IRQSTAT_softirq_pending, irq_cpustat_t, __softirq_pending);
     BLANK();
 
     OFFSET(CPUINFO86_ext_features, struct cpuinfo_x86, x86_capability[1]);
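After this hunk the generated asm-offsets header gains one more constant alongside IRQSTAT_shift; the concrete numbers below are assumptions based on a 128-byte structure with __softirq_pending as its first field, not values taken from a real build.

/* Hypothetical excerpt of the generated asm offsets header: */
#define IRQSTAT_shift 7            /* LOG_2(sizeof(irq_cpustat_t)) */
#define IRQSTAT_softirq_pending 0  /* offsetof(irq_cpustat_t, __softirq_pending);
                                    * currently 0, but no longer assumed to be */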
diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
index 7769019e27..c0afe2cca5 100644
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -96,10 +96,10 @@ ENTRY(compat_test_all_events)
         cli                             # tests must not race interrupts
 /*compat_test_softirqs:*/
         movl  VCPU_processor(%rbx),%eax
-        shlq  $IRQSTAT_shift,%rax
-        leaq  irq_stat(%rip),%rcx
-        testl $~0,(%rcx,%rax,1)
-        jnz   compat_process_softirqs
+        shll  $IRQSTAT_shift,%eax
+        leaq  irq_stat+IRQSTAT_softirq_pending(%rip),%rcx
+        cmpl  $0,(%rcx,%rax,1)
+        jne   compat_process_softirqs
         testb $1,VCPU_mce_pending(%rbx)
         jnz   compat_process_mce
 .Lcompat_test_guest_nmi:
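Both the old and the new sequence compute the address of the current CPU's __softirq_pending word as the irq_stat base plus cpu << IRQSTAT_shift; the new one adds the field's offset explicitly and tests the word with cmpl. A C rendering of that address arithmetic is sketched below; the constant values and the helper name are illustrative assumptions.

#include <stdint.h>

/* Assumed values; in Xen they come from the generated asm offsets. */
#define IRQSTAT_shift            7
#define IRQSTAT_softirq_pending  0

extern uint8_t irq_stat[];   /* really an array of irq_cpustat_t, one per CPU */

/* Equivalent of:
 *     shll $IRQSTAT_shift,%eax
 *     leaq irq_stat+IRQSTAT_softirq_pending(%rip),%rcx
 *     cmpl $0,(%rcx,%rax,1)
 * A 32-bit shift of the CPU number suffices, since
 * cpu * sizeof(irq_cpustat_t) stays far below 2^32 for any realistic
 * CPU count. */
static inline int softirq_pending_nonzero(uint32_t cpu)
{
    uint32_t stride = cpu << IRQSTAT_shift;

    return *(volatile uint32_t *)
           (irq_stat + IRQSTAT_softirq_pending + stride) != 0;
}

The size savings the commit message mentions come from the instruction encodings: shll on %eax drops the REX.W prefix that shlq on %rax needs, and cmpl $0 can use the sign-extended 8-bit immediate form, whereas testl always carries a full 32-bit immediate.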
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index 82d9881945..03e352bda3 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -195,8 +195,8 @@ test_all_events:
         cli                             # tests must not race interrupts
 /*test_softirqs:*/
         movl  VCPU_processor(%rbx),%eax
-        shl   $IRQSTAT_shift,%rax
-        leaq  irq_stat(%rip),%rcx
+        shll  $IRQSTAT_shift,%eax
+        leaq  irq_stat+IRQSTAT_softirq_pending(%rip),%rcx
         cmpl  $0,(%rcx,%rax,1)
         jne   process_softirqs
         testb $1,VCPU_mce_pending(%rbx)
@@ -643,7 +643,7 @@ handle_ist_exception:
         /* Send an IPI to ourselves to cover for the lack of event checking. */
         movl  VCPU_processor(%rbx),%eax
         shll  $IRQSTAT_shift,%eax
-        leaq  irq_stat(%rip),%rcx
+        leaq  irq_stat+IRQSTAT_softirq_pending(%rip),%rcx
         cmpl  $0,(%rcx,%rax,1)
         je    1f
         movl  $EVENT_CHECK_VECTOR,%edi