author     kfraser@localhost.localdomain <kfraser@localhost.localdomain>  2007-03-29 16:52:40 +0100
committer  kfraser@localhost.localdomain <kfraser@localhost.localdomain>  2007-03-29 16:52:40 +0100
commit     90bc7dea0e4996f8bfe2952d53943182ee75eef3 (patch)
tree       7915f5ab031ceadb00718878558047136d45581f
parent     56ada11259ad745d39fdab4ded0403ba13fc0688 (diff)
xen: Get rid of some VCPUF_* flags and move them into their own byte fields.
Signed-off-by: Keir Fraser <keir@xensource.com>
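
The pattern the patch applies throughout the tree: flags that lived as bits in
the shared vcpu_flags word, and therefore needed an atomic (LOCK-prefixed)
bitop on every access, become dedicated one-byte fields read and written with
plain loads and stores. A minimal illustrative sketch of the resulting shape
(not part of the patch; bool_t stands in for Xen's one-byte boolean type):

typedef unsigned char bool_t;          /* stand-in for Xen's 1-byte boolean */

struct vcpu_sketch {
    /* Each former VCPUF_* bit becomes its own byte: no other flag shares
     * the storage unit, so a plain store cannot clobber a neighbour and
     * no atomic read-modify-write is needed. */
    bool_t fpu_initialised;   /* was _VCPUF_fpu_initialised */
    bool_t fpu_dirtied;       /* was _VCPUF_fpu_dirtied     */
    bool_t is_polling;        /* was _VCPUF_polling         */
    bool_t is_initialised;    /* was _VCPUF_initialised     */
    bool_t is_running;        /* was _VCPUF_running         */
    unsigned long vcpu_flags; /* remaining flags stay atomic bits */
};

/* Before: if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) ) ...
 * After:  if ( v->is_initialised ) ...
 * Where two such fields must be observed in a fixed order across CPUs, the
 * patch adds explicit smp_wmb()/smp_mb() barriers in place of the ordering
 * the old atomic bitops provided implicitly. */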
-rw-r--r--  xen/arch/ia64/vmx/vlsapic.c        4
-rw-r--r--  xen/arch/ia64/vmx/vmmu.c           2
-rw-r--r--  xen/arch/ia64/xen/domain.c        16
-rw-r--r--  xen/arch/ia64/xen/hypercall.c      8
-rw-r--r--  xen/arch/ia64/xen/vhpt.c          10
-rw-r--r--  xen/arch/powerpc/domain.c         11
-rw-r--r--  xen/arch/powerpc/domain_build.c    2
-rw-r--r--  xen/arch/x86/domain.c              8
-rw-r--r--  xen/arch/x86/domain_build.c        4
-rw-r--r--  xen/arch/x86/domctl.c              2
-rw-r--r--  xen/arch/x86/hvm/hvm.c            10
-rw-r--r--  xen/arch/x86/hvm/vlapic.c          4
-rw-r--r--  xen/arch/x86/i387.c                4
-rw-r--r--  xen/arch/x86/mm.c                  2
-rw-r--r--  xen/arch/x86/mm/hap/hap.c          3
-rw-r--r--  xen/arch/x86/mm/shadow/multi.c     2
-rw-r--r--  xen/arch/x86/traps.c               2
-rw-r--r--  xen/common/compat/domain.c         2
-rw-r--r--  xen/common/domain.c               14
-rw-r--r--  xen/common/domctl.c                6
-rw-r--r--  xen/common/event_channel.c        12
-rw-r--r--  xen/common/keyhandler.c            2
-rw-r--r--  xen/common/sched_credit.c          3
-rw-r--r--  xen/common/sched_sedf.c            2
-rw-r--r--  xen/common/schedule.c             42
-rw-r--r--  xen/include/asm-ia64/event.h       4
-rw-r--r--  xen/include/asm-powerpc/event.h    6
-rw-r--r--  xen/include/asm-x86/event.h        4
-rw-r--r--  xen/include/asm-x86/i387.h        11
-rw-r--r--  xen/include/xen/sched.h           40
30 files changed, 130 insertions, 112 deletions
diff --git a/xen/arch/ia64/vmx/vlsapic.c b/xen/arch/ia64/vmx/vlsapic.c
index a6d0a64c8a..e64a08f6a8 100644
--- a/xen/arch/ia64/vmx/vlsapic.c
+++ b/xen/arch/ia64/vmx/vlsapic.c
@@ -692,7 +692,7 @@ static void vlsapic_write_ipi(VCPU *vcpu, uint64_t addr, uint64_t value)
if (targ == NULL)
panic_domain(NULL, "Unknown IPI cpu\n");
- if (!test_bit(_VCPUF_initialised, &targ->vcpu_flags) ||
+ if (!targ->is_initialised ||
test_bit(_VCPUF_down, &targ->vcpu_flags)) {
struct pt_regs *targ_regs = vcpu_regs(targ);
@@ -717,7 +717,7 @@ static void vlsapic_write_ipi(VCPU *vcpu, uint64_t addr, uint64_t value)
printk("arch_boot_vcpu: huh, already awake!");
}
} else {
- int running = test_bit(_VCPUF_running, &targ->vcpu_flags);
+ int running = targ->is_running;
vlsapic_deliver_ipi(targ, ((ipi_d_t)value).dm,
((ipi_d_t)value).vector);
vcpu_unblock(targ);
diff --git a/xen/arch/ia64/vmx/vmmu.c b/xen/arch/ia64/vmx/vmmu.c
index ee456c030e..b7ea24dd46 100644
--- a/xen/arch/ia64/vmx/vmmu.c
+++ b/xen/arch/ia64/vmx/vmmu.c
@@ -598,7 +598,7 @@ IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu, u64 va, u64 ps)
vcpu_get_rr(vcpu, va, &args.rid);
args.ps = ps;
for_each_vcpu (d, v) {
- if (!test_bit(_VCPUF_initialised, &v->vcpu_flags))
+ if (!v->is_initialised)
continue;
args.vcpu = v;
diff --git a/xen/arch/ia64/xen/domain.c b/xen/arch/ia64/xen/domain.c
index 9520373825..d7d3e67aa6 100644
--- a/xen/arch/ia64/xen/domain.c
+++ b/xen/arch/ia64/xen/domain.c
@@ -657,7 +657,7 @@ int arch_set_info_guest(struct vcpu *v, vcpu_guest_context_u c)
v->arch.iva = er->iva;
}
- if (test_bit(_VCPUF_initialised, &v->vcpu_flags))
+ if (v->is_initialised)
return 0;
if (d->arch.is_vti) {
@@ -676,10 +676,12 @@ int arch_set_info_guest(struct vcpu *v, vcpu_guest_context_u c)
/* This overrides some registers. */
vcpu_init_regs(v);
- /* Don't redo final setup. Auto-online VCPU0. */
- if (!test_and_set_bit(_VCPUF_initialised, &v->vcpu_flags) &&
- (v->vcpu_id == 0))
- clear_bit(_VCPUF_down, &v->vcpu_flags);
+ if (!v->is_initialised) {
+ v->is_initialised = 1;
+ /* Auto-online VCPU0 when it is initialised. */
+ if (v->vcpu_id == 0)
+ clear_bit(_VCPUF_down, &v->vcpu_flags);
+ }
return 0;
}
@@ -1067,7 +1069,7 @@ int construct_dom0(struct domain *d,
/* Sanity! */
BUG_ON(d != dom0);
BUG_ON(d->vcpu[0] == NULL);
- BUG_ON(test_bit(_VCPUF_initialised, &v->vcpu_flags));
+ BUG_ON(v->is_initialised);
printk("*** LOADING DOMAIN 0 ***\n");
@@ -1188,7 +1190,7 @@ int construct_dom0(struct domain *d,
printk("Dom0: 0x%lx\n", (u64)dom0);
- set_bit(_VCPUF_initialised, &v->vcpu_flags);
+ v->is_initialised = 1;
clear_bit(_VCPUF_down, &v->vcpu_flags);
/* Build firmware.
diff --git a/xen/arch/ia64/xen/hypercall.c b/xen/arch/ia64/xen/hypercall.c
index 98a3165d06..d3630ded83 100644
--- a/xen/arch/ia64/xen/hypercall.c
+++ b/xen/arch/ia64/xen/hypercall.c
@@ -81,11 +81,11 @@ fw_hypercall_ipi (struct pt_regs *regs)
return;
if (vector == XEN_SAL_BOOT_RENDEZ_VEC
- && (!test_bit(_VCPUF_initialised, &targ->vcpu_flags)
+ && (!targ->is_initialised
|| test_bit(_VCPUF_down, &targ->vcpu_flags))) {
/* First start: initialize vpcu. */
- if (!test_bit(_VCPUF_initialised, &targ->vcpu_flags)) {
+ if (!targ->is_initialised) {
struct vcpu_guest_context c;
memset (&c, 0, sizeof (c));
@@ -112,9 +112,7 @@ fw_hypercall_ipi (struct pt_regs *regs)
printk ("arch_boot_vcpu: huu, already awaken!\n");
}
else {
- int running = test_bit(_VCPUF_running,
- &targ->vcpu_flags);
-
+ int running = targ->is_running;
vcpu_pend_interrupt(targ, vector);
vcpu_unblock(targ);
if (running)
diff --git a/xen/arch/ia64/xen/vhpt.c b/xen/arch/ia64/xen/vhpt.c
index bbb5d6f751..07d1810ee8 100644
--- a/xen/arch/ia64/xen/vhpt.c
+++ b/xen/arch/ia64/xen/vhpt.c
@@ -184,7 +184,7 @@ domain_purge_swtc_entries(struct domain *d)
{
struct vcpu* v;
for_each_vcpu(d, v) {
- if (!test_bit(_VCPUF_initialised, &v->vcpu_flags))
+ if (!v->is_initialised)
continue;
/* Purge TC entries.
@@ -202,7 +202,7 @@ domain_purge_swtc_entries_vcpu_dirty_mask(struct domain* d,
for_each_vcpu_mask(vcpu, vcpu_dirty_mask) {
struct vcpu* v = d->vcpu[vcpu];
- if (!test_bit(_VCPUF_initialised, &v->vcpu_flags))
+ if (!v->is_initialised)
continue;
/* Purge TC entries.
@@ -263,7 +263,7 @@ void domain_flush_vtlb_all(struct domain* d)
struct vcpu *v;
for_each_vcpu(d, v) {
- if (!test_bit(_VCPUF_initialised, &v->vcpu_flags))
+ if (!v->is_initialised)
continue;
if (v->processor == cpu)
@@ -341,7 +341,7 @@ void domain_flush_vtlb_range (struct domain *d, u64 vadr, u64 addr_range)
smp_mb();
for_each_vcpu (d, v) {
- if (!test_bit(_VCPUF_initialised, &v->vcpu_flags))
+ if (!v->is_initialised)
continue;
if (HAS_PERVCPU_VHPT(d)) {
@@ -407,7 +407,7 @@ __domain_flush_vtlb_track_entry(struct domain* d,
if (HAS_PERVCPU_VHPT(d)) {
for_each_vcpu_mask(vcpu, entry->vcpu_dirty_mask) {
v = d->vcpu[vcpu];
- if (!test_bit(_VCPUF_initialised, &v->vcpu_flags))
+ if (!v->is_initialised)
continue;
/* Invalidate VHPT entries. */
diff --git a/xen/arch/powerpc/domain.c b/xen/arch/powerpc/domain.c
index 1ecf35a666..e8e3c7805f 100644
--- a/xen/arch/powerpc/domain.c
+++ b/xen/arch/powerpc/domain.c
@@ -168,10 +168,13 @@ int arch_set_info_guest(struct vcpu *v, vcpu_guest_context_u c)
d->shared_info->wc_nsec = dom0->shared_info->wc_nsec;
d->shared_info->arch.boot_timebase = dom0->shared_info->arch.boot_timebase;
- /* Auto-online VCPU0 when it is initialised. */
- if ( !test_and_set_bit(_VCPUF_initialised, &v->vcpu_flags) &&
- (v->vcpu_id == 0) )
- clear_bit(_VCPUF_down, &v->vcpu_flags);
+ if ( !v->is_initialised )
+ {
+ v->is_initialised = 1;
+ /* Auto-online VCPU0 when it is initialised. */
+ if ( v->vcpu_id == 0 )
+ clear_bit(_VCPUF_down, &v->vcpu_flags);
+ }
cpu_init_vcpu(v);
diff --git a/xen/arch/powerpc/domain_build.c b/xen/arch/powerpc/domain_build.c
index af7ad3db45..e8ee2b01dd 100644
--- a/xen/arch/powerpc/domain_build.c
+++ b/xen/arch/powerpc/domain_build.c
@@ -273,7 +273,7 @@ int construct_dom0(struct domain *d,
ofd_dom0_fixup(d, *ofh_tree + rma, cmdline, shared_info_addr);
- set_bit(_VCPUF_initialised, &v->vcpu_flags);
+ v->is_initialised = 1;
clear_bit(_VCPUF_down, &v->vcpu_flags);
rc = 0;
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 005813306c..8da2287a75 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -563,9 +563,7 @@ int arch_set_info_guest(
#endif
}
- clear_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
- if ( flags & VGCF_I387_VALID )
- set_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
+ v->fpu_initialised = !!(flags & VGCF_I387_VALID);
v->arch.flags &= ~TF_kernel_mode;
if ( (flags & VGCF_in_kernel) || is_hvm_vcpu(v)/*???*/ )
@@ -600,7 +598,7 @@ int arch_set_info_guest(
hvm_load_cpu_guest_regs(v, &v->arch.guest_context.user_regs);
}
- if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
+ if ( v->is_initialised )
goto out;
memset(v->arch.guest_context.debugreg, 0,
@@ -699,7 +697,7 @@ int arch_set_info_guest(
update_domain_wallclock_time(d);
/* Don't redo final setup */
- set_bit(_VCPUF_initialised, &v->vcpu_flags);
+ v->is_initialised = 1;
if ( paging_mode_enabled(d) )
paging_update_paging_modes(v);
diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
index a05060ee5e..6cecf6b56f 100644
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -254,7 +254,7 @@ int construct_dom0(struct domain *d,
/* Sanity! */
BUG_ON(d->domain_id != 0);
BUG_ON(d->vcpu[0] == NULL);
- BUG_ON(test_bit(_VCPUF_initialised, &v->vcpu_flags));
+ BUG_ON(v->is_initialised);
printk("*** LOADING DOMAIN 0 ***\n");
@@ -901,7 +901,7 @@ int construct_dom0(struct domain *d,
update_domain_wallclock_time(d);
- set_bit(_VCPUF_initialised, &v->vcpu_flags);
+ v->is_initialised = 1;
clear_bit(_VCPUF_down, &v->vcpu_flags);
/*
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 503b0000c7..9cf3825cd1 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -448,7 +448,7 @@ void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
#endif
c(flags &= ~(VGCF_i387_valid|VGCF_in_kernel));
- if ( test_bit(_VCPUF_fpu_initialised, &v->vcpu_flags) )
+ if ( v->fpu_initialised )
c(flags |= VGCF_i387_valid);
if ( !test_bit(_VCPUF_down, &v->vcpu_flags) )
c(flags |= VGCF_online);
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 2c783bfe7a..632e993fc1 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -85,7 +85,7 @@ void hvm_disable(void)
void hvm_stts(struct vcpu *v)
{
/* FPU state already dirty? Then no need to setup_fpu() lazily. */
- if ( !test_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) )
+ if ( !v->fpu_dirtied )
hvm_funcs.stts(v);
}
@@ -332,10 +332,10 @@ void hvm_vcpu_reset(struct vcpu *v)
hvm_funcs.vcpu_initialise(v);
set_bit(_VCPUF_down, &v->vcpu_flags);
- clear_bit(_VCPUF_initialised, &v->vcpu_flags);
- clear_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
- clear_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags);
clear_bit(_VCPUF_blocked, &v->vcpu_flags);
+ v->fpu_initialised = 0;
+ v->fpu_dirtied = 0;
+ v->is_initialised = 0;
vcpu_unpause(v);
}
@@ -722,7 +722,7 @@ int hvm_bringup_ap(int vcpuid, int trampoline_vector)
LOCK_BIGLOCK(d);
rc = -EEXIST;
- if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
+ if ( !v->is_initialised )
rc = boot_vcpu(d, vcpuid, ctxt);
UNLOCK_BIGLOCK(d);
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index af2d0bbe44..e80ec083b2 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -303,7 +303,7 @@ static int vlapic_accept_irq(struct vcpu *v, int delivery_mode,
if ( trig_mode && !(level & APIC_INT_ASSERT) )
break;
/* FIXME How to check the situation after vcpu reset? */
- if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
+ if ( v->is_initialised )
hvm_vcpu_reset(v);
v->arch.hvm_vcpu.init_sipi_sipi_state =
HVM_VCPU_INIT_SIPI_SIPI_STATE_WAIT_SIPI;
@@ -318,7 +318,7 @@ static int vlapic_accept_irq(struct vcpu *v, int delivery_mode,
v->arch.hvm_vcpu.init_sipi_sipi_state =
HVM_VCPU_INIT_SIPI_SIPI_STATE_NORM;
- if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
+ if ( v->is_initialised )
{
gdprintk(XENLOG_ERR, "SIPI for initialized vcpu %x\n", v->vcpu_id);
goto exit_and_crash;
diff --git a/xen/arch/x86/i387.c b/xen/arch/x86/i387.c
index 899f80da3b..f376a67754 100644
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -21,7 +21,7 @@ void init_fpu(void)
__asm__ __volatile__ ( "fninit" );
if ( cpu_has_xmm )
load_mxcsr(0x1f80);
- set_bit(_VCPUF_fpu_initialised, &current->vcpu_flags);
+ current->fpu_initialised = 1;
}
void save_init_fpu(struct vcpu *v)
@@ -76,7 +76,7 @@ void save_init_fpu(struct vcpu *v)
: "=m" (*fpu_ctxt) );
}
- clear_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags);
+ v->fpu_dirtied = 0;
write_cr0(cr0|X86_CR0_TS);
}
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index d6ca78726a..b559b1f567 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1089,7 +1089,7 @@ static int alloc_l3_table(struct page_info *page)
*/
if ( (pfn >= 0x100000) &&
unlikely(!VM_ASSIST(d, VMASST_TYPE_pae_extended_cr3)) &&
- d->vcpu[0] && test_bit(_VCPUF_initialised, &d->vcpu[0]->vcpu_flags) )
+ d->vcpu[0] && d->vcpu[0]->is_initialised )
{
MEM_LOG("PAE pgd must be below 4GB (0x%lx >= 0x100000)", pfn);
return 0;
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index 47139f2627..ac4df6ec70 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -569,7 +569,8 @@ void hap_update_cr3(struct vcpu *v, int do_locking)
HERE_I_AM;
/* Don't do anything on an uninitialised vcpu */
- if ( !is_hvm_domain(d) && !test_bit(_VCPUF_initialised, &v->vcpu_flags) ) {
+ if ( !is_hvm_domain(d) && !v->is_initialised )
+ {
ASSERT(v->arch.cr3 == 0);
return;
}
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 7945913429..b1419a7b27 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -3427,7 +3427,7 @@ sh_update_cr3(struct vcpu *v, int do_locking)
#endif
/* Don't do anything on an uninitialised vcpu */
- if ( !is_hvm_domain(d) && !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
+ if ( !is_hvm_domain(d) && !v->is_initialised )
{
ASSERT(v->arch.cr3 == 0);
return;
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index c19cc491e6..67e8ca0b6d 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1030,7 +1030,7 @@ long do_fpu_taskswitch(int set)
else
{
v->arch.guest_context.ctrlreg[0] &= ~X86_CR0_TS;
- if ( test_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) )
+ if ( v->fpu_dirtied )
clts();
}
diff --git a/xen/common/compat/domain.c b/xen/common/compat/domain.c
index 12929122f6..a7f2d8ff43 100644
--- a/xen/common/compat/domain.c
+++ b/xen/common/compat/domain.c
@@ -44,7 +44,7 @@ int compat_vcpu_op(int cmd, int vcpuid, XEN_GUEST_HANDLE(void) arg)
LOCK_BIGLOCK(d);
rc = -EEXIST;
- if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
+ if ( !v->is_initialised )
rc = boot_vcpu(d, vcpuid, cmp_ctxt);
UNLOCK_BIGLOCK(d);
diff --git a/xen/common/domain.c b/xen/common/domain.c
index f9e43a04ce..1b3e68bd0e 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -484,7 +484,7 @@ int boot_vcpu(struct domain *d, int vcpuid, vcpu_guest_context_u ctxt)
{
struct vcpu *v = d->vcpu[vcpuid];
- BUG_ON(test_bit(_VCPUF_initialised, &v->vcpu_flags));
+ BUG_ON(v->is_initialised);
return arch_set_info_guest(v, ctxt);
}
@@ -503,13 +503,13 @@ int vcpu_reset(struct vcpu *v)
set_bit(_VCPUF_down, &v->vcpu_flags);
- clear_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
- clear_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags);
+ v->fpu_initialised = 0;
+ v->fpu_dirtied = 0;
+ v->is_polling = 0;
+ v->is_initialised = 0;
clear_bit(_VCPUF_blocked, &v->vcpu_flags);
- clear_bit(_VCPUF_initialised, &v->vcpu_flags);
clear_bit(_VCPUF_nmi_pending, &v->vcpu_flags);
clear_bit(_VCPUF_nmi_masked, &v->vcpu_flags);
- clear_bit(_VCPUF_polling, &v->vcpu_flags);
out:
UNLOCK_BIGLOCK(v->domain);
@@ -546,7 +546,7 @@ long do_vcpu_op(int cmd, int vcpuid, XEN_GUEST_HANDLE(void) arg)
LOCK_BIGLOCK(d);
rc = -EEXIST;
- if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
+ if ( !v->is_initialised )
rc = boot_vcpu(d, vcpuid, ctxt);
UNLOCK_BIGLOCK(d);
@@ -554,7 +554,7 @@ long do_vcpu_op(int cmd, int vcpuid, XEN_GUEST_HANDLE(void) arg)
break;
case VCPUOP_up:
- if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
+ if ( !v->is_initialised )
return -EINVAL;
if ( test_and_clear_bit(_VCPUF_down, &v->vcpu_flags) )
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index 0f0c5998da..5d7e5d7f90 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -105,7 +105,7 @@ void getdomaininfo(struct domain *d, struct xen_domctl_getdomaininfo *info)
{
if ( !(v->vcpu_flags & VCPUF_blocked) )
flags &= ~XEN_DOMINF_blocked;
- if ( v->vcpu_flags & VCPUF_running )
+ if ( v->is_running )
flags |= XEN_DOMINF_running;
info->nr_online_vcpus++;
}
@@ -517,7 +517,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
goto getvcpucontext_out;
ret = -ENODATA;
- if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
+ if ( !v->is_initialised )
goto getvcpucontext_out;
#ifdef CONFIG_COMPAT
@@ -576,7 +576,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
op->u.getvcpuinfo.online = !test_bit(_VCPUF_down, &v->vcpu_flags);
op->u.getvcpuinfo.blocked = test_bit(_VCPUF_blocked, &v->vcpu_flags);
- op->u.getvcpuinfo.running = test_bit(_VCPUF_running, &v->vcpu_flags);
+ op->u.getvcpuinfo.running = v->is_running;
op->u.getvcpuinfo.cpu_time = runstate.time[RUNSTATE_running];
op->u.getvcpuinfo.cpu = v->processor;
ret = 0;
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index 783dfb99f1..0db68de41a 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -529,11 +529,17 @@ void evtchn_set_pending(struct vcpu *v, int port)
}
/* Check if some VCPU might be polling for this event. */
- if ( unlikely(d->is_polling) && likely(xchg(&d->is_polling, 0)) )
+ if ( unlikely(d->is_polling) )
{
+ d->is_polling = 0;
+ smp_mb(); /* check vcpu poll-flags /after/ clearing domain poll-flag */
for_each_vcpu ( d, v )
- if ( test_and_clear_bit(_VCPUF_polling, &v->vcpu_flags) )
- vcpu_unblock(v);
+ {
+ if ( !v->is_polling )
+ continue;
+ v->is_polling = 0;
+ vcpu_unblock(v);
+ }
}
}
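
The hunk above replaces the xchg()-based clear of d->is_polling with a plain
store plus an explicit barrier. A simplified, self-contained sketch of both
sides of the protocol (stand-in fence macros and stub types; the real code
runs under the event-channel and schedule locks, elided here):

typedef unsigned char bool_t;
#define smp_wmb() __atomic_thread_fence(__ATOMIC_RELEASE)  /* stand-in */
#define smp_mb()  __atomic_thread_fence(__ATOMIC_SEQ_CST)  /* stand-in */

struct vcpu   { bool_t is_polling; };
struct domain { bool_t is_polling; };

/* Poller side (do_poll): publish both flags, then re-check for events. */
static void poller(struct vcpu *v, struct domain *d)
{
    v->is_polling = 1;
    d->is_polling = 1;
    smp_wmb();   /* flags visible before the event re-check */
    /* ... scan the polled ports; block only if none is pending ... */
}

/* Waker side (evtchn_set_pending): clear the domain flag, then scan vcpus.
 * smp_mb() keeps the clear ahead of the per-vcpu reads, so either the waker
 * sees v->is_polling and unblocks v, or the poller, publishing its flags
 * after the clear, sees the pending event in its own re-check. */
static void waker(struct domain *d, struct vcpu *vcpus[], int n)
{
    int i;
    if ( !d->is_polling )
        return;
    d->is_polling = 0;
    smp_mb();    /* check vcpu poll-flags /after/ clearing domain flag */
    for ( i = 0; i < n; i++ )
        if ( vcpus[i]->is_polling )
        {
            vcpus[i]->is_polling = 0;
            /* vcpu_unblock(vcpus[i]); elided */
        }
}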
diff --git a/xen/common/keyhandler.c b/xen/common/keyhandler.c
index e3eb0b9cee..838c1222a4 100644
--- a/xen/common/keyhandler.c
+++ b/xen/common/keyhandler.c
@@ -188,7 +188,7 @@ static void dump_domains(unsigned char key)
printk(" VCPU%d: CPU%d [has=%c] flags=%lx "
"upcall_pend = %02x, upcall_mask = %02x ",
v->vcpu_id, v->processor,
- test_bit(_VCPUF_running, &v->vcpu_flags) ? 'T':'F',
+ v->is_running ? 'T':'F',
v->vcpu_flags,
vcpu_info(v, evtchn_upcall_pending),
vcpu_info(v, evtchn_upcall_mask));
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 6bbbb7ae5b..6bc79cf0a4 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -411,8 +411,7 @@ __csched_vcpu_is_migrateable(struct vcpu *vc, int dest_cpu)
* Don't pick up work that's in the peer's scheduling tail. Also only pick
* up work that's allowed to run on our CPU.
*/
- return !test_bit(_VCPUF_running, &vc->vcpu_flags) &&
- cpu_isset(dest_cpu, vc->cpu_affinity);
+ return !vc->is_running && cpu_isset(dest_cpu, vc->cpu_affinity);
}
static int
diff --git a/xen/common/sched_sedf.c b/xen/common/sched_sedf.c
index 8f79dd23b4..8a1223a03b 100644
--- a/xen/common/sched_sedf.c
+++ b/xen/common/sched_sedf.c
@@ -1189,7 +1189,7 @@ void sedf_wake(struct vcpu *d)
static void sedf_dump_domain(struct vcpu *d)
{
printk("%i.%i has=%c ", d->domain->domain_id, d->vcpu_id,
- test_bit(_VCPUF_running, &d->vcpu_flags) ? 'T':'F');
+ d->is_running ? 'T':'F');
printk("p=%"PRIu64" sl=%"PRIu64" ddl=%"PRIu64" w=%hu"
" sc=%i xtr(%s)=%"PRIu64" ew=%hu",
EDOM_INFO(d)->period, EDOM_INFO(d)->slice, EDOM_INFO(d)->deadl_abs,
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 6fc968126e..93c15d151a 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -123,7 +123,7 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor)
{
per_cpu(schedule_data, v->processor).curr = v;
per_cpu(schedule_data, v->processor).idle = v;
- set_bit(_VCPUF_running, &v->vcpu_flags);
+ v->is_running = 1;
}
TRACE_2D(TRC_SCHED_DOM_ADD, v->domain->domain_id, v->vcpu_id);
@@ -172,7 +172,7 @@ void vcpu_sleep_sync(struct vcpu *v)
{
vcpu_sleep_nosync(v);
- while ( !vcpu_runnable(v) && test_bit(_VCPUF_running, &v->vcpu_flags) )
+ while ( !vcpu_runnable(v) && v->is_running )
cpu_relax();
sync_vcpu_execstate(v);
@@ -208,7 +208,12 @@ static void vcpu_migrate(struct vcpu *v)
vcpu_schedule_lock_irqsave(v, flags);
- if ( test_bit(_VCPUF_running, &v->vcpu_flags) ||
+ /*
+ * NB. Check of v->is_running happens /after/ setting migration flag
+ * because they both happen in (different) spinlock regions, and those
+ * regions are strictly serialised.
+ */
+ if ( v->is_running ||
!test_and_clear_bit(_VCPUF_migrating, &v->vcpu_flags) )
{
vcpu_schedule_unlock_irqrestore(v, flags);
@@ -234,7 +239,7 @@ static void vcpu_migrate(struct vcpu *v)
void vcpu_force_reschedule(struct vcpu *v)
{
vcpu_schedule_lock_irq(v);
- if ( test_bit(_VCPUF_running, &v->vcpu_flags) )
+ if ( v->is_running )
set_bit(_VCPUF_migrating, &v->vcpu_flags);
vcpu_schedule_unlock_irq(v);
@@ -310,14 +315,13 @@ static long do_poll(struct sched_poll *sched_poll)
if ( !guest_handle_okay(sched_poll->ports, sched_poll->nr_ports) )
return -EFAULT;
- /* These operations must occur in order. */
set_bit(_VCPUF_blocked, &v->vcpu_flags);
- set_bit(_VCPUF_polling, &v->vcpu_flags);
- smp_wmb();
+ v->is_polling = 1;
d->is_polling = 1;
- smp_wmb();
/* Check for events /after/ setting flags: avoids wakeup waiting race. */
+ smp_wmb();
+
for ( i = 0; i < sched_poll->nr_ports; i++ )
{
rc = -EFAULT;
@@ -342,7 +346,7 @@ static long do_poll(struct sched_poll *sched_poll)
return 0;
out:
- clear_bit(_VCPUF_polling, &v->vcpu_flags);
+ v->is_polling = 0;
clear_bit(_VCPUF_blocked, &v->vcpu_flags);
return rc;
}
@@ -651,8 +655,8 @@ static void schedule(void)
ASSERT(next->runstate.state != RUNSTATE_running);
vcpu_runstate_change(next, RUNSTATE_running, now);
- ASSERT(!test_bit(_VCPUF_running, &next->vcpu_flags));
- set_bit(_VCPUF_running, &next->vcpu_flags);
+ ASSERT(!next->is_running);
+ next->is_running = 1;
spin_unlock_irq(&sd->schedule_lock);
@@ -673,7 +677,13 @@ static void schedule(void)
void context_saved(struct vcpu *prev)
{
- clear_bit(_VCPUF_running, &prev->vcpu_flags);
+ /* Clear running flag /after/ writing context to memory. */
+ smp_wmb();
+
+ prev->is_running = 0;
+
+ /* Check for migration request /after/ clearing running flag. */
+ smp_mb();
if ( unlikely(test_bit(_VCPUF_migrating, &prev->vcpu_flags)) )
vcpu_migrate(prev);
@@ -704,8 +714,12 @@ static void vcpu_singleshot_timer_fn(void *data)
static void poll_timer_fn(void *data)
{
struct vcpu *v = data;
- if ( test_and_clear_bit(_VCPUF_polling, &v->vcpu_flags) )
- vcpu_unblock(v);
+
+ if ( !v->is_polling )
+ return;
+
+ v->is_polling = 0;
+ vcpu_unblock(v);
}
/* Initialise the data structures. */
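
context_saved() above turns the old atomic clear of _VCPUF_running into a
plain store bracketed by barriers. A self-contained sketch of the contract
(same stand-in fence macros; the migration bit stays a real flag, as in the
patch):

typedef unsigned char bool_t;
#define smp_wmb() __atomic_thread_fence(__ATOMIC_RELEASE)  /* stand-in */
#define smp_mb()  __atomic_thread_fence(__ATOMIC_SEQ_CST)  /* stand-in */
#define _VCPUF_migrating 6

struct vcpu { bool_t is_running; unsigned long vcpu_flags; };

static void context_saved_sketch(struct vcpu *prev)
{
    smp_wmb();              /* saved register state visible first ...   */
    prev->is_running = 0;   /* ... then the world sees "not running"    */
    smp_mb();               /* ... and only then re-check for migration */
    if ( prev->vcpu_flags & (1UL << _VCPUF_migrating) )
        /* vcpu_migrate(prev) is safe here: prev's context is fully
         * saved, and vcpu_migrate() rechecks is_running under the
         * schedule lock before moving the vcpu. */ ;
}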
diff --git a/xen/include/asm-ia64/event.h b/xen/include/asm-ia64/event.h
index c3d67cfa2c..92cdddac79 100644
--- a/xen/include/asm-ia64/event.h
+++ b/xen/include/asm-ia64/event.h
@@ -20,10 +20,10 @@ static inline void vcpu_kick(struct vcpu *v)
* locks) but the key insight is that each change will cause
* evtchn_upcall_pending to be polled.
*
- * NB2. We save VCPUF_running across the unblock to avoid a needless
+ * NB2. We save the running flag across the unblock to avoid a needless
* IPI for domains that we IPI'd to unblock.
*/
- int running = test_bit(_VCPUF_running, &v->vcpu_flags);
+ int running = v->is_running;
vcpu_unblock(v);
if ( running )
smp_send_event_check_cpu(v->processor);
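
The NB2 reasoning in compact form: sample is_running before the unblock and
IPI only a vcpu that was already running; a vcpu woken by the unblock will
notice evtchn_upcall_pending on its way back into guest context. Sketch only,
with the unblock and IPI calls elided:

typedef unsigned char bool_t;
struct vcpu { bool_t is_running; int processor; };

static void vcpu_kick_sketch(struct vcpu *v)
{
    int running = v->is_running;   /* sample /before/ unblocking */
    /* vcpu_unblock(v); may start v running on another CPU at once */
    if ( running )
        /* smp_send_event_check_cpu(v->processor): only a vcpu already in
         * guest context needs the IPI; a freshly-woken one polls
         * evtchn_upcall_pending as it resumes. */ ;
}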
diff --git a/xen/include/asm-powerpc/event.h b/xen/include/asm-powerpc/event.h
index 1dd4e5b3c3..9f10ee98ba 100644
--- a/xen/include/asm-powerpc/event.h
+++ b/xen/include/asm-powerpc/event.h
@@ -27,7 +27,7 @@
static inline void evtchn_notify(struct vcpu *v)
{
#ifdef XXX_NO_SMP_YET
- int running = test_bit(_VCPUF_running, &v->vcpu_flags);
+ int running = v->is_running;
vcpu_unblock(v);
if (running)
smp_send_event_check_cpu(v->processor);
@@ -73,10 +73,10 @@ static inline void vcpu_kick(struct vcpu *v)
* locks) but the key insight is that each change will cause
* evtchn_upcall_pending to be polled.
*
- * NB2. We save VCPUF_running across the unblock to avoid a needless
+ * NB2. We save the running flag across the unblock to avoid a needless
* IPI for domains that we IPI'd to unblock.
*/
- int running = test_bit(_VCPUF_running, &v->vcpu_flags);
+ int running = v->is_running;
vcpu_unblock(v);
if (running)
smp_send_event_check_cpu(v->processor);
diff --git a/xen/include/asm-x86/event.h b/xen/include/asm-x86/event.h
index 86f6653f83..922ede6fc6 100644
--- a/xen/include/asm-x86/event.h
+++ b/xen/include/asm-x86/event.h
@@ -20,10 +20,10 @@ static inline void vcpu_kick(struct vcpu *v)
* locks) but the key insight is that each change will cause
* evtchn_upcall_pending to be polled.
*
- * NB2. We save VCPUF_running across the unblock to avoid a needless
+ * NB2. We save the running flag across the unblock to avoid a needless
* IPI for domains that we IPI'd to unblock.
*/
- int running = test_bit(_VCPUF_running, &v->vcpu_flags);
+ int running = v->is_running;
vcpu_unblock(v);
if ( running )
smp_send_event_check_cpu(v->processor);
diff --git a/xen/include/asm-x86/i387.h b/xen/include/asm-x86/i387.h
index a324af013f..b0ce77aac3 100644
--- a/xen/include/asm-x86/i387.h
+++ b/xen/include/asm-x86/i387.h
@@ -18,9 +18,9 @@ extern void init_fpu(void);
extern void save_init_fpu(struct vcpu *v);
extern void restore_fpu(struct vcpu *v);
-#define unlazy_fpu(v) do { \
- if ( test_bit(_VCPUF_fpu_dirtied, &(v)->vcpu_flags) ) \
- save_init_fpu(v); \
+#define unlazy_fpu(v) do { \
+ if ( (v)->fpu_dirtied ) \
+ save_init_fpu(v); \
} while ( 0 )
#define load_mxcsr(val) do { \
@@ -33,9 +33,10 @@ static inline void setup_fpu(struct vcpu *v)
/* Avoid recursion. */
clts();
- if ( !test_and_set_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) )
+ if ( !v->fpu_dirtied )
{
- if ( test_bit(_VCPUF_fpu_initialised, &v->vcpu_flags) )
+ v->fpu_dirtied = 1;
+ if ( v->fpu_initialised )
restore_fpu(v);
else
init_fpu();
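
setup_fpu() can split the old test_and_set_bit() into a plain test followed
by a plain set because only the vcpu itself, on its own CPU, ever runs this
path. A self-contained sketch of the lazy-FPU state machine (restore and
init calls elided):

typedef unsigned char bool_t;
struct vcpu_fpu { bool_t fpu_initialised, fpu_dirtied; };

static void setup_fpu_sketch(struct vcpu_fpu *v)
{
    /* clts(); clear CR0.TS so FPU use stops trapping (elided) */
    if ( !v->fpu_dirtied )
    {
        v->fpu_dirtied = 1;          /* FPU state is live from now on */
        if ( v->fpu_initialised )
            ; /* restore_fpu(v): reload the previously saved state */
        else
            ; /* init_fpu(): fninit, then fpu_initialised = 1 */
    }
}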
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 2a86f77806..7a1257ee99 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -100,6 +100,17 @@ struct vcpu
} runstate_guest; /* guest address */
#endif
+ /* Has the FPU been initialised? */
+ bool_t fpu_initialised;
+ /* Has the FPU been used since it was last saved? */
+ bool_t fpu_dirtied;
+ /* Is this VCPU polling any event channels (SCHEDOP_poll)? */
+ bool_t is_polling;
+ /* Initialization completed for this VCPU? */
+ bool_t is_initialised;
+ /* Currently running on a CPU? */
+ bool_t is_running;
+
unsigned long vcpu_flags;
spinlock_t pause_lock;
@@ -423,41 +434,26 @@ extern struct domain *domain_list;
/*
* Per-VCPU flags (vcpu_flags).
*/
- /* Has the FPU been initialised? */
-#define _VCPUF_fpu_initialised 0
-#define VCPUF_fpu_initialised (1UL<<_VCPUF_fpu_initialised)
- /* Has the FPU been used since it was last saved? */
-#define _VCPUF_fpu_dirtied 1
-#define VCPUF_fpu_dirtied (1UL<<_VCPUF_fpu_dirtied)
/* Domain is blocked waiting for an event. */
-#define _VCPUF_blocked 2
+#define _VCPUF_blocked 0
#define VCPUF_blocked (1UL<<_VCPUF_blocked)
- /* Currently running on a CPU? */
-#define _VCPUF_running 3
-#define VCPUF_running (1UL<<_VCPUF_running)
- /* Initialization completed. */
-#define _VCPUF_initialised 4
-#define VCPUF_initialised (1UL<<_VCPUF_initialised)
/* VCPU is offline. */
-#define _VCPUF_down 5
+#define _VCPUF_down 1
#define VCPUF_down (1UL<<_VCPUF_down)
/* NMI callback pending for this VCPU? */
-#define _VCPUF_nmi_pending 8
+#define _VCPUF_nmi_pending 2
#define VCPUF_nmi_pending (1UL<<_VCPUF_nmi_pending)
/* Avoid NMI reentry by allowing NMIs to be masked for short periods. */
-#define _VCPUF_nmi_masked 9
+#define _VCPUF_nmi_masked 3
#define VCPUF_nmi_masked (1UL<<_VCPUF_nmi_masked)
- /* VCPU is polling a set of event channels (SCHEDOP_poll). */
-#define _VCPUF_polling 10
-#define VCPUF_polling (1UL<<_VCPUF_polling)
/* VCPU is paused by the hypervisor? */
-#define _VCPUF_paused 11
+#define _VCPUF_paused 4
#define VCPUF_paused (1UL<<_VCPUF_paused)
/* VCPU is blocked awaiting an event to be consumed by Xen. */
-#define _VCPUF_blocked_in_xen 12
+#define _VCPUF_blocked_in_xen 5
#define VCPUF_blocked_in_xen (1UL<<_VCPUF_blocked_in_xen)
/* VCPU affinity has changed: migrating to a new CPU. */
-#define _VCPUF_migrating 13
+#define _VCPUF_migrating 6
#define VCPUF_migrating (1UL<<_VCPUF_migrating)
static inline int vcpu_runnable(struct vcpu *v)