diff options
author | George Dunlap <george.dunlap@eu.citrix.com> | 2013-03-04 13:37:39 +0100 |
---|---|---|
committer | Jan Beulich <jbeulich@suse.com> | 2013-03-04 13:37:39 +0100 |
commit | be6507509454adf3bb5a50b9406c88504e996d5a (patch) | |
tree | 22ad57b2c9a4ceccfd4bc9789f6992f95238d30a /xen/common/sched_credit.c | |
parent | 81881483859fabf918afe6da8e1a679a37479537 (diff) | |
download | xen-be6507509454adf3bb5a50b9406c88504e996d5a.tar.gz xen-be6507509454adf3bb5a50b9406c88504e996d5a.tar.bz2 xen-be6507509454adf3bb5a50b9406c88504e996d5a.zip |
credit1: Use atomic bit operations for the flags structure
The flags structure is not protected by locks (or more precisely,
it is protected using an inconsistent set of locks); we therefore need
to make sure that all accesses are atomic-safe. This is particularly
important in the case of the PARKED flag, which if clobbered while
changing the YIELD bit will leave a vcpu wedged in an offline state.
Using the atomic bitops also requires us to change the size of the "flags"
element.
Spotted-by: Igor Pavlikevich <ipavlikevich@gmail.com>
Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
Diffstat (limited to 'xen/common/sched_credit.c')
-rw-r--r-- | xen/common/sched_credit.c | 23 |
1 file changed, 10 insertions, 13 deletions
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c index 7fbc0059ad..9e9ef33a49 100644 --- a/xen/common/sched_credit.c +++ b/xen/common/sched_credit.c @@ -47,8 +47,8 @@ /* * Flags */ -#define CSCHED_FLAG_VCPU_PARKED 0x0001 /* VCPU over capped credits */ -#define CSCHED_FLAG_VCPU_YIELD 0x0002 /* VCPU yielding */ +#define CSCHED_FLAG_VCPU_PARKED 0x0 /* VCPU over capped credits */ +#define CSCHED_FLAG_VCPU_YIELD 0x1 /* VCPU yielding */ /* @@ -139,7 +139,7 @@ struct csched_vcpu { atomic_t credit; unsigned int residual; s_time_t start_time; /* When we were scheduled (used for credit) */ - uint16_t flags; + unsigned flags; int16_t pri; #ifdef CSCHED_STATS struct { @@ -222,7 +222,7 @@ __runq_insert(unsigned int cpu, struct csched_vcpu *svc) /* If the vcpu yielded, try to put it behind one lower-priority * runnable vcpu if we can. The next runq_sort will bring it forward * within 30ms if the queue too long. */ - if ( svc->flags & CSCHED_FLAG_VCPU_YIELD + if ( test_bit(CSCHED_FLAG_VCPU_YIELD, &svc->flags) && __runq_elem(iter)->pri > CSCHED_PRI_IDLE ) { iter=iter->next; @@ -817,7 +817,7 @@ csched_vcpu_wake(const struct scheduler *ops, struct vcpu *vc) * those. 
*/ if ( svc->pri == CSCHED_PRI_TS_UNDER && - !(svc->flags & CSCHED_FLAG_VCPU_PARKED) ) + !test_bit(CSCHED_FLAG_VCPU_PARKED, &svc->flags) ) { svc->pri = CSCHED_PRI_TS_BOOST; } @@ -830,10 +830,10 @@ csched_vcpu_wake(const struct scheduler *ops, struct vcpu *vc) static void csched_vcpu_yield(const struct scheduler *ops, struct vcpu *vc) { - struct csched_vcpu * const sv = CSCHED_VCPU(vc); + struct csched_vcpu * const svc = CSCHED_VCPU(vc); /* Let the scheduler know that this vcpu is trying to yield */ - sv->flags |= CSCHED_FLAG_VCPU_YIELD; + set_bit(CSCHED_FLAG_VCPU_YIELD, &svc->flags); } static int @@ -1157,11 +1157,10 @@ csched_acct(void* dummy) /* Park running VCPUs of capped-out domains */ if ( sdom->cap != 0U && credit < -credit_cap && - !(svc->flags & CSCHED_FLAG_VCPU_PARKED) ) + !test_and_set_bit(CSCHED_FLAG_VCPU_PARKED, &svc->flags) ) { SCHED_STAT_CRANK(vcpu_park); vcpu_pause_nosync(svc->vcpu); - svc->flags |= CSCHED_FLAG_VCPU_PARKED; } /* Lower bound on credits */ @@ -1177,7 +1176,7 @@ csched_acct(void* dummy) svc->pri = CSCHED_PRI_TS_UNDER; /* Unpark any capped domains whose credits go positive */ - if ( svc->flags & CSCHED_FLAG_VCPU_PARKED) + if ( test_and_clear_bit(CSCHED_FLAG_VCPU_PARKED, &svc->flags) ) { /* * It's important to unset the flag AFTER the unpause() @@ -1186,7 +1185,6 @@ csched_acct(void* dummy) */ SCHED_STAT_CRANK(vcpu_unpark); vcpu_unpause(svc->vcpu); - svc->flags &= ~CSCHED_FLAG_VCPU_PARKED; } /* Upper bound on credits means VCPU stops earning */ @@ -1448,8 +1446,7 @@ csched_schedule( /* * Clear YIELD flag before scheduling out */ - if ( scurr->flags & CSCHED_FLAG_VCPU_YIELD ) - scurr->flags &= ~(CSCHED_FLAG_VCPU_YIELD); + clear_bit(CSCHED_FLAG_VCPU_YIELD, &scurr->flags); /* * SMP Load balance: |