 xen/arch/x86/hvm/hpet.c        | 41
 xen/arch/x86/hvm/hvm.c         |  1
 xen/arch/x86/hvm/i8254.c       | 83
 xen/arch/x86/hvm/pmtimer.c     | 60
 xen/arch/x86/hvm/rtc.c         | 58
 xen/arch/x86/hvm/vpt.c         | 77
 xen/common/timer.c             |  2
 xen/include/asm-x86/hvm/vcpu.h |  3
 xen/include/asm-x86/hvm/vpt.h  |  6
 9 files changed, 277 insertions(+), 54 deletions(-)
diff --git a/xen/arch/x86/hvm/hpet.c b/xen/arch/x86/hvm/hpet.c
index 155dbffcc7..2b8935b47f 100644
--- a/xen/arch/x86/hvm/hpet.c
+++ b/xen/arch/x86/hvm/hpet.c
@@ -113,6 +113,8 @@ static inline int hpet_check_access_length(
static inline uint64_t hpet_read_maincounter(HPETState *h)
{
+ ASSERT(spin_is_locked(&h->lock));
+
if ( hpet_enabled(h) )
return guest_time_hpet(h->vcpu) + h->mc_offset;
else
@@ -131,6 +133,8 @@ static unsigned long hpet_read(
if ( hpet_check_access_length(addr, length) != 0 )
return ~0UL;
+ spin_lock(&h->lock);
+
val = hpet_read64(h, addr & ~7);
if ( (addr & ~7) == HPET_COUNTER )
val = hpet_read_maincounter(h);
@@ -139,12 +143,15 @@ static unsigned long hpet_read(
if ( length != 8 )
result = (val >> ((addr & 7) * 8)) & ((1UL << (length * 8)) - 1);
+ spin_unlock(&h->lock);
+
return result;
}
static void hpet_stop_timer(HPETState *h, unsigned int tn)
{
ASSERT(tn < HPET_TIMER_NUM);
+ ASSERT(spin_is_locked(&h->lock));
stop_timer(&h->timers[tn]);
}
@@ -157,7 +164,8 @@ static void hpet_set_timer(HPETState *h, unsigned int tn)
uint64_t tn_cmp, cur_tick, diff;
ASSERT(tn < HPET_TIMER_NUM);
-
+ ASSERT(spin_is_locked(&h->lock));
+
if ( !hpet_enabled(h) || !timer_enabled(h, tn) )
return;
@@ -213,6 +221,8 @@ static void hpet_write(
if ( hpet_check_access_length(addr, length) != 0 )
return;
+ spin_lock(&h->lock);
+
old_val = hpet_read64(h, addr & ~7);
if ( (addr & ~7) == HPET_COUNTER )
old_val = hpet_read_maincounter(h);
@@ -302,6 +312,8 @@ static void hpet_write(
/* Ignore writes to unsupported and reserved registers. */
break;
}
+
+ spin_unlock(&h->lock);
}
static int hpet_range(struct vcpu *v, unsigned long addr)
@@ -321,6 +333,8 @@ static void hpet_route_interrupt(HPETState *h, unsigned int tn)
unsigned int tn_int_route = timer_int_route(h, tn);
struct domain *d = h->vcpu->domain;
+ ASSERT(spin_is_locked(&h->lock));
+
if ( (tn <= 1) && (h->hpet.config & HPET_CFG_LEGACY) )
{
/* if LegacyReplacementRoute bit is set, HPET specification requires
@@ -352,8 +366,13 @@ static void hpet_timer_fn(void *opaque)
HPETState *h = htfi->hs;
unsigned int tn = htfi->tn;
+ spin_lock(&h->lock);
+
if ( !hpet_enabled(h) || !timer_enabled(h, tn) )
+ {
+ spin_unlock(&h->lock);
return;
+ }
hpet_route_interrupt(h, tn);
@@ -374,6 +393,8 @@ static void hpet_timer_fn(void *opaque)
set_timer(&h->timers[tn],
NOW() + hpet_tick_to_ns(h, h->hpet.period[tn]));
}
+
+ spin_unlock(&h->lock);
}
void hpet_migrate_timers(struct vcpu *v)
@@ -391,12 +412,19 @@ void hpet_migrate_timers(struct vcpu *v)
static int hpet_save(struct domain *d, hvm_domain_context_t *h)
{
HPETState *hp = &d->arch.hvm_domain.pl_time.vhpet;
+ int rc;
+
+ spin_lock(&hp->lock);
/* Write the proper value into the main counter */
hp->hpet.mc64 = hp->mc_offset + guest_time_hpet(hp->vcpu);
/* Save the HPET registers */
- return hvm_save_entry(HPET, 0, h, &hp->hpet);
+ rc = hvm_save_entry(HPET, 0, h, &hp->hpet);
+
+ spin_unlock(&hp->lock);
+
+ return rc;
}
static int hpet_load(struct domain *d, hvm_domain_context_t *h)
@@ -404,9 +432,14 @@ static int hpet_load(struct domain *d, hvm_domain_context_t *h)
HPETState *hp = &d->arch.hvm_domain.pl_time.vhpet;
int i;
+ spin_lock(&hp->lock);
+
/* Reload the HPET registers */
if ( hvm_load_entry(HPET, h, &hp->hpet) )
+ {
+ spin_unlock(&hp->lock);
return -EINVAL;
+ }
/* Recalculate the offset between the main counter and guest time */
hp->mc_offset = hp->hpet.mc64 - guest_time_hpet(hp->vcpu);
@@ -415,6 +448,8 @@ static int hpet_load(struct domain *d, hvm_domain_context_t *h)
for ( i = 0; i < HPET_TIMER_NUM; i++ )
hpet_set_timer(hp, i);
+ spin_unlock(&hp->lock);
+
return 0;
}
@@ -427,6 +462,8 @@ void hpet_init(struct vcpu *v)
memset(h, 0, sizeof(HPETState));
+ spin_lock_init(&h->lock);
+
h->vcpu = v;
h->tsc_freq = ticks_per_sec(v);
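
Every hunk in hpet.c above applies one discipline: externally reachable paths (hpet_read(), hpet_write(), the timer callback, save/load and init) acquire the new per-HPET lock, while internal helpers such as hpet_read_maincounter() only assert that it is already held. Below is a minimal, self-contained model of that split in plain C; the pthread mutex plus a flag stand in for Xen's spinlock and spin_is_locked(), and all names are illustrative rather than Xen's.

#include <assert.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct hpet_model {
    pthread_mutex_t lock;
    int lock_held;              /* stand-in for spin_is_locked() */
    uint64_t mc_offset, guest_time;
};

/* Internal helper: the caller must already hold the lock, mirroring the
 * new ASSERT(spin_is_locked(&h->lock)) in hpet_read_maincounter(). */
static uint64_t read_maincounter(struct hpet_model *h)
{
    assert(h->lock_held);
    return h->guest_time + h->mc_offset;
}

/* Entry point: takes the lock itself, as hpet_read()/hpet_write() now do. */
static uint64_t hpet_read_model(struct hpet_model *h)
{
    uint64_t val;

    pthread_mutex_lock(&h->lock);
    h->lock_held = 1;
    val = read_maincounter(h);
    h->lock_held = 0;
    pthread_mutex_unlock(&h->lock);
    return val;
}

int main(void)
{
    struct hpet_model h = { PTHREAD_MUTEX_INITIALIZER, 0, 5, 100 };
    printf("main counter: %llu\n", (unsigned long long)hpet_read_model(&h));
    return 0;
}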
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index dfd0400b46..367defdc32 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -401,6 +401,7 @@ int hvm_vcpu_initialise(struct vcpu *v)
get_ioreq(v)->vp_eport = v->arch.hvm_vcpu.xen_port;
spin_unlock(&v->domain->arch.hvm_domain.ioreq.lock);
+ spin_lock_init(&v->arch.hvm_vcpu.tm_lock);
INIT_LIST_HEAD(&v->arch.hvm_vcpu.tm_list);
if ( v->vcpu_id == 0 )
diff --git a/xen/arch/x86/hvm/i8254.c b/xen/arch/x86/hvm/i8254.c
index 8c46bf6057..2aa3c1a5ba 100644
--- a/xen/arch/x86/hvm/i8254.c
+++ b/xen/arch/x86/hvm/i8254.c
@@ -82,6 +82,8 @@ static int pit_get_count(PITState *pit, int channel)
struct hvm_hw_pit_channel *c = &pit->hw.channels[channel];
struct vcpu *v = vpit_vcpu(pit);
+ ASSERT(spin_is_locked(&pit->lock));
+
d = muldiv64(hvm_get_guest_time(v) - pit->count_load_time[channel],
PIT_FREQ, ticks_per_sec(v));
@@ -111,6 +113,8 @@ static int pit_get_out(PITState *pit, int channel)
int out;
struct vcpu *v = vpit_vcpu(pit);
+ ASSERT(spin_is_locked(&pit->lock));
+
d = muldiv64(hvm_get_guest_time(v) - pit->count_load_time[channel],
PIT_FREQ, ticks_per_sec(v));
@@ -143,6 +147,8 @@ static void pit_set_gate(PITState *pit, int channel, int val)
struct hvm_hw_pit_channel *s = &pit->hw.channels[channel];
struct vcpu *v = vpit_vcpu(pit);
+ ASSERT(spin_is_locked(&pit->lock));
+
switch ( s->mode )
{
default:
@@ -165,6 +171,7 @@ static void pit_set_gate(PITState *pit, int channel, int val)
int pit_get_gate(PITState *pit, int channel)
{
+ ASSERT(spin_is_locked(&pit->lock));
return pit->hw.channels[channel].gate;
}
@@ -181,10 +188,15 @@ static void pit_load_count(PITState *pit, int channel, int val)
struct periodic_time *pt = &pit->pt[channel];
struct vcpu *v = vpit_vcpu(pit);
+ ASSERT(spin_is_locked(&pit->lock));
+
if ( val == 0 )
val = 0x10000;
- pit->count_load_time[channel] = hvm_get_guest_time(pt->vcpu);
+ if ( v == NULL )
+ rdtscll(pit->count_load_time[channel]);
+ else
+ pit->count_load_time[channel] = hvm_get_guest_time(v);
s->count = val;
period = DIV_ROUND((val * 1000000000ULL), PIT_FREQ);
@@ -209,23 +221,29 @@ static void pit_load_count(PITState *pit, int channel, int val)
}
}
-static void pit_latch_count(PITState *s, int channel)
+static void pit_latch_count(PITState *pit, int channel)
{
- struct hvm_hw_pit_channel *c = &s->hw.channels[channel];
+ struct hvm_hw_pit_channel *c = &pit->hw.channels[channel];
+
+ ASSERT(spin_is_locked(&pit->lock));
+
if ( !c->count_latched )
{
- c->latched_count = pit_get_count(s, channel);
+ c->latched_count = pit_get_count(pit, channel);
c->count_latched = c->rw_mode;
}
}
-static void pit_latch_status(PITState *s, int channel)
+static void pit_latch_status(PITState *pit, int channel)
{
- struct hvm_hw_pit_channel *c = &s->hw.channels[channel];
+ struct hvm_hw_pit_channel *c = &pit->hw.channels[channel];
+
+ ASSERT(spin_is_locked(&pit->lock));
+
if ( !c->status_latched )
{
/* TODO: Return NULL COUNT (bit 6). */
- c->status = ((pit_get_out(s, channel) << 7) |
+ c->status = ((pit_get_out(pit, channel) << 7) |
(c->rw_mode << 4) |
(c->mode << 1) |
c->bcd);
@@ -241,6 +259,8 @@ static void pit_ioport_write(struct PITState *pit, uint32_t addr, uint32_t val)
val &= 0xff;
addr &= 3;
+ spin_lock(&pit->lock);
+
if ( addr == 3 )
{
channel = val >> 6;
@@ -304,6 +324,8 @@ static void pit_ioport_write(struct PITState *pit, uint32_t addr, uint32_t val)
break;
}
}
+
+ spin_unlock(&pit->lock);
}
static uint32_t pit_ioport_read(struct PITState *pit, uint32_t addr)
@@ -314,6 +336,8 @@ static uint32_t pit_ioport_read(struct PITState *pit, uint32_t addr)
addr &= 3;
s = &pit->hw.channels[addr];
+ spin_lock(&pit->lock);
+
if ( s->status_latched )
{
s->status_latched = 0;
@@ -364,12 +388,16 @@ static uint32_t pit_ioport_read(struct PITState *pit, uint32_t addr)
}
}
+ spin_unlock(&pit->lock);
+
return ret;
}
void pit_stop_channel0_irq(PITState *pit)
{
+ spin_lock(&pit->lock);
destroy_periodic_time(&pit->pt[0]);
+ spin_unlock(&pit->lock);
}
#ifdef HVM_DEBUG_SUSPEND
@@ -422,11 +450,18 @@ static void pit_info(PITState *pit)
static int pit_save(struct domain *d, hvm_domain_context_t *h)
{
PITState *pit = domain_vpit(d);
+ int rc;
+
+ spin_lock(&pit->lock);
pit_info(pit);
/* Save the PIT hardware state */
- return hvm_save_entry(PIT, 0, h, &pit->hw);
+ rc = hvm_save_entry(PIT, 0, h, &pit->hw);
+
+ spin_unlock(&pit->lock);
+
+ return rc;
}
static int pit_load(struct domain *d, hvm_domain_context_t *h)
@@ -434,9 +469,14 @@ static int pit_load(struct domain *d, hvm_domain_context_t *h)
PITState *pit = domain_vpit(d);
int i;
+ spin_lock(&pit->lock);
+
/* Restore the PIT hardware state */
if ( hvm_load_entry(PIT, h, &pit->hw) )
+ {
+ spin_unlock(&pit->lock);
return 1;
+ }
/* Recreate platform timers from hardware state. There will be some
* time jitter here, but the wall-clock will have jumped massively, so
@@ -448,6 +488,9 @@ static int pit_load(struct domain *d, hvm_domain_context_t *h)
}
pit_info(pit);
+
+ spin_unlock(&pit->lock);
+
return 0;
}
@@ -456,17 +499,15 @@ HVM_REGISTER_SAVE_RESTORE(PIT, pit_save, pit_load, 1, HVMSR_PER_DOM);
void pit_init(struct vcpu *v, unsigned long cpu_khz)
{
PITState *pit = vcpu_vpit(v);
- struct periodic_time *pt;
struct hvm_hw_pit_channel *s;
int i;
- pt = &pit->pt[0];
- pt[0].vcpu = v;
- pt[1].vcpu = v;
- pt[2].vcpu = v;
+ spin_lock_init(&pit->lock);
+
+ /* Some sub-functions assert that they are called with the lock held. */
+ spin_lock(&pit->lock);
register_portio_handler(v->domain, PIT_BASE, 4, handle_pit_io);
- /* register the speaker port */
register_portio_handler(v->domain, 0x61, 1, handle_speaker_io);
ticks_per_sec(v) = cpu_khz * (int64_t)1000;
@@ -477,6 +518,8 @@ void pit_init(struct vcpu *v, unsigned long cpu_khz)
s->gate = (i != 2);
pit_load_count(pit, i, 0);
}
+
+ spin_unlock(&pit->lock);
}
void pit_deinit(struct domain *d)
@@ -492,10 +535,10 @@ static int handle_pit_io(ioreq_t *p)
if ( (p->size != 1) || p->data_is_ptr || (p->type != IOREQ_TYPE_PIO) )
{
- gdprintk(XENLOG_WARNING, "HVM_PIT bad access\n");
+ gdprintk(XENLOG_WARNING, "PIT bad access\n");
return 1;
}
-
+
if ( p->dir == IOREQ_WRITE )
{
pit_ioport_write(vpit, p->addr, p->data);
@@ -505,7 +548,7 @@ static int handle_pit_io(ioreq_t *p)
if ( (p->addr & 3) != 3 )
p->data = pit_ioport_read(vpit, p->addr);
else
- gdprintk(XENLOG_WARNING, "HVM_PIT: read A1:A0=3!\n");
+ gdprintk(XENLOG_WARNING, "PIT: read A1:A0=3!\n");
}
return 1;
@@ -533,15 +576,19 @@ static int handle_speaker_io(ioreq_t *p)
if ( (p->size != 1) || p->data_is_ptr || (p->type != IOREQ_TYPE_PIO) )
{
- gdprintk(XENLOG_WARNING, "HVM_SPEAKER bad access\n");
+ gdprintk(XENLOG_WARNING, "PIT_SPEAKER bad access\n");
return 1;
}
+ spin_lock(&vpit->lock);
+
if ( p->dir == IOREQ_WRITE )
speaker_ioport_write(vpit, p->addr, p->data);
else
p->data = speaker_ioport_read(vpit, p->addr);
+ spin_unlock(&vpit->lock);
+
return 1;
}
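
One subtlety in the pit_load_count() hunk above: the load-time stamp used to come from pt->vcpu unconditionally, but a channel can be (re)loaded when vpit_vcpu() yields no vCPU to supply guest time, so the patch falls back to the host TSC via rdtscll(). A compilable sketch of that fallback follows; rdtsc_model() and guest_time_model() are simplified stand-ins for Xen's rdtscll() and hvm_get_guest_time().

#include <stdint.h>

struct vcpu;    /* opaque stand-in for Xen's struct vcpu */

static uint64_t rdtsc_model(void)
{
    uint32_t lo, hi;
    __asm__ __volatile__ ( "rdtsc" : "=a" (lo), "=d" (hi) );
    return ((uint64_t)hi << 32) | lo;
}

static uint64_t guest_time_model(struct vcpu *v)
{
    (void)v;
    return 0;   /* placeholder for hvm_get_guest_time(v) */
}

/* Stamp the channel load time, degrading to the host TSC when the PIT
 * has no vCPU bound yet. */
static uint64_t stamp_load_time(struct vcpu *v)
{
    return (v == NULL) ? rdtsc_model() : guest_time_model(v);
}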
diff --git a/xen/arch/x86/hvm/pmtimer.c b/xen/arch/x86/hvm/pmtimer.c
index 7d848b4245..4a9f36ae46 100644
--- a/xen/arch/x86/hvm/pmtimer.c
+++ b/xen/arch/x86/hvm/pmtimer.c
@@ -53,6 +53,8 @@
/* Dispatch SCIs based on the PM1a_STS and PM1a_EN registers */
static void pmt_update_sci(PMTState *s)
{
+ ASSERT(spin_is_locked(&s->lock));
+
if ( s->pm.pm1a_en & s->pm.pm1a_sts & SCI_MASK )
hvm_isa_irq_assert(s->vcpu->domain, SCI_IRQ);
else
@@ -66,6 +68,8 @@ static void pmt_update_time(PMTState *s)
uint64_t curr_gtime;
uint32_t msb = s->pm.tmr_val & TMR_VAL_MSB;
+ ASSERT(spin_is_locked(&s->lock));
+
/* Update the timer */
curr_gtime = hvm_get_guest_time(s->vcpu);
s->pm.tmr_val += ((curr_gtime - s->last_gtime) * s->scale) >> 32;
@@ -89,6 +93,8 @@ static void pmt_timer_callback(void *opaque)
uint32_t pmt_cycles_until_flip;
uint64_t time_until_flip;
+ spin_lock(&s->lock);
+
/* Recalculate the timer and make sure we get an SCI if we need one */
pmt_update_time(s);
@@ -103,8 +109,9 @@ static void pmt_timer_callback(void *opaque)
/* Wake up again near the next bit-flip */
set_timer(&s->timer, NOW() + time_until_flip + MILLISECS(1));
-}
+ spin_unlock(&s->lock);
+}
/* Handle port I/O to the PM1a_STS and PM1a_EN registers */
static int handle_evt_io(ioreq_t *p)
@@ -114,7 +121,9 @@ static int handle_evt_io(ioreq_t *p)
uint32_t addr, data, byte;
int i;
- if ( p->dir == 0 ) /* Write */
+ spin_lock(&s->lock);
+
+ if ( p->dir == IOREQ_WRITE )
{
/* Handle this I/O one byte at a time */
for ( i = p->size, addr = p->addr, data = p->data;
@@ -122,7 +131,7 @@ static int handle_evt_io(ioreq_t *p)
i--, addr++, data >>= 8 )
{
byte = data & 0xff;
- switch(addr)
+ switch ( addr )
{
/* PM1a_STS register bits are write-to-clear */
case PM1a_STS_ADDR:
@@ -149,7 +158,7 @@ static int handle_evt_io(ioreq_t *p)
/* Fix up the SCI state to match the new register state */
pmt_update_sci(s);
}
- else /* Read */
+ else /* p->dir == IOREQ_READ */
{
data = s->pm.pm1a_sts | (((uint32_t) s->pm.pm1a_en) << 16);
data >>= 8 * (p->addr - PM1a_STS_ADDR);
@@ -157,6 +166,9 @@ static int handle_evt_io(ioreq_t *p)
else if ( p->size == 2 ) data &= 0xffff;
p->data = data;
}
+
+ spin_unlock(&s->lock);
+
return 1;
}
@@ -167,29 +179,31 @@ static int handle_pmt_io(ioreq_t *p)
struct vcpu *v = current;
PMTState *s = &v->domain->arch.hvm_domain.pl_time.vpmt;
- if (p->size != 4 ||
- p->data_is_ptr ||
- p->type != IOREQ_TYPE_PIO){
- printk("HVM_PMT: wrong PM timer IO\n");
+ if ( (p->size != 4) || p->data_is_ptr || (p->type != IOREQ_TYPE_PIO) )
+ {
+ gdprintk(XENLOG_WARNING, "HVM_PMT bad access\n");
return 1;
}
- if (p->dir == 0) { /* write */
- /* PM_TMR_BLK is read-only */
- return 1;
- } else if (p->dir == 1) { /* read */
+ if ( p->dir == IOREQ_READ )
+ {
+ spin_lock(&s->lock);
pmt_update_time(s);
p->data = s->pm.tmr_val;
+ spin_unlock(&s->lock);
return 1;
}
+
return 0;
}
static int pmtimer_save(struct domain *d, hvm_domain_context_t *h)
{
PMTState *s = &d->arch.hvm_domain.pl_time.vpmt;
- uint32_t msb = s->pm.tmr_val & TMR_VAL_MSB;
- uint32_t x;
+ uint32_t x, msb = s->pm.tmr_val & TMR_VAL_MSB;
+ int rc;
+
+ spin_lock(&s->lock);
/* Update the counter to the guest's current time. We always save
* with the domain paused, so the saved time should be after the
@@ -202,22 +216,33 @@ static int pmtimer_save(struct domain *d, hvm_domain_context_t *h)
/* No point in setting the SCI here because we'll already have saved the
* IRQ and *PIC state; we'll fix it up when we restore the domain */
- return hvm_save_entry(PMTIMER, 0, h, &s->pm);
+ rc = hvm_save_entry(PMTIMER, 0, h, &s->pm);
+
+ spin_unlock(&s->lock);
+
+ return rc;
}
static int pmtimer_load(struct domain *d, hvm_domain_context_t *h)
{
PMTState *s = &d->arch.hvm_domain.pl_time.vpmt;
+ spin_lock(&s->lock);
+
/* Reload the registers */
if ( hvm_load_entry(PMTIMER, h, &s->pm) )
+ {
+ spin_unlock(&s->lock);
return -EINVAL;
+ }
/* Calculate future counter values from now. */
s->last_gtime = hvm_get_guest_time(s->vcpu);
/* Set the SCI state from the registers */
pmt_update_sci(s);
+
+ spin_unlock(&s->lock);
return 0;
}
@@ -225,14 +250,11 @@ static int pmtimer_load(struct domain *d, hvm_domain_context_t *h)
HVM_REGISTER_SAVE_RESTORE(PMTIMER, pmtimer_save, pmtimer_load,
1, HVMSR_PER_DOM);
-
void pmtimer_init(struct vcpu *v)
{
PMTState *s = &v->domain->arch.hvm_domain.pl_time.vpmt;
- s->pm.tmr_val = 0;
- s->pm.pm1a_sts = 0;
- s->pm.pm1a_en = 0;
+ spin_lock_init(&s->lock);
s->scale = ((uint64_t)FREQUENCE_PMTIMER << 32) / ticks_per_sec(v);
s->vcpu = v;
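
Besides the locking, note the behavioural change in handle_pmt_io() above: writes to the PM timer block used to be silently accepted (return 1), whereas now only 4-byte reads are serviced and writes fall through to return 0, i.e. are reported as unhandled. The read path also brings tmr_val up to date under the lock before returning it. A small self-contained model of that read path; the types and the update step are simplified stand-ins for Xen's PMTState and pmt_update_time().

#include <pthread.h>
#include <stdint.h>

struct pmt_model {
    pthread_mutex_t lock;
    uint32_t tmr_val;
};

static void pmt_update_time_model(struct pmt_model *s)
{
    s->tmr_val += 1;   /* placeholder for scaling elapsed guest time */
}

/* Returns 1 if the access was handled, 0 otherwise (writes fall through,
 * since the PM timer register block is read-only). */
static int handle_pmt_read(struct pmt_model *s, int is_write, uint32_t *out)
{
    if ( is_write )
        return 0;
    pthread_mutex_lock(&s->lock);
    pmt_update_time_model(s);          /* fold in elapsed time first */
    *out = s->tmr_val;
    pthread_mutex_unlock(&s->lock);
    return 1;
}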
diff --git a/xen/arch/x86/hvm/rtc.c b/xen/arch/x86/hvm/rtc.c
index 75bd85e936..469eb34cf3 100644
--- a/xen/arch/x86/hvm/rtc.c
+++ b/xen/arch/x86/hvm/rtc.c
@@ -34,10 +34,12 @@
arch.hvm_domain.pl_time.vrtc))
#define vrtc_vcpu(rtc) (vrtc_domain(rtc)->vcpu[0])
-void rtc_periodic_cb(struct vcpu *v, void *opaque)
+static void rtc_periodic_cb(struct vcpu *v, void *opaque)
{
RTCState *s = opaque;
+ spin_lock(&s->lock);
s->hw.cmos_data[RTC_REG_C] |= 0xc0;
+ spin_unlock(&s->lock);
}
int is_rtc_periodic_irq(void *opaque)
@@ -55,6 +57,8 @@ static void rtc_timer_update(RTCState *s)
int period_code, period;
struct vcpu *v = vrtc_vcpu(s);
+ ASSERT(spin_is_locked(&s->lock));
+
period_code = s->hw.cmos_data[RTC_REG_A] & RTC_RATE_SELECT;
if ( (period_code != 0) && (s->hw.cmos_data[RTC_REG_B] & RTC_PIE) )
{
@@ -78,14 +82,21 @@ static int rtc_ioport_write(void *opaque, uint32_t addr, uint32_t data)
{
RTCState *s = opaque;
+ spin_lock(&s->lock);
+
if ( (addr & 1) == 0 )
{
- s->hw.cmos_index = data & 0x7f;
- return (s->hw.cmos_index < RTC_CMOS_SIZE);
+ data &= 0x7f;
+ s->hw.cmos_index = data;
+ spin_unlock(&s->lock);
+ return (data < RTC_CMOS_SIZE);
}
if ( s->hw.cmos_index >= RTC_CMOS_SIZE )
+ {
+ spin_unlock(&s->lock);
return 0;
+ }
switch ( s->hw.cmos_index )
{
@@ -134,6 +145,8 @@ static int rtc_ioport_write(void *opaque, uint32_t addr, uint32_t data)
break;
}
+ spin_unlock(&s->lock);
+
return 1;
}
@@ -158,6 +171,8 @@ static void rtc_set_time(RTCState *s)
struct tm *tm = &s->current_tm;
unsigned long before, after; /* XXX s_time_t */
+ ASSERT(spin_is_locked(&s->lock));
+
before = mktime(tm->tm_year, tm->tm_mon, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
@@ -182,6 +197,8 @@ static void rtc_copy_date(RTCState *s)
const struct tm *tm = &s->current_tm;
struct domain *d = vrtc_domain(s);
+ ASSERT(spin_is_locked(&s->lock));
+
if ( s->time_offset_seconds != d->time_offset_seconds )
{
s->current_tm = gmtime(get_localtime(d));
@@ -231,6 +248,8 @@ static void rtc_next_second(RTCState *s)
int days_in_month;
struct domain *d = vrtc_domain(s);
+ ASSERT(spin_is_locked(&s->lock));
+
if ( s->time_offset_seconds != d->time_offset_seconds )
{
s->current_tm = gmtime(get_localtime(d));
@@ -279,6 +298,8 @@ static void rtc_update_second(void *opaque)
{
RTCState *s = opaque;
+ spin_lock(&s->lock);
+
/* if the oscillator is not in normal operation, we do not update */
if ( (s->hw.cmos_data[RTC_REG_A] & RTC_DIV_CTL) != RTC_REF_CLCK_32KHZ )
{
@@ -295,6 +316,8 @@ static void rtc_update_second(void *opaque)
/* Delay time before update cycle */
set_timer(&s->second_timer2, s->next_second_time + 244000);
}
+
+ spin_unlock(&s->lock);
}
static void rtc_update_second2(void *opaque)
@@ -302,6 +325,8 @@ static void rtc_update_second2(void *opaque)
RTCState *s = opaque;
struct domain *d = vrtc_domain(s);
+ spin_lock(&s->lock);
+
if ( !(s->hw.cmos_data[RTC_REG_B] & RTC_SET) )
rtc_copy_date(s);
@@ -337,16 +362,19 @@ static void rtc_update_second2(void *opaque)
s->next_second_time += 1000000000ULL;
set_timer(&s->second_timer, s->next_second_time);
+
+ spin_unlock(&s->lock);
}
-static uint32_t rtc_ioport_read(void *opaque, uint32_t addr)
+static uint32_t rtc_ioport_read(RTCState *s, uint32_t addr)
{
- RTCState *s = opaque;
int ret;
if ( (addr & 1) == 0 )
return 0xff;
+ spin_lock(&s->lock);
+
switch ( s->hw.cmos_index )
{
case RTC_SECONDS:
@@ -371,6 +399,8 @@ static uint32_t rtc_ioport_read(void *opaque, uint32_t addr)
break;
}
+ spin_unlock(&s->lock);
+
return ret;
}
@@ -413,7 +443,11 @@ void rtc_migrate_timers(struct vcpu *v)
static int rtc_save(struct domain *d, hvm_domain_context_t *h)
{
RTCState *s = domain_vrtc(d);
- return hvm_save_entry(RTC, 0, h, &s->hw);
+ int rc;
+ spin_lock(&s->lock);
+ rc = hvm_save_entry(RTC, 0, h, &s->hw);
+ spin_unlock(&s->lock);
+ return rc;
}
/* Reload the hardware state from a saved domain */
@@ -421,9 +455,14 @@ static int rtc_load(struct domain *d, hvm_domain_context_t *h)
{
RTCState *s = domain_vrtc(d);
+ spin_lock(&s->lock);
+
/* Restore the registers */
if ( hvm_load_entry(RTC, h, &s->hw) != 0 )
+ {
+ spin_unlock(&s->lock);
return -EINVAL;
+ }
/* Reset the wall-clock time. In normal running, this runs with host
* time, so let's keep doing that. */
@@ -436,6 +475,8 @@ static int rtc_load(struct domain *d, hvm_domain_context_t *h)
/* Reset the periodic interrupt timer based on the registers */
rtc_timer_update(s);
+ spin_unlock(&s->lock);
+
return 0;
}
@@ -446,13 +487,18 @@ void rtc_init(struct vcpu *v, int base)
{
RTCState *s = vcpu_vrtc(v);
+ spin_lock_init(&s->lock);
+
s->hw.cmos_data[RTC_REG_A] = RTC_REF_CLCK_32KHZ | 6; /* ~1kHz */
s->hw.cmos_data[RTC_REG_B] = RTC_24H;
s->hw.cmos_data[RTC_REG_C] = 0;
s->hw.cmos_data[RTC_REG_D] = RTC_VRT;
s->current_tm = gmtime(get_localtime(v->domain));
+
+ spin_lock(&s->lock);
rtc_copy_date(s);
+ spin_unlock(&s->lock);
init_timer(&s->second_timer, rtc_update_second, s, v->processor);
init_timer(&s->second_timer2, rtc_update_second2, s, v->processor);
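
The rtc_ioport_write() hunk above also restructures the index-port path: the 7-bit register index is masked and latched under the lock, and the return value (whether the index names a real CMOS register) is computed from the masked value before the lock is dropped. Below is a self-contained model of the locked index/data protocol; names and sizes are simplified stand-ins for Xen's RTCState, and the 0x7f mask reflects that bit 7 of the index port is the NMI-disable bit on real hardware.

#include <pthread.h>
#include <stdint.h>

#define CMOS_SIZE 14   /* stand-in for RTC_CMOS_SIZE */

struct rtc_model {
    pthread_mutex_t lock;
    uint8_t cmos_index;
    uint8_t cmos_data[128];
};

/* Returns nonzero if the write was accepted. */
static int rtc_write_model(struct rtc_model *s, uint32_t addr, uint8_t data)
{
    int ok = 1;

    pthread_mutex_lock(&s->lock);
    if ( (addr & 1) == 0 )                  /* index port (0x70) */
    {
        s->cmos_index = data & 0x7f;        /* mask off NMI-disable bit */
        ok = (s->cmos_index < CMOS_SIZE);
    }
    else if ( s->cmos_index < CMOS_SIZE )   /* data port (0x71) */
        s->cmos_data[s->cmos_index] = data;
    else
        ok = 0;
    pthread_mutex_unlock(&s->lock);
    return ok;
}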
diff --git a/xen/arch/x86/hvm/vpt.c b/xen/arch/x86/hvm/vpt.c
index 3ce37aad80..cd39d9457d 100644
--- a/xen/arch/x86/hvm/vpt.c
+++ b/xen/arch/x86/hvm/vpt.c
@@ -17,11 +17,31 @@
* Place - Suite 330, Boston, MA 02111-1307 USA.
*
*/
+
#include <xen/time.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vpt.h>
#include <asm/event.h>
+static void pt_lock(struct periodic_time *pt)
+{
+ struct vcpu *v;
+
+ for ( ; ; )
+ {
+ v = pt->vcpu;
+ spin_lock(&v->arch.hvm_vcpu.tm_lock);
+ if ( likely(pt->vcpu == v) )
+ break;
+ spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+ }
+}
+
+static void pt_unlock(struct periodic_time *pt)
+{
+ spin_unlock(&pt->vcpu->arch.hvm_vcpu.tm_lock);
+}
+
static void missed_ticks(struct periodic_time *pt)
{
s_time_t missed_ticks;
@@ -52,10 +72,14 @@ void pt_freeze_time(struct vcpu *v)
if ( test_bit(_VPF_blocked, &v->pause_flags) )
return;
+ spin_lock(&v->arch.hvm_vcpu.tm_lock);
+
v->arch.hvm_vcpu.guest_time = hvm_get_guest_time(v);
list_for_each_entry ( pt, head, list )
stop_timer(&pt->timer);
+
+ spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}
void pt_thaw_time(struct vcpu *v)
@@ -63,6 +87,8 @@ void pt_thaw_time(struct vcpu *v)
struct list_head *head = &v->arch.hvm_vcpu.tm_list;
struct periodic_time *pt;
+ spin_lock(&v->arch.hvm_vcpu.tm_lock);
+
if ( v->arch.hvm_vcpu.guest_time )
{
hvm_set_guest_time(v, v->arch.hvm_vcpu.guest_time);
@@ -74,12 +100,16 @@ void pt_thaw_time(struct vcpu *v)
set_timer(&pt->timer, pt->scheduled);
}
}
+
+ spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}
static void pt_timer_fn(void *data)
{
struct periodic_time *pt = data;
+ pt_lock(pt);
+
pt->pending_intr_nr++;
pt->scheduled += pt->period;
@@ -89,6 +119,8 @@ static void pt_timer_fn(void *data)
set_timer(&pt->timer, pt->scheduled);
vcpu_kick(pt->vcpu);
+
+ pt_unlock(pt);
}
void pt_update_irq(struct vcpu *v)
@@ -98,6 +130,8 @@ void pt_update_irq(struct vcpu *v)
uint64_t max_lag = -1ULL;
int irq = -1;
+ spin_lock(&v->arch.hvm_vcpu.tm_lock);
+
list_for_each_entry ( pt, head, list )
{
if ( !is_isa_irq_masked(v, pt->irq) && pt->pending_intr_nr &&
@@ -108,6 +142,8 @@ void pt_update_irq(struct vcpu *v)
}
}
+ spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+
if ( is_lvtt(v, irq) )
{
vlapic_set_irq(vcpu_vlapic(v), irq, 0);
@@ -119,7 +155,7 @@ void pt_update_irq(struct vcpu *v)
}
}
-struct periodic_time *is_pt_irq(struct vcpu *v, int vector, int type)
+static struct periodic_time *is_pt_irq(struct vcpu *v, int vector, int type)
{
struct list_head *head = &v->arch.hvm_vcpu.tm_list;
struct periodic_time *pt;
@@ -152,19 +188,34 @@ struct periodic_time *is_pt_irq(struct vcpu *v, int vector, int type)
void pt_intr_post(struct vcpu *v, int vector, int type)
{
- struct periodic_time *pt = is_pt_irq(v, vector, type);
+ struct periodic_time *pt;
+ time_cb *cb;
+ void *cb_priv;
+
+ spin_lock(&v->arch.hvm_vcpu.tm_lock);
+ pt = is_pt_irq(v, vector, type);
if ( pt == NULL )
+ {
+ spin_unlock(&v->arch.hvm_vcpu.tm_lock);
return;
+ }
+
+ ASSERT(pt->vcpu == v);
pt->pending_intr_nr--;
pt->last_plt_gtime += pt->period_cycles;
- if ( hvm_get_guest_time(pt->vcpu) < pt->last_plt_gtime )
- hvm_set_guest_time(pt->vcpu, pt->last_plt_gtime);
+ if ( hvm_get_guest_time(v) < pt->last_plt_gtime )
+ hvm_set_guest_time(v, pt->last_plt_gtime);
+
+ cb = pt->cb;
+ cb_priv = pt->priv;
- if ( pt->cb != NULL )
- pt->cb(pt->vcpu, pt->priv);
+ spin_unlock(&v->arch.hvm_vcpu.tm_lock);
+
+ if ( cb != NULL )
+ cb(v, cb_priv);
}
void pt_reset(struct vcpu *v)
@@ -172,6 +223,8 @@ void pt_reset(struct vcpu *v)
struct list_head *head = &v->arch.hvm_vcpu.tm_list;
struct periodic_time *pt;
+ spin_lock(&v->arch.hvm_vcpu.tm_lock);
+
list_for_each_entry ( pt, head, list )
{
if ( pt->enabled )
@@ -182,6 +235,8 @@ void pt_reset(struct vcpu *v)
set_timer(&pt->timer, pt->scheduled);
}
}
+
+ spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}
void pt_migrate(struct vcpu *v)
@@ -189,11 +244,15 @@ void pt_migrate(struct vcpu *v)
struct list_head *head = &v->arch.hvm_vcpu.tm_list;
struct periodic_time *pt;
+ spin_lock(&v->arch.hvm_vcpu.tm_lock);
+
list_for_each_entry ( pt, head, list )
{
if ( pt->enabled )
migrate_timer(&pt->timer, v->processor);
}
+
+ spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}
void create_periodic_time(
@@ -202,6 +261,8 @@ void create_periodic_time(
{
destroy_periodic_time(pt);
+ spin_lock(&v->arch.hvm_vcpu.tm_lock);
+
init_timer(&pt->timer, pt_timer_fn, pt, v->processor);
pt->enabled = 1;
if ( period < 900000 ) /* < 0.9 ms */
@@ -223,6 +284,8 @@ void create_periodic_time(
list_add(&pt->list, &v->arch.hvm_vcpu.tm_list);
set_timer(&pt->timer, pt->scheduled);
+
+ spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}
void destroy_periodic_time(struct periodic_time *pt)
@@ -230,8 +293,10 @@ void destroy_periodic_time(struct periodic_time *pt)
if ( !pt->enabled )
return;
+ pt_lock(pt);
pt->enabled = 0;
pt->pending_intr_nr = 0;
list_del(&pt->list);
kill_timer(&pt->timer);
+ pt_unlock(pt);
}
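
The trickiest piece above is pt_lock(): a periodic_time is protected by its owning vCPU's tm_lock, but pt->vcpu itself can change while a caller is blocked on that lock, so the owner is snapshotted, the lock taken, and the snapshot re-validated, retrying on a mismatch. Relatedly, pt_intr_post() now copies cb/priv and drops tm_lock before invoking the callback: callbacks such as rtc_periodic_cb() take their device's own lock, and running them under tm_lock would invert the device-lock-then-tm_lock order established by, e.g., rtc_timer_update() calling create_periodic_time() with the RTC lock held. A self-contained model of the validate-and-retry idiom follows; pthread mutexes stand in for Xen spinlocks, and a real threaded implementation would additionally need the owner load to be atomic.

#include <pthread.h>

struct vcpu_model { pthread_mutex_t tm_lock; };
struct pt_model   { struct vcpu_model *vcpu; };

static void pt_lock_model(struct pt_model *pt)
{
    struct vcpu_model *v;

    for ( ; ; )
    {
        v = pt->vcpu;                         /* snapshot current owner */
        pthread_mutex_lock(&v->tm_lock);
        if ( pt->vcpu == v )                  /* still the owner: done */
            break;
        pthread_mutex_unlock(&v->tm_lock);    /* retargeted: retry */
    }
}

static void pt_unlock_model(struct pt_model *pt)
{
    pthread_mutex_unlock(&pt->vcpu->tm_lock);
}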
diff --git a/xen/common/timer.c b/xen/common/timer.c
index 618906db38..0699207132 100644
--- a/xen/common/timer.c
+++ b/xen/common/timer.c
@@ -183,7 +183,7 @@ static inline void timer_lock(struct timer *timer)
static inline void timer_unlock(struct timer *timer)
{
- spin_unlock(&per_cpu(timers, timer->cpu).lock);
+ spin_unlock(&per_cpu(timers, timer->cpu).lock);
}
#define timer_unlock_irq(t) \
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 75aae07b1c..b0a6956120 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -35,6 +35,9 @@ struct hvm_vcpu {
struct vlapic vlapic;
s64 cache_tsc_offset;
u64 guest_time;
+
+ /* Lock and list for virtual platform timers. */
+ spinlock_t tm_lock;
struct list_head tm_list;
/* For AP startup */
diff --git a/xen/include/asm-x86/hvm/vpt.h b/xen/include/asm-x86/hvm/vpt.h
index e89e1d3c4b..acd245a93d 100644
--- a/xen/include/asm-x86/hvm/vpt.h
+++ b/xen/include/asm-x86/hvm/vpt.h
@@ -31,7 +31,6 @@
#include <asm/hvm/vpic.h>
#include <public/hvm/save.h>
-
struct HPETState;
struct HPET_timer_fn_info {
struct HPETState *hs;
@@ -45,6 +44,7 @@ typedef struct HPETState {
uint64_t mc_offset;
struct timer timers[HPET_TIMER_NUM];
struct HPET_timer_fn_info timer_fn_info[HPET_TIMER_NUM];
+ spinlock_t lock;
} HPETState;
@@ -80,6 +80,7 @@ typedef struct PITState {
int64_t count_load_time[3];
/* irq handling */
struct periodic_time pt[3];
+ spinlock_t lock;
} PITState;
typedef struct RTCState {
@@ -93,6 +94,7 @@ typedef struct RTCState {
struct timer second_timer2;
struct periodic_time pt;
int32_t time_offset_seconds;
+ spinlock_t lock;
} RTCState;
#define FREQUENCE_PMTIMER 3579545 /* Timer should run at 3.579545 MHz */
@@ -102,6 +104,7 @@ typedef struct PMTState {
uint64_t last_gtime; /* Last (guest) time we updated the timer */
uint64_t scale; /* Multiplier to get from tsc to timer ticks */
struct timer timer; /* To make sure we send SCIs */
+ spinlock_t lock;
} PMTState;
struct pl_time { /* platform time */
@@ -116,7 +119,6 @@ struct pl_time { /* platform time */
void pt_freeze_time(struct vcpu *v);
void pt_thaw_time(struct vcpu *v);
void pt_update_irq(struct vcpu *v);
-struct periodic_time *is_pt_irq(struct vcpu *v, int vector, int type);
void pt_intr_post(struct vcpu *v, int vector, int type);
void pt_reset(struct vcpu *v);
void pt_migrate(struct vcpu *v);