commit 0c8a05c14251778943ed7243bb2c20eb9dcf9b65 (patch)
tree   c48581b9d1b6667138c5c17a015b17c51c1e8a9c /xen/arch/x86/hvm/intercept.c
parent 4a507d9105776305edbafca73bdc5e3ad8edda0f (diff)
author    Keir Fraser <keir@xensource.com>    2007-04-13 14:59:06 +0100
committer Keir Fraser <keir@xensource.com>    2007-04-13 14:59:06 +0100
hvm: Clean up treatment of the is_dying per-domain boolean.

All critical checks are done under an appropriate lock, allowing the
lock-free protocols surrounding this boolean to be removed. Also
simplify and fix the code that sets up and zaps the ioreq and
buf_ioreq shared pages.

Signed-off-by: Keir Fraser <keir@xensource.com>
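For this file, the visible effect of the ioreq-page rework is that the
buffered-I/O ring's lock and mapping now live together in one descriptor,
reached as v->domain->arch.hvm_domain.buf_ioreq. A minimal sketch of the
shape that descriptor must have, inferred only from the iorp->lock and
iorp->va uses in the hunk below (any further members would be assumptions,
not taken from this patch):

    struct hvm_ioreq_page {
        spinlock_t lock;  /* serialises writers to the shared ring */
        void      *va;    /* hypervisor virtual address of the shared page */
    };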
Diffstat (limited to 'xen/arch/x86/hvm/intercept.c')
-rw-r--r--  xen/arch/x86/hvm/intercept.c  36
1 file changed, 14 insertions(+), 22 deletions(-)
diff --git a/xen/arch/x86/hvm/intercept.c b/xen/arch/x86/hvm/intercept.c
index fb8497a996..91749b676b 100644
--- a/xen/arch/x86/hvm/intercept.c
+++ b/xen/arch/x86/hvm/intercept.c
@@ -158,34 +158,26 @@ static inline void hvm_mmio_access(struct vcpu *v,
 int hvm_buffered_io_send(ioreq_t *p)
 {
     struct vcpu *v = current;
-    spinlock_t *buffered_io_lock;
-    buffered_iopage_t *buffered_iopage =
-        (buffered_iopage_t *)(v->domain->arch.hvm_domain.buffered_io_va);
-    unsigned long tmp_write_pointer = 0;
-
-    buffered_io_lock = &v->domain->arch.hvm_domain.buffered_io_lock;
-    spin_lock(buffered_io_lock);
-
-    if ( buffered_iopage->write_pointer - buffered_iopage->read_pointer ==
-         (unsigned int)IOREQ_BUFFER_SLOT_NUM ) {
-        /* the queue is full.
-         * send the iopacket through the normal path.
-         * NOTE: The arithimetic operation could handle the situation for
-         * write_pointer overflow.
-         */
-        spin_unlock(buffered_io_lock);
+    struct hvm_ioreq_page *iorp = &v->domain->arch.hvm_domain.buf_ioreq;
+    buffered_iopage_t *pg = iorp->va;
+
+    spin_lock(&iorp->lock);
+
+    if ( (pg->write_pointer - pg->read_pointer) == IOREQ_BUFFER_SLOT_NUM )
+    {
+        /* The queue is full: send the iopacket through the normal path. */
+        spin_unlock(&iorp->lock);
         return 0;
     }
 
-    tmp_write_pointer = buffered_iopage->write_pointer % IOREQ_BUFFER_SLOT_NUM;
-
-    memcpy(&buffered_iopage->ioreq[tmp_write_pointer], p, sizeof(ioreq_t));
+    memcpy(&pg->ioreq[pg->write_pointer % IOREQ_BUFFER_SLOT_NUM],
+           p, sizeof(ioreq_t));
 
-    /*make the ioreq_t visible before write_pointer*/
+    /* Make the ioreq_t visible /before/ write_pointer. */
     wmb();
-    buffered_iopage->write_pointer++;
+    pg->write_pointer++;
 
-    spin_unlock(buffered_io_lock);
+    spin_unlock(&iorp->lock);
     return 1;
 }
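The fullness test kept by this patch relies on the property the old comment
called out: because the ring pointers are unsigned and only ever incremented,
the difference write_pointer - read_pointer yields the queue occupancy even
after write_pointer wraps past its maximum value. A self-contained sketch of
that property (not Xen code; the 8-entry depth and uint32_t pointer width are
illustrative assumptions):

    #include <assert.h>
    #include <stdint.h>

    /* Hypothetical queue depth, standing in for IOREQ_BUFFER_SLOT_NUM. */
    #define SLOT_NUM 8u

    int main(void)
    {
        /* write_pointer has wrapped past UINT32_MAX back to a small value,
         * while read_pointer trails it by exactly SLOT_NUM entries. */
        uint32_t write_pointer = 3;
        uint32_t read_pointer  = UINT32_MAX - 4;  /* 2^32 - 5 */

        /* Unsigned subtraction is modulo 2^32, so the occupancy is still
         * computed correctly: (3 - (2^32 - 5)) mod 2^32 == 8. */
        assert(write_pointer - read_pointer == SLOT_NUM);  /* queue is full */
        return 0;
    }

The wmb() retained in the hunk serves the consumer side of the same protocol:
the copied ioreq_t payload must be globally visible before the incremented
write_pointer is, since the reader treats a new write_pointer value as
permission to consume the slot.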