aboutsummaryrefslogtreecommitdiffstats
path: root/tools/libvchan
diff options
context:
space:
mode:
authorKeir Fraser <keir@xen.org>2012-01-21 17:15:40 +0000
committerKeir Fraser <keir@xen.org>2012-01-21 17:15:40 +0000
commitacbae3dcf66c00c03cfbb07aad2a278b54ab0c08 (patch)
treeeaf526534bf364abcd62d0aa9053325968aa2255 /tools/libvchan
parent8d3f757328e1580a8363597543b8e36ecc8837de (diff)
downloadxen-acbae3dcf66c00c03cfbb07aad2a278b54ab0c08.tar.gz
xen-acbae3dcf66c00c03cfbb07aad2a278b54ab0c08.tar.bz2
xen-acbae3dcf66c00c03cfbb07aad2a278b54ab0c08.zip
tools/libvchan: Beef up the CPU barriers in libvchan.
Although they were sufficient for x86, they weren't safe more generally.

Signed-off-by: Keir Fraser <keir@xen.org>
Diffstat (limited to 'tools/libvchan')
-rw-r--r--tools/libvchan/io.c18
1 file changed, 9 insertions, 9 deletions
diff --git a/tools/libvchan/io.c b/tools/libvchan/io.c
index 7023add0ec..1b2e94a6e7 100644
--- a/tools/libvchan/io.c
+++ b/tools/libvchan/io.c
@@ -55,9 +55,6 @@
#define VCHAN_DEBUG 0
#endif
-#define barrier() asm volatile("" ::: "memory")
-
-
static inline uint32_t rd_prod(struct libxenvchan *ctrl)
{
return ctrl->read.shr->prod;
@@ -104,12 +101,15 @@ static inline void request_notify(struct libxenvchan *ctrl, uint8_t bit)
{
uint8_t *notify = ctrl->is_server ? &ctrl->ring->cli_notify : &ctrl->ring->srv_notify;
__sync_or_and_fetch(notify, bit);
+ xen_mb(); /* post the request /before/ caller re-reads any indexes */
}
static inline int send_notify(struct libxenvchan *ctrl, uint8_t bit)
{
- uint8_t *notify = ctrl->is_server ? &ctrl->ring->srv_notify : &ctrl->ring->cli_notify;
- uint8_t prev = __sync_fetch_and_and(notify, ~bit);
+ uint8_t *notify, prev;
+ xen_mb(); /* caller updates indexes /before/ we decode to notify */
+ notify = ctrl->is_server ? &ctrl->ring->srv_notify : &ctrl->ring->cli_notify;
+ prev = __sync_fetch_and_and(notify, ~bit);
if (prev & bit)
return xc_evtchn_notify(ctrl->event, ctrl->event_port);
else
@@ -197,15 +197,15 @@ static int do_send(struct libxenvchan *ctrl, const void *data, size_t size)
}
if (avail_contig > size)
avail_contig = size;
+ xen_mb(); /* read indexes /then/ write data */
memcpy(wr_ring(ctrl) + real_idx, data, avail_contig);
if (avail_contig < size)
{
// we rolled across the end of the ring
memcpy(wr_ring(ctrl), data + avail_contig, size - avail_contig);
}
- barrier(); // data must be in the ring prior to increment
+ xen_wmb(); /* write data /then/ notify */
wr_prod(ctrl) += size;
- barrier(); // increment must happen prior to notify
if (send_notify(ctrl, VCHAN_NOTIFY_WRITE))
return -1;
return size;
@@ -268,13 +268,14 @@ static int do_recv(struct libxenvchan *ctrl, void *data, size_t size)
int avail_contig = rd_ring_size(ctrl) - real_idx;
if (avail_contig > size)
avail_contig = size;
- barrier(); // data read must happen after rd_cons read
+ xen_rmb(); /* data read must happen /after/ rd_cons read */
memcpy(data, rd_ring(ctrl) + real_idx, avail_contig);
if (avail_contig < size)
{
// we rolled across the end of the ring
memcpy(data + avail_contig, rd_ring(ctrl), size - avail_contig);
}
+ xen_mb(); /* consume /then/ notify */
rd_cons(ctrl) += size;
if (VCHAN_DEBUG) {
char metainfo[32];
@@ -285,7 +286,6 @@ static int do_recv(struct libxenvchan *ctrl, void *data, size_t size)
iov[1].iov_len = size;
writev(-1, iov, 2);
}
- barrier(); // consumption must happen prior to notify of newly freed space
if (send_notify(ctrl, VCHAN_NOTIFY_READ))
return -1;
return size;