about summary refs log tree commit diff stats
path: root/xen/common
diff options
context:
space:
mode:
Diffstat (limited to 'xen/common')
-rw-r--r--xen/common/Makefile1
-rw-r--r--xen/common/event_2l.c99
-rw-r--r--xen/common/event_channel.c87
-rw-r--r--xen/common/schedule.c3
4 files changed, 135 insertions(+), 55 deletions(-)
diff --git a/xen/common/Makefile b/xen/common/Makefile
index fcb4a84e7c..f6c473a44a 100644
--- a/xen/common/Makefile
+++ b/xen/common/Makefile
@@ -5,6 +5,7 @@ obj-y += cpupool.o
obj-$(HAS_DEVICE_TREE) += device_tree.o
obj-y += domctl.o
obj-y += domain.o
+obj-y += event_2l.o
obj-y += event_channel.o
obj-y += grant_table.o
obj-y += irq.o
diff --git a/xen/common/event_2l.c b/xen/common/event_2l.c
new file mode 100644
index 0000000000..7b28942933
--- /dev/null
+++ b/xen/common/event_2l.c
@@ -0,0 +1,99 @@
+/*
+ * Event channel port operations.
+ *
+ * Copyright (c) 2003-2006, K A Fraser.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2 or later. See the file COPYING for more details.
+ */
+
+#include <xen/config.h>
+#include <xen/init.h>
+#include <xen/lib.h>
+#include <xen/errno.h>
+#include <xen/sched.h>
+#include <xen/event.h>
+
+static void evtchn_2l_set_pending(struct vcpu *v, struct evtchn *evtchn)
+{
+ struct domain *d = v->domain;
+ unsigned int port = evtchn->port;
+
+ /*
+ * The following bit operations must happen in strict order.
+ * NB. On x86, the atomic bit operations also act as memory barriers.
+ * There is therefore sufficiently strict ordering for this architecture --
+ * others may require explicit memory barriers.
+ */
+
+ if ( test_and_set_bit(port, &shared_info(d, evtchn_pending)) )
+ return;
+
+ if ( !test_bit (port, &shared_info(d, evtchn_mask)) &&
+ !test_and_set_bit(port / BITS_PER_EVTCHN_WORD(d),
+ &vcpu_info(v, evtchn_pending_sel)) )
+ {
+ vcpu_mark_events_pending(v);
+ }
+
+ evtchn_check_pollers(d, port);
+}
+
+static void evtchn_2l_clear_pending(struct domain *d, struct evtchn *evtchn)
+{
+ clear_bit(evtchn->port, &shared_info(d, evtchn_pending));
+}
+
+static void evtchn_2l_unmask(struct domain *d, struct evtchn *evtchn)
+{
+ struct vcpu *v = d->vcpu[evtchn->notify_vcpu_id];
+ unsigned int port = evtchn->port;
+
+ /*
+ * These operations must happen in strict order. Based on
+ * evtchn_2l_set_pending() above.
+ */
+ if ( test_and_clear_bit(port, &shared_info(d, evtchn_mask)) &&
+ test_bit (port, &shared_info(d, evtchn_pending)) &&
+ !test_and_set_bit (port / BITS_PER_EVTCHN_WORD(d),
+ &vcpu_info(v, evtchn_pending_sel)) )
+ {
+ vcpu_mark_events_pending(v);
+ }
+}
+
+static bool_t evtchn_2l_is_pending(struct domain *d,
+ const struct evtchn *evtchn)
+{
+ return test_bit(evtchn->port, &shared_info(d, evtchn_pending));
+}
+
+static bool_t evtchn_2l_is_masked(struct domain *d,
+ const struct evtchn *evtchn)
+{
+ return test_bit(evtchn->port, &shared_info(d, evtchn_mask));
+}
+
+static const struct evtchn_port_ops evtchn_port_ops_2l =
+{
+ .set_pending = evtchn_2l_set_pending,
+ .clear_pending = evtchn_2l_clear_pending,
+ .unmask = evtchn_2l_unmask,
+ .is_pending = evtchn_2l_is_pending,
+ .is_masked = evtchn_2l_is_masked,
+};
+
+void evtchn_2l_init(struct domain *d)
+{
+ d->evtchn_port_ops = &evtchn_port_ops_2l;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index 64c976b20a..7290a21dc0 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -150,6 +150,7 @@ static int get_free_port(struct domain *d)
xfree(chn);
return -ENOMEM;
}
+ chn[i].port = port + i;
}
bucket_from_port(d, port) = chn;
@@ -530,7 +531,7 @@ static long __evtchn_close(struct domain *d1, int port1)
}
/* Clear pending event to avoid unexpected behavior on re-bind. */
- clear_bit(port1, &shared_info(d1, evtchn_pending));
+ evtchn_port_clear_pending(d1, chn1);
/* Reset binding to vcpu0 when the channel is freed. */
chn1->state = ECS_FREE;
@@ -615,43 +616,7 @@ out:
static void evtchn_set_pending(struct vcpu *v, int port)
{
- struct domain *d = v->domain;
- int vcpuid;
-
- /*
- * The following bit operations must happen in strict order.
- * NB. On x86, the atomic bit operations also act as memory barriers.
- * There is therefore sufficiently strict ordering for this architecture --
- * others may require explicit memory barriers.
- */
-
- if ( test_and_set_bit(port, &shared_info(d, evtchn_pending)) )
- return;
-
- if ( !test_bit (port, &shared_info(d, evtchn_mask)) &&
- !test_and_set_bit(port / BITS_PER_EVTCHN_WORD(d),
- &vcpu_info(v, evtchn_pending_sel)) )
- {
- vcpu_mark_events_pending(v);
- }
-
- /* Check if some VCPU might be polling for this event. */
- if ( likely(bitmap_empty(d->poll_mask, d->max_vcpus)) )
- return;
-
- /* Wake any interested (or potentially interested) pollers. */
- for ( vcpuid = find_first_bit(d->poll_mask, d->max_vcpus);
- vcpuid < d->max_vcpus;
- vcpuid = find_next_bit(d->poll_mask, d->max_vcpus, vcpuid+1) )
- {
- v = d->vcpu[vcpuid];
- if ( ((v->poll_evtchn <= 0) || (v->poll_evtchn == port)) &&
- test_and_clear_bit(vcpuid, d->poll_mask) )
- {
- v->poll_evtchn = 0;
- vcpu_unblock(v);
- }
- }
+ evtchn_port_set_pending(v, evtchn_from_port(v->domain, port));
}
int guest_enabled_event(struct vcpu *v, uint32_t virq)
@@ -920,26 +885,15 @@ long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
int evtchn_unmask(unsigned int port)
{
struct domain *d = current->domain;
- struct vcpu *v;
+ struct evtchn *evtchn;
ASSERT(spin_is_locked(&d->event_lock));
if ( unlikely(!port_is_valid(d, port)) )
return -EINVAL;
- v = d->vcpu[evtchn_from_port(d, port)->notify_vcpu_id];
-
- /*
- * These operations must happen in strict order. Based on
- * include/xen/event.h:evtchn_set_pending().
- */
- if ( test_and_clear_bit(port, &shared_info(d, evtchn_mask)) &&
- test_bit (port, &shared_info(d, evtchn_pending)) &&
- !test_and_set_bit (port / BITS_PER_EVTCHN_WORD(d),
- &vcpu_info(v, evtchn_pending_sel)) )
- {
- vcpu_mark_events_pending(v);
- }
+ evtchn = evtchn_from_port(d, port);
+ evtchn_port_unmask(d, evtchn);
return 0;
}
@@ -1170,9 +1124,34 @@ void notify_via_xen_event_channel(struct domain *ld, int lport)
spin_unlock(&ld->event_lock);
}
+void evtchn_check_pollers(struct domain *d, unsigned int port)
+{
+ struct vcpu *v;
+ unsigned int vcpuid;
+
+ /* Check if some VCPU might be polling for this event. */
+ if ( likely(bitmap_empty(d->poll_mask, d->max_vcpus)) )
+ return;
+
+ /* Wake any interested (or potentially interested) pollers. */
+ for ( vcpuid = find_first_bit(d->poll_mask, d->max_vcpus);
+ vcpuid < d->max_vcpus;
+ vcpuid = find_next_bit(d->poll_mask, d->max_vcpus, vcpuid+1) )
+ {
+ v = d->vcpu[vcpuid];
+ if ( ((v->poll_evtchn <= 0) || (v->poll_evtchn == port)) &&
+ test_and_clear_bit(vcpuid, d->poll_mask) )
+ {
+ v->poll_evtchn = 0;
+ vcpu_unblock(v);
+ }
+ }
+}
int evtchn_init(struct domain *d)
{
+ evtchn_2l_init(d);
+
spin_lock_init(&d->event_lock);
if ( get_free_port(d) != 0 )
return -EINVAL;
@@ -1270,8 +1249,8 @@ static void domain_dump_evtchn_info(struct domain *d)
printk(" %4u [%d/%d]: s=%d n=%d x=%d",
port,
- !!test_bit(port, &shared_info(d, evtchn_pending)),
- !!test_bit(port, &shared_info(d, evtchn_mask)),
+ !!evtchn_port_is_pending(d, chn),
+ !!evtchn_port_is_masked(d, chn),
chn->state, chn->notify_vcpu_id, chn->xen_consumer);
switch ( chn->state )
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index af3abc2f16..b8e4cb4417 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -34,6 +34,7 @@
#include <xen/multicall.h>
#include <xen/cpu.h>
#include <xen/preempt.h>
+#include <xen/event.h>
#include <public/sched.h>
#include <xsm/xsm.h>
@@ -759,7 +760,7 @@ static long do_poll(struct sched_poll *sched_poll)
goto out;
rc = 0;
- if ( test_bit(port, &shared_info(d, evtchn_pending)) )
+ if ( evtchn_port_is_pending(d, evtchn_from_port(d, port)) )
goto out;
}