aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorIan Campbell <ian.campbell@citrix.com>2012-07-25 17:39:21 +0100
committerIan Campbell <ian.campbell@citrix.com>2012-07-25 17:39:21 +0100
commit4147c74e872432b7b41403ea4e9789532953b84d (patch)
treecf43cfab3c14988ce4b991ddaf443965e3b48723
parentb905f2f14b31bc9a060a7aea712056b73b17c036 (diff)
downloadxen-4147c74e872432b7b41403ea4e9789532953b84d.tar.gz
xen-4147c74e872432b7b41403ea4e9789532953b84d.tar.bz2
xen-4147c74e872432b7b41403ea4e9789532953b84d.zip
arm: implement hypercall continuations
Largely cribbed from x86; register names differ and the return value is r0 == the first argument rather than the hypercall number (which is r12). We must only clobber the hypercall arguments if PC has not been changed since continuations rely on them. The multicall variant is untested; on ARM do_multicall_call is currently a BUG() so we obviously don't use that yet. I have left a BUG in the hypercall continuation path too since it will need validation once multicalls are implemented. Since the multicall state is local we do not need a globally atomic {test,set}_bit. However we do need to be atomic WRT interrupts so can't just use the naive RMW version. Stick with the global atomic implementation for now but keep the __ as documentation of the intention. Signed-off-by: Ian Campbell <Ian.Campbell@citrix.com> Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com> Committed-by: Ian Campbell <ian.campbell@citrix.com>
-rw-r--r--xen/arch/arm/domain.c87
-rw-r--r--xen/arch/arm/dummy.S1
-rw-r--r--xen/arch/arm/traps.c29
-rw-r--r--xen/include/asm-arm/bitops.h9
4 files changed, 115 insertions, 11 deletions
diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index ac6a30dfde..57d8746fa3 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -5,6 +5,7 @@
#include <xen/softirq.h>
#include <xen/wait.h>
#include <xen/errno.h>
+#include <xen/bitops.h>
#include <asm/current.h>
#include <asm/regs.h>
@@ -224,6 +225,92 @@ void sync_vcpu_execstate(struct vcpu *v)
/* Nothing to do -- no lazy switching */
}
+/*
+ * Pull the next hypercall argument off a va_list, interpreting the next
+ * character of the format string: 'i' = unsigned int, 'l' = unsigned long,
+ * 'h' = pointer (guest handle). Any other character is a caller bug (BUG()).
+ * Note the format pointer is advanced as a side effect, so callers pass
+ * their cursor into the format string, not the string itself.
+ */
+#define next_arg(fmt, args) ({ \
+ unsigned long __arg; \
+ switch ( *(fmt)++ ) \
+ { \
+ case 'i': __arg = (unsigned long)va_arg(args, unsigned int); break; \
+ case 'l': __arg = (unsigned long)va_arg(args, unsigned long); break; \
+ case 'h': __arg = (unsigned long)va_arg(args, void *); break; \
+ default: __arg = 0; BUG(); \
+ } \
+ __arg; \
+})
+
+/*
+ * Undo a previously created hypercall continuation so the guest does not
+ * restart the call. In a multicall, clear the per-call preempted flag;
+ * otherwise step the guest PC forward again past the trap instruction,
+ * reversing the "pc -= 4" done when the continuation was created.
+ */
+void hypercall_cancel_continuation(void)
+{
+ struct cpu_user_regs *regs = guest_cpu_user_regs();
+ struct mc_state *mcs = &current->mc_state;
+
+ if ( test_bit(_MCSF_in_multicall, &mcs->flags) )
+ {
+ __clear_bit(_MCSF_call_preempted, &mcs->flags);
+ }
+ else
+ {
+ regs->pc += 4; /* undo re-execute 'hvc #XEN_HYPERCALL_TAG' */
+ }
+}
+
+/*
+ * Arrange for the current hypercall to be restarted by the guest.
+ *
+ * Non-multicall case: rewind the guest PC over the 'hvc' trap instruction
+ * so it is re-executed, reload r12 with the hypercall number (op) and
+ * r0-r5 with the (possibly updated) arguments described by the varargs
+ * format string ('i'/'l'/'h', see next_arg()). The value returned is the
+ * guest-visible return value, i.e. r0 == the first argument.
+ *
+ * Multicall case: not implemented yet on ARM (BUG() below); the intended
+ * flow marks the multicall entry preempted and stashes the arguments in
+ * mcs->call.args, returning mcs->call.result.
+ */
+unsigned long hypercall_create_continuation(
+ unsigned int op, const char *format, ...)
+{
+ struct mc_state *mcs = &current->mc_state;
+ struct cpu_user_regs *regs;
+ const char *p = format;
+ unsigned long arg, rc;
+ unsigned int i;
+ va_list args;
+
+ /* All hypercalls take at least one argument */
+ BUG_ON( !p || *p == '\0' );
+
+ va_start(args, format);
+
+ if ( test_bit(_MCSF_in_multicall, &mcs->flags) )
+ {
+ BUG(); /* XXX multicalls not implemented yet. */
+
+ __set_bit(_MCSF_call_preempted, &mcs->flags);
+
+ for ( i = 0; *p != '\0'; i++ )
+ mcs->call.args[i] = next_arg(p, args);
+
+ /* Return value gets written back to mcs->call.result */
+ rc = mcs->call.result;
+ }
+ else
+ {
+ regs = guest_cpu_user_regs();
+ regs->r12 = op;
+
+ /* Ensure the hypercall trap instruction is re-executed. */
+ regs->pc -= 4; /* re-execute 'hvc #XEN_HYPERCALL_TAG' */
+
+ /* Only r0-r5 carry arguments; extra format chars are consumed
+ * from the va_list but silently dropped. */
+ for ( i = 0; *p != '\0'; i++ )
+ {
+ arg = next_arg(p, args);
+
+ switch ( i )
+ {
+ case 0: regs->r0 = arg; break;
+ case 1: regs->r1 = arg; break;
+ case 2: regs->r2 = arg; break;
+ case 3: regs->r3 = arg; break;
+ case 4: regs->r4 = arg; break;
+ case 5: regs->r5 = arg; break;
+ }
+ }
+
+ /* Return value gets written back to r0 */
+ rc = regs->r0;
+ }
+
+ va_end(args);
+
+ return rc;
+}
+
void startup_cpu_idle_loop(void)
{
struct vcpu *v = current;
diff --git a/xen/arch/arm/dummy.S b/xen/arch/arm/dummy.S
index cab952233b..5406077232 100644
--- a/xen/arch/arm/dummy.S
+++ b/xen/arch/arm/dummy.S
@@ -46,7 +46,6 @@ DUMMY(domain_relinquish_resources);
DUMMY(domain_set_time_offset);
DUMMY(dom_cow);
DUMMY(gmfn_to_mfn);
-DUMMY(hypercall_create_continuation);
DUMMY(send_timer_event);
DUMMY(share_xen_page_with_privileged_guests);
DUMMY(wallclock_time);
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index d2adf4fee1..e4bed69291 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -470,6 +470,9 @@ static void do_debug_trap(struct cpu_user_regs *regs, unsigned int code)
static void do_trap_hypercall(struct cpu_user_regs *regs, unsigned long iss)
{
arm_hypercall_fn_t call = NULL;
+#ifndef NDEBUG
+ uint32_t orig_pc = regs->pc;
+#endif
if ( iss != XEN_HYPERCALL_TAG )
{
@@ -495,17 +498,23 @@ static void do_trap_hypercall(struct cpu_user_regs *regs, unsigned long iss)
regs->r0 = call(regs->r0, regs->r1, regs->r2, regs->r3, regs->r4);
#ifndef NDEBUG
- /* Clobber argument registers */
- switch ( arm_hypercall_table[regs->r12].nr_args ) {
- case 5: regs->r4 = 0xDEADBEEF;
- case 4: regs->r3 = 0xDEADBEEF;
- case 3: regs->r2 = 0xDEADBEEF;
- case 2: regs->r1 = 0xDEADBEEF;
- case 1: /* Don't clobber r0 -- it's the return value */
- break;
- default: BUG();
+ /*
+ * Clobber argument registers only if pc is unchanged, otherwise
+ * this is a hypercall continuation.
+ */
+ if ( orig_pc == regs->pc )
+ {
+ switch ( arm_hypercall_table[regs->r12].nr_args ) {
+ case 5: regs->r4 = 0xDEADBEEF;
+ case 4: regs->r3 = 0xDEADBEEF;
+ case 3: regs->r2 = 0xDEADBEEF;
+ case 2: regs->r1 = 0xDEADBEEF;
+ case 1: /* Don't clobber r0 -- it's the return value */
+ break;
+ default: BUG();
+ }
+ regs->r12 = 0xDEADBEEF;
}
- regs->r12 = 0xDEADBEEF;
#endif
}
diff --git a/xen/include/asm-arm/bitops.h b/xen/include/asm-arm/bitops.h
index e5c1781506..87de5db167 100644
--- a/xen/include/asm-arm/bitops.h
+++ b/xen/include/asm-arm/bitops.h
@@ -23,6 +23,15 @@ extern int _test_and_change_bit(int nr, volatile void * p);
#define test_and_clear_bit(n,p) _test_and_clear_bit(n,p)
#define test_and_change_bit(n,p) _test_and_change_bit(n,p)
+/*
+ * Non-atomic bit manipulation.
+ *
+ * Implemented using atomics to be interrupt safe. Could alternatively
+ * implement with local interrupt masking.
+ */
+#define __set_bit(n,p) _set_bit(n,p)
+#define __clear_bit(n,p) _clear_bit(n,p)
+
#define BIT(nr) (1UL << (nr))
#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)