 xen/arch/x86/domain.c                  | 12 ++++--------
 xen/arch/x86/hvm/hvm.c                 | 18 ++++++++----------
 xen/include/asm-x86/hvm/guest_access.h |  3 ---
 xen/include/asm-x86/hvm/vcpu.h         |  3 +++
 xen/include/asm-x86/hypercall.h        |  9 ---------
 5 files changed, 15 insertions(+), 30 deletions(-)
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index d50cbac751..4c7440c0e7 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1553,8 +1553,6 @@ void sync_vcpu_execstate(struct vcpu *v)
__arg; \
})
-DEFINE_PER_CPU(char, hc_preempted);
-
unsigned long hypercall_create_continuation(
unsigned int op, const char *format, ...)
{
@@ -1583,12 +1581,12 @@ unsigned long hypercall_create_continuation(
{
regs = guest_cpu_user_regs();
regs->eax = op;
- /*
- * For PV guest, we update EIP to re-execute 'syscall' / 'int 0x82';
- * HVM does not need this since 'vmcall' / 'vmmcall' is fault-like.
- */
+
+ /* Ensure the hypercall trap instruction is re-executed. */
if ( !is_hvm_vcpu(current) )
regs->eip -= 2; /* re-execute 'syscall' / 'int 0x82' */
+ else
+ current->arch.hvm_vcpu.hcall_preempted = 1;
#ifdef __x86_64__
if ( !is_hvm_vcpu(current) ?
@@ -1629,8 +1627,6 @@ unsigned long hypercall_create_continuation(
}
}
}
-
- this_cpu(hc_preempted) = 1;
}
va_end(args);
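
For context, a minimal hedged sketch of the re-execution logic the hunk above establishes, together with a hypothetical consumer on the HVM exit path (the 'update_guest_eip' helper named here is an assumption for illustration, not part of this patch):

    /* In hypercall_create_continuation(): arrange for the guest to
     * re-issue the hypercall after the continuation runs. */
    if ( !is_hvm_vcpu(current) )
        regs->eip -= 2;   /* PV: step back over 'syscall' / 'int 0x82' */
    else
        current->arch.hvm_vcpu.hcall_preempted = 1;   /* HVM: flag it */

    /* Hypothetical VMEXIT-handler consumer: only advance RIP past the
     * 'vmcall' / 'vmmcall' when the hypercall actually completed, so a
     * preempted call is re-executed on the next VM entry. */
    if ( hvm_do_hypercall(regs) != HVM_HCALL_preempted )
        update_guest_eip();   /* assumed helper */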
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 966ec831d2..ba0481a133 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2028,16 +2028,13 @@ enum hvm_copy_result hvm_fetch_from_guest_virt_nofault(
PFEC_page_present | pfec);
}
-#ifdef __x86_64__
-DEFINE_PER_CPU(bool_t, hvm_64bit_hcall);
-#endif
-
unsigned long copy_to_user_hvm(void *to, const void *from, unsigned int len)
{
int rc;
#ifdef __x86_64__
- if ( !this_cpu(hvm_64bit_hcall) && is_compat_arg_xlat_range(to, len) )
+ if ( !current->arch.hvm_vcpu.hcall_64bit &&
+ is_compat_arg_xlat_range(to, len) )
{
memcpy(to, from, len);
return 0;
@@ -2054,7 +2051,8 @@ unsigned long copy_from_user_hvm(void *to, const void *from, unsigned len)
int rc;
#ifdef __x86_64__
- if ( !this_cpu(hvm_64bit_hcall) && is_compat_arg_xlat_range(from, len) )
+ if ( !current->arch.hvm_vcpu.hcall_64bit &&
+ is_compat_arg_xlat_range(from, len) )
{
memcpy(to, from, len);
return 0;
@@ -2567,7 +2565,7 @@ int hvm_do_hypercall(struct cpu_user_regs *regs)
return HVM_HCALL_completed;
}
- this_cpu(hc_preempted) = 0;
+ curr->arch.hvm_vcpu.hcall_preempted = 0;
#ifdef __x86_64__
if ( mode == 8 )
@@ -2575,13 +2573,13 @@ int hvm_do_hypercall(struct cpu_user_regs *regs)
HVM_DBG_LOG(DBG_LEVEL_HCALL, "hcall%u(%lx, %lx, %lx, %lx, %lx)", eax,
regs->rdi, regs->rsi, regs->rdx, regs->r10, regs->r8);
- this_cpu(hvm_64bit_hcall) = 1;
+ curr->arch.hvm_vcpu.hcall_64bit = 1;
regs->rax = hvm_hypercall64_table[eax](regs->rdi,
regs->rsi,
regs->rdx,
regs->r10,
regs->r8);
- this_cpu(hvm_64bit_hcall) = 0;
+ curr->arch.hvm_vcpu.hcall_64bit = 0;
}
else
#endif
@@ -2601,7 +2599,7 @@ int hvm_do_hypercall(struct cpu_user_regs *regs)
HVM_DBG_LOG(DBG_LEVEL_HCALL, "hcall%u -> %lx",
eax, (unsigned long)regs->eax);
- if ( this_cpu(hc_preempted) )
+ if ( curr->arch.hvm_vcpu.hcall_preempted )
return HVM_HCALL_preempted;
if ( unlikely(curr->domain->arch.hvm_domain.qemu_mapcache_invalidate) &&
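
A hedged sketch of copy_to_user_hvm() as it reads after this patch (the fallback call and its exact signature are assumptions based on the surrounding code, not quoted from it):

    unsigned long copy_to_user_hvm(void *to, const void *from,
                                   unsigned int len)
    {
        int rc;

    #ifdef __x86_64__
        /* A 32-bit (compat) hypercall may have had its argument block
         * translated into a hypervisor-local area, which is directly
         * addressable, so a plain memcpy suffices there. */
        if ( !current->arch.hvm_vcpu.hcall_64bit &&
             is_compat_arg_xlat_range(to, len) )
        {
            memcpy(to, from, len);
            return 0;
        }
    #endif

        /* A 64-bit hypercall passes guest-virtual pointers; use the
         * paging-aware copy routine (assumed fallback). */
        rc = hvm_copy_to_guest_virt_nofault((unsigned long)to,
                                            (void *)from, len, 0);
        return rc ? len : 0;
    }

copy_from_user_hvm() follows the same pattern with the direction reversed.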
diff --git a/xen/include/asm-x86/hvm/guest_access.h b/xen/include/asm-x86/hvm/guest_access.h
index f401ac9d8d..7a89e81536 100644
--- a/xen/include/asm-x86/hvm/guest_access.h
+++ b/xen/include/asm-x86/hvm/guest_access.h
@@ -1,9 +1,6 @@
#ifndef __ASM_X86_HVM_GUEST_ACCESS_H__
#define __ASM_X86_HVM_GUEST_ACCESS_H__
-#include <xen/percpu.h>
-DECLARE_PER_CPU(bool_t, hvm_64bit_hcall);
-
unsigned long copy_to_user_hvm(void *to, const void *from, unsigned len);
unsigned long copy_from_user_hvm(void *to, const void *from, unsigned len);
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 1d72ecfc60..682027fffa 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -63,6 +63,9 @@ struct hvm_vcpu {
bool_t debug_state_latch;
bool_t single_step;
+ bool_t hcall_preempted;
+ bool_t hcall_64bit;
+
u64 asid_generation;
u32 asid;
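
The two new fields replace the per-CPU variables with state that belongs to the in-flight hypercall of a specific vCPU rather than to whichever physical CPU happens to run it. Annotated excerpt (comments are mine, not from the source):

    bool_t hcall_preempted;  /* set by hypercall_create_continuation(),
                              * tested by hvm_do_hypercall() */
    bool_t hcall_64bit;      /* set around a 64-bit-ABI hypercall,
                              * tested by copy_{to,from}_user_hvm() */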
diff --git a/xen/include/asm-x86/hypercall.h b/xen/include/asm-x86/hypercall.h
index 86377862f1..a6615e895e 100644
--- a/xen/include/asm-x86/hypercall.h
+++ b/xen/include/asm-x86/hypercall.h
@@ -17,15 +17,6 @@
*/
#define MMU_UPDATE_PREEMPTED (~(~0U>>1))
-/*
- * This gets set to a non-zero value whenever hypercall_create_continuation()
- * is used (outside of multicall context; in multicall context the second call
- * from do_multicall() itself will have this effect). Internal callers of
- * hypercall handlers interested in this condition must clear the flag prior
- * to invoking the respective handler(s).
- */
-DECLARE_PER_CPU(char, hc_preempted);
-
extern long
do_event_channel_op_compat(
XEN_GUEST_HANDLE(evtchn_op_t) uop);
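
The comment deleted from hypercall.h documented a contract that carries over to the new per-vCPU field: an internal caller of a hypercall handler that cares about preemption must clear the flag before invoking the handler and test it afterwards. A hypothetical sketch of that pattern ('call_handler' is a stand-in, not a real API):

    curr->arch.hvm_vcpu.hcall_preempted = 0;
    regs->eax = call_handler(regs);       /* stand-in handler invocation */
    if ( curr->arch.hvm_vcpu.hcall_preempted )
        return HVM_HCALL_preempted;       /* re-issue on next VM entry */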