-rw-r--r--  xen/arch/x86/hvm/emulate.c            6
-rw-r--r--  xen/arch/x86/hvm/hvm.c               44
-rw-r--r--  xen/arch/x86/hvm/vmx/vmx.c           42
-rw-r--r--  xen/include/asm-x86/hvm/io.h          3
-rw-r--r--  xen/include/asm-x86/hvm/support.h    19
-rw-r--r--  xen/include/asm-x86/hvm/vmx/vmx.h   135
6 files changed, 106 insertions(+), 143 deletions(-)
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 719dd598bb..57065f7625 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -522,12 +522,6 @@ static int hvmemul_write_io(
unsigned long val,
struct x86_emulate_ctxt *ctxt)
{
- if ( port == 0xe9 )
- {
- hvm_print_line(current, val);
- return X86EMUL_OKAY;
- }
-
return hvmemul_do_pio(port, 1, bytes, val, IOREQ_WRITE, 0, 0, NULL);
}
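
With the open-coded port 0xe9 check gone, a write to the debug port from the emulator takes the same path as any other port I/O: hvmemul_do_pio issues the request and the registered port handlers get a chance to claim it. The following is a minimal, hypothetical sketch of that dispatch pattern, not Xen's actual intercept code; the table layout, type names and the write-direction convention are invented for illustration.

/* Hypothetical port-I/O dispatch: scan a table of {port, size, handler}
 * entries on every port write; the first match handles the access. */
#include <stdint.h>
#include <stdio.h>

typedef int (*portio_action_t)(int dir, uint32_t port, uint32_t bytes,
                               uint32_t *val);

struct portio_handler {
    uint32_t port;            /* first port covered by this handler */
    uint32_t size;            /* number of consecutive ports */
    portio_action_t action;
};

static int debug_port_write(int dir, uint32_t port, uint32_t bytes,
                            uint32_t *val)
{
    printf("debug port 0x%x <- 0x%02x\n", port, *val & 0xff);
    return 1;                 /* handled */
}

static const struct portio_handler handlers[] = {
    { 0xe9, 1, debug_port_write },
};

/* Dispatch a 1-byte OUT; returns 1 if a handler claimed the port. */
static int portio_intercept(uint32_t port, uint32_t val)
{
    for ( unsigned int i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++ )
        if ( port >= handlers[i].port &&
             port < handlers[i].port + handlers[i].size )
            return handlers[i].action(0 /* write, in this sketch's convention */,
                                      port, 1, &val);
    return 0;                 /* not claimed; fall through */
}

int main(void)
{
    portio_intercept(0xe9, 'X');
    return 0;
}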
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 61f32f1329..00f6adb5c7 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -255,6 +255,30 @@ static int hvm_set_ioreq_page(
return 0;
}
+static int hvm_print_line(
+ int dir, uint32_t port, uint32_t bytes, uint32_t *val)
+{
+ struct vcpu *curr = current;
+ struct hvm_domain *hd = &curr->domain->arch.hvm_domain;
+ char c = *val;
+
+ BUG_ON(bytes != 1);
+
+ spin_lock(&hd->pbuf_lock);
+ hd->pbuf[hd->pbuf_idx++] = c;
+ if ( (hd->pbuf_idx == (sizeof(hd->pbuf) - 2)) || (c == '\n') )
+ {
+ if ( c != '\n' )
+ hd->pbuf[hd->pbuf_idx++] = '\n';
+ hd->pbuf[hd->pbuf_idx] = '\0';
+ printk(XENLOG_G_DEBUG "HVM%u: %s", curr->domain->domain_id, hd->pbuf);
+ hd->pbuf_idx = 0;
+ }
+ spin_unlock(&hd->pbuf_lock);
+
+ return 1;
+}
+
int hvm_domain_initialise(struct domain *d)
{
int rc;
@@ -289,6 +313,8 @@ int hvm_domain_initialise(struct domain *d)
hvm_init_ioreq_page(d, &d->arch.hvm_domain.ioreq);
hvm_init_ioreq_page(d, &d->arch.hvm_domain.buf_ioreq);
+ register_portio_handler(d, 0xe9, 1, hvm_print_line);
+
rc = hvm_funcs.domain_initialise(d);
if ( rc != 0 )
goto fail2;
@@ -1579,24 +1605,6 @@ unsigned long copy_from_user_hvm(void *to, const void *from, unsigned len)
return rc ? len : 0; /* fake a copy_from_user() return code */
}
-/* HVM specific printbuf. Mostly used for hvmloader chit-chat. */
-void hvm_print_line(struct vcpu *v, const char c)
-{
- struct hvm_domain *hd = &v->domain->arch.hvm_domain;
-
- spin_lock(&hd->pbuf_lock);
- hd->pbuf[hd->pbuf_idx++] = c;
- if ( (hd->pbuf_idx == (sizeof(hd->pbuf) - 2)) || (c == '\n') )
- {
- if ( c != '\n' )
- hd->pbuf[hd->pbuf_idx++] = '\n';
- hd->pbuf[hd->pbuf_idx] = '\0';
- printk(XENLOG_G_DEBUG "HVM%u: %s", v->domain->domain_id, hd->pbuf);
- hd->pbuf_idx = 0;
- }
- spin_unlock(&hd->pbuf_lock);
-}
-
#define bitmaskof(idx) (1U << ((idx) & 31))
void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx)
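
The replacement handler keeps the old line-buffering behaviour: bytes written to port 0xe9 accumulate in the per-domain pbuf (under pbuf_lock) and are flushed to the log when a newline arrives or the buffer is nearly full, and register_portio_handler() in hvm_domain_initialise() wires it up for every HVM domain, so the emulator no longer needs a special case. Below is a self-contained user-space sketch of just the buffering policy; the 80-byte buffer size and the fputs() stand-in for printk() are assumptions, and the locking is omitted.

/* Minimal sketch of the hvm_print_line buffering policy: append each byte,
 * flush when '\n' arrives or only room for "\n\0" remains. */
#include <stdio.h>

static char pbuf[80];          /* size assumed; Xen keeps this in hvm_domain */
static unsigned int pbuf_idx;

static void print_line_byte(char c)
{
    pbuf[pbuf_idx++] = c;
    if ( (pbuf_idx == (sizeof(pbuf) - 2)) || (c == '\n') )
    {
        if ( c != '\n' )
            pbuf[pbuf_idx++] = '\n';
        pbuf[pbuf_idx] = '\0';
        fputs(pbuf, stdout);   /* stands in for printk(XENLOG_G_DEBUG ...) */
        pbuf_idx = 0;
    }
}

int main(void)
{
    const char *msg = "HVM Loader\n";
    for ( const char *p = msg; *p != '\0'; p++ )
        print_line_byte(*p);
    return 0;
}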
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index efda084fa4..29dcb68503 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1240,23 +1240,6 @@ static void vmx_do_cpuid(struct cpu_user_regs *regs)
regs->edx = edx;
}
-#define CASE_GET_REG_P(REG, reg) \
- case REG_ ## REG: reg_p = (unsigned long *)&(regs->reg); break
-
-#ifdef __i386__
-#define CASE_EXTEND_GET_REG_P
-#else
-#define CASE_EXTEND_GET_REG_P \
- CASE_GET_REG_P(R8, r8); \
- CASE_GET_REG_P(R9, r9); \
- CASE_GET_REG_P(R10, r10); \
- CASE_GET_REG_P(R11, r11); \
- CASE_GET_REG_P(R12, r12); \
- CASE_GET_REG_P(R13, r13); \
- CASE_GET_REG_P(R14, r14); \
- CASE_GET_REG_P(R15, r15)
-#endif
-
static void vmx_dr_access(unsigned long exit_qualification,
struct cpu_user_regs *regs)
{
@@ -1280,9 +1263,9 @@ static void vmx_invlpg_intercept(unsigned long vaddr)
}
#define CASE_SET_REG(REG, reg) \
- case REG_ ## REG: regs->reg = value; break
+ case VMX_CONTROL_REG_ACCESS_GPR_ ## REG: regs->reg = value; break
#define CASE_GET_REG(REG, reg) \
- case REG_ ## REG: value = regs->reg; break
+ case VMX_CONTROL_REG_ACCESS_GPR_ ## REG: value = regs->reg; break
#define CASE_EXTEND_SET_REG \
CASE_EXTEND_REG(S)
@@ -1408,26 +1391,25 @@ static int vmx_cr_access(unsigned long exit_qualification,
unsigned long value;
struct vcpu *v = current;
- switch ( exit_qualification & CONTROL_REG_ACCESS_TYPE )
+ switch ( exit_qualification & VMX_CONTROL_REG_ACCESS_TYPE )
{
- case TYPE_MOV_TO_CR:
- gp = exit_qualification & CONTROL_REG_ACCESS_REG;
- cr = exit_qualification & CONTROL_REG_ACCESS_NUM;
+ case VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR:
+ gp = exit_qualification & VMX_CONTROL_REG_ACCESS_GPR;
+ cr = exit_qualification & VMX_CONTROL_REG_ACCESS_NUM;
return mov_to_cr(gp, cr, regs);
- case TYPE_MOV_FROM_CR:
- gp = exit_qualification & CONTROL_REG_ACCESS_REG;
- cr = exit_qualification & CONTROL_REG_ACCESS_NUM;
+ case VMX_CONTROL_REG_ACCESS_TYPE_MOV_FROM_CR:
+ gp = exit_qualification & VMX_CONTROL_REG_ACCESS_GPR;
+ cr = exit_qualification & VMX_CONTROL_REG_ACCESS_NUM;
mov_from_cr(cr, gp, regs);
break;
- case TYPE_CLTS:
+ case VMX_CONTROL_REG_ACCESS_TYPE_CLTS:
v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS;
vmx_update_guest_cr(v, 0);
HVMTRACE_0D(CLTS, current);
break;
- case TYPE_LMSW:
+ case VMX_CONTROL_REG_ACCESS_TYPE_LMSW:
value = v->arch.hvm_vcpu.guest_cr[0];
- value = (value & ~0xF) |
- (((exit_qualification & LMSW_SOURCE_DATA) >> 16) & 0xF);
+ value = (value & ~0xFFFF) | ((exit_qualification >> 16) & 0xFFFF);
HVMTRACE_1D(LMSW, current, value);
return !hvm_set_cr0(value);
default:
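
The renamed masks spell out the exit-qualification layout at the use site: VMX_CONTROL_REG_ACCESS_NUM selects the control register, VMX_CONTROL_REG_ACCESS_TYPE the kind of access (CR write, CR read, CLTS, LMSW), VMX_CONTROL_REG_ACCESS_GPR the general-purpose register operand, and for LMSW bits 31:16 now contribute the full 16-bit source operand where the old code kept only the low 4 bits. A self-contained decode of a made-up qualification value using the new names (the sample value and the printfs are illustrative only):

/* Decode a CR-access exit qualification with the renamed masks.
 * 0x100 here encodes "MOV to CR0 from GPR #1" purely as an example. */
#include <stdio.h>

#define VMX_CONTROL_REG_ACCESS_NUM               0xf
#define VMX_CONTROL_REG_ACCESS_TYPE              0x30
#define VMX_CONTROL_REG_ACCESS_GPR               0xf00
#define VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR    (0 << 4)
#define VMX_CONTROL_REG_ACCESS_TYPE_MOV_FROM_CR  (1 << 4)
#define VMX_CONTROL_REG_ACCESS_TYPE_CLTS         (2 << 4)
#define VMX_CONTROL_REG_ACCESS_TYPE_LMSW         (3 << 4)

int main(void)
{
    unsigned long exit_qualification = 0x100;

    switch ( exit_qualification & VMX_CONTROL_REG_ACCESS_TYPE )
    {
    case VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR:
        printf("mov to cr%lu from gpr %lu\n",
               exit_qualification & VMX_CONTROL_REG_ACCESS_NUM,
               (exit_qualification & VMX_CONTROL_REG_ACCESS_GPR) >> 8);
        break;
    case VMX_CONTROL_REG_ACCESS_TYPE_LMSW:
        /* LMSW: bits 31:16 carry the 16-bit source operand. */
        printf("lmsw source 0x%04lx\n", (exit_qualification >> 16) & 0xFFFF);
        break;
    default:
        break;
    }
    return 0;
}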
diff --git a/xen/include/asm-x86/hvm/io.h b/xen/include/asm-x86/hvm/io.h
index 535f909d5a..9ef4f645a1 100644
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -25,10 +25,9 @@
#include <public/hvm/ioreq.h>
#include <public/event_channel.h>
-#define MAX_IO_HANDLER 12
+#define MAX_IO_HANDLER 16
#define HVM_PORTIO 0
-#define HVM_MMIO 1
#define HVM_BUFFERED_IO 2
typedef unsigned long (*hvm_mmio_read_t)(struct vcpu *v,
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index f5a174c14c..2ec00f2fb0 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -27,12 +27,6 @@
#include <asm/regs.h>
#include <asm/processor.h>
-#ifndef NDEBUG
-#define HVM_DEBUG 1
-#else
-#define HVM_DEBUG 1
-#endif
-
static inline vcpu_iodata_t *get_ioreq(struct vcpu *v)
{
struct domain *d = v->domain;
@@ -42,17 +36,9 @@ static inline vcpu_iodata_t *get_ioreq(struct vcpu *v)
return &p->vcpu_iodata[v->vcpu_id];
}
-/* XXX these are really VMX specific */
-#define TYPE_MOV_TO_DR (0 << 4)
-#define TYPE_MOV_FROM_DR (1 << 4)
-#define TYPE_MOV_TO_CR (0 << 4)
-#define TYPE_MOV_FROM_CR (1 << 4)
-#define TYPE_CLTS (2 << 4)
-#define TYPE_LMSW (3 << 4)
-
#define HVM_DELIVER_NO_ERROR_CODE -1
-#if HVM_DEBUG
+#ifndef NDEBUG
#define DBG_LEVEL_0 (1 << 0)
#define DBG_LEVEL_1 (1 << 1)
#define DBG_LEVEL_2 (1 << 2)
@@ -131,9 +117,6 @@ enum hvm_copy_result hvm_copy_from_guest_virt_nofault(
enum hvm_copy_result hvm_fetch_from_guest_virt_nofault(
void *buf, unsigned long vaddr, int size, uint32_t pfec);
-void hvm_print_line(struct vcpu *v, const char c);
-void hlt_timer_fn(void *data);
-
#define HVM_HCALL_completed 0 /* hypercall completed - no further action */
#define HVM_HCALL_preempted 1 /* hypercall preempted - re-execute VMCALL */
#define HVM_HCALL_invalidate 2 /* invalidate ioemu-dm memory cache */
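
The deleted HVM_DEBUG wrapper evaluated to 1 on both sides of its #ifndef NDEBUG, so the DBG_LEVEL_* block that followed was compiled in unconditionally; guarding that block with #ifndef NDEBUG directly restores the intended debug/release split. A tiny stand-alone illustration of the pattern (the DBG macro name is made up):

/* Build once without and once with -DNDEBUG: the debug line disappears in
 * the NDEBUG build, which is what the old always-1 HVM_DEBUG never allowed. */
#include <stdio.h>

#ifndef NDEBUG
#define DBG(fmt, ...) printf("debug: " fmt "\n", ##__VA_ARGS__)
#else
#define DBG(fmt, ...) ((void)0)
#endif

int main(void)
{
    DBG("compiled in only when NDEBUG is not defined");
    printf("always printed\n");
    return 0;
}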
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index acf523cca4..1d2f37d4d1 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -95,35 +95,32 @@ void vmx_realmode(struct cpu_user_regs *regs);
/*
* Exit Qualifications for MOV for Control Register Access
*/
-#define CONTROL_REG_ACCESS_NUM 0xf /* 3:0, number of control register */
-#define CONTROL_REG_ACCESS_TYPE 0x30 /* 5:4, access type */
-#define CONTROL_REG_ACCESS_REG 0xf00 /* 10:8, general purpose register */
-#define LMSW_SOURCE_DATA (0xFFFF << 16) /* 16:31 lmsw source */
-#define REG_EAX (0 << 8)
-#define REG_ECX (1 << 8)
-#define REG_EDX (2 << 8)
-#define REG_EBX (3 << 8)
-#define REG_ESP (4 << 8)
-#define REG_EBP (5 << 8)
-#define REG_ESI (6 << 8)
-#define REG_EDI (7 << 8)
-#define REG_R8 (8 << 8)
-#define REG_R9 (9 << 8)
-#define REG_R10 (10 << 8)
-#define REG_R11 (11 << 8)
-#define REG_R12 (12 << 8)
-#define REG_R13 (13 << 8)
-#define REG_R14 (14 << 8)
-#define REG_R15 (15 << 8)
-
-/*
- * Exit Qualifications for MOV for Debug Register Access
- */
-#define DEBUG_REG_ACCESS_NUM 0x7 /* 2:0, number of debug register */
-#define DEBUG_REG_ACCESS_TYPE 0x10 /* 4, direction of access */
-#define TYPE_MOV_TO_DR (0 << 4)
-#define TYPE_MOV_FROM_DR (1 << 4)
-#define DEBUG_REG_ACCESS_REG 0xf00 /* 11:8, general purpose register */
+ /* 3:0 - control register number (CRn) */
+#define VMX_CONTROL_REG_ACCESS_NUM 0xf
+ /* 5:4 - access type (CR write, CR read, CLTS, LMSW) */
+#define VMX_CONTROL_REG_ACCESS_TYPE 0x30
+ /* 10:8 - general purpose register operand */
+#define VMX_CONTROL_REG_ACCESS_GPR 0xf00
+#define VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR (0 << 4)
+#define VMX_CONTROL_REG_ACCESS_TYPE_MOV_FROM_CR (1 << 4)
+#define VMX_CONTROL_REG_ACCESS_TYPE_CLTS (2 << 4)
+#define VMX_CONTROL_REG_ACCESS_TYPE_LMSW (3 << 4)
+#define VMX_CONTROL_REG_ACCESS_GPR_EAX (0 << 8)
+#define VMX_CONTROL_REG_ACCESS_GPR_ECX (1 << 8)
+#define VMX_CONTROL_REG_ACCESS_GPR_EDX (2 << 8)
+#define VMX_CONTROL_REG_ACCESS_GPR_EBX (3 << 8)
+#define VMX_CONTROL_REG_ACCESS_GPR_ESP (4 << 8)
+#define VMX_CONTROL_REG_ACCESS_GPR_EBP (5 << 8)
+#define VMX_CONTROL_REG_ACCESS_GPR_ESI (6 << 8)
+#define VMX_CONTROL_REG_ACCESS_GPR_EDI (7 << 8)
+#define VMX_CONTROL_REG_ACCESS_GPR_R8 (8 << 8)
+#define VMX_CONTROL_REG_ACCESS_GPR_R9 (9 << 8)
+#define VMX_CONTROL_REG_ACCESS_GPR_R10 (10 << 8)
+#define VMX_CONTROL_REG_ACCESS_GPR_R11 (11 << 8)
+#define VMX_CONTROL_REG_ACCESS_GPR_R12 (12 << 8)
+#define VMX_CONTROL_REG_ACCESS_GPR_R13 (13 << 8)
+#define VMX_CONTROL_REG_ACCESS_GPR_R14 (14 << 8)
+#define VMX_CONTROL_REG_ACCESS_GPR_R15 (15 << 8)
/*
* Access Rights
@@ -155,72 +152,72 @@ void vmx_realmode(struct cpu_user_regs *regs);
static inline void __vmptrld(u64 addr)
{
- __asm__ __volatile__ ( VMPTRLD_OPCODE
- MODRM_EAX_06
- /* CF==1 or ZF==1 --> crash (ud2) */
- "ja 1f ; ud2 ; 1:\n"
- :
- : "a" (&addr)
- : "memory");
+ asm volatile ( VMPTRLD_OPCODE
+ MODRM_EAX_06
+ /* CF==1 or ZF==1 --> crash (ud2) */
+ "ja 1f ; ud2 ; 1:\n"
+ :
+ : "a" (&addr)
+ : "memory");
}
static inline void __vmptrst(u64 addr)
{
- __asm__ __volatile__ ( VMPTRST_OPCODE
- MODRM_EAX_07
- :
- : "a" (&addr)
- : "memory");
+ asm volatile ( VMPTRST_OPCODE
+ MODRM_EAX_07
+ :
+ : "a" (&addr)
+ : "memory");
}
static inline void __vmpclear(u64 addr)
{
- __asm__ __volatile__ ( VMCLEAR_OPCODE
- MODRM_EAX_06
- /* CF==1 or ZF==1 --> crash (ud2) */
- "ja 1f ; ud2 ; 1:\n"
- :
- : "a" (&addr)
- : "memory");
+ asm volatile ( VMCLEAR_OPCODE
+ MODRM_EAX_06
+ /* CF==1 or ZF==1 --> crash (ud2) */
+ "ja 1f ; ud2 ; 1:\n"
+ :
+ : "a" (&addr)
+ : "memory");
}
static inline unsigned long __vmread(unsigned long field)
{
unsigned long ecx;
- __asm__ __volatile__ ( VMREAD_OPCODE
- MODRM_EAX_ECX
- /* CF==1 or ZF==1 --> crash (ud2) */
- "ja 1f ; ud2 ; 1:\n"
- : "=c" (ecx)
- : "a" (field)
- : "memory");
+ asm volatile ( VMREAD_OPCODE
+ MODRM_EAX_ECX
+ /* CF==1 or ZF==1 --> crash (ud2) */
+ "ja 1f ; ud2 ; 1:\n"
+ : "=c" (ecx)
+ : "a" (field)
+ : "memory");
return ecx;
}
static inline void __vmwrite(unsigned long field, unsigned long value)
{
- __asm__ __volatile__ ( VMWRITE_OPCODE
- MODRM_EAX_ECX
- /* CF==1 or ZF==1 --> crash (ud2) */
- "ja 1f ; ud2 ; 1:\n"
- :
- : "a" (field) , "c" (value)
- : "memory");
+ asm volatile ( VMWRITE_OPCODE
+ MODRM_EAX_ECX
+ /* CF==1 or ZF==1 --> crash (ud2) */
+ "ja 1f ; ud2 ; 1:\n"
+ :
+ : "a" (field) , "c" (value)
+ : "memory");
}
static inline unsigned long __vmread_safe(unsigned long field, int *error)
{
unsigned long ecx;
- __asm__ __volatile__ ( VMREAD_OPCODE
- MODRM_EAX_ECX
- /* CF==1 or ZF==1 --> rc = -1 */
- "setna %b0 ; neg %0"
- : "=q" (*error), "=c" (ecx)
- : "0" (0), "a" (field)
- : "memory");
+ asm volatile ( VMREAD_OPCODE
+ MODRM_EAX_ECX
+ /* CF==1 or ZF==1 --> rc = -1 */
+ "setna %b0 ; neg %0"
+ : "=q" (*error), "=c" (ecx)
+ : "0" (0), "a" (field)
+ : "memory");
return ecx;
}
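
The remaining churn in vmx.h is purely stylistic: "__asm__ __volatile__" becomes the equivalent, shorter "asm volatile" spelling, with the constraints, the ud2-on-failure idiom and the "memory" clobbers untouched. A trivial runnable example of the same spelling and constraint style outside the VMX opcode macros (the lea instruction and the add_one() helper are invented for the demo; x86 only):

/* "asm volatile" with one output, one input and a memory clobber,
 * mirroring the style now used by the __vmread/__vmwrite helpers. */
#include <stdio.h>

static inline unsigned long add_one(unsigned long x)
{
    unsigned long out;
    asm volatile ( "lea 1(%1), %0"     /* out = x + 1 */
                   : "=r" (out)
                   : "r" (x)
                   : "memory" );
    return out;
}

int main(void)
{
    printf("%lu\n", add_one(41));
    return 0;
}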