aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorkaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>2005-10-07 23:17:24 +0100
committerkaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>2005-10-07 23:17:24 +0100
commit3bd6068aaeae76a6538b6875362547919b64efb8 (patch)
treeb5ff0bee20c1210fd0892a9965f23bdbfe999b94
parent5b013ee25684e0d3fb155123bd8ae33b8d465d60 (diff)
downloadxen-3bd6068aaeae76a6538b6875362547919b64efb8.tar.gz
xen-3bd6068aaeae76a6538b6875362547919b64efb8.tar.bz2
xen-3bd6068aaeae76a6538b6875362547919b64efb8.zip
Move mmio operation structure from domain to vcpu.
Also do some cleanup.

Signed-off-by: Xin Li <xin.b.li@intel.com>
Signed-off-by: Nakajima Jun <nakajima.jun@intel.com>
-rw-r--r--  xen/arch/x86/vmx.c                     |  14
-rw-r--r--  xen/arch/x86/vmx_io.c                  | 118
-rw-r--r--  xen/arch/x86/vmx_platform.c            | 118
-rw-r--r--  xen/include/asm-x86/vmx_platform.h     |   9
-rw-r--r--  xen/include/asm-x86/vmx_vmcs.h         |  11
5 files changed, 135 insertions(+), 135 deletions(-)
diff --git a/xen/arch/x86/vmx.c b/xen/arch/x86/vmx.c
index 2862558ded..0b81d09f3c 100644
--- a/xen/arch/x86/vmx.c
+++ b/xen/arch/x86/vmx.c
@@ -659,14 +659,14 @@ void send_pio_req(struct cpu_user_regs *regs, unsigned long port,
static void vmx_io_instruction(struct cpu_user_regs *regs,
unsigned long exit_qualification, unsigned long inst_len)
{
- struct mi_per_cpu_info *mpcip;
+ struct mmio_op *mmio_opp;
unsigned long eip, cs, eflags;
unsigned long port, size, dir;
int vm86;
- mpcip = &current->domain->arch.vmx_platform.mpci;
- mpcip->instr = INSTR_PIO;
- mpcip->flags = 0;
+ mmio_opp = &current->arch.arch_vmx.mmio_op;
+ mmio_opp->instr = INSTR_PIO;
+ mmio_opp->flags = 0;
__vmread(GUEST_RIP, &eip);
__vmread(GUEST_CS_SELECTOR, &cs);
@@ -700,7 +700,7 @@ static void vmx_io_instruction(struct cpu_user_regs *regs,
addr = dir == IOREQ_WRITE ? regs->esi : regs->edi;
if (test_bit(5, &exit_qualification)) { /* "rep" prefix */
- mpcip->flags |= REPZ;
+ mmio_opp->flags |= REPZ;
count = vm86 ? regs->ecx & 0xFFFF : regs->ecx;
}
@@ -711,7 +711,7 @@ static void vmx_io_instruction(struct cpu_user_regs *regs,
if ((addr & PAGE_MASK) != ((addr + size - 1) & PAGE_MASK)) {
unsigned long value = 0;
- mpcip->flags |= OVERLAP;
+ mmio_opp->flags |= OVERLAP;
if (dir == IOREQ_WRITE)
vmx_copy(&value, addr, size, VMX_COPY_IN);
send_pio_req(regs, port, 1, size, value, dir, 0);
@@ -1695,7 +1695,7 @@ asmlinkage void vmx_vmexit_handler(struct cpu_user_regs regs)
(unsigned long)regs.eax, (unsigned long)regs.ebx,
(unsigned long)regs.ecx, (unsigned long)regs.edx,
(unsigned long)regs.esi, (unsigned long)regs.edi);
- v->domain->arch.vmx_platform.mpci.inst_decoder_regs = &regs;
+ v->arch.arch_vmx.mmio_op.inst_decoder_regs = &regs;
if (!(error = vmx_do_page_fault(va, &regs))) {
/*
diff --git a/xen/arch/x86/vmx_io.c b/xen/arch/x86/vmx_io.c
index 4ad3ae8dc7..67cb7739f8 100644
--- a/xen/arch/x86/vmx_io.c
+++ b/xen/arch/x86/vmx_io.c
@@ -1,5 +1,5 @@
/*
- * vmx_io.c: handling I/O, interrupts related VMX entry/exit
+ * vmx_io.c: handling I/O, interrupts related VMX entry/exit
* Copyright (c) 2004, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
@@ -42,7 +42,7 @@
#ifdef CONFIG_VMX
#if defined (__i386__)
void load_cpu_user_regs(struct cpu_user_regs *regs)
-{
+{
/*
* Write the guest register value into VMCS
*/
@@ -52,7 +52,7 @@ void load_cpu_user_regs(struct cpu_user_regs *regs)
__vmwrite(GUEST_RFLAGS, regs->eflags);
if (regs->eflags & EF_TF)
__vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
- else
+ else
__vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
__vmwrite(GUEST_CS_SELECTOR, regs->cs);
@@ -189,7 +189,7 @@ void load_cpu_user_regs(struct cpu_user_regs *regs)
__vmwrite(GUEST_RFLAGS, regs->rflags);
if (regs->rflags & EF_TF)
__vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
- else
+ else
__vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
__vmwrite(GUEST_CS_SELECTOR, regs->cs);
@@ -265,52 +265,52 @@ static void set_reg_value (int size, int index, int seg, struct cpu_user_regs *r
}
switch (index) {
- case 0:
+ case 0:
__set_reg_value(&regs->rax, size, value);
break;
- case 1:
+ case 1:
__set_reg_value(&regs->rcx, size, value);
break;
- case 2:
+ case 2:
__set_reg_value(&regs->rdx, size, value);
break;
- case 3:
+ case 3:
__set_reg_value(&regs->rbx, size, value);
break;
- case 4:
+ case 4:
__set_reg_value(&regs->rsp, size, value);
break;
- case 5:
+ case 5:
__set_reg_value(&regs->rbp, size, value);
break;
- case 6:
+ case 6:
__set_reg_value(&regs->rsi, size, value);
break;
- case 7:
+ case 7:
__set_reg_value(&regs->rdi, size, value);
break;
- case 8:
+ case 8:
__set_reg_value(&regs->r8, size, value);
break;
- case 9:
+ case 9:
__set_reg_value(&regs->r9, size, value);
break;
- case 10:
+ case 10:
__set_reg_value(&regs->r10, size, value);
break;
- case 11:
+ case 11:
__set_reg_value(&regs->r11, size, value);
break;
- case 12:
+ case 12:
__set_reg_value(&regs->r12, size, value);
break;
- case 13:
+ case 13:
__set_reg_value(&regs->r13, size, value);
break;
- case 14:
+ case 14:
__set_reg_value(&regs->r14, size, value);
break;
- case 15:
+ case 15:
__set_reg_value(&regs->r15, size, value);
break;
default:
@@ -391,7 +391,7 @@ static inline void set_eflags_PF(int size, unsigned long v1,
}
static void vmx_pio_assist(struct cpu_user_regs *regs, ioreq_t *p,
- struct mi_per_cpu_info *mpcip)
+ struct mmio_op *mmio_opp)
{
unsigned long old_eax;
int sign = p->df ? -1 : 1;
@@ -399,15 +399,15 @@ static void vmx_pio_assist(struct cpu_user_regs *regs, ioreq_t *p,
if (p->dir == IOREQ_WRITE) {
if (p->pdata_valid) {
regs->esi += sign * p->count * p->size;
- if (mpcip->flags & REPZ)
+ if (mmio_opp->flags & REPZ)
regs->ecx -= p->count;
}
} else {
- if (mpcip->flags & OVERLAP) {
+ if (mmio_opp->flags & OVERLAP) {
unsigned long addr;
regs->edi += sign * p->count * p->size;
- if (mpcip->flags & REPZ)
+ if (mmio_opp->flags & REPZ)
regs->ecx -= p->count;
addr = regs->edi;
@@ -416,7 +416,7 @@ static void vmx_pio_assist(struct cpu_user_regs *regs, ioreq_t *p,
vmx_copy(&p->u.data, addr, p->size, VMX_COPY_OUT);
} else if (p->pdata_valid) {
regs->edi += sign * p->count * p->size;
- if (mpcip->flags & REPZ)
+ if (mmio_opp->flags & REPZ)
regs->ecx -= p->count;
} else {
old_eax = regs->eax;
@@ -439,18 +439,18 @@ static void vmx_pio_assist(struct cpu_user_regs *regs, ioreq_t *p,
}
static void vmx_mmio_assist(struct cpu_user_regs *regs, ioreq_t *p,
- struct mi_per_cpu_info *mpcip)
+ struct mmio_op *mmio_opp)
{
int sign = p->df ? -1 : 1;
int size = -1, index = -1;
unsigned long value = 0, diff = 0;
unsigned long src, dst;
- src = mpcip->operand[0];
- dst = mpcip->operand[1];
+ src = mmio_opp->operand[0];
+ dst = mmio_opp->operand[1];
size = operand_size(src);
- switch (mpcip->instr) {
+ switch (mmio_opp->instr) {
case INSTR_MOV:
if (dst & REGISTER) {
index = operand_index(dst);
@@ -475,7 +475,7 @@ static void vmx_mmio_assist(struct cpu_user_regs *regs, ioreq_t *p,
regs->esi += sign * p->count * p->size;
regs->edi += sign * p->count * p->size;
- if ((mpcip->flags & OVERLAP) && p->dir == IOREQ_READ) {
+ if ((mmio_opp->flags & OVERLAP) && p->dir == IOREQ_READ) {
unsigned long addr = regs->edi;
if (sign > 0)
@@ -483,14 +483,14 @@ static void vmx_mmio_assist(struct cpu_user_regs *regs, ioreq_t *p,
vmx_copy(&p->u.data, addr, p->size, VMX_COPY_OUT);
}
- if (mpcip->flags & REPZ)
+ if (mmio_opp->flags & REPZ)
regs->ecx -= p->count;
break;
case INSTR_STOS:
sign = p->df ? -1 : 1;
regs->edi += sign * p->count * p->size;
- if (mpcip->flags & REPZ)
+ if (mmio_opp->flags & REPZ)
regs->ecx -= p->count;
break;
@@ -500,7 +500,7 @@ static void vmx_mmio_assist(struct cpu_user_regs *regs, ioreq_t *p,
value = get_reg_value(size, index, 0, regs);
diff = (unsigned long) p->u.data & value;
} else if (src & IMMEDIATE) {
- value = mpcip->immediate;
+ value = mmio_opp->immediate;
diff = (unsigned long) p->u.data & value;
} else if (src & MEMORY) {
index = operand_index(dst);
@@ -527,7 +527,7 @@ static void vmx_mmio_assist(struct cpu_user_regs *regs, ioreq_t *p,
value = get_reg_value(size, index, 0, regs);
diff = (unsigned long) p->u.data | value;
} else if (src & IMMEDIATE) {
- value = mpcip->immediate;
+ value = mmio_opp->immediate;
diff = (unsigned long) p->u.data | value;
} else if (src & MEMORY) {
index = operand_index(dst);
@@ -554,7 +554,7 @@ static void vmx_mmio_assist(struct cpu_user_regs *regs, ioreq_t *p,
value = get_reg_value(size, index, 0, regs);
diff = (unsigned long) p->u.data ^ value;
} else if (src & IMMEDIATE) {
- value = mpcip->immediate;
+ value = mmio_opp->immediate;
diff = (unsigned long) p->u.data ^ value;
} else if (src & MEMORY) {
index = operand_index(dst);
@@ -581,7 +581,7 @@ static void vmx_mmio_assist(struct cpu_user_regs *regs, ioreq_t *p,
value = get_reg_value(size, index, 0, regs);
diff = (unsigned long) p->u.data - value;
} else if (src & IMMEDIATE) {
- value = mpcip->immediate;
+ value = mmio_opp->immediate;
diff = (unsigned long) p->u.data - value;
} else if (src & MEMORY) {
index = operand_index(dst);
@@ -608,7 +608,7 @@ static void vmx_mmio_assist(struct cpu_user_regs *regs, ioreq_t *p,
index = operand_index(src);
value = get_reg_value(size, index, 0, regs);
} else if (src & IMMEDIATE) {
- value = mpcip->immediate;
+ value = mmio_opp->immediate;
} else if (src & MEMORY) {
index = operand_index(dst);
value = get_reg_value(size, index, 0, regs);
@@ -629,21 +629,21 @@ static void vmx_mmio_assist(struct cpu_user_regs *regs, ioreq_t *p,
load_cpu_user_regs(regs);
}
-void vmx_io_assist(struct vcpu *v)
+void vmx_io_assist(struct vcpu *v)
{
vcpu_iodata_t *vio;
ioreq_t *p;
struct cpu_user_regs *regs = guest_cpu_user_regs();
- struct mi_per_cpu_info *mpci_p;
+ struct mmio_op *mmio_opp;
struct cpu_user_regs *inst_decoder_regs;
- mpci_p = &v->domain->arch.vmx_platform.mpci;
- inst_decoder_regs = mpci_p->inst_decoder_regs;
+ mmio_opp = &v->arch.arch_vmx.mmio_op;
+ inst_decoder_regs = mmio_opp->inst_decoder_regs;
vio = get_vio(v->domain, v->vcpu_id);
if (vio == 0) {
- VMX_DBG_LOG(DBG_LEVEL_1,
+ VMX_DBG_LOG(DBG_LEVEL_1,
"bad shared page: %lx", (unsigned long) vio);
printf("bad shared page: %lx\n", (unsigned long) vio);
domain_crash_synchronous();
@@ -660,15 +660,15 @@ void vmx_io_assist(struct vcpu *v)
clear_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
if (p->type == IOREQ_TYPE_PIO)
- vmx_pio_assist(regs, p, mpci_p);
+ vmx_pio_assist(regs, p, mmio_opp);
else
- vmx_mmio_assist(regs, p, mpci_p);
+ vmx_mmio_assist(regs, p, mmio_opp);
}
/* else an interrupt send event raced us */
}
}
-int vmx_clear_pending_io_event(struct vcpu *v)
+int vmx_clear_pending_io_event(struct vcpu *v)
{
struct domain *d = v->domain;
int port = iopacket_port(d);
@@ -678,7 +678,7 @@ int vmx_clear_pending_io_event(struct vcpu *v)
clear_bit(port>>5, &v->vcpu_info->evtchn_pending_sel);
/* Note: VMX domains may need upcalls as well */
- if (!v->vcpu_info->evtchn_pending_sel)
+ if (!v->vcpu_info->evtchn_pending_sel)
clear_bit(0, &v->vcpu_info->evtchn_upcall_pending);
/* clear the pending bit for port */
@@ -688,18 +688,18 @@ int vmx_clear_pending_io_event(struct vcpu *v)
/* Because we've cleared the pending events first, we need to guarantee that
* all events to be handled by xen for VMX domains are taken care of here.
*
- * interrupts are guaranteed to be checked before resuming guest.
- * VMX upcalls have been already arranged for if necessary.
+ * interrupts are guaranteed to be checked before resuming guest.
+ * VMX upcalls have been already arranged for if necessary.
*/
-void vmx_check_events(struct vcpu *d)
+void vmx_check_events(struct vcpu *v)
{
- /* clear the event *before* checking for work. This should avoid
+ /* clear the event *before* checking for work. This should avoid
the set-and-check races */
if (vmx_clear_pending_io_event(current))
- vmx_io_assist(d);
+ vmx_io_assist(v);
}
-/* On exit from vmx_wait_io, we're guaranteed to have a I/O response from
+/* On exit from vmx_wait_io, we're guaranteed to have a I/O response from
the device model */
void vmx_wait_io()
{
@@ -782,7 +782,7 @@ static __inline__ int find_highest_irq(u32 *pintr)
return __fls(pintr[0]);
}
-#define BSP_CPU(d) (!(d->vcpu_id))
+#define BSP_CPU(v) (!(v->vcpu_id))
static inline void clear_extint(struct vcpu *v)
{
global_iodata_t *spg;
@@ -883,7 +883,7 @@ static inline int irq_masked(unsigned long eflags)
return ((eflags & X86_EFLAGS_IF) == 0);
}
-asmlinkage void vmx_intr_assist(void)
+asmlinkage void vmx_intr_assist(void)
{
int intr_type = 0;
int highest_vector;
@@ -945,19 +945,19 @@ asmlinkage void vmx_intr_assist(void)
return;
}
-void vmx_do_resume(struct vcpu *d)
+void vmx_do_resume(struct vcpu *v)
{
vmx_stts();
- if (event_pending(d)) {
- vmx_check_events(d);
+ if (event_pending(v)) {
+ vmx_check_events(v);
- if (test_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags))
+ if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags))
vmx_wait_io();
}
/* We can't resume the guest if we're waiting on I/O */
- ASSERT(!test_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags));
+ ASSERT(!test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags));
}
#endif /* CONFIG_VMX */
diff --git a/xen/arch/x86/vmx_platform.c b/xen/arch/x86/vmx_platform.c
index 3e805cac8c..cef786b14a 100644
--- a/xen/arch/x86/vmx_platform.c
+++ b/xen/arch/x86/vmx_platform.c
@@ -22,8 +22,8 @@
#include <xen/mm.h>
#include <asm/shadow.h>
#include <xen/domain_page.h>
-#include <asm/page.h>
-#include <xen/event.h>
+#include <asm/page.h>
+#include <xen/event.h>
#include <xen/trace.h>
#include <asm/vmx.h>
#include <asm/vmx_platform.h>
@@ -69,16 +69,16 @@ static inline long __get_reg_value(unsigned long reg, int size)
}
}
-long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs)
+long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs)
{
if (size == BYTE) {
- switch (index) {
+ switch (index) {
case 0: /* %al */
return (char)(regs->rax & 0xFF);
case 1: /* %cl */
return (char)(regs->rcx & 0xFF);
case 2: /* %dl */
- return (char)(regs->rdx & 0xFF);
+ return (char)(regs->rdx & 0xFF);
case 3: /* %bl */
return (char)(regs->rbx & 0xFF);
case 4: /* %ah */
@@ -90,7 +90,7 @@ long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs)
case 7: /* %bh */
return (char)((regs->rbx & 0xFF00) >> 8);
default:
- printf("Error: (get_reg_value) Invalid index value\n");
+ printf("Error: (get_reg_value) Invalid index value\n");
domain_crash_synchronous();
}
/* NOTREACHED */
@@ -114,7 +114,7 @@ long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs)
case 14: return __get_reg_value(regs->r14, size);
case 15: return __get_reg_value(regs->r15, size);
default:
- printf("Error: (get_reg_value) Invalid index value\n");
+ printf("Error: (get_reg_value) Invalid index value\n");
domain_crash_synchronous();
}
}
@@ -131,7 +131,7 @@ void store_cpu_user_regs(struct cpu_user_regs *regs)
}
static inline long __get_reg_value(unsigned long reg, int size)
-{
+{
switch(size) {
case WORD:
return (short)(reg & 0xFFFF);
@@ -144,15 +144,15 @@ static inline long __get_reg_value(unsigned long reg, int size)
}
long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs)
-{
+{
if (size == BYTE) {
- switch (index) {
+ switch (index) {
case 0: /* %al */
return (char)(regs->eax & 0xFF);
case 1: /* %cl */
return (char)(regs->ecx & 0xFF);
case 2: /* %dl */
- return (char)(regs->edx & 0xFF);
+ return (char)(regs->edx & 0xFF);
case 3: /* %bl */
return (char)(regs->ebx & 0xFF);
case 4: /* %ah */
@@ -164,7 +164,7 @@ long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs)
case 7: /* %bh */
return (char)((regs->ebx & 0xFF00) >> 8);
default:
- printf("Error: (get_reg_value) Invalid index value\n");
+ printf("Error: (get_reg_value) Invalid index value\n");
domain_crash_synchronous();
}
}
@@ -179,7 +179,7 @@ long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs)
case 6: return __get_reg_value(regs->esi, size);
case 7: return __get_reg_value(regs->edi, size);
default:
- printf("Error: (get_reg_value) Invalid index value\n");
+ printf("Error: (get_reg_value) Invalid index value\n");
domain_crash_synchronous();
}
}
@@ -283,9 +283,9 @@ static inline int get_index(const unsigned char *inst, unsigned char rex)
//Only one operand in the instruction is register
if (mod == 3) {
- return (rm + (rex_b << 3));
+ return (rm + (rex_b << 3));
} else {
- return (reg + (rex_r << 3));
+ return (reg + (rex_r << 3));
}
return 0;
}
@@ -299,7 +299,7 @@ static void init_instruction(struct instruction *mmio_inst)
mmio_inst->operand[0] = 0;
mmio_inst->operand[1] = 0;
-
+
mmio_inst->flags = 0;
}
@@ -498,12 +498,12 @@ static int vmx_decode(unsigned char *opcode, struct instruction *instr)
instr->instr = INSTR_MOVS;
instr->op_size = BYTE;
return DECODE_success;
-
+
case 0xA5: /* movsw/movsl */
instr->instr = INSTR_MOVS;
GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
return DECODE_success;
-
+
case 0xAA: /* stosb */
instr->instr = INSTR_STOS;
instr->op_size = BYTE;
@@ -513,7 +513,7 @@ static int vmx_decode(unsigned char *opcode, struct instruction *instr)
instr->instr = INSTR_STOS;
GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
return DECODE_success;
-
+
case 0xC6:
if (((opcode[1] >> 3) & 7) == 0) { /* mov $imm8, m8 */
instr->instr = INSTR_MOV;
@@ -522,11 +522,11 @@ static int vmx_decode(unsigned char *opcode, struct instruction *instr)
instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE);
instr->immediate = get_immediate(vm86, opcode+1, instr->op_size);
instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);
-
+
return DECODE_success;
} else
return DECODE_failure;
-
+
case 0xC7:
if (((opcode[1] >> 3) & 7) == 0) { /* mov $imm16/32, m16/32 */
instr->instr = INSTR_MOV;
@@ -535,7 +535,7 @@ static int vmx_decode(unsigned char *opcode, struct instruction *instr)
instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE);
instr->immediate = get_immediate(vm86, opcode+1, instr->op_size);
instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);
-
+
return DECODE_success;
} else
return DECODE_failure;
@@ -598,34 +598,34 @@ int inst_copy_from_guest(unsigned char *buf, unsigned long guest_eip, int inst_l
return inst_len;
}
-void send_mmio_req(unsigned char type, unsigned long gpa,
+void send_mmio_req(unsigned char type, unsigned long gpa,
unsigned long count, int size, long value, int dir, int pvalid)
{
- struct vcpu *d = current;
+ struct vcpu *v = current;
vcpu_iodata_t *vio;
ioreq_t *p;
int vm86;
struct cpu_user_regs *regs;
extern long evtchn_send(int lport);
- regs = current->domain->arch.vmx_platform.mpci.inst_decoder_regs;
+ regs = current->arch.arch_vmx.mmio_op.inst_decoder_regs;
- vio = get_vio(d->domain, d->vcpu_id);
+ vio = get_vio(v->domain, v->vcpu_id);
if (vio == NULL) {
printf("bad shared page\n");
- domain_crash_synchronous();
+ domain_crash_synchronous();
}
p = &vio->vp_ioreq;
vm86 = regs->eflags & X86_EFLAGS_VM;
- if (test_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags)) {
+ if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags)) {
printf("VMX I/O has not yet completed\n");
domain_crash_synchronous();
}
- set_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags);
+ set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
p->dir = dir;
p->pdata_valid = pvalid;
@@ -647,27 +647,27 @@ void send_mmio_req(unsigned char type, unsigned long gpa,
if (vmx_mmio_intercept(p)){
p->state = STATE_IORESP_READY;
- vmx_io_assist(d);
+ vmx_io_assist(v);
return;
}
- evtchn_send(iopacket_port(d->domain));
+ evtchn_send(iopacket_port(v->domain));
vmx_wait_io();
}
static void mmio_operands(int type, unsigned long gpa, struct instruction *inst,
- struct mi_per_cpu_info *mpcip, struct cpu_user_regs *regs)
+ struct mmio_op *mmio_opp, struct cpu_user_regs *regs)
{
unsigned long value = 0;
int index, size;
-
+
size = operand_size(inst->operand[0]);
- mpcip->flags = inst->flags;
- mpcip->instr = inst->instr;
- mpcip->operand[0] = inst->operand[0]; /* source */
- mpcip->operand[1] = inst->operand[1]; /* destination */
- mpcip->immediate = inst->immediate;
+ mmio_opp->flags = inst->flags;
+ mmio_opp->instr = inst->instr;
+ mmio_opp->operand[0] = inst->operand[0]; /* source */
+ mmio_opp->operand[1] = inst->operand[1]; /* destination */
+ mmio_opp->immediate = inst->immediate;
if (inst->operand[0] & REGISTER) { /* dest is memory */
index = operand_index(inst->operand[0]);
@@ -687,19 +687,19 @@ static void mmio_operands(int type, unsigned long gpa, struct instruction *inst,
#define GET_REPEAT_COUNT() \
(mmio_inst.flags & REPZ ? (vm86 ? regs->ecx & 0xFFFF : regs->ecx) : 1)
-
+
void handle_mmio(unsigned long va, unsigned long gpa)
{
unsigned long eip, eflags, cs;
unsigned long inst_len, inst_addr;
- struct mi_per_cpu_info *mpcip;
+ struct mmio_op *mmio_opp;
struct cpu_user_regs *regs;
struct instruction mmio_inst;
unsigned char inst[MAX_INST_LEN];
int i, vm86, ret;
-
- mpcip = &current->domain->arch.vmx_platform.mpci;
- regs = mpcip->inst_decoder_regs;
+
+ mmio_opp = &current->arch.arch_vmx.mmio_op;
+ regs = mmio_opp->inst_decoder_regs;
__vmread(GUEST_RIP, &eip);
__vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len);
@@ -720,7 +720,7 @@ void handle_mmio(unsigned long va, unsigned long gpa)
}
init_instruction(&mmio_inst);
-
+
if (vmx_decode(inst, &mmio_inst) == DECODE_failure) {
printf("mmio opcode: va 0x%lx, gpa 0x%lx, len %ld:",
va, gpa, inst_len);
@@ -735,7 +735,7 @@ void handle_mmio(unsigned long va, unsigned long gpa)
switch (mmio_inst.instr) {
case INSTR_MOV:
- mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs);
+ mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mmio_opp, regs);
break;
case INSTR_MOVS:
@@ -769,8 +769,8 @@ void handle_mmio(unsigned long va, unsigned long gpa)
}
}
- mpcip->flags = mmio_inst.flags;
- mpcip->instr = mmio_inst.instr;
+ mmio_opp->flags = mmio_inst.flags;
+ mmio_opp->instr = mmio_inst.instr;
/*
* In case of a movs spanning multiple pages, we break the accesses
@@ -785,7 +785,7 @@ void handle_mmio(unsigned long va, unsigned long gpa)
if ((addr & PAGE_MASK) != ((addr + size - 1) & PAGE_MASK)) {
unsigned long value = 0;
- mpcip->flags |= OVERLAP;
+ mmio_opp->flags |= OVERLAP;
regs->eip -= inst_len; /* do not advance %eip */
@@ -808,7 +808,7 @@ void handle_mmio(unsigned long va, unsigned long gpa)
}
case INSTR_MOVZ:
- mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs);
+ mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mmio_opp, regs);
break;
case INSTR_STOS:
@@ -816,31 +816,31 @@ void handle_mmio(unsigned long va, unsigned long gpa)
* Since the destination is always in (contiguous) mmio space we don't
* need to break it up into pages.
*/
- mpcip->flags = mmio_inst.flags;
- mpcip->instr = mmio_inst.instr;
+ mmio_opp->flags = mmio_inst.flags;
+ mmio_opp->instr = mmio_inst.instr;
send_mmio_req(IOREQ_TYPE_COPY, gpa,
GET_REPEAT_COUNT(), mmio_inst.op_size, regs->eax, IOREQ_WRITE, 0);
break;
case INSTR_OR:
- mmio_operands(IOREQ_TYPE_OR, gpa, &mmio_inst, mpcip, regs);
+ mmio_operands(IOREQ_TYPE_OR, gpa, &mmio_inst, mmio_opp, regs);
break;
case INSTR_AND:
- mmio_operands(IOREQ_TYPE_AND, gpa, &mmio_inst, mpcip, regs);
+ mmio_operands(IOREQ_TYPE_AND, gpa, &mmio_inst, mmio_opp, regs);
break;
case INSTR_XOR:
- mmio_operands(IOREQ_TYPE_XOR, gpa, &mmio_inst, mpcip, regs);
+ mmio_operands(IOREQ_TYPE_XOR, gpa, &mmio_inst, mmio_opp, regs);
break;
case INSTR_CMP: /* Pass through */
case INSTR_TEST:
- mpcip->flags = mmio_inst.flags;
- mpcip->instr = mmio_inst.instr;
- mpcip->operand[0] = mmio_inst.operand[0]; /* source */
- mpcip->operand[1] = mmio_inst.operand[1]; /* destination */
- mpcip->immediate = mmio_inst.immediate;
+ mmio_opp->flags = mmio_inst.flags;
+ mmio_opp->instr = mmio_inst.instr;
+ mmio_opp->operand[0] = mmio_inst.operand[0]; /* source */
+ mmio_opp->operand[1] = mmio_inst.operand[1]; /* destination */
+ mmio_opp->immediate = mmio_inst.immediate;
/* send the request and wait for the value */
send_mmio_req(IOREQ_TYPE_COPY, gpa, 1, mmio_inst.op_size, 0, IOREQ_READ, 0);
diff --git a/xen/include/asm-x86/vmx_platform.h b/xen/include/asm-x86/vmx_platform.h
index a457be5ccf..a7a0e80641 100644
--- a/xen/include/asm-x86/vmx_platform.h
+++ b/xen/include/asm-x86/vmx_platform.h
@@ -75,20 +75,11 @@ struct instruction {
#define MAX_INST_LEN 32
-struct mi_per_cpu_info {
- int flags;
- int instr; /* instruction */
- unsigned long operand[2]; /* operands */
- unsigned long immediate; /* immediate portion */
- struct cpu_user_regs *inst_decoder_regs; /* current context */
-};
-
struct virtual_platform_def {
unsigned long *real_mode_data; /* E820, etc. */
unsigned long shared_page_va;
struct vmx_virpit_t vmx_pit;
struct vmx_handler_t vmx_handler;
- struct mi_per_cpu_info mpci; /* MMIO */
};
extern void handle_mmio(unsigned long, unsigned long);
diff --git a/xen/include/asm-x86/vmx_vmcs.h b/xen/include/asm-x86/vmx_vmcs.h
index ae0c31639d..d0240f1a5b 100644
--- a/xen/include/asm-x86/vmx_vmcs.h
+++ b/xen/include/asm-x86/vmx_vmcs.h
@@ -71,6 +71,14 @@ struct msr_state{
unsigned long shadow_gs;
};
+struct mmio_op {
+ int flags;
+ int instr; /* instruction */
+ unsigned long operand[2]; /* operands */
+ unsigned long immediate; /* immediate portion */
+ struct cpu_user_regs *inst_decoder_regs; /* current context */
+};
+
#define PC_DEBUG_PORT 0x80
struct arch_vmx_struct {
@@ -83,7 +91,8 @@ struct arch_vmx_struct {
unsigned long cpu_state;
unsigned long cpu_based_exec_control;
struct msr_state msr_content;
- void *io_bitmap_a, *io_bitmap_b;
+ struct mmio_op mmio_op; /* MMIO */
+ void *io_bitmap_a, *io_bitmap_b;
};
#define vmx_schedule_tail(next) \