-rw-r--r--  tools/ioemu/target-i386-dm/helper2.c   88
-rw-r--r--  tools/ioemu/vl.c                         8
-rw-r--r--  tools/libxc/xc_hvm_build.c              31
-rw-r--r--  xen/arch/x86/hvm/hvm.c                  29
-rw-r--r--  xen/arch/x86/hvm/intercept.c            66
-rw-r--r--  xen/arch/x86/hvm/platform.c              2
-rw-r--r--  xen/arch/x86/hvm/svm/svm.c               3
-rw-r--r--  xen/arch/x86/hvm/vmx/vmx.c               3
-rw-r--r--  xen/include/asm-x86/e820.h               1
-rw-r--r--  xen/include/asm-x86/hvm/domain.h         2
-rw-r--r--  xen/include/asm-x86/hvm/hvm.h            4
-rw-r--r--  xen/include/asm-x86/hvm/support.h        1
-rw-r--r--  xen/include/public/hvm/ioreq.h           8
13 files changed, 213 insertions, 33 deletions
diff --git a/tools/ioemu/target-i386-dm/helper2.c b/tools/ioemu/target-i386-dm/helper2.c
index 4f1a33c7e7..83fc5e8f00 100644
--- a/tools/ioemu/target-i386-dm/helper2.c
+++ b/tools/ioemu/target-i386-dm/helper2.c
@@ -76,6 +76,10 @@ int xc_handle;
shared_iopage_t *shared_page = NULL;
+#define BUFFER_IO_MAX_DELAY 100
+buffered_iopage_t *buffered_io_page = NULL;
+QEMUTimer *buffered_io_timer;
+
/* the evtchn fd for polling */
int xce_handle = -1;
@@ -419,36 +423,68 @@ void cpu_ioreq_xor(CPUState *env, ioreq_t *req)
req->u.data = tmp1;
}
+void __handle_ioreq(CPUState *env, ioreq_t *req)
+{
+ if (!req->pdata_valid && req->dir == IOREQ_WRITE && req->size != 4)
+ req->u.data &= (1UL << (8 * req->size)) - 1;
+
+ switch (req->type) {
+ case IOREQ_TYPE_PIO:
+ cpu_ioreq_pio(env, req);
+ break;
+ case IOREQ_TYPE_COPY:
+ cpu_ioreq_move(env, req);
+ break;
+ case IOREQ_TYPE_AND:
+ cpu_ioreq_and(env, req);
+ break;
+ case IOREQ_TYPE_OR:
+ cpu_ioreq_or(env, req);
+ break;
+ case IOREQ_TYPE_XOR:
+ cpu_ioreq_xor(env, req);
+ break;
+ default:
+ hw_error("Invalid ioreq type 0x%x\n", req->type);
+ }
+}
+
+void __handle_buffered_iopage(CPUState *env)
+{
+ ioreq_t *req = NULL;
+
+ if (!buffered_io_page)
+ return;
+
+ while (buffered_io_page->read_pointer !=
+ buffered_io_page->write_pointer) {
+ req = &buffered_io_page->ioreq[buffered_io_page->read_pointer %
+ IOREQ_BUFFER_SLOT_NUM];
+
+ __handle_ioreq(env, req);
+
+ mb();
+ buffered_io_page->read_pointer++;
+ }
+}
+
+void handle_buffered_io(void *opaque)
+{
+ CPUState *env = opaque;
+
+ __handle_buffered_iopage(env);
+ qemu_mod_timer(buffered_io_timer, BUFFER_IO_MAX_DELAY +
+ qemu_get_clock(rt_clock));
+}
+
void cpu_handle_ioreq(void *opaque)
{
CPUState *env = opaque;
ioreq_t *req = cpu_get_ioreq();
+ handle_buffered_io(env);
if (req) {
- if ((!req->pdata_valid) && (req->dir == IOREQ_WRITE)) {
- if (req->size != 4)
- req->u.data &= (1UL << (8 * req->size))-1;
- }
-
- switch (req->type) {
- case IOREQ_TYPE_PIO:
- cpu_ioreq_pio(env, req);
- break;
- case IOREQ_TYPE_COPY:
- cpu_ioreq_move(env, req);
- break;
- case IOREQ_TYPE_AND:
- cpu_ioreq_and(env, req);
- break;
- case IOREQ_TYPE_OR:
- cpu_ioreq_or(env, req);
- break;
- case IOREQ_TYPE_XOR:
- cpu_ioreq_xor(env, req);
- break;
- default:
- hw_error("Invalid ioreq type 0x%x\n", req->type);
- }
+ __handle_ioreq(env, req);
/* No state change if state = STATE_IORESP_HOOK */
if (req->state == STATE_IOREQ_INPROCESS) {
@@ -466,6 +502,10 @@ int main_loop(void)
CPUState *env = cpu_single_env;
int evtchn_fd = xc_evtchn_fd(xce_handle);
+ buffered_io_timer = qemu_new_timer(rt_clock, handle_buffered_io,
+ cpu_single_env);
+ qemu_mod_timer(buffered_io_timer, qemu_get_clock(rt_clock));
+
qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, env);
env->send_event = 0;
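Aside: the __handle_buffered_iopage loop above is the consumer half of a single-producer/single-consumer ring. Both pointers only ever grow, the slot index is taken modulo IOREQ_BUFFER_SLOT_NUM, and the barrier retires a slot only after its request has been handled. A minimal standalone sketch of the same pattern in plain C (the names ring/drain/handle are illustrative, and the GCC builtin __sync_synchronize() stands in for Xen's mb()):

    #define SLOTS 80  /* IOREQ_BUFFER_SLOT_NUM */

    struct ring {
        volatile unsigned long read_pointer;   /* advanced only by the consumer */
        volatile unsigned long write_pointer;  /* advanced only by the producer */
        int slot[SLOTS];
    };

    static void drain(struct ring *r, void (*handle)(int))
    {
        while (r->read_pointer != r->write_pointer) {
            handle(r->slot[r->read_pointer % SLOTS]);
            __sync_synchronize();   /* finish the slot before releasing it */
            r->read_pointer++;
        }
    }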
diff --git a/tools/ioemu/vl.c b/tools/ioemu/vl.c
index c5bd964edb..423c385e3a 100644
--- a/tools/ioemu/vl.c
+++ b/tools/ioemu/vl.c
@@ -5834,6 +5834,7 @@ int main(int argc, char **argv)
unsigned long nr_pages;
xen_pfn_t *page_array;
extern void *shared_page;
+ extern void *buffered_io_page;
char qemu_dm_logfilename[64];
@@ -6378,12 +6379,17 @@ int main(int argc, char **argv)
phys_ram_base = xc_map_foreign_batch(xc_handle, domid,
PROT_READ|PROT_WRITE, page_array,
- nr_pages - 1);
+ nr_pages - 3);
if (phys_ram_base == 0) {
fprintf(logfile, "xc_map_foreign_batch returned error %d\n", errno);
exit(-1);
}
+ /* not yet added for IA64 */
+ buffered_io_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
+ PROT_READ|PROT_WRITE,
+ page_array[nr_pages - 3]);
+
shared_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
PROT_READ|PROT_WRITE,
page_array[nr_pages - 1]);
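Why nr_pages - 3: the top three guest frames are special pages, and only the RAM below them becomes phys_ram_base. The resulting layout, summarized as a reading aid (derived from this hunk and the xc_hvm_build.c hunks below, not code from the patch):

    /* Top of guest memory after this patch:
     *   page_array[nr_pages - 1]  shared_page       (synchronous ioreq slots)
     *   page_array[nr_pages - 2]  xenstore page     (mapped elsewhere)
     *   page_array[nr_pages - 3]  buffered_io_page  (buffered_iopage_t ring)
     */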
diff --git a/tools/libxc/xc_hvm_build.c b/tools/libxc/xc_hvm_build.c
index 19138ff968..b99494e0dc 100644
--- a/tools/libxc/xc_hvm_build.c
+++ b/tools/libxc/xc_hvm_build.c
@@ -26,6 +26,7 @@
#define E820_IO 16
#define E820_SHARED_PAGE 17
#define E820_XENSTORE 18
+#define E820_BUFFERED_IO 19
#define E820_MAP_PAGE 0x00090000
#define E820_MAP_NR_OFFSET 0x000001E8
@@ -96,7 +97,13 @@ static void build_e820map(void *e820_page, unsigned long long mem_size)
e820entry[nr_map].type = E820_RESERVED;
nr_map++;
-#define STATIC_PAGES 2 /* for ioreq_t and store_mfn */
+#define STATIC_PAGES 3
+ /* 3 static pages:
+ * - ioreq buffer.
+ * - xenstore.
+ * - shared_page.
+ */
+
/* Most of the ram goes here */
e820entry[nr_map].addr = 0x100000;
e820entry[nr_map].size = mem_size - 0x100000 - STATIC_PAGES * PAGE_SIZE;
@@ -105,6 +112,12 @@ static void build_e820map(void *e820_page, unsigned long long mem_size)
/* Statically allocated special pages */
+ /* For buffered IO requests */
+ e820entry[nr_map].addr = mem_size - 3 * PAGE_SIZE;
+ e820entry[nr_map].size = PAGE_SIZE;
+ e820entry[nr_map].type = E820_BUFFERED_IO;
+ nr_map++;
+
/* For xenstore */
e820entry[nr_map].addr = mem_size - 2 * PAGE_SIZE;
e820entry[nr_map].size = PAGE_SIZE;
@@ -213,6 +226,9 @@ static int setup_guest(int xc_handle,
unsigned long shared_page_frame = 0;
shared_iopage_t *sp;
+ unsigned long ioreq_buffer_frame = 0;
+ void *ioreq_buffer_page;
+
memset(&dsi, 0, sizeof(struct domain_setup_info));
if ( (parseelfimage(image, image_size, &dsi)) != 0 )
@@ -303,6 +319,19 @@ static int setup_guest(int xc_handle,
memset(sp, 0, PAGE_SIZE);
munmap(sp, PAGE_SIZE);
+ /* clear the buffered IO request page */
+ ioreq_buffer_frame = page_array[(v_end >> PAGE_SHIFT) - 3];
+ ioreq_buffer_page = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
+ PROT_READ | PROT_WRITE,
+ ioreq_buffer_frame);
+
+ if ( ioreq_buffer_page == NULL )
+ goto error_out;
+
+ memset(ioreq_buffer_page, 0, PAGE_SIZE);
+
+ munmap(ioreq_buffer_page, PAGE_SIZE);
+
xc_set_hvm_param(xc_handle, dom, HVM_PARAM_STORE_PFN, (v_end >> PAGE_SHIFT) - 2);
xc_set_hvm_param(xc_handle, dom, HVM_PARAM_STORE_EVTCHN, store_evtchn);
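The new E820_BUFFERED_IO entry is how the hypervisor re-discovers this page later (see the e820_map_buffered_io_callback hunk in hvm.c below). A sketch of such a lookup in plain C, assuming the packed addr/size/type layout from xen/include/asm-x86/e820.h; find_buffered_io_gpfn is an illustrative name, not a Xen function:

    #include <stdint.h>

    #define E820_BUFFERED_IO 19
    #define PAGE_SHIFT       12

    struct e820entry {
        uint64_t addr;
        uint64_t size;
        uint32_t type;
    } __attribute__((packed));

    /* Return the guest frame number of the buffered-IO page, or ~0UL
     * if the map has no E820_BUFFERED_IO entry. */
    static unsigned long
    find_buffered_io_gpfn(const struct e820entry *map, int nr_entries)
    {
        for (int i = 0; i < nr_entries; i++)
            if (map[i].type == E820_BUFFERED_IO)
                return (unsigned long)(map[i].addr >> PAGE_SHIFT);
        return ~0UL;
    }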
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 72b261e34a..62b328de54 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -134,15 +134,28 @@ static void e820_map_io_shared_callback(struct domain *d,
}
}
-void hvm_map_io_shared_page(struct vcpu *v)
+static void e820_map_buffered_io_callback(struct domain *d,
+ struct e820entry *e,
+ void *data)
{
- unsigned long mfn = INVALID_MFN;
+ unsigned long *mfn = data;
+ if ( e->type == E820_BUFFERED_IO ) {
+ ASSERT(*mfn == INVALID_MFN);
+ *mfn = gmfn_to_mfn(d, e->addr >> PAGE_SHIFT);
+ }
+}
+
+void hvm_map_io_shared_pages(struct vcpu *v)
+{
+ unsigned long mfn;
void *p;
struct domain *d = v->domain;
- if ( d->arch.hvm_domain.shared_page_va )
+ if ( d->arch.hvm_domain.shared_page_va ||
+ d->arch.hvm_domain.buffered_io_va )
return;
+ mfn = INVALID_MFN;
e820_foreach(d, e820_map_io_shared_callback, &mfn);
if ( mfn == INVALID_MFN )
@@ -159,6 +172,14 @@ void hvm_map_io_shared_page(struct vcpu *v)
}
d->arch.hvm_domain.shared_page_va = (unsigned long)p;
+
+ mfn = INVALID_MFN;
+ e820_foreach(d, e820_map_buffered_io_callback, &mfn);
+ if ( mfn != INVALID_MFN ) {
+ p = map_domain_page_global(mfn);
+ if ( p )
+ d->arch.hvm_domain.buffered_io_va = (unsigned long)p;
+ }
}
void hvm_create_event_channels(struct vcpu *v)
@@ -210,6 +231,8 @@ void hvm_setup_platform(struct domain* d)
hvm_vioapic_init(d);
}
+ spin_lock_init(&d->arch.hvm_domain.buffered_io_lock);
+
init_timer(&platform->pl_time.periodic_tm.timer,
pt_timer_fn, v, v->processor);
pit_init(v, cpu_khz);
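e820_foreach itself is not part of this patch; only its call sites appear above. A hypothetical sketch of its shape, inferred from those calls (domain_e820_map and domain_e820_entries are invented accessors standing in for however Xen actually reaches the guest's e820 table, which lives at E820_MAP_PAGE in guest memory):

    struct domain;                          /* opaque for this sketch */

    struct e820entry {                      /* layout as in asm-x86/e820.h */
        unsigned long long addr, size;
        unsigned int type;
    } __attribute__((packed));

    /* Hypothetical accessors; not real Xen APIs. */
    extern struct e820entry *domain_e820_map(struct domain *d);
    extern int domain_e820_entries(struct domain *d);

    static void e820_foreach(struct domain *d,
                             void (*cb)(struct domain *, struct e820entry *,
                                        void *),
                             void *data)
    {
        struct e820entry *map = domain_e820_map(d);
        int i, nr = domain_e820_entries(d);

        for (i = 0; i < nr; i++)
            cb(d, &map[i], data);
    }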
diff --git a/xen/arch/x86/hvm/intercept.c b/xen/arch/x86/hvm/intercept.c
index 8bc06ada8d..5c19609245 100644
--- a/xen/arch/x86/hvm/intercept.c
+++ b/xen/arch/x86/hvm/intercept.c
@@ -36,12 +36,26 @@ extern struct hvm_mmio_handler vioapic_mmio_handler;
#define HVM_MMIO_HANDLER_NR 2
-struct hvm_mmio_handler *hvm_mmio_handlers[HVM_MMIO_HANDLER_NR] =
+static struct hvm_mmio_handler *hvm_mmio_handlers[HVM_MMIO_HANDLER_NR] =
{
&vlapic_mmio_handler,
&vioapic_mmio_handler
};
+struct hvm_buffered_io_range {
+ unsigned long start_addr;
+ unsigned long length;
+};
+
+#define HVM_BUFFERED_IO_RANGE_NR 1
+
+static struct hvm_buffered_io_range buffered_stdvga_range = {0xA0000, 0x20000};
+static struct hvm_buffered_io_range
+*hvm_buffered_io_ranges[HVM_BUFFERED_IO_RANGE_NR] =
+{
+ &buffered_stdvga_range
+};
+
static inline void hvm_mmio_access(struct vcpu *v,
ioreq_t *p,
hvm_mmio_read_t read_handler,
@@ -140,6 +154,56 @@ static inline void hvm_mmio_access(struct vcpu *v,
}
}
+int hvm_buffered_io_intercept(ioreq_t *p)
+{
+ struct vcpu *v = current;
+ spinlock_t *buffered_io_lock;
+ buffered_iopage_t *buffered_iopage =
+ (buffered_iopage_t *)(v->domain->arch.hvm_domain.buffered_io_va);
+ unsigned long tmp_write_pointer = 0;
+ int i;
+
+ /* ignore READ ioreq_t! */
+ if ( p->dir == IOREQ_READ )
+ return 0;
+
+ for ( i = 0; i < HVM_BUFFERED_IO_RANGE_NR; i++ ) {
+ if ( p->addr >= hvm_buffered_io_ranges[i]->start_addr &&
+ p->addr + p->size - 1 < hvm_buffered_io_ranges[i]->start_addr +
+ hvm_buffered_io_ranges[i]->length )
+ break;
+ }
+
+ if ( i == HVM_BUFFERED_IO_RANGE_NR )
+ return 0;
+
+ buffered_io_lock = &v->domain->arch.hvm_domain.buffered_io_lock;
+ spin_lock(buffered_io_lock);
+
+ if ( buffered_iopage->write_pointer - buffered_iopage->read_pointer ==
+ (unsigned long)IOREQ_BUFFER_SLOT_NUM ) {
+ /* The queue is full:
+ * send this ioreq through the normal (synchronous) path.
+ * NOTE: the unsigned arithmetic above handles write_pointer
+ * overflow correctly.
+ */
+ spin_unlock(buffered_io_lock);
+ return 0;
+ }
+
+ tmp_write_pointer = buffered_iopage->write_pointer % IOREQ_BUFFER_SLOT_NUM;
+
+ memcpy(&buffered_iopage->ioreq[tmp_write_pointer], p, sizeof(ioreq_t));
+
+ /* make the ioreq_t visible before updating write_pointer */
+ wmb();
+ buffered_iopage->write_pointer++;
+
+ spin_unlock(buffered_io_lock);
+
+ return 1;
+}
+
int hvm_mmio_intercept(ioreq_t *p)
{
struct vcpu *v = current;
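The full-queue test in hvm_buffered_io_intercept works because both ring pointers grow monotonically and are reduced modulo IOREQ_BUFFER_SLOT_NUM only when indexing: their unsigned difference is therefore always the number of occupied slots, even after write_pointer overflows. A small self-contained check of that property (standalone C, not patch code):

    #include <assert.h>
    #include <limits.h>

    #define SLOT_NUM 80UL

    int main(void)
    {
        /* Producer wraps past ULONG_MAX; consumer has not caught up yet. */
        unsigned long read_pointer  = ULONG_MAX - 2;
        unsigned long write_pointer = read_pointer + SLOT_NUM;  /* wraps */

        /* The difference is still the fill level despite the wrap. */
        assert(write_pointer - read_pointer == SLOT_NUM);       /* ring full */

        read_pointer++;                                         /* consume one */
        assert(write_pointer - read_pointer == SLOT_NUM - 1);   /* one slot free */
        return 0;
    }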
diff --git a/xen/arch/x86/hvm/platform.c b/xen/arch/x86/hvm/platform.c
index a5f86c417b..2ee9fd9683 100644
--- a/xen/arch/x86/hvm/platform.c
+++ b/xen/arch/x86/hvm/platform.c
@@ -779,7 +779,7 @@ void send_mmio_req(
} else
p->u.data = value;
- if (hvm_mmio_intercept(p)){
+ if ( hvm_mmio_intercept(p) || hvm_buffered_io_intercept(p) ) {
p->state = STATE_IORESP_READY;
hvm_io_assist(v);
return;
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 2059bff0ea..e38a2fa3d1 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -810,6 +810,9 @@ static void svm_relinquish_guest_resources(struct domain *d)
unmap_domain_page_global(
(void *)d->arch.hvm_domain.shared_page_va);
+ if ( d->arch.hvm_domain.buffered_io_va )
+ unmap_domain_page_global((void *)d->arch.hvm_domain.buffered_io_va);
+
shadow_direct_map_clean(d);
}
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 8d8fbbe75f..1d0b120d95 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -151,6 +151,9 @@ static void vmx_relinquish_guest_resources(struct domain *d)
unmap_domain_page_global(
(void *)d->arch.hvm_domain.shared_page_va);
+ if ( d->arch.hvm_domain.buffered_io_va )
+ unmap_domain_page_global((void *)d->arch.hvm_domain.buffered_io_va);
+
shadow_direct_map_clean(d);
}
diff --git a/xen/include/asm-x86/e820.h b/xen/include/asm-x86/e820.h
index b135cbff0d..8053fbd790 100644
--- a/xen/include/asm-x86/e820.h
+++ b/xen/include/asm-x86/e820.h
@@ -12,6 +12,7 @@
#define E820_IO 16
#define E820_SHARED_PAGE 17
#define E820_XENSTORE 18
+#define E820_BUFFERED_IO 19
#define E820_MAP_PAGE 0x00090000
#define E820_MAP_NR_OFFSET 0x000001E8
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index 2d1d977c41..c69d692502 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -33,6 +33,8 @@
struct hvm_domain {
unsigned long shared_page_va;
+ unsigned long buffered_io_va;
+ spinlock_t buffered_io_lock;
s64 tsc_frequency;
struct pl_time pl_time;
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 567103aca4..9361094688 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -78,7 +78,7 @@ hvm_disable(void)
}
void hvm_create_event_channels(struct vcpu *v);
-void hvm_map_io_shared_page(struct vcpu *v);
+void hvm_map_io_shared_pages(struct vcpu *v);
static inline int
hvm_initialize_guest_resources(struct vcpu *v)
@@ -87,7 +87,7 @@ hvm_initialize_guest_resources(struct vcpu *v)
if ( hvm_funcs.initialize_guest_resources )
ret = hvm_funcs.initialize_guest_resources(v);
if ( ret == 1 ) {
- hvm_map_io_shared_page(v);
+ hvm_map_io_shared_pages(v);
hvm_create_event_channels(v);
}
return ret;
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index 9d3440e5aa..d4adb1b6cb 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -139,6 +139,7 @@ extern int hvm_copy(void *buf, unsigned long vaddr, int size, int dir);
extern void hvm_setup_platform(struct domain* d);
extern int hvm_mmio_intercept(ioreq_t *p);
extern int hvm_io_intercept(ioreq_t *p, int type);
+extern int hvm_buffered_io_intercept(ioreq_t *p);
extern void hvm_hooks_assist(struct vcpu *v);
extern void hvm_print_line(struct vcpu *v, const char c);
extern void hlt_timer_fn(void *data);
diff --git a/xen/include/public/hvm/ioreq.h b/xen/include/public/hvm/ioreq.h
index 275f94df11..8e92b004b1 100644
--- a/xen/include/public/hvm/ioreq.h
+++ b/xen/include/public/hvm/ioreq.h
@@ -78,6 +78,14 @@ struct shared_iopage {
};
typedef struct shared_iopage shared_iopage_t;
+#define IOREQ_BUFFER_SLOT_NUM 80
+struct buffered_iopage {
+ unsigned long read_pointer;
+ unsigned long write_pointer;
+ ioreq_t ioreq[IOREQ_BUFFER_SLOT_NUM];
+}; /* the size of this structure must not exceed one page */
+typedef struct buffered_iopage buffered_iopage_t;
+
#endif /* _IOREQ_H_ */
/*
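The size constraint noted on struct buffered_iopage above can be enforced at compile time. A sketch of such a check using the negative-array-size idiom (assuming 4096-byte pages; this is not part of the patch, and Xen itself would more likely use BUILD_BUG_ON):

    /* Fails to compile if buffered_iopage_t outgrows one 4 KiB page. */
    typedef char buffered_iopage_fits_in_one_page
        [(sizeof(buffered_iopage_t) <= 4096) ? 1 : -1];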