-rw-r--r--  xen/arch/x86/hvm/hvm.c            | 112
-rw-r--r--  xen/arch/x86/hvm/intercept.c      |  36
-rw-r--r--  xen/arch/x86/hvm/io.c             |   2
-rw-r--r--  xen/arch/x86/hvm/platform.c       |   6
-rw-r--r--  xen/arch/x86/mm.c                 |  22
-rw-r--r--  xen/arch/x86/x86_32/domain_page.c |  21
-rw-r--r--  xen/common/domain.c               |   3
-rw-r--r--  xen/include/asm-x86/hvm/domain.h  |  12
-rw-r--r--  xen/include/asm-x86/hvm/support.h |  13
-rw-r--r--  xen/include/xen/domain_page.h     |   9

10 files changed, 113 insertions, 123 deletions
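The core of the buffered-I/O path touched below is a ring in the shared buf_ioreq page: hvm_buffered_io_send() fills a slot, issues wmb(), and only then advances write_pointer, while the consumer (the device model, which is not part of this patch) is expected to read the slot before bumping read_pointer. The following is a minimal user-space sketch of that protocol, not Xen code: the types are simplified stand-ins, IOREQ_BUFFER_SLOT_NUM's value is illustrative, and C11 release/acquire atomics stand in for Xen's wmb()/rmb(); in Xen itself, producers are additionally serialised by the per-page lock introduced by this patch.

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins for Xen's ioreq_t and buffered_iopage_t. */
    #define IOREQ_BUFFER_SLOT_NUM 80           /* illustrative value */

    typedef struct { uint64_t addr, data; uint8_t dir, size; } ioreq_t;

    typedef struct {
        _Atomic uint32_t read_pointer;         /* advanced only by the consumer */
        _Atomic uint32_t write_pointer;        /* advanced only by the producer */
        ioreq_t ioreq[IOREQ_BUFFER_SLOT_NUM];
    } buffered_iopage_t;

    /* Producer side, mirroring hvm_buffered_io_send(): returns 0 when full. */
    static int buf_send(buffered_iopage_t *pg, const ioreq_t *p)
    {
        uint32_t wp = atomic_load_explicit(&pg->write_pointer, memory_order_relaxed);
        uint32_t rp = atomic_load_explicit(&pg->read_pointer, memory_order_acquire);

        /* Unsigned subtraction handles write_pointer wrap-around. */
        if ( wp - rp == IOREQ_BUFFER_SLOT_NUM )
            return 0;                          /* full: fall back to the normal path */

        pg->ioreq[wp % IOREQ_BUFFER_SLOT_NUM] = *p;
        /* Make the slot visible /before/ the new write_pointer (Xen's wmb()). */
        atomic_store_explicit(&pg->write_pointer, wp + 1, memory_order_release);
        return 1;
    }

    /* Consumer side: this lives in the device model, not in this patch. */
    static int buf_recv(buffered_iopage_t *pg, ioreq_t *out)
    {
        uint32_t rp = atomic_load_explicit(&pg->read_pointer, memory_order_relaxed);
        /* Acquire pairs with the producer's release: slot data is valid below. */
        uint32_t wp = atomic_load_explicit(&pg->write_pointer, memory_order_acquire);

        if ( rp == wp )
            return 0;                          /* empty */

        *out = pg->ioreq[rp % IOREQ_BUFFER_SLOT_NUM];
        atomic_store_explicit(&pg->read_pointer, rp + 1, memory_order_release);
        return 1;
    }

    int main(void)
    {
        static buffered_iopage_t pg;
        ioreq_t req = { .addr = 0x3f8, .data = 'x', .dir = 1, .size = 1 }, got;

        buf_send(&pg, &req);
        if ( buf_recv(&pg, &got) )
            printf("drained ioreq: addr=%#llx\n", (unsigned long long)got.addr);
        return 0;
    }

Because write_pointer only ever grows and read_pointer never overtakes it, the "wp - rp == SLOT_NUM" test stays correct across 32-bit wrap, which is why the old in-tree comment about "write_pointer overflow" could simply be dropped.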
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index a356907079..d8ef1ae258 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -101,7 +101,7 @@ void hvm_set_guest_time(struct vcpu *v, u64 gtime)
 
 u64 hvm_get_guest_time(struct vcpu *v)
 {
-    u64 host_tsc;
+    u64 host_tsc;
 
     rdtscll(host_tsc);
     return host_tsc + v->arch.hvm_vcpu.cache_tsc_offset;
@@ -125,7 +125,7 @@ void hvm_do_resume(struct vcpu *v)
     pt_thaw_time(v);
 
     /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
-    p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
+    p = &get_ioreq(v)->vp_ioreq;
     while ( p->state != STATE_IOREQ_NONE )
     {
         switch ( p->state )
@@ -146,54 +146,68 @@ void hvm_do_resume(struct vcpu *v)
     }
 }
 
-static void hvm_clear_ioreq_pfn(
-    struct domain *d, unsigned long *pva)
+static void hvm_init_ioreq_page(
+    struct domain *d, struct hvm_ioreq_page *iorp)
 {
-    unsigned long va, mfn;
+    memset(iorp, 0, sizeof(*iorp));
+    spin_lock_init(&iorp->lock);
+    domain_pause(d);
+}
 
-    BUG_ON(!d->is_dying);
+static void hvm_destroy_ioreq_page(
+    struct domain *d, struct hvm_ioreq_page *iorp)
+{
+    spin_lock(&iorp->lock);
 
-    if ( (va = xchg(pva, 0UL)) == 0UL )
-        return;
+    ASSERT(d->is_dying);
+
+    if ( iorp->va != NULL )
+    {
+        unmap_domain_page_global(iorp->va);
+        put_page_and_type(iorp->page);
+        iorp->va = NULL;
+    }
 
-    mfn = mfn_from_mapped_domain_page((void *)va);
-    unmap_domain_page_global((void *)va);
-    put_page_and_type(mfn_to_page(mfn));
+    spin_unlock(&iorp->lock);
 }
 
-static int hvm_set_ioreq_pfn(
-    struct domain *d, unsigned long *pva, unsigned long gmfn)
+static int hvm_set_ioreq_page(
+    struct domain *d, struct hvm_ioreq_page *iorp, unsigned long gmfn)
 {
+    struct page_info *page;
     unsigned long mfn;
    void *va;
 
     mfn = gmfn_to_mfn(d, gmfn);
-    if ( !mfn_valid(mfn) ||
-         !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
+    if ( !mfn_valid(mfn) )
+        return -EINVAL;
+
+    page = mfn_to_page(mfn);
+    if ( !get_page_and_type(page, d, PGT_writable_page) )
         return -EINVAL;
 
     va = map_domain_page_global(mfn);
     if ( va == NULL )
     {
-        put_page_and_type(mfn_to_page(mfn));
+        put_page_and_type(page);
         return -ENOMEM;
     }
 
-    if ( cmpxchg(pva, 0UL, (unsigned long)va) != 0UL )
+    spin_lock(&iorp->lock);
+
+    if ( (iorp->va != NULL) || d->is_dying )
     {
+        spin_unlock(&iorp->lock);
         unmap_domain_page_global(va);
         put_page_and_type(mfn_to_page(mfn));
         return -EINVAL;
     }
 
-    /*
-     * Check dying status /after/ setting *pva. cmpxchg() is a barrier.
-     * We race against hvm_domain_relinquish_resources().
-     */
-    if ( d->is_dying )
-        hvm_clear_ioreq_pfn(d, pva);
+    iorp->va = va;
+    iorp->page = page;
+
+    spin_unlock(&iorp->lock);
 
-    /* Balance the domain_pause() in hvm_domain_initialise(). */
     domain_unpause(d);
 
     return 0;
@@ -211,7 +225,6 @@ int hvm_domain_initialise(struct domain *d)
     }
 
     spin_lock_init(&d->arch.hvm_domain.pbuf_lock);
-    spin_lock_init(&d->arch.hvm_domain.buffered_io_lock);
     spin_lock_init(&d->arch.hvm_domain.irq_lock);
 
     rc = paging_enable(d, PG_refcounts|PG_translate|PG_external);
@@ -221,17 +234,16 @@ int hvm_domain_initialise(struct domain *d)
     vpic_init(d);
     vioapic_init(d);
 
-    /* Do not allow domain to run until it has ioreq shared pages. */
-    domain_pause(d); /* HVM_PARAM_IOREQ_PFN */
-    domain_pause(d); /* HVM_PARAM_BUFIOREQ_PFN */
+    hvm_init_ioreq_page(d, &d->arch.hvm_domain.ioreq);
+    hvm_init_ioreq_page(d, &d->arch.hvm_domain.buf_ioreq);
 
     return 0;
 }
 
 void hvm_domain_relinquish_resources(struct domain *d)
 {
-    hvm_clear_ioreq_pfn(d, &d->arch.hvm_domain.shared_page_va);
-    hvm_clear_ioreq_pfn(d, &d->arch.hvm_domain.buffered_io_va);
+    hvm_destroy_ioreq_page(d, &d->arch.hvm_domain.ioreq);
+    hvm_destroy_ioreq_page(d, &d->arch.hvm_domain.buf_ioreq);
 }
 
 void hvm_domain_destroy(struct domain *d)
@@ -379,10 +391,20 @@ int hvm_vcpu_initialise(struct vcpu *v)
     }
 
     /* Create ioreq event channel. */
-    v->arch.hvm_vcpu.xen_port = alloc_unbound_xen_event_channel(v, 0);
-    if ( get_sp(v->domain) && get_vio(v->domain, v->vcpu_id) )
-        get_vio(v->domain, v->vcpu_id)->vp_eport =
-            v->arch.hvm_vcpu.xen_port;
+    rc = alloc_unbound_xen_event_channel(v, 0);
+    if ( rc < 0 )
+    {
+        hvm_funcs.vcpu_destroy(v);
+        vlapic_destroy(v);
+        return rc;
+    }
+
+    /* Register ioreq event channel. */
+    v->arch.hvm_vcpu.xen_port = rc;
+    spin_lock(&v->domain->arch.hvm_domain.ioreq.lock);
+    if ( v->domain->arch.hvm_domain.ioreq.va != NULL )
+        get_ioreq(v)->vp_eport = v->arch.hvm_vcpu.xen_port;
+    spin_unlock(&v->domain->arch.hvm_domain.ioreq.lock);
 
     INIT_LIST_HEAD(&v->arch.hvm_vcpu.tm_list);
@@ -463,7 +485,7 @@ void hvm_send_assist_req(struct vcpu *v)
     if ( unlikely(!vcpu_start_shutdown_deferral(v)) )
         return; /* implicitly bins the i/o operation */
 
-    p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
+    p = &get_ioreq(v)->vp_ioreq;
     if ( unlikely(p->state != STATE_IOREQ_NONE) )
     {
         /* This indicates a bug in the device model. Crash the domain. */
@@ -981,6 +1003,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
     case HVMOP_get_param:
     {
         struct xen_hvm_param a;
+        struct hvm_ioreq_page *iorp;
         struct domain *d;
         struct vcpu *v;
 
@@ -1009,19 +1032,18 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
         switch ( a.index )
         {
         case HVM_PARAM_IOREQ_PFN:
-            rc = hvm_set_ioreq_pfn(
-                d, &d->arch.hvm_domain.shared_page_va, a.value);
-            if ( rc == 0 )
-            {
+            iorp = &d->arch.hvm_domain.ioreq;
+            rc = hvm_set_ioreq_page(d, iorp, a.value);
+            spin_lock(&iorp->lock);
+            if ( (rc == 0) && (iorp->va != NULL) )
                 /* Initialise evtchn port info if VCPUs already created. */
                 for_each_vcpu ( d, v )
-                    get_vio(d, v->vcpu_id)->vp_eport =
-                        v->arch.hvm_vcpu.xen_port;
-            }
+                    get_ioreq(v)->vp_eport = v->arch.hvm_vcpu.xen_port;
+            spin_unlock(&iorp->lock);
             break;
-        case HVM_PARAM_BUFIOREQ_PFN:
-            rc = hvm_set_ioreq_pfn(
-                d, &d->arch.hvm_domain.buffered_io_va, a.value);
+        case HVM_PARAM_BUFIOREQ_PFN:
+            iorp = &d->arch.hvm_domain.buf_ioreq;
+            rc = hvm_set_ioreq_page(d, iorp, a.value);
             break;
         case HVM_PARAM_CALLBACK_IRQ:
             hvm_set_callback_via(d, a.value);
diff --git a/xen/arch/x86/hvm/intercept.c b/xen/arch/x86/hvm/intercept.c
index fb8497a996..91749b676b 100644
--- a/xen/arch/x86/hvm/intercept.c
+++ b/xen/arch/x86/hvm/intercept.c
@@ -158,34 +158,26 @@ static inline void hvm_mmio_access(struct vcpu *v,
 int hvm_buffered_io_send(ioreq_t *p)
 {
     struct vcpu *v = current;
-    spinlock_t *buffered_io_lock;
-    buffered_iopage_t *buffered_iopage =
-        (buffered_iopage_t *)(v->domain->arch.hvm_domain.buffered_io_va);
-    unsigned long tmp_write_pointer = 0;
-
-    buffered_io_lock = &v->domain->arch.hvm_domain.buffered_io_lock;
-    spin_lock(buffered_io_lock);
-
-    if ( buffered_iopage->write_pointer - buffered_iopage->read_pointer ==
-         (unsigned int)IOREQ_BUFFER_SLOT_NUM ) {
-        /* the queue is full.
-         * send the iopacket through the normal path.
-         * NOTE: The arithimetic operation could handle the situation for
-         * write_pointer overflow.
-         */
-        spin_unlock(buffered_io_lock);
+    struct hvm_ioreq_page *iorp = &v->domain->arch.hvm_domain.buf_ioreq;
+    buffered_iopage_t *pg = iorp->va;
+
+    spin_lock(&iorp->lock);
+
+    if ( (pg->write_pointer - pg->read_pointer) == IOREQ_BUFFER_SLOT_NUM )
+    {
+        /* The queue is full: send the iopacket through the normal path. */
+        spin_unlock(&iorp->lock);
         return 0;
     }
 
-    tmp_write_pointer = buffered_iopage->write_pointer % IOREQ_BUFFER_SLOT_NUM;
-
-    memcpy(&buffered_iopage->ioreq[tmp_write_pointer], p, sizeof(ioreq_t));
+    memcpy(&pg->ioreq[pg->write_pointer % IOREQ_BUFFER_SLOT_NUM],
+           p, sizeof(ioreq_t));
 
-    /*make the ioreq_t visible before write_pointer*/
+    /* Make the ioreq_t visible /before/ write_pointer. */
     wmb();
-    buffered_iopage->write_pointer++;
+    pg->write_pointer++;
 
-    spin_unlock(buffered_io_lock);
+    spin_unlock(&iorp->lock);
     return 1;
 }
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index 03820f0ebd..f0f8ea8ecb 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -832,7 +832,7 @@ void hvm_io_assist(void)
     io_opp = &v->arch.hvm_vcpu.io_op;
     regs = &io_opp->io_context;
 
-    vio = get_vio(d, v->vcpu_id);
+    vio = get_ioreq(v);
     p = &vio->vp_ioreq;
 
     if ( p->state != STATE_IORESP_READY )
diff --git a/xen/arch/x86/hvm/platform.c b/xen/arch/x86/hvm/platform.c
index 0ab2fb1fe7..8bb3e3439b 100644
--- a/xen/arch/x86/hvm/platform.c
+++ b/xen/arch/x86/hvm/platform.c
@@ -866,7 +866,7 @@ void send_pio_req(unsigned long port, unsigned long count, int size,
                 port, count, size, value, dir, value_is_ptr);
     }
 
-    vio = get_vio(v->domain, v->vcpu_id);
+    vio = get_ioreq(v);
     if ( vio == NULL ) {
         printk("bad shared page: %lx\n", (unsigned long) vio);
         domain_crash_synchronous();
@@ -915,7 +915,7 @@ static void send_mmio_req(unsigned char type, unsigned long gpa,
                 type, gpa, count, size, value, dir, value_is_ptr);
     }
 
-    vio = get_vio(v->domain, v->vcpu_id);
+    vio = get_ioreq(v);
     if (vio == NULL) {
         printk("bad shared page\n");
         domain_crash_synchronous();
@@ -976,7 +976,7 @@ void send_invalidate_req(void)
     vcpu_iodata_t *vio;
     ioreq_t *p;
 
-    vio = get_vio(v->domain, v->vcpu_id);
+    vio = get_ioreq(v);
     if ( vio == NULL )
     {
         printk("bad shared page: %lx\n", (unsigned long) vio);
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index fac87bcf02..ce49f743fe 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2041,7 +2041,7 @@ int do_mmuext_op(
                 MEM_LOG("Error while pinning mfn %lx", mfn);
                 break;
             }
-
+
             if ( unlikely(test_and_set_bit(_PGT_pinned,
                                            &page->u.inuse.type_info)) )
             {
@@ -2054,14 +2054,18 @@ int do_mmuext_op(
             /* A page is dirtied when its pin status is set. */
             mark_dirty(d, mfn);
 
-            /*
-             * We can race domain destruction (domain_relinquish_resources).
-             * NB. The dying-flag test must happen /after/ setting PGT_pinned.
-             */
-            if ( unlikely(this_cpu(percpu_mm_info).foreign != NULL) &&
-                 this_cpu(percpu_mm_info).foreign->is_dying &&
-                 test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
-                put_page_and_type(page);
+            /* We can race domain destruction (domain_relinquish_resources). */
+            if ( unlikely(this_cpu(percpu_mm_info).foreign != NULL) )
+            {
+                int drop_ref;
+                spin_lock(&FOREIGNDOM->page_alloc_lock);
+                drop_ref = (FOREIGNDOM->is_dying &&
+                            test_and_clear_bit(_PGT_pinned,
+                                               &page->u.inuse.type_info));
+                spin_unlock(&FOREIGNDOM->page_alloc_lock);
+                if ( drop_ref )
+                    put_page_and_type(page);
+            }
 
             break;
diff --git a/xen/arch/x86/x86_32/domain_page.c b/xen/arch/x86/x86_32/domain_page.c
index 3f21d458ea..551ce50593 100644
--- a/xen/arch/x86/x86_32/domain_page.c
+++ b/xen/arch/x86/x86_32/domain_page.c
@@ -251,24 +251,3 @@ void unmap_domain_page_global(void *va)
     idx = (__va - IOREMAP_VIRT_START) >> PAGE_SHIFT;
     set_bit(idx, garbage);
 }
-
-unsigned long mfn_from_mapped_domain_page(void *va)
-{
-    unsigned long __va = (unsigned long)va;
-    l2_pgentry_t *pl2e;
-    l1_pgentry_t *pl1e;
-    unsigned int idx;
-    struct mapcache *cache;
-
-    if ( (__va >= MAPCACHE_VIRT_START) && (__va < MAPCACHE_VIRT_END) )
-    {
-        cache = &mapcache_current_vcpu()->domain->arch.mapcache;
-        idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
-        return l1e_get_pfn(cache->l1tab[idx]);
-    }
-
-    ASSERT(__va >= IOREMAP_VIRT_START);
-    pl2e = virt_to_xen_l2e(__va);
-    pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(__va);
-    return l1e_get_pfn(*pl1e);
-}
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 2ff5ff42ce..8ade10f4ab 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -313,9 +313,6 @@ void domain_kill(struct domain *d)
         return;
     }
 
-    /* Tear down state /after/ setting the dying flag. */
-    smp_mb();
-
     gnttab_release_mappings(d);
     domain_relinquish_resources(d);
     put_domain(d);
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index ce1a2fc8ad..880e988b96 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -28,10 +28,16 @@
 #include <public/hvm/params.h>
 #include <public/hvm/save.h>
 
+struct hvm_ioreq_page {
+    spinlock_t lock;
+    struct page_info *page;
+    void *va;
+};
+
 struct hvm_domain {
-    unsigned long shared_page_va;
-    unsigned long buffered_io_va;
-    spinlock_t buffered_io_lock;
+    struct hvm_ioreq_page ioreq;
+    struct hvm_ioreq_page buf_ioreq;
+
     s64 tsc_frequency;
     struct pl_time pl_time;
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index 1d331ef5da..efbc2b39b0 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -32,14 +32,13 @@
 #define HVM_DEBUG 1
 #endif
 
-static inline shared_iopage_t *get_sp(struct domain *d)
+static inline vcpu_iodata_t *get_ioreq(struct vcpu *v)
 {
-    return (shared_iopage_t *) d->arch.hvm_domain.shared_page_va;
-}
-
-static inline vcpu_iodata_t *get_vio(struct domain *d, unsigned long cpu)
-{
-    return &get_sp(d)->vcpu_iodata[cpu];
+    struct domain *d = v->domain;
+    shared_iopage_t *p = d->arch.hvm_domain.ioreq.va;
+    ASSERT((v == current) || spin_is_locked(&d->arch.hvm_domain.ioreq.lock));
+    ASSERT(d->arch.hvm_domain.ioreq.va != NULL);
+    return &p->vcpu_iodata[v->vcpu_id];
 }
 
 /* XXX these are really VMX specific */
diff --git a/xen/include/xen/domain_page.h b/xen/include/xen/domain_page.h
index 5453323867..cd2226678b 100644
--- a/xen/include/xen/domain_page.h
+++ b/xen/include/xen/domain_page.h
@@ -34,13 +34,6 @@ void unmap_domain_page(void *va);
 void *map_domain_page_global(unsigned long mfn);
 void unmap_domain_page_global(void *va);
 
-/*
- * Convert a VA (within a page previously mapped in the context of the
- * currently-executing VCPU via a call to map_domain_page(), or via a
- * previous call to map_domain_page_global()) to the mapped page frame.
- */
-unsigned long mfn_from_mapped_domain_page(void *va);
-
 #define DMCACHE_ENTRY_VALID 1U
 #define DMCACHE_ENTRY_HELD  2U
 
@@ -109,8 +102,6 @@ domain_mmap_cache_destroy(struct domain_mmap_cache *cache)
 #define map_domain_page_global(mfn) mfn_to_virt(mfn)
 #define unmap_domain_page_global(va) ((void)(va))
 
-#define mfn_from_mapped_domain_page(va) virt_to_mfn(va)
-
 struct domain_mmap_cache {
 };
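The reason the old cmpxchg()/smp_mb() dance could be deleted above is that every transition of an ioreq page now happens under iorp->lock, with d->is_dying examined in the same critical section that publishes iorp->va. Below is a compact user-space model of that discipline, not Xen code: pthread mutexes and malloc()/free() stand in for Xen's spinlocks and map_domain_page_global()/put_page_and_type(), and the struct and function names are mine, chosen to mirror the patch.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdlib.h>
    #include <string.h>
    #include <assert.h>

    struct ioreq_page {
        pthread_mutex_t lock;
        void *va;                  /* non-NULL only while a guest page is mapped */
    };

    struct dom { bool is_dying; struct ioreq_page ioreq; };

    static void ioreq_page_init(struct ioreq_page *iorp)
    {
        memset(iorp, 0, sizeof(*iorp));
        pthread_mutex_init(&iorp->lock, NULL);
    }

    /* Mirrors hvm_set_ioreq_page(): map first, then publish under the lock,
     * refusing if a page is already set or the domain is already dying. */
    static int ioreq_page_set(struct dom *d, struct ioreq_page *iorp)
    {
        void *va = malloc(4096);   /* stand-in for map_domain_page_global() */
        if ( va == NULL )
            return -1;

        pthread_mutex_lock(&iorp->lock);
        if ( (iorp->va != NULL) || d->is_dying )
        {
            pthread_mutex_unlock(&iorp->lock);
            free(va);              /* undo the speculative mapping */
            return -1;
        }
        iorp->va = va;
        pthread_mutex_unlock(&iorp->lock);
        return 0;
    }

    /* Mirrors hvm_destroy_ioreq_page(): called only after is_dying is set, so a
     * racing ioreq_page_set() either published first (we unmap it here) or sees
     * is_dying under the lock and backs out. */
    static void ioreq_page_destroy(struct dom *d, struct ioreq_page *iorp)
    {
        pthread_mutex_lock(&iorp->lock);
        assert(d->is_dying);
        if ( iorp->va != NULL )
        {
            free(iorp->va);
            iorp->va = NULL;
        }
        pthread_mutex_unlock(&iorp->lock);
    }

    int main(void)
    {
        struct dom d = { .is_dying = false };
        ioreq_page_init(&d.ioreq);
        ioreq_page_set(&d, &d.ioreq);      /* HVM_PARAM_IOREQ_PFN being set */
        d.is_dying = true;                 /* domain_kill() */
        ioreq_page_destroy(&d, &d.ioreq);  /* domain_relinquish_resources() */
        return 0;
    }

Since both the publish and the teardown read and write the same two fields inside one lock, no explicit memory barrier is needed, which is also why the smp_mb() in domain_kill() and the mfn_from_mapped_domain_page() helper could be removed by this patch.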