aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--xen/arch/x86/hvm/hvm.c1
-rw-r--r--xen/arch/x86/hvm/vmx/vvmx.c42
-rw-r--r--xen/arch/x86/mm/guest_walk.c15
-rw-r--r--xen/arch/x86/mm/hap/Makefile1
-rw-r--r--xen/arch/x86/mm/hap/nested_ept.c287
-rw-r--r--xen/arch/x86/mm/hap/nested_hap.c2
-rw-r--r--xen/include/asm-x86/guest_pt.h4
-rw-r--r--xen/include/asm-x86/hvm/nestedhvm.h1
-rw-r--r--xen/include/asm-x86/hvm/vmx/vmcs.h1
-rw-r--r--xen/include/asm-x86/hvm/vmx/vmx.h31
-rw-r--r--xen/include/asm-x86/hvm/vmx/vvmx.h13
11 files changed, 388 insertions, 10 deletions
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 292559dae2..374a74046f 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1326,6 +1326,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa,
access_r, access_w, access_x);
switch (rv) {
case NESTEDHVM_PAGEFAULT_DONE:
+ case NESTEDHVM_PAGEFAULT_RETRY:
return 1;
case NESTEDHVM_PAGEFAULT_L1_ERROR:
/* An error occured while translating gpa from
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 76a484f0b9..e2e2f0040c 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -943,9 +943,18 @@ static void sync_vvmcs_ro(struct vcpu *v)
{
int i;
struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+ struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
+ void *vvmcs = nvcpu->nv_vvmcx;
for ( i = 0; i < ARRAY_SIZE(vmcs_ro_field); i++ )
shadow_to_vvmcs(nvcpu->nv_vvmcx, vmcs_ro_field[i]);
+
+ /* Adjust exit_reason/exit_qualification for the EPT-violation case */
+ if ( __get_vvmcs(vvmcs, VM_EXIT_REASON) == EXIT_REASON_EPT_VIOLATION )
+ {
+ __set_vvmcs(vvmcs, EXIT_QUALIFICATION, nvmx->ept_exit.exit_qual);
+ __set_vvmcs(vvmcs, VM_EXIT_REASON, nvmx->ept_exit.exit_reason);
+ }
}
static void load_vvmcs_host_state(struct vcpu *v)
@@ -1493,8 +1502,37 @@ nvmx_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
unsigned int *page_order,
bool_t access_r, bool_t access_w, bool_t access_x)
{
- /*TODO:*/
- return 0;
+ int rc;
+ unsigned long gfn;
+ uint64_t exit_qual = __vmread(EXIT_QUALIFICATION);
+ uint32_t exit_reason = EXIT_REASON_EPT_VIOLATION;
+ uint32_t rwx_rights = (access_x << 2) | (access_w << 1) | access_r;
+ struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
+
+ rc = nept_translate_l2ga(v, L2_gpa, page_order, rwx_rights, &gfn,
+ &exit_qual, &exit_reason);
+ switch ( rc )
+ {
+ case EPT_TRANSLATE_SUCCEED:
+ *L1_gpa = (gfn << PAGE_SHIFT) + (L2_gpa & ~PAGE_MASK);
+ rc = NESTEDHVM_PAGEFAULT_DONE;
+ break;
+ case EPT_TRANSLATE_VIOLATION:
+ case EPT_TRANSLATE_MISCONFIG:
+ rc = NESTEDHVM_PAGEFAULT_INJECT;
+ nvmx->ept_exit.exit_reason = exit_reason;
+ nvmx->ept_exit.exit_qual = exit_qual;
+ break;
+ case EPT_TRANSLATE_RETRY:
+ rc = NESTEDHVM_PAGEFAULT_RETRY;
+ break;
+ default:
+ gdprintk(XENLOG_ERR, "GUEST EPT translation error!:%d\n", rc);
+ BUG();
+ break;
+ }
+
+ return rc;
}
void nvmx_idtv_handling(void)
diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
index 0f08fb0b92..70460b6ad7 100644
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -88,18 +88,15 @@ static uint32_t set_ad_bits(void *guest_p, void *walk_p, int set_dirty)
/* If the map is non-NULL, we leave this function having
* acquired an extra ref on mfn_to_page(*mfn) */
-static inline void *map_domain_gfn(struct p2m_domain *p2m,
- gfn_t gfn,
- mfn_t *mfn,
- p2m_type_t *p2mt,
- uint32_t *rc)
+void *map_domain_gfn(struct p2m_domain *p2m, gfn_t gfn, mfn_t *mfn,
+ p2m_type_t *p2mt, p2m_query_t q, uint32_t *rc)
{
struct page_info *page;
void *map;
/* Translate the gfn, unsharing if shared */
page = get_page_from_gfn_p2m(p2m->domain, p2m, gfn_x(gfn), p2mt, NULL,
- P2M_ALLOC | P2M_UNSHARE);
+ q);
if ( p2m_is_paging(*p2mt) )
{
ASSERT(!p2m_is_nestedp2m(p2m));
@@ -149,6 +146,7 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
uint32_t gflags, mflags, iflags, rc = 0;
int smep;
bool_t pse1G = 0, pse2M = 0;
+ p2m_query_t qt = P2M_ALLOC | P2M_UNSHARE;
perfc_incr(guest_walk);
memset(gw, 0, sizeof(*gw));
@@ -188,7 +186,8 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
l3p = map_domain_gfn(p2m,
guest_l4e_get_gfn(gw->l4e),
&gw->l3mfn,
- &p2mt,
+ &p2mt,
+ qt,
&rc);
if(l3p == NULL)
goto out;
@@ -249,6 +248,7 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
guest_l3e_get_gfn(gw->l3e),
&gw->l2mfn,
&p2mt,
+ qt,
&rc);
if(l2p == NULL)
goto out;
@@ -322,6 +322,7 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
guest_l2e_get_gfn(gw->l2e),
&gw->l1mfn,
&p2mt,
+ qt,
&rc);
if(l1p == NULL)
goto out;
diff --git a/xen/arch/x86/mm/hap/Makefile b/xen/arch/x86/mm/hap/Makefile
index 80a6bec7db..68f2bb575d 100644
--- a/xen/arch/x86/mm/hap/Makefile
+++ b/xen/arch/x86/mm/hap/Makefile
@@ -3,6 +3,7 @@ obj-y += guest_walk_2level.o
obj-y += guest_walk_3level.o
obj-$(x86_64) += guest_walk_4level.o
obj-y += nested_hap.o
+obj-y += nested_ept.o
guest_walk_%level.o: guest_walk.c Makefile
$(CC) $(CFLAGS) -DGUEST_PAGING_LEVELS=$* -c $< -o $@
diff --git a/xen/arch/x86/mm/hap/nested_ept.c b/xen/arch/x86/mm/hap/nested_ept.c
new file mode 100644
index 0000000000..bc72c97139
--- /dev/null
+++ b/xen/arch/x86/mm/hap/nested_ept.c
@@ -0,0 +1,287 @@
+/*
+ * nested_ept.c: Handling virtualized EPT for guest in nested case.
+ *
+ * Copyright (c) 2012, Intel Corporation
+ * Xiantao Zhang <xiantao.zhang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+#include <asm/domain.h>
+#include <asm/page.h>
+#include <asm/paging.h>
+#include <asm/p2m.h>
+#include <asm/mem_event.h>
+#include <public/mem_event.h>
+#include <asm/mem_sharing.h>
+#include <xen/event.h>
+#include <asm/hap.h>
+#include <asm/hvm/support.h>
+
+#include <asm/hvm/nestedhvm.h>
+
+#include "private.h"
+
+#include <asm/hvm/vmx/vmx.h>
+#include <asm/hvm/vmx/vvmx.h>
+
+/* EPT always uses a 4-level paging structure */
+#define GUEST_PAGING_LEVELS 4
+#include <asm/guest_pt.h>
+
+/* Bits that must be zero (reserved) in entries at all levels */
+#define EPT_MUST_RSV_BITS (((1ull << PADDR_BITS) - 1) & \
+ ~((1ull << paddr_bits) - 1))
+
+/*
+ * TODO: Leave this as 0 for now so the code compiles; the real
+ * capabilities will be defined in subsequent patches.
+ */
+#define NEPT_VPID_CAP_BITS 0
+
+
+#define NEPT_1G_ENTRY_FLAG (1 << 11)
+#define NEPT_2M_ENTRY_FLAG (1 << 10)
+#define NEPT_4K_ENTRY_FLAG (1 << 9)
+
+bool_t nept_sp_entry(ept_entry_t e)
+{
+ return !!(e.sp);
+}
+
+static bool_t nept_rsv_bits_check(ept_entry_t e, uint32_t level)
+{
+ uint64_t rsv_bits = EPT_MUST_RSV_BITS;
+
+ switch ( level )
+ {
+ case 1:
+ break;
+ case 2 ... 3:
+ if ( nept_sp_entry(e) )
+ rsv_bits |= ((1ull << (9 * (level - 1))) - 1) << PAGE_SHIFT;
+ else
+ rsv_bits |= EPTE_EMT_MASK | EPTE_IGMT_MASK;
+ break;
+ case 4:
+ rsv_bits |= EPTE_EMT_MASK | EPTE_IGMT_MASK | EPTE_SUPER_PAGE_MASK;
+ break;
+ default:
+ gdprintk(XENLOG_ERR,"Unsupported EPT paging level: %d\n", level);
+ BUG();
+ break;
+ }
+ return !!(e.epte & rsv_bits);
+}
+
+/* EMT (EPT memory type) field checking */
+static bool_t nept_emt_bits_check(ept_entry_t e, uint32_t level)
+{
+ if ( e.sp || level == 1 )
+ {
+ if ( e.emt == EPT_EMT_RSV0 || e.emt == EPT_EMT_RSV1 ||
+ e.emt == EPT_EMT_RSV2 )
+ return 1;
+ }
+ return 0;
+}
+
+static bool_t nept_permission_check(uint32_t rwx_acc, uint32_t rwx_bits)
+{
+ return !(EPTE_RWX_MASK & rwx_acc & ~rwx_bits);
+}
+
+/* Check whether an EPT entry is non-present (no R/W/X permission set) */
+static bool_t nept_non_present_check(ept_entry_t e)
+{
+ if ( e.epte & EPTE_RWX_MASK )
+ return 0;
+ return 1;
+}
+
+uint64_t nept_get_ept_vpid_cap(void)
+{
+ uint64_t caps = NEPT_VPID_CAP_BITS;
+
+ if ( !cpu_has_vmx_ept_exec_only_supported )
+ caps &= ~VMX_EPT_EXEC_ONLY_SUPPORTED;
+ return caps;
+}
+
+static bool_t nept_rwx_bits_check(ept_entry_t e)
+{
+ /* Write-only, or write/execute-only, permissions are invalid */
+ uint8_t rwx_bits = e.epte & EPTE_RWX_MASK;
+
+ if ( rwx_bits == ept_access_w || rwx_bits == ept_access_wx )
+ return 1;
+
+ if ( rwx_bits == ept_access_x &&
+ !(nept_get_ept_vpid_cap() & VMX_EPT_EXEC_ONLY_SUPPORTED) )
+ return 1;
+
+ return 0;
+}
+
+/* nept's misconfiguration check */
+static bool_t nept_misconfiguration_check(ept_entry_t e, uint32_t level)
+{
+ return nept_rsv_bits_check(e, level) ||
+ nept_emt_bits_check(e, level) ||
+ nept_rwx_bits_check(e);
+}
+
+static int ept_lvl_table_offset(unsigned long gpa, int lvl)
+{
+ return (gpa >> (EPT_L4_PAGETABLE_SHIFT -(4 - lvl) * 9)) &
+ (EPT_PAGETABLE_ENTRIES - 1);
+}
+
+static uint32_t
+nept_walk_tables(struct vcpu *v, unsigned long l2ga, ept_walk_t *gw)
+{
+ int lvl;
+ p2m_type_t p2mt;
+ uint32_t rc = 0, ret = 0, gflags;
+ struct domain *d = v->domain;
+ struct p2m_domain *p2m = d->arch.p2m;
+ gfn_t base_gfn = _gfn(nhvm_vcpu_p2m_base(v) >> PAGE_SHIFT);
+ mfn_t lxmfn;
+ ept_entry_t *lxp = NULL;
+
+ memset(gw, 0, sizeof(*gw));
+
+ for (lvl = 4; lvl > 0; lvl--)
+ {
+ lxp = map_domain_gfn(p2m, base_gfn, &lxmfn, &p2mt, P2M_ALLOC, &rc);
+ if ( !lxp )
+ goto map_err;
+ gw->lxe[lvl] = lxp[ept_lvl_table_offset(l2ga, lvl)];
+ unmap_domain_page(lxp);
+ put_page(mfn_to_page(mfn_x(lxmfn)));
+
+ if ( nept_non_present_check(gw->lxe[lvl]) )
+ goto non_present;
+
+ if ( nept_misconfiguration_check(gw->lxe[lvl], lvl) )
+ goto misconfig_err;
+
+ if ( (lvl == 2 || lvl == 3) && nept_sp_entry(gw->lxe[lvl]) )
+ {
+ /* Generate a fake l1 table entry so callers don't all
+ * have to understand superpages. */
+ unsigned long gfn_lvl_mask = (1ull << ((lvl - 1) * 9)) - 1;
+ gfn_t start = _gfn(gw->lxe[lvl].mfn);
+ /* Increment the pfn by the right number of 4k pages. */
+ start = _gfn((gfn_x(start) & ~gfn_lvl_mask) +
+ ((l2ga >> PAGE_SHIFT) & gfn_lvl_mask));
+ gflags = (gw->lxe[lvl].epte & EPTE_FLAG_MASK) |
+ (lvl == 3 ? NEPT_1G_ENTRY_FLAG: NEPT_2M_ENTRY_FLAG);
+ gw->lxe[0].epte = (gfn_x(start) << PAGE_SHIFT) | gflags;
+ goto done;
+ }
+ if ( lvl > 1 )
+ base_gfn = _gfn(gw->lxe[lvl].mfn);
+ }
+
+ /* We only reach here for a leaf 4K (non-superpage) entry. */
+ gflags = (gw->lxe[1].epte & EPTE_FLAG_MASK) | NEPT_4K_ENTRY_FLAG;
+ gw->lxe[0].epte = (gw->lxe[1].epte & PAGE_MASK) | gflags;
+
+done:
+ ret = EPT_TRANSLATE_SUCCEED;
+ goto out;
+
+map_err:
+ if ( rc == _PAGE_PAGED )
+ {
+ ret = EPT_TRANSLATE_RETRY;
+ goto out;
+ }
+ /* fall through to misconfig error */
+misconfig_err:
+ ret = EPT_TRANSLATE_MISCONFIG;
+ goto out;
+
+non_present:
+ ret = EPT_TRANSLATE_VIOLATION;
+ /* fall through. */
+out:
+ return ret;
+}
+
+/* Translate an L2 guest address to an L1 gpa via the L1 EPT paging structure */
+
+int nept_translate_l2ga(struct vcpu *v, paddr_t l2ga,
+ unsigned int *page_order, uint32_t rwx_acc,
+ unsigned long *l1gfn, uint64_t *exit_qual,
+ uint32_t *exit_reason)
+{
+ uint32_t rc, rwx_bits = 0;
+ ept_walk_t gw;
+ rwx_acc &= EPTE_RWX_MASK;
+
+ *l1gfn = INVALID_GFN;
+
+ rc = nept_walk_tables(v, l2ga, &gw);
+ switch ( rc )
+ {
+ case EPT_TRANSLATE_SUCCEED:
+ if ( likely(gw.lxe[0].epte & NEPT_2M_ENTRY_FLAG) )
+ {
+ rwx_bits = gw.lxe[4].epte & gw.lxe[3].epte & gw.lxe[2].epte &
+ EPTE_RWX_MASK;
+ *page_order = 9;
+ }
+ else if ( gw.lxe[0].epte & NEPT_4K_ENTRY_FLAG )
+ {
+ rwx_bits = gw.lxe[4].epte & gw.lxe[3].epte & gw.lxe[2].epte &
+ gw.lxe[1].epte & EPTE_RWX_MASK;
+ *page_order = 0;
+ }
+ else if ( gw.lxe[0].epte & NEPT_1G_ENTRY_FLAG )
+ {
+ rwx_bits = gw.lxe[4].epte & gw.lxe[3].epte & EPTE_RWX_MASK;
+ *page_order = 18;
+ }
+ else
+ {
+ gdprintk(XENLOG_ERR, "Uncorrect l1 entry!\n");
+ BUG();
+ }
+ if ( nept_permission_check(rwx_acc, rwx_bits) )
+ {
+ *l1gfn = gw.lxe[0].mfn;
+ break;
+ }
+ rc = EPT_TRANSLATE_VIOLATION;
+ /* Fall through to EPT violation if permission check fails. */
+ case EPT_TRANSLATE_VIOLATION:
+ *exit_qual = (*exit_qual & 0xffffffc0) | (rwx_bits << 3) | rwx_acc;
+ *exit_reason = EXIT_REASON_EPT_VIOLATION;
+ break;
+
+ case EPT_TRANSLATE_MISCONFIG:
+ rc = EPT_TRANSLATE_MISCONFIG;
+ *exit_qual = 0;
+ *exit_reason = EXIT_REASON_EPT_MISCONFIG;
+ break;
+ case EPT_TRANSLATE_RETRY:
+ break;
+ default:
+ gdprintk(XENLOG_ERR, "Unsupported ept translation type!:%d\n", rc);
+ BUG();
+ break;
+ }
+ return rc;
+}
diff --git a/xen/arch/x86/mm/hap/nested_hap.c b/xen/arch/x86/mm/hap/nested_hap.c
index a6c641ddb6..d73bfa2d05 100644
--- a/xen/arch/x86/mm/hap/nested_hap.c
+++ b/xen/arch/x86/mm/hap/nested_hap.c
@@ -218,7 +218,7 @@ nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa,
/* let caller to handle these two cases */
switch (rv) {
case NESTEDHVM_PAGEFAULT_INJECT:
- return rv;
+ case NESTEDHVM_PAGEFAULT_RETRY:
case NESTEDHVM_PAGEFAULT_L1_ERROR:
return rv;
case NESTEDHVM_PAGEFAULT_DONE:
diff --git a/xen/include/asm-x86/guest_pt.h b/xen/include/asm-x86/guest_pt.h
index 4e1dda0f59..b62bc6aaa3 100644
--- a/xen/include/asm-x86/guest_pt.h
+++ b/xen/include/asm-x86/guest_pt.h
@@ -315,6 +315,10 @@ guest_walk_to_page_order(walk_t *gw)
#define GPT_RENAME2(_n, _l) _n ## _ ## _l ## _levels
#define GPT_RENAME(_n, _l) GPT_RENAME2(_n, _l)
#define guest_walk_tables GPT_RENAME(guest_walk_tables, GUEST_PAGING_LEVELS)
+#define map_domain_gfn GPT_RENAME(map_domain_gfn, GUEST_PAGING_LEVELS)
+
+void *map_domain_gfn(struct p2m_domain *p2m, gfn_t gfn, mfn_t *mfn,
+ p2m_type_t *p2mt, p2m_query_t q, uint32_t *rc);
extern uint32_t
guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m, unsigned long va,
diff --git a/xen/include/asm-x86/hvm/nestedhvm.h b/xen/include/asm-x86/hvm/nestedhvm.h
index 91fde0b21e..649c511d34 100644
--- a/xen/include/asm-x86/hvm/nestedhvm.h
+++ b/xen/include/asm-x86/hvm/nestedhvm.h
@@ -52,6 +52,7 @@ bool_t nestedhvm_vcpu_in_guestmode(struct vcpu *v);
#define NESTEDHVM_PAGEFAULT_L1_ERROR 2
#define NESTEDHVM_PAGEFAULT_L0_ERROR 3
#define NESTEDHVM_PAGEFAULT_MMIO 4
+#define NESTEDHVM_PAGEFAULT_RETRY 5
int nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa,
bool_t access_r, bool_t access_w, bool_t access_x);
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index 3adffccac8..4e183c4358 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -194,6 +194,7 @@ extern u32 vmx_secondary_exec_control;
extern bool_t cpu_has_vmx_ins_outs_instr_info;
+#define VMX_EPT_EXEC_ONLY_SUPPORTED 0x00000001
#define VMX_EPT_WALK_LENGTH_4_SUPPORTED 0x00000040
#define VMX_EPT_MEMORY_TYPE_UC 0x00000100
#define VMX_EPT_MEMORY_TYPE_WB 0x00004000
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index aa5b0809d1..c73946f99f 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -51,6 +51,22 @@ typedef union {
u64 epte;
} ept_entry_t;
+typedef struct {
+ /* lxe[0] is used to hold the final result of the walk */
+ ept_entry_t lxe[5];
+} ept_walk_t;
+
+typedef enum {
+ ept_access_n = 0, /* No access permissions allowed */
+ ept_access_r = 1, /* Read only */
+ ept_access_w = 2, /* Write only */
+ ept_access_rw = 3, /* Read & Write */
+ ept_access_x = 4, /* Exec Only */
+ ept_access_rx = 5, /* Read & Exec */
+ ept_access_wx = 6, /* Write & Exec*/
+ ept_access_all = 7, /* Full permissions */
+} ept_access_t;
+
#define EPT_TABLE_ORDER 9
#define EPTE_SUPER_PAGE_MASK 0x80
#define EPTE_MFN_MASK 0xffffffffff000ULL
@@ -60,6 +76,17 @@ typedef union {
#define EPTE_AVAIL1_SHIFT 8
#define EPTE_EMT_SHIFT 3
#define EPTE_IGMT_SHIFT 6
+#define EPTE_RWX_MASK 0x7
+#define EPTE_FLAG_MASK 0x7f
+
+#define EPT_EMT_UC 0
+#define EPT_EMT_WC 1
+#define EPT_EMT_RSV0 2
+#define EPT_EMT_RSV1 3
+#define EPT_EMT_WT 4
+#define EPT_EMT_WP 5
+#define EPT_EMT_WB 6
+#define EPT_EMT_RSV2 7
void vmx_asm_vmexit_handler(struct cpu_user_regs);
void vmx_asm_do_vmentry(void);
@@ -191,6 +218,9 @@ void vmx_update_secondary_exec_control(struct vcpu *v);
extern u64 vmx_ept_vpid_cap;
+#define cpu_has_vmx_ept_exec_only_supported \
+ (vmx_ept_vpid_cap & VMX_EPT_EXEC_ONLY_SUPPORTED)
+
#define cpu_has_vmx_ept_wl4_supported \
(vmx_ept_vpid_cap & VMX_EPT_WALK_LENGTH_4_SUPPORTED)
#define cpu_has_vmx_ept_mt_uc \
@@ -419,6 +449,7 @@ void update_guest_eip(void);
#define _EPT_GLA_FAULT 8
#define EPT_GLA_FAULT (1UL<<_EPT_GLA_FAULT)
+#define EPT_L4_PAGETABLE_SHIFT 39
#define EPT_PAGETABLE_ENTRIES 512
#endif /* __ASM_X86_HVM_VMX_VMX_H__ */
diff --git a/xen/include/asm-x86/hvm/vmx/vvmx.h b/xen/include/asm-x86/hvm/vmx/vvmx.h
index 689e684f06..33d059a31f 100644
--- a/xen/include/asm-x86/hvm/vmx/vvmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vvmx.h
@@ -32,6 +32,10 @@ struct nestedvmx {
unsigned long intr_info;
u32 error_code;
} intr;
+ struct {
+ uint32_t exit_reason;
+ uint32_t exit_qual;
+ } ept_exit;
};
#define vcpu_2_nvmx(v) (vcpu_nestedhvm(v).u.nvmx)
@@ -109,6 +113,11 @@ void nvmx_domain_relinquish_resources(struct domain *d);
int nvmx_handle_vmxon(struct cpu_user_regs *regs);
int nvmx_handle_vmxoff(struct cpu_user_regs *regs);
+#define EPT_TRANSLATE_SUCCEED 0
+#define EPT_TRANSLATE_VIOLATION 1
+#define EPT_TRANSLATE_MISCONFIG 2
+#define EPT_TRANSLATE_RETRY 3
+
int
nvmx_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
unsigned int *page_order,
@@ -192,5 +201,9 @@ u64 nvmx_get_tsc_offset(struct vcpu *v);
int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
unsigned int exit_reason);
+int nept_translate_l2ga(struct vcpu *v, paddr_t l2ga,
+ unsigned int *page_order, uint32_t rwx_acc,
+ unsigned long *l1gfn, uint64_t *exit_qual,
+ uint32_t *exit_reason);
#endif /* __ASM_X86_HVM_VVMX_H__ */