author     Zhang Xiantao <xiantao.zhang@intel.com>    2013-01-15 11:09:33 +0100
committer  Zhang Xiantao <xiantao.zhang@intel.com>    2013-01-15 11:09:33 +0100
commit     199707892b20ba62578338ddf5b33e21aea89a3c (patch)
tree       65d51f5ecf4864623fb7d069d15978f86c968ddf /xen/arch/x86/mm
parent     836f8188f26db0148e6844d11c15edb66bec7b8f (diff)
nestedhap: Change hostcr3 and p2m->cr3 to meaningful words
VMX doesn't have the concept of a host CR3 for the nested p2m; only SVM does. Change the naming to vendor-neutral words.

Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
Acked-by: Tim Deegan <tim@xen.org>
Acked-by: Jun Nakajima <jun.nakajima@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Committed-by: Jan Beulich <jbeulich@suse.com>
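As context for the rename, here is a minimal standalone C sketch (illustrative only; these names and types are assumptions, not Xen's real interface) of why a vendor-neutral accessor suits both VMX and SVM:

#include <stdint.h>

#define P2M_BASE_EADDR (~0ULL)   /* sentinel: no nested-p2m base assigned */

struct vcpu_state;               /* forward declaration */

/* Hypothetical per-vendor hook: VMX would report its EPT pointer,
 * SVM its nested CR3 -- both designate the base of the L1-controlled
 * nested p2m, so neither "cr3" nor "hostcr3" names the concept well. */
struct vendor_ops {
    uint64_t (*p2m_base)(const struct vcpu_state *v);
};

struct vcpu_state {
    const struct vendor_ops *ops;
};

/* Vendor-neutral accessor, in the spirit of nhvm_vcpu_p2m_base(). */
static uint64_t nhvm_p2m_base_sketch(const struct vcpu_state *v)
{
    return v->ops->p2m_base(v);
}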
Diffstat (limited to 'xen/arch/x86/mm')
 -rw-r--r--  xen/arch/x86/mm/hap/nested_hap.c | 16
 -rw-r--r--  xen/arch/x86/mm/mm-locks.h       |  2
 -rw-r--r--  xen/arch/x86/mm/p2m.c            | 26
 3 files changed, 23 insertions(+), 21 deletions(-)
diff --git a/xen/arch/x86/mm/hap/nested_hap.c b/xen/arch/x86/mm/hap/nested_hap.c
index 317875d682..9a97d366fd 100644
--- a/xen/arch/x86/mm/hap/nested_hap.c
+++ b/xen/arch/x86/mm/hap/nested_hap.c
@@ -48,9 +48,11 @@
* 1. If #NPF is from L1 guest, then we crash the guest VM (same as old
* code)
* 2. If #NPF is from L2 guest, then we continue from (3)
- * 3. Get h_cr3 from L1 guest. Map h_cr3 into L0 hypervisor address space.
- * 4. Walk the h_cr3 page table
- * 5. - if not present, then we inject #NPF back to L1 guest and
+ * 3. Get np2m base from L1 guest. Map np2m base into L0 hypervisor address
+ * space.
+ * 4. Walk the np2m's page table
+ * 5. - if not present or permission check failure, then we inject #NPF
+ * back to L1 guest and
* re-launch L1 guest (L1 guest will either treat this #NPF as MMIO,
* or fix its p2m table for L2 guest)
* 6. - if present, then we will get a new translated value L1-GPA
@@ -89,7 +91,7 @@ nestedp2m_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,
if (old_flags & _PAGE_PRESENT)
flush_tlb_mask(p2m->dirty_cpumask);
-
+
paging_unlock(d);
}
@@ -110,7 +112,7 @@ nestedhap_fix_p2m(struct vcpu *v, struct p2m_domain *p2m,
/* If this p2m table has been flushed or recycled under our feet,
* leave it alone. We'll pick up the right one as we try to
* vmenter the guest. */
- if ( p2m->cr3 == nhvm_vcpu_hostcr3(v) )
+ if ( p2m->np2m_base == nhvm_vcpu_p2m_base(v) )
{
unsigned long gfn, mask;
mfn_t mfn;
@@ -186,7 +188,7 @@ nestedhap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
uint32_t pfec;
unsigned long nested_cr3, gfn;
- nested_cr3 = nhvm_vcpu_hostcr3(v);
+ nested_cr3 = nhvm_vcpu_p2m_base(v);
pfec = PFEC_user_mode | PFEC_page_present;
if (access_w)
@@ -221,7 +223,7 @@ nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa,
p2m_type_t p2mt_10;
p2m = p2m_get_hostp2m(d); /* L0 p2m */
- nested_p2m = p2m_get_nestedp2m(v, nhvm_vcpu_hostcr3(v));
+ nested_p2m = p2m_get_nestedp2m(v, nhvm_vcpu_p2m_base(v));
/* walk the L1 P2M table */
rv = nestedhap_walk_L1_p2m(v, *L2_gpa, &L1_gpa, &page_order_21,
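
The numbered comment in the hunk above outlines the two-stage walk; the following self-contained sketch (hypothetical helpers, stubbed for illustration; not the real nestedhap_walk_L1_p2m() logic) condenses steps 3-6:

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t paddr_t;

/* Stub walkers, standing in for real hardware-format table walks. */
static bool walk_np2m(paddr_t np2m_base, paddr_t l2_gpa, paddr_t *l1_gpa)
{
    (void)np2m_base;
    *l1_gpa = l2_gpa;            /* identity stub for illustration */
    return true;
}

static bool walk_host_p2m(paddr_t l1_gpa, paddr_t *l0_mpa)
{
    *l0_mpa = l1_gpa;            /* identity stub for illustration */
    return true;
}

/* Steps 3-6: translate L2-GPA -> L1-GPA via the L1-supplied np2m,
 * then L1-GPA -> L0-MPA via the host p2m. */
static int handle_l2_npf(paddr_t np2m_base, paddr_t l2_gpa, paddr_t *l0_mpa)
{
    paddr_t l1_gpa;

    if (!walk_np2m(np2m_base, l2_gpa, &l1_gpa))
        return -1;               /* step 5: inject #NPF back to L1 */

    if (!walk_host_p2m(l1_gpa, l0_mpa))
        return -2;               /* let L0 handle (paging, MMIO, ...) */

    return 0;                    /* step 6: fix the n2 p2m and retry */
}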
diff --git a/xen/arch/x86/mm/mm-locks.h b/xen/arch/x86/mm/mm-locks.h
index 3700e32cc8..3ce3489f20 100644
--- a/xen/arch/x86/mm/mm-locks.h
+++ b/xen/arch/x86/mm/mm-locks.h
@@ -249,7 +249,7 @@ declare_mm_order_constraint(per_page_sharing)
* A per-domain lock that protects the mapping from nested-CR3 to
* nested-p2m. In particular it covers:
* - the array of nested-p2m tables, and all LRU activity therein; and
- * - setting the "cr3" field of any p2m table to a non-CR3_EADDR value.
+ * - setting the "cr3" field of any p2m table to a non-P2M_BASE_EADDR value.
* (i.e. assigning a p2m table to be the shadow of that cr3) */
/* PoD lock (per-p2m-table)
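
A minimal sketch of the invariant the lock comment above describes, using a pthread mutex as a stand-in for the per-domain nested-p2m lock (names here are illustrative, not Xen's):

#include <pthread.h>
#include <stdint.h>

#define P2M_BASE_EADDR (~0ULL)

struct p2m_table { uint64_t np2m_base; };

struct nested_state {
    pthread_mutex_t np2m_lock;   /* stand-in for the per-domain lock */
    struct p2m_table tables[10]; /* array of nested-p2m tables */
};

/* The covered transition: binding a table to a guest base, i.e.
 * moving np2m_base away from the P2M_BASE_EADDR sentinel. */
static void bind_np2m(struct nested_state *n, struct p2m_table *t,
                      uint64_t base)
{
    pthread_mutex_lock(&n->np2m_lock);
    t->np2m_base = base;
    pthread_mutex_unlock(&n->np2m_lock);
}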
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 258f46e21a..41a461b55b 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -69,7 +69,7 @@ static void p2m_initialise(struct domain *d, struct p2m_domain *p2m)
p2m->domain = d;
p2m->default_access = p2m_access_rwx;
- p2m->cr3 = CR3_EADDR;
+ p2m->np2m_base = P2M_BASE_EADDR;
if ( hap_enabled(d) && cpu_has_vmx )
ept_p2m_init(p2m);
@@ -1433,7 +1433,7 @@ p2m_flush_table(struct p2m_domain *p2m)
ASSERT(page_list_empty(&p2m->pod.single));
/* This is no longer a valid nested p2m for any address space */
- p2m->cr3 = CR3_EADDR;
+ p2m->np2m_base = P2M_BASE_EADDR;
/* Zap the top level of the trie */
top = mfn_to_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
@@ -1471,7 +1471,7 @@ p2m_flush_nestedp2m(struct domain *d)
}
struct p2m_domain *
-p2m_get_nestedp2m(struct vcpu *v, uint64_t cr3)
+p2m_get_nestedp2m(struct vcpu *v, uint64_t np2m_base)
{
/* Use volatile to prevent gcc from caching nv->nv_p2m in a cpu register,
 * as this may change within the loop by another (v)cpu.
@@ -1480,8 +1480,8 @@ p2m_get_nestedp2m(struct vcpu *v, uint64_t cr3)
struct domain *d;
struct p2m_domain *p2m;
- /* Mask out low bits; this avoids collisions with CR3_EADDR */
- cr3 &= ~(0xfffull);
+ /* Mask out low bits; this avoids collisions with P2M_BASE_EADDR */
+ np2m_base &= ~(0xfffull);
if (nv->nv_flushp2m && nv->nv_p2m) {
nv->nv_p2m = NULL;
@@ -1493,14 +1493,14 @@ p2m_get_nestedp2m(struct vcpu *v, uint64_t cr3)
if ( p2m )
{
p2m_lock(p2m);
- if ( p2m->cr3 == cr3 || p2m->cr3 == CR3_EADDR )
+ if ( p2m->np2m_base == np2m_base || p2m->np2m_base == P2M_BASE_EADDR )
{
nv->nv_flushp2m = 0;
p2m_getlru_nestedp2m(d, p2m);
nv->nv_p2m = p2m;
- if (p2m->cr3 == CR3_EADDR)
+ if ( p2m->np2m_base == P2M_BASE_EADDR )
hvm_asid_flush_vcpu(v);
- p2m->cr3 = cr3;
+ p2m->np2m_base = np2m_base;
cpumask_set_cpu(v->processor, p2m->dirty_cpumask);
p2m_unlock(p2m);
nestedp2m_unlock(d);
@@ -1515,7 +1515,7 @@ p2m_get_nestedp2m(struct vcpu *v, uint64_t cr3)
p2m_flush_table(p2m);
p2m_lock(p2m);
nv->nv_p2m = p2m;
- p2m->cr3 = cr3;
+ p2m->np2m_base = np2m_base;
nv->nv_flushp2m = 0;
hvm_asid_flush_vcpu(v);
cpumask_set_cpu(v->processor, p2m->dirty_cpumask);
@@ -1531,7 +1531,7 @@ p2m_get_p2m(struct vcpu *v)
if (!nestedhvm_is_n2(v))
return p2m_get_hostp2m(v->domain);
- return p2m_get_nestedp2m(v, nhvm_vcpu_hostcr3(v));
+ return p2m_get_nestedp2m(v, nhvm_vcpu_p2m_base(v));
}
unsigned long paging_gva_to_gfn(struct vcpu *v,
@@ -1549,15 +1549,15 @@ unsigned long paging_gva_to_gfn(struct vcpu *v,
struct p2m_domain *p2m;
const struct paging_mode *mode;
uint32_t pfec_21 = *pfec;
- uint64_t ncr3 = nhvm_vcpu_hostcr3(v);
+ uint64_t np2m_base = nhvm_vcpu_p2m_base(v);
/* translate l2 guest va into l2 guest gfn */
- p2m = p2m_get_nestedp2m(v, ncr3);
+ p2m = p2m_get_nestedp2m(v, np2m_base);
mode = paging_get_nestedmode(v);
gfn = mode->gva_to_gfn(v, p2m, va, pfec);
/* translate l2 guest gfn into l1 guest gfn */
- return hostmode->p2m_ga_to_gfn(v, hostp2m, ncr3,
+ return hostmode->p2m_ga_to_gfn(v, hostp2m, np2m_base,
gfn << PAGE_SHIFT, &pfec_21, NULL);
}
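
Finally, a condensed standalone sketch (assumptions throughout; LRU bookkeeping, flushing, and locking elided) of the find-or-recycle policy visible in the p2m_get_nestedp2m() hunks above:

#include <stdint.h>

#define P2M_BASE_EADDR (~0ULL)

struct np2m { uint64_t np2m_base; };

static struct np2m *get_nestedp2m_sketch(struct np2m *cached,
                                         struct np2m *lru_victim,
                                         uint64_t np2m_base)
{
    /* Mask out low bits: a real (page-aligned) base can then never
     * collide with the all-ones P2M_BASE_EADDR sentinel. */
    np2m_base &= ~0xfffULL;

    /* Reuse the cached table if it already serves this base or is
     * free (sentinel); rebinding a free table implies flushing stale
     * ASIDs in the real code. */
    if (cached && (cached->np2m_base == np2m_base ||
                   cached->np2m_base == P2M_BASE_EADDR)) {
        cached->np2m_base = np2m_base;
        return cached;
    }

    /* Otherwise flush an LRU victim and rebind it to this base. */
    lru_victim->np2m_base = np2m_base;
    return lru_victim;
}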