author    kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>  2005-06-01 09:06:47 +0000
committer kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>  2005-06-01 09:06:47 +0000
commit    8ddeb426ef6bc0a89fafec8bab91feec387fa8f9 (patch)
tree      954de4e185eee9aca6f859868fc1027b51fc148c
parent    fc415a2b7104b694295024b2887282707e589b77 (diff)
bitkeeper revision 1.1624 (429d7aa7Gb7U1ztIGbXeQ0gPWlG51g)
Rename some of the pagetable access macros:

    l?e_create_*  ->  l?e_from_*
    *_phys        ->  *_paddr
    *_value       ->  *_intpte

l?e_add_flags, l?e_remove_flags and l?e_has_changed now take ptes by
value rather than by reference.

The pagetable hypercalls are fixed to use l?e_from_intpte() rather than
abusing l?e_from_paddr(), which munged the NX bit incorrectly on x86/64.
Thanks to Scott Parish for spotting this one.

Signed-off-by: Keir Fraser <keir@xensource.com>
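A minimal sketch of the new spellings, using only the macros defined in
asm-x86/page.h as patched below; the function and its mfn argument are
hypothetical, and this only compiles inside Xen:

    /* Sketch only: relies on the renamed macros from asm-x86/page.h. */
    static void pte_macro_example(unsigned long mfn)
    {
        /* Constructors: l1e_create_pfn() is now spelled l1e_from_pfn(). */
        l1_pgentry_t ol1e = l1e_from_pfn(mfn, __PAGE_HYPERVISOR);
        l1_pgentry_t nl1e = ol1e;

        /* Flag helpers take the entry itself, no longer a pointer:
         *   old: l1e_add_flags(&nl1e, _PAGE_RW);
         *   new: */
        l1e_add_flags(nl1e, _PAGE_RW);

        /* Comparison helpers likewise take ptes by value:
         *   old: l1e_has_changed(&ol1e, &nl1e, _PAGE_RW);
         *   new: */
        if ( l1e_has_changed(ol1e, nl1e, _PAGE_RW) )
            ; /* mapping or RW flag differs */
    }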
-rw-r--r--  xen/arch/x86/audit.c                       4
-rw-r--r--  xen/arch/x86/dom0_ops.c                    2
-rw-r--r--  xen/arch/x86/domain.c                     16
-rw-r--r--  xen/arch/x86/domain_build.c               42
-rw-r--r--  xen/arch/x86/mm.c                         89
-rw-r--r--  xen/arch/x86/shadow.c                    142
-rw-r--r--  xen/arch/x86/traps.c                       2
-rw-r--r--  xen/arch/x86/vmx.c                        10
-rw-r--r--  xen/arch/x86/vmx_io.c                      6
-rw-r--r--  xen/arch/x86/vmx_vmcs.c                    4
-rw-r--r--  xen/arch/x86/x86_32/domain_page.c          4
-rw-r--r--  xen/arch/x86/x86_32/mm.c                  14
-rw-r--r--  xen/arch/x86/x86_32/traps.c                6
-rw-r--r--  xen/arch/x86/x86_64/mm.c                  37
-rw-r--r--  xen/common/grant_table.c                   4
-rw-r--r--  xen/include/asm-x86/page.h               101
-rw-r--r--  xen/include/asm-x86/shadow.h              69
-rw-r--r--  xen/include/asm-x86/x86_32/page-2level.h   4
-rw-r--r--  xen/include/asm-x86/x86_32/page-3level.h   4
-rw-r--r--  xen/include/asm-x86/x86_64/page.h          6
20 files changed, 287 insertions, 279 deletions
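The substantive bug fix shows up in do_mmu_update() (mm.c below), where
entries built from the raw hypercall payload switch from
l?e_create_phys(req.val, req.val) to l?e_from_intpte(req.val). A hedged
sketch of the failure mode follows, assuming put_pte_flags() repacks a
32-bit flags word into the pte's low and high flag bits (that helper is
defined outside this patch); the function name is hypothetical:

    /* Sketch, not Xen source: why the old idiom broke NX on x86/64. */
    static l1_pgentry_t pte_from_hypercall(intpte_t val /* e.g. req.val */)
    {
        /* Old: l1e_create_phys(val, val).  The flags argument is
         * truncated to the 32-bit flags word and repacked by
         * put_pte_flags(), so the pte's NX bit (bit 63) is corrupted
         * and low pte bits leak into the high flag field. */
        /* l1_pgentry_t bad = l1e_from_paddr(val, val); */

        /* New: store the integer representation verbatim. */
        return l1e_from_intpte(val);   /* NX and all other bits preserved */
    }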
diff --git a/xen/arch/x86/audit.c b/xen/arch/x86/audit.c
index 3750ea788a..d389758a93 100644
--- a/xen/arch/x86/audit.c
+++ b/xen/arch/x86/audit.c
@@ -408,9 +408,9 @@ int audit_adjust_pgtables(struct domain *d, int dir, int noisy)
for_each_exec_domain(d, ed)
{
- if ( pagetable_get_phys(ed->arch.guest_table) )
+ if ( pagetable_get_paddr(ed->arch.guest_table) )
adjust(&frame_table[pagetable_get_pfn(ed->arch.guest_table)], 1);
- if ( pagetable_get_phys(ed->arch.shadow_table) )
+ if ( pagetable_get_paddr(ed->arch.shadow_table) )
adjust(&frame_table[pagetable_get_pfn(ed->arch.shadow_table)], 0);
if ( ed->arch.monitor_shadow_ref )
adjust(&frame_table[ed->arch.monitor_shadow_ref], 0);
diff --git a/xen/arch/x86/dom0_ops.c b/xen/arch/x86/dom0_ops.c
index d8fee9e15d..70138111fd 100644
--- a/xen/arch/x86/dom0_ops.c
+++ b/xen/arch/x86/dom0_ops.c
@@ -405,7 +405,7 @@ void arch_getdomaininfo_ctxt(
c->flags |= VGCF_VMX_GUEST;
#endif
- c->pt_base = pagetable_get_phys(ed->arch.guest_table);
+ c->pt_base = pagetable_get_paddr(ed->arch.guest_table);
c->vm_assist = ed->domain->vm_assist;
}
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 200db3be4c..4d49724c4e 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -250,7 +250,7 @@ void arch_do_createdomain(struct exec_domain *ed)
PAGE_SHIFT] = INVALID_M2P_ENTRY;
ed->arch.perdomain_ptes = d->arch.mm_perdomain_pt;
ed->arch.perdomain_ptes[FIRST_RESERVED_GDT_PAGE] =
- l1e_create_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
+ l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
ed->arch.guest_vtable = __linear_l2_table;
ed->arch.shadow_vtable = __shadow_linear_l2_table;
@@ -262,12 +262,12 @@ void arch_do_createdomain(struct exec_domain *ed)
d->arch.mm_perdomain_l2 = (l2_pgentry_t *)alloc_xenheap_page();
memset(d->arch.mm_perdomain_l2, 0, PAGE_SIZE);
d->arch.mm_perdomain_l2[l2_table_offset(PERDOMAIN_VIRT_START)] =
- l2e_create_page(virt_to_page(d->arch.mm_perdomain_pt),
+ l2e_from_page(virt_to_page(d->arch.mm_perdomain_pt),
__PAGE_HYPERVISOR);
d->arch.mm_perdomain_l3 = (l3_pgentry_t *)alloc_xenheap_page();
memset(d->arch.mm_perdomain_l3, 0, PAGE_SIZE);
d->arch.mm_perdomain_l3[l3_table_offset(PERDOMAIN_VIRT_START)] =
- l3e_create_page(virt_to_page(d->arch.mm_perdomain_l2),
+ l3e_from_page(virt_to_page(d->arch.mm_perdomain_l2),
__PAGE_HYPERVISOR);
#endif
@@ -288,7 +288,7 @@ void arch_do_boot_vcpu(struct exec_domain *ed)
ed->arch.perdomain_ptes =
d->arch.mm_perdomain_pt + (ed->vcpu_id << PDPT_VCPU_SHIFT);
ed->arch.perdomain_ptes[FIRST_RESERVED_GDT_PAGE] =
- l1e_create_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
+ l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
}
#ifdef CONFIG_VMX
@@ -460,7 +460,7 @@ int arch_set_info_guest(
// trust the VMX domain builder. Xen should validate this
// page table, and/or build the table itself, or ???
//
- if ( !pagetable_get_phys(d->arch.phys_table) )
+ if ( !pagetable_get_paddr(d->arch.phys_table) )
d->arch.phys_table = ed->arch.guest_table;
if ( (error = vmx_final_setup_guest(ed, c)) )
@@ -660,7 +660,7 @@ long do_switch_to_user(void)
struct exec_domain *ed = current;
if ( unlikely(copy_from_user(&stu, (void *)regs->rsp, sizeof(stu))) ||
- unlikely(pagetable_get_phys(ed->arch.guest_table_user) == 0) )
+ unlikely(pagetable_get_paddr(ed->arch.guest_table_user) == 0) )
return -EFAULT;
toggle_guest_mode(ed);
@@ -978,7 +978,7 @@ void domain_relinquish_resources(struct domain *d)
/* Drop the in-use references to page-table bases. */
for_each_exec_domain ( d, ed )
{
- if ( pagetable_get_phys(ed->arch.guest_table) != 0 )
+ if ( pagetable_get_paddr(ed->arch.guest_table) != 0 )
{
if ( shadow_mode_refcounts(d) )
put_page(&frame_table[pagetable_get_pfn(ed->arch.guest_table)]);
@@ -988,7 +988,7 @@ void domain_relinquish_resources(struct domain *d)
ed->arch.guest_table = mk_pagetable(0);
}
- if ( pagetable_get_phys(ed->arch.guest_table_user) != 0 )
+ if ( pagetable_get_paddr(ed->arch.guest_table_user) != 0 )
{
if ( shadow_mode_refcounts(d) )
put_page(&frame_table[pagetable_get_pfn(ed->arch.guest_table_user)]);
diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
index 01e80e89be..a19bbd16d7 100644
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -263,15 +263,15 @@ int construct_dom0(struct domain *d,
l2start = l2tab = (l2_pgentry_t *)mpt_alloc; mpt_alloc += 4*PAGE_SIZE;
memcpy(l2tab, idle_pg_table_l2, 4*PAGE_SIZE);
for (i = 0; i < 4; i++) {
- l3tab[i] = l3e_create_phys((u32)l2tab + i*PAGE_SIZE, L3_PROT);
+ l3tab[i] = l3e_from_paddr((u32)l2tab + i*PAGE_SIZE, L3_PROT);
l2tab[(LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT)+i] =
- l2e_create_phys((u32)l2tab + i*PAGE_SIZE, __PAGE_HYPERVISOR);
+ l2e_from_paddr((u32)l2tab + i*PAGE_SIZE, __PAGE_HYPERVISOR);
}
unsigned long v;
for (v = PERDOMAIN_VIRT_START; v < PERDOMAIN_VIRT_END;
v += (1 << L2_PAGETABLE_SHIFT)) {
l2tab[v >> L2_PAGETABLE_SHIFT] =
- l2e_create_phys(__pa(d->arch.mm_perdomain_pt) + (v-PERDOMAIN_VIRT_START),
+ l2e_from_paddr(__pa(d->arch.mm_perdomain_pt) + (v-PERDOMAIN_VIRT_START),
__PAGE_HYPERVISOR);
}
ed->arch.guest_table = mk_pagetable((unsigned long)l3start);
@@ -279,9 +279,9 @@ int construct_dom0(struct domain *d,
l2start = l2tab = (l2_pgentry_t *)mpt_alloc; mpt_alloc += PAGE_SIZE;
memcpy(l2tab, &idle_pg_table[0], PAGE_SIZE);
l2tab[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
- l2e_create_phys((unsigned long)l2start, __PAGE_HYPERVISOR);
+ l2e_from_paddr((unsigned long)l2start, __PAGE_HYPERVISOR);
l2tab[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
- l2e_create_phys(__pa(d->arch.mm_perdomain_pt), __PAGE_HYPERVISOR);
+ l2e_from_paddr(__pa(d->arch.mm_perdomain_pt), __PAGE_HYPERVISOR);
ed->arch.guest_table = mk_pagetable((unsigned long)l2start);
#endif
@@ -293,13 +293,13 @@ int construct_dom0(struct domain *d,
{
l1start = l1tab = (l1_pgentry_t *)mpt_alloc;
mpt_alloc += PAGE_SIZE;
- *l2tab = l2e_create_phys((unsigned long)l1start, L2_PROT);
+ *l2tab = l2e_from_paddr((unsigned long)l1start, L2_PROT);
l2tab++;
clear_page(l1tab);
if ( count == 0 )
l1tab += l1_table_offset(dsi.v_start);
}
- *l1tab = l1e_create_pfn(mfn, L1_PROT);
+ *l1tab = l1e_from_pfn(mfn, L1_PROT);
l1tab++;
page = &frame_table[mfn];
@@ -311,13 +311,13 @@ int construct_dom0(struct domain *d,
/* Pages that are part of page tables must be read only. */
l2tab = l2start + l2_linear_offset(vpt_start);
- l1start = l1tab = (l1_pgentry_t *)(u32)l2e_get_phys(*l2tab);
+ l1start = l1tab = (l1_pgentry_t *)(u32)l2e_get_paddr(*l2tab);
l1tab += l1_table_offset(vpt_start);
for ( count = 0; count < nr_pt_pages; count++ )
{
page = &frame_table[l1e_get_pfn(*l1tab)];
if ( !opt_dom0_shadow )
- l1e_remove_flags(l1tab, _PAGE_RW);
+ l1e_remove_flags(*l1tab, _PAGE_RW);
else
if ( !get_page_type(page, PGT_writable_page) )
BUG();
@@ -384,7 +384,7 @@ int construct_dom0(struct domain *d,
}
#endif
if ( !((unsigned long)++l1tab & (PAGE_SIZE - 1)) )
- l1start = l1tab = (l1_pgentry_t *)(u32)l2e_get_phys(*++l2tab);
+ l1start = l1tab = (l1_pgentry_t *)(u32)l2e_get_paddr(*++l2tab);
}
#elif defined(__x86_64__)
@@ -402,9 +402,9 @@ int construct_dom0(struct domain *d,
l4start = l4tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
memcpy(l4tab, &idle_pg_table[0], PAGE_SIZE);
l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] =
- l4e_create_phys(__pa(l4start), __PAGE_HYPERVISOR);
+ l4e_from_paddr(__pa(l4start), __PAGE_HYPERVISOR);
l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
- l4e_create_phys(__pa(d->arch.mm_perdomain_l3), __PAGE_HYPERVISOR);
+ l4e_from_paddr(__pa(d->arch.mm_perdomain_l3), __PAGE_HYPERVISOR);
ed->arch.guest_table = mk_pagetable(__pa(l4start));
l4tab += l4_table_offset(dsi.v_start);
@@ -433,16 +433,16 @@ int construct_dom0(struct domain *d,
clear_page(l3tab);
if ( count == 0 )
l3tab += l3_table_offset(dsi.v_start);
- *l4tab = l4e_create_phys(__pa(l3start), L4_PROT);
+ *l4tab = l4e_from_paddr(__pa(l3start), L4_PROT);
l4tab++;
}
- *l3tab = l3e_create_phys(__pa(l2start), L3_PROT);
+ *l3tab = l3e_from_paddr(__pa(l2start), L3_PROT);
l3tab++;
}
- *l2tab = l2e_create_phys(__pa(l1start), L2_PROT);
+ *l2tab = l2e_from_paddr(__pa(l1start), L2_PROT);
l2tab++;
}
- *l1tab = l1e_create_pfn(mfn, L1_PROT);
+ *l1tab = l1e_from_pfn(mfn, L1_PROT);
l1tab++;
page = &frame_table[mfn];
@@ -463,7 +463,7 @@ int construct_dom0(struct domain *d,
l1tab += l1_table_offset(vpt_start);
for ( count = 0; count < nr_pt_pages; count++ )
{
- l1e_remove_flags(l1tab, _PAGE_RW);
+ l1e_remove_flags(*l1tab, _PAGE_RW);
page = &frame_table[l1e_get_pfn(*l1tab)];
/* Read-only mapping + PGC_allocated + page-table page. */
@@ -633,10 +633,10 @@ int construct_dom0(struct domain *d,
// map this domain's p2m table into current page table,
// so that we can easily access it.
//
- ASSERT( root_get_value(idle_pg_table[1]) == 0 );
- ASSERT( pagetable_get_phys(d->arch.phys_table) );
- idle_pg_table[1] = root_create_phys(
- pagetable_get_phys(d->arch.phys_table), __PAGE_HYPERVISOR);
+ ASSERT( root_get_intpte(idle_pg_table[1]) == 0 );
+ ASSERT( pagetable_get_paddr(d->arch.phys_table) );
+ idle_pg_table[1] = root_from_paddr(
+ pagetable_get_paddr(d->arch.phys_table), __PAGE_HYPERVISOR);
translate_l2pgtable(d, (l1_pgentry_t *)(1u << L2_PAGETABLE_SHIFT),
pagetable_get_pfn(ed->arch.guest_table));
idle_pg_table[1] = root_empty();
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 8d384d839b..872fa49e5f 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -234,7 +234,7 @@ void arch_init_memory(void)
void write_ptbase(struct exec_domain *ed)
{
- write_cr3(pagetable_get_phys(ed->arch.monitor_table));
+ write_cr3(pagetable_get_paddr(ed->arch.monitor_table));
}
void invalidate_shadow_ldt(struct exec_domain *d)
@@ -333,7 +333,7 @@ int map_ldt_shadow_page(unsigned int off)
if ( unlikely(!res) )
return 0;
- nl1e = l1e_create_pfn(gmfn, l1e_get_flags(l1e) | _PAGE_RW);
+ nl1e = l1e_from_pfn(gmfn, l1e_get_flags(l1e) | _PAGE_RW);
ed->arch.perdomain_ptes[off + 16] = nl1e;
ed->arch.shadow_ldt_mapcnt++;
@@ -699,13 +699,13 @@ static inline int fixup_pae_linear_mappings(l3_pgentry_t *pl3e)
return 0;
}
- pl2e = map_domain_mem(l3e_get_phys(pl3e[3]));
+ pl2e = map_domain_mem(l3e_get_paddr(pl3e[3]));
for (i = 0; i < 4; i++) {
vaddr = LINEAR_PT_VIRT_START + (i << L2_PAGETABLE_SHIFT);
idx = (vaddr >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES-1);
if (l3e_get_flags(pl3e[i]) & _PAGE_PRESENT) {
- pl2e[idx] = l2e_create_phys(l3e_get_phys(pl3e[i]),
- __PAGE_HYPERVISOR);
+ pl2e[idx] = l2e_from_paddr(l3e_get_paddr(pl3e[i]),
+ __PAGE_HYPERVISOR);
} else
pl2e[idx] = l2e_empty();
}
@@ -765,9 +765,9 @@ static int alloc_l2_table(struct pfn_info *page, unsigned int type)
&idle_pg_table[L2_PAGETABLE_FIRST_XEN_SLOT],
L2_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));
pl2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
- l2e_create_pfn(pfn, __PAGE_HYPERVISOR);
+ l2e_from_pfn(pfn, __PAGE_HYPERVISOR);
pl2e[l2_table_offset(PERDOMAIN_VIRT_START)] =
- l2e_create_page(
+ l2e_from_page(
virt_to_page(page_get_owner(page)->arch.mm_perdomain_pt),
__PAGE_HYPERVISOR);
#endif
@@ -784,7 +784,7 @@ static int alloc_l2_table(struct pfn_info *page, unsigned int type)
v += (1 << L2_PAGETABLE_SHIFT)) {
dst = (v >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES-1);
virt = page_get_owner(page)->arch.mm_perdomain_pt + (v-PERDOMAIN_VIRT_START);
- pl2e[dst] = l2e_create_page(virt_to_page(virt), __PAGE_HYPERVISOR);
+ pl2e[dst] = l2e_from_page(virt_to_page(virt), __PAGE_HYPERVISOR);
}
/* see fixup_pae_linear_mappings() for linear pagetables */
}
@@ -865,9 +865,9 @@ static int alloc_l4_table(struct pfn_info *page)
&idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
ROOT_PAGETABLE_XEN_SLOTS * sizeof(l4_pgentry_t));
pl4e[l4_table_offset(LINEAR_PT_VIRT_START)] =
- l4e_create_pfn(pfn, __PAGE_HYPERVISOR);
+ l4e_from_pfn(pfn, __PAGE_HYPERVISOR);
pl4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
- l4e_create_page(
+ l4e_from_page(
virt_to_page(page_get_owner(page)->arch.mm_perdomain_l3),
__PAGE_HYPERVISOR);
@@ -956,16 +956,16 @@ static inline int update_l1e(l1_pgentry_t *pl1e,
l1_pgentry_t ol1e,
l1_pgentry_t nl1e)
{
- intpte_t o = l1e_get_value(ol1e);
- intpte_t n = l1e_get_value(nl1e);
+ intpte_t o = l1e_get_intpte(ol1e);
+ intpte_t n = l1e_get_intpte(nl1e);
if ( unlikely(cmpxchg_user(pl1e, o, n) != 0) ||
- unlikely(o != l1e_get_value(ol1e)) )
+ unlikely(o != l1e_get_intpte(ol1e)) )
{
MEM_LOG("Failed to update %" PRIpte " -> %" PRIpte
": saw %" PRIpte "\n",
- l1e_get_value(ol1e),
- l1e_get_value(nl1e),
+ l1e_get_intpte(ol1e),
+ l1e_get_intpte(nl1e),
o);
return 0;
}
@@ -995,7 +995,7 @@ static int mod_l1_entry(l1_pgentry_t *pl1e, l1_pgentry_t nl1e)
}
/* Fast path for identical mapping, r/w and presence. */
- if ( !l1e_has_changed(&ol1e, &nl1e, _PAGE_RW | _PAGE_PRESENT))
+ if ( !l1e_has_changed(ol1e, nl1e, _PAGE_RW | _PAGE_PRESENT))
return update_l1e(pl1e, ol1e, nl1e);
if ( unlikely(!get_page_from_l1e(nl1e, FOREIGNDOM)) )
@@ -1019,14 +1019,15 @@ static int mod_l1_entry(l1_pgentry_t *pl1e, l1_pgentry_t nl1e)
#define UPDATE_ENTRY(_t,_p,_o,_n) ({ \
intpte_t __o = cmpxchg((intpte_t *)(_p), \
- _t ## e_get_value(_o), \
- _t ## e_get_value(_n)); \
- if ( __o != _t ## e_get_value(_o) ) \
- MEM_LOG("Failed to update %" PRIpte " -> %" PRIpte ": saw %" PRIpte "", \
- (_t ## e_get_value(_o)), \
- (_t ## e_get_value(_n)), \
+ _t ## e_get_intpte(_o), \
+ _t ## e_get_intpte(_n)); \
+ if ( __o != _t ## e_get_intpte(_o) ) \
+ MEM_LOG("Failed to update %" PRIpte " -> %" PRIpte \
+ ": saw %" PRIpte "", \
+ (_t ## e_get_intpte(_o)), \
+ (_t ## e_get_intpte(_n)), \
(__o)); \
- (__o == _t ## e_get_value(_o)); })
+ (__o == _t ## e_get_intpte(_o)); })
/* Update the L2 entry at pl2e to new value nl2e. pl2e is within frame pfn. */
static int mod_l2_entry(l2_pgentry_t *pl2e,
@@ -1056,7 +1057,7 @@ static int mod_l2_entry(l2_pgentry_t *pl2e,
}
/* Fast path for identical mapping and presence. */
- if ( !l2e_has_changed(&ol2e, &nl2e, _PAGE_PRESENT))
+ if ( !l2e_has_changed(ol2e, nl2e, _PAGE_PRESENT))
return UPDATE_ENTRY(l2, pl2e, ol2e, nl2e);
vaddr = (((unsigned long)pl2e & ~PAGE_MASK) / sizeof(l2_pgentry_t))
@@ -1111,7 +1112,7 @@ static int mod_l3_entry(l3_pgentry_t *pl3e,
}
/* Fast path for identical mapping and presence. */
- if (!l3e_has_changed(&ol3e, &nl3e, _PAGE_PRESENT))
+ if (!l3e_has_changed(ol3e, nl3e, _PAGE_PRESENT))
return UPDATE_ENTRY(l3, pl3e, ol3e, nl3e);
vaddr = (((unsigned long)pl3e & ~PAGE_MASK) / sizeof(l3_pgentry_t))
@@ -1168,7 +1169,7 @@ static int mod_l4_entry(l4_pgentry_t *pl4e,
}
/* Fast path for identical mapping and presence. */
- if (!l4e_has_changed(&ol4e, &nl4e, _PAGE_PRESENT))
+ if (!l4e_has_changed(ol4e, nl4e, _PAGE_PRESENT))
return UPDATE_ENTRY(l4, pl4e, ol4e, nl4e);
if ( unlikely(!get_page_from_l4e(nl4e, pfn, current->domain)) )
@@ -2029,7 +2030,7 @@ int do_mmu_update(
l1_pgentry_t l1e;
/* FIXME: doesn't work with PAE */
- l1e = l1e_create_phys(req.val, req.val);
+ l1e = l1e_from_intpte(req.val);
okay = mod_l1_entry(va, l1e);
if ( okay && unlikely(shadow_mode_enabled(d)) )
shadow_l1_normal_pt_update(d, req.ptr, l1e, &sh_mapcache);
@@ -2044,7 +2045,7 @@ int do_mmu_update(
l2_pgentry_t l2e;
/* FIXME: doesn't work with PAE */
- l2e = l2e_create_phys(req.val, req.val);
+ l2e = l2e_from_intpte(req.val);
okay = mod_l2_entry((l2_pgentry_t *)va, l2e, mfn, type_info);
if ( okay && unlikely(shadow_mode_enabled(d)) )
shadow_l2_normal_pt_update(d, req.ptr, l2e, &sh_mapcache);
@@ -2059,7 +2060,7 @@ int do_mmu_update(
l3_pgentry_t l3e;
/* FIXME: doesn't work with PAE */
- l3e = l3e_create_phys(req.val,req.val);
+ l3e = l3e_from_intpte(req.val);
okay = mod_l3_entry(va, l3e, mfn);
if ( okay && unlikely(shadow_mode_enabled(d)) )
shadow_l3_normal_pt_update(d, req.ptr, l3e, &sh_mapcache);
@@ -2074,7 +2075,7 @@ int do_mmu_update(
{
l4_pgentry_t l4e;
- l4e = l4e_create_phys(req.val,req.val);
+ l4e = l4e_from_intpte(req.val);
okay = mod_l4_entry(va, l4e, mfn);
if ( okay && unlikely(shadow_mode_enabled(d)) )
shadow_l4_normal_pt_update(d, req.ptr, l4e, &sh_mapcache);
@@ -2251,7 +2252,7 @@ int do_update_va_mapping(unsigned long va,
unsigned long val32,
unsigned long flags)
{
- l1_pgentry_t val = l1e_create_phys(val32,val32);
+ l1_pgentry_t val = l1e_from_intpte(val32);
struct exec_domain *ed = current;
struct domain *d = ed->domain;
unsigned int cpu = ed->processor;
@@ -2420,7 +2421,7 @@ long set_gdt(struct exec_domain *ed,
{
ed->arch.guest_context.gdt_frames[i] = frames[i];
ed->arch.perdomain_ptes[i] =
- l1e_create_pfn(frames[i], __PAGE_HYPERVISOR);
+ l1e_from_pfn(frames[i], __PAGE_HYPERVISOR);
}
return 0;
@@ -2562,7 +2563,7 @@ int revalidate_l1(struct domain *d, l1_pgentry_t *l1page, l1_pgentry_t *snapshot
ol1e = snapshot[i];
nl1e = l1page[i];
- if ( likely(l1e_get_value(ol1e) == l1e_get_value(nl1e)) )
+ if ( likely(l1e_get_intpte(ol1e) == l1e_get_intpte(nl1e)) )
continue;
/* Update number of entries modified. */
@@ -2572,7 +2573,7 @@ int revalidate_l1(struct domain *d, l1_pgentry_t *l1page, l1_pgentry_t *snapshot
* Fast path for PTEs that have merely been write-protected
* (e.g., during a Unix fork()). A strict reduction in privilege.
*/
- if ( likely(l1e_get_value(ol1e) == (l1e_get_value(nl1e)|_PAGE_RW)) )
+ if ( likely(l1e_get_intpte(ol1e) == (l1e_get_intpte(nl1e)|_PAGE_RW)) )
{
if ( likely(l1e_get_flags(nl1e) & _PAGE_PRESENT) )
put_page_type(&frame_table[l1e_get_pfn(nl1e)]);
@@ -2666,7 +2667,7 @@ void ptwr_flush(struct domain *d, const int which)
if ( which == PTWR_PT_ACTIVE )
{
pl2e = &__linear_l2_table[d->arch.ptwr[which].l2_idx];
- l2e_add_flags(pl2e, _PAGE_PRESENT);
+ l2e_add_flags(*pl2e, _PAGE_PRESENT);
}
/*
@@ -2741,7 +2742,7 @@ static int ptwr_emulated_update(
}
/* Check the new PTE. */
- nl1e = l1e_create_phys(val, val & ~PAGE_MASK);
+ nl1e = l1e_from_intpte(val);
if ( unlikely(!get_page_from_l1e(nl1e, d)) )
return X86EMUL_UNHANDLEABLE;
@@ -2749,7 +2750,7 @@ static int ptwr_emulated_update(
pl1e = map_domain_mem(page_to_phys(page) + (addr & ~PAGE_MASK));
if ( do_cmpxchg )
{
- ol1e = l1e_create_phys(old, old & ~PAGE_MASK);
+ ol1e = l1e_from_intpte(old);
if ( cmpxchg((unsigned long *)pl1e, old, val) != old )
{
unmap_domain_mem(pl1e);
@@ -2909,7 +2910,7 @@ int ptwr_do_page_fault(struct domain *d, unsigned long addr)
/* For safety, disconnect the L1 p.t. page from current space. */
if ( which == PTWR_PT_ACTIVE )
{
- l2e_remove_flags(pl2e, _PAGE_PRESENT);
+ l2e_remove_flags(*pl2e, _PAGE_PRESENT);
flush_tlb_mask(d->cpuset);
}
@@ -2920,7 +2921,7 @@ int ptwr_do_page_fault(struct domain *d, unsigned long addr)
L1_PAGETABLE_ENTRIES * sizeof(l1_pgentry_t));
/* Finally, make the p.t. page writable by the guest OS. */
- l1e_add_flags(&pte, _PAGE_RW);
+ l1e_add_flags(pte, _PAGE_RW);
if ( unlikely(__copy_to_user(&linear_pg_table[addr>>PAGE_SHIFT],
&pte, sizeof(pte))) )
{
@@ -2993,7 +2994,7 @@ int map_pages_to_xen(
{
/* Super-page mapping. */
ol2e = *pl2e;
- *pl2e = l2e_create_pfn(pfn, flags|_PAGE_PSE);
+ *pl2e = l2e_from_pfn(pfn, flags|_PAGE_PSE);
if ( (l2e_get_flags(ol2e) & _PAGE_PRESENT) )
{
@@ -3013,22 +3014,22 @@ int map_pages_to_xen(
{
pl1e = page_to_virt(alloc_xen_pagetable());
clear_page(pl1e);
- *pl2e = l2e_create_page(virt_to_page(pl1e), __PAGE_HYPERVISOR);
+ *pl2e = l2e_from_page(virt_to_page(pl1e), __PAGE_HYPERVISOR);
}
else if ( l2e_get_flags(*pl2e) & _PAGE_PSE )
{
pl1e = page_to_virt(alloc_xen_pagetable());
for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
- pl1e[i] = l1e_create_pfn(
+ pl1e[i] = l1e_from_pfn(
l2e_get_pfn(*pl2e) + i,
l2e_get_flags(*pl2e) & ~_PAGE_PSE);
- *pl2e = l2e_create_page(virt_to_page(pl1e), __PAGE_HYPERVISOR);
+ *pl2e = l2e_from_page(virt_to_page(pl1e), __PAGE_HYPERVISOR);
local_flush_tlb_pge();
}
pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(virt);
ol1e = *pl1e;
- *pl1e = l1e_create_pfn(pfn, flags);
+ *pl1e = l1e_from_pfn(pfn, flags);
if ( (l1e_get_flags(ol1e) & _PAGE_PRESENT) )
local_flush_tlb_one(virt);
diff --git a/xen/arch/x86/shadow.c b/xen/arch/x86/shadow.c
index f14c6efbfb..0d89d1931c 100644
--- a/xen/arch/x86/shadow.c
+++ b/xen/arch/x86/shadow.c
@@ -573,7 +573,7 @@ static void free_shadow_pages(struct domain *d)
//
for_each_exec_domain(d, ed)
{
- if ( pagetable_get_phys(ed->arch.shadow_table) )
+ if ( pagetable_get_paddr(ed->arch.shadow_table) )
{
put_shadow_ref(pagetable_get_pfn(ed->arch.shadow_table));
ed->arch.shadow_table = mk_pagetable(0);
@@ -684,7 +684,7 @@ static void alloc_monitor_pagetable(struct exec_domain *ed)
struct pfn_info *mmfn_info;
struct domain *d = ed->domain;
- ASSERT(pagetable_get_phys(ed->arch.monitor_table) == 0);
+ ASSERT(pagetable_get_paddr(ed->arch.monitor_table) == 0);
mmfn_info = alloc_domheap_page(NULL);
ASSERT(mmfn_info != NULL);
@@ -700,12 +700,12 @@ static void alloc_monitor_pagetable(struct exec_domain *ed)
#endif
mpl2e[l2_table_offset(PERDOMAIN_VIRT_START)] =
- l2e_create_phys(__pa(d->arch.mm_perdomain_pt),
+ l2e_from_paddr(__pa(d->arch.mm_perdomain_pt),
__PAGE_HYPERVISOR);
// map the phys_to_machine map into the Read-Only MPT space for this domain
mpl2e[l2_table_offset(RO_MPT_VIRT_START)] =
- l2e_create_phys(pagetable_get_phys(d->arch.phys_table),
+ l2e_from_paddr(pagetable_get_paddr(d->arch.phys_table),
__PAGE_HYPERVISOR);
// Don't (yet) have mappings for these...
@@ -726,7 +726,7 @@ void free_monitor_pagetable(struct exec_domain *ed)
l2_pgentry_t *mpl2e, hl2e, sl2e;
unsigned long mfn;
- ASSERT( pagetable_get_phys(ed->arch.monitor_table) );
+ ASSERT( pagetable_get_paddr(ed->arch.monitor_table) );
mpl2e = ed->arch.monitor_vtable;
@@ -766,7 +766,7 @@ set_p2m_entry(struct domain *d, unsigned long pfn, unsigned long mfn,
struct map_dom_mem_cache *l2cache,
struct map_dom_mem_cache *l1cache)
{
- unsigned long phystab = pagetable_get_phys(d->arch.phys_table);
+ unsigned long phystab = pagetable_get_paddr(d->arch.phys_table);
l2_pgentry_t *l2, l2e;
l1_pgentry_t *l1;
struct pfn_info *l1page;
@@ -789,13 +789,13 @@ set_p2m_entry(struct domain *d, unsigned long pfn, unsigned long mfn,
memset(l1, 0, PAGE_SIZE);
unmap_domain_mem_with_cache(l1, l1cache);
- l2e = l2e_create_page(l1page, __PAGE_HYPERVISOR);
+ l2e = l2e_from_page(l1page, __PAGE_HYPERVISOR);
l2[l2_table_offset(va)] = l2e;
}
unmap_domain_mem_with_cache(l2, l2cache);
- l1 = map_domain_mem_with_cache(l2e_get_phys(l2e), l1cache);
- l1[l1_table_offset(va)] = l1e_create_pfn(mfn, __PAGE_HYPERVISOR);
+ l1 = map_domain_mem_with_cache(l2e_get_paddr(l2e), l1cache);
+ l1[l1_table_offset(va)] = l1e_from_pfn(mfn, __PAGE_HYPERVISOR);
unmap_domain_mem_with_cache(l1, l1cache);
return 1;
@@ -965,7 +965,7 @@ int __shadow_mode_enable(struct domain *d, unsigned int mode)
{
if ( !(new_modes & SHM_external) )
{
- ASSERT( !pagetable_get_phys(d->arch.phys_table) );
+ ASSERT( !pagetable_get_paddr(d->arch.phys_table) );
if ( !alloc_p2m_table(d) )
{
printk("alloc_p2m_table failed (out-of-memory?)\n");
@@ -1051,7 +1051,7 @@ int __shadow_mode_enable(struct domain *d, unsigned int mode)
d->arch.shadow_dirty_bitmap = NULL;
}
if ( (new_modes & SHM_translate) && !(new_modes & SHM_external) &&
- pagetable_get_phys(d->arch.phys_table) )
+ pagetable_get_paddr(d->arch.phys_table) )
{
free_p2m_table(d);
}
@@ -1082,7 +1082,7 @@ translate_l1pgtable(struct domain *d, l1_pgentry_t *p2m, unsigned long l1mfn)
unsigned long mfn = l1e_get_pfn(l1[i]);
unsigned long gpfn = __mfn_to_gpfn(d, mfn);
ASSERT(l1e_get_pfn(p2m[gpfn]) == mfn);
- l1[i] = l1e_create_pfn(gpfn, l1e_get_flags(l1[i]));
+ l1[i] = l1e_from_pfn(gpfn, l1e_get_flags(l1[i]));
}
}
unmap_domain_mem(l1);
@@ -1110,7 +1110,7 @@ translate_l2pgtable(struct domain *d, l1_pgentry_t *p2m, unsigned long l2mfn,
unsigned long mfn = l2e_get_pfn(l2[i]);
unsigned long gpfn = __mfn_to_gpfn(d, mfn);
ASSERT(l1e_get_pfn(p2m[gpfn]) == mfn);
- l2[i] = l2e_create_pfn(gpfn, l2e_get_flags(l2[i]));
+ l2[i] = l2e_from_pfn(gpfn, l2e_get_flags(l2[i]));
translate_l1pgtable(d, p2m, mfn);
}
}
@@ -1404,17 +1404,17 @@ gpfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
perfc_incrc(gpfn_to_mfn_foreign);
unsigned long va = gpfn << PAGE_SHIFT;
- unsigned long phystab = pagetable_get_phys(d->arch.phys_table);
+ unsigned long phystab = pagetable_get_paddr(d->arch.phys_table);
l2_pgentry_t *l2 = map_domain_mem(phystab);
l2_pgentry_t l2e = l2[l2_table_offset(va)];
unmap_domain_mem(l2);
if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
{
printk("gpfn_to_mfn_foreign(d->id=%d, gpfn=%lx) => 0 l2e=%" PRIpte "\n",
- d->domain_id, gpfn, l2e_get_value(l2e));
+ d->domain_id, gpfn, l2e_get_intpte(l2e));
return INVALID_MFN;
}
- unsigned long l1tab = l2e_get_phys(l2e);
+ unsigned long l1tab = l2e_get_paddr(l2e);
l1_pgentry_t *l1 = map_domain_mem(l1tab);
l1_pgentry_t l1e = l1[l1_table_offset(va)];
unmap_domain_mem(l1);
@@ -1427,7 +1427,7 @@ gpfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) )
{
printk("gpfn_to_mfn_foreign(d->id=%d, gpfn=%lx) => 0 l1e=%" PRIpte "\n",
- d->domain_id, gpfn, l1e_get_value(l1e));
+ d->domain_id, gpfn, l1e_get_intpte(l1e));
return INVALID_MFN;
}
@@ -1476,11 +1476,11 @@ shadow_hl2_table(struct domain *d, unsigned long gpfn, unsigned long gmfn,
// Setup easy access to the GL2, SL2, and HL2 frames.
//
hl2[l2_table_offset(LINEAR_PT_VIRT_START)] =
- l1e_create_pfn(gmfn, __PAGE_HYPERVISOR);
+ l1e_from_pfn(gmfn, __PAGE_HYPERVISOR);
hl2[l2_table_offset(SH_LINEAR_PT_VIRT_START)] =
- l1e_create_pfn(smfn, __PAGE_HYPERVISOR);
+ l1e_from_pfn(smfn, __PAGE_HYPERVISOR);
hl2[l2_table_offset(PERDOMAIN_VIRT_START)] =
- l1e_create_pfn(hl2mfn, __PAGE_HYPERVISOR);
+ l1e_from_pfn(hl2mfn, __PAGE_HYPERVISOR);
}
unmap_domain_mem(hl2);
@@ -1530,10 +1530,10 @@ static unsigned long shadow_l2_table(
HYPERVISOR_ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));
spl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] =
- l2e_create_pfn(smfn, __PAGE_HYPERVISOR);
+ l2e_from_pfn(smfn, __PAGE_HYPERVISOR);
spl2e[l2_table_offset(PERDOMAIN_VIRT_START)] =
- l2e_create_phys(__pa(page_get_owner(&frame_table[gmfn])->arch.mm_perdomain_pt),
+ l2e_from_paddr(__pa(page_get_owner(&frame_table[gmfn])->arch.mm_perdomain_pt),
__PAGE_HYPERVISOR);
if ( shadow_mode_translate(d) ) // NB: not external
@@ -1541,7 +1541,7 @@ static unsigned long shadow_l2_table(
unsigned long hl2mfn;
spl2e[l2_table_offset(RO_MPT_VIRT_START)] =
- l2e_create_phys(pagetable_get_phys(d->arch.phys_table),
+ l2e_from_paddr(pagetable_get_paddr(d->arch.phys_table),
__PAGE_HYPERVISOR);
if ( unlikely(!(hl2mfn = __shadow_status(d, gpfn, PGT_hl2_shadow))) )
@@ -1554,11 +1554,11 @@ static unsigned long shadow_l2_table(
BUG();
spl2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
- l2e_create_pfn(hl2mfn, __PAGE_HYPERVISOR);
+ l2e_from_pfn(hl2mfn, __PAGE_HYPERVISOR);
}
else
spl2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
- l2e_create_pfn(gmfn, __PAGE_HYPERVISOR);
+ l2e_from_pfn(gmfn, __PAGE_HYPERVISOR);
}
else
{
@@ -1885,7 +1885,7 @@ void shadow_mark_va_out_of_sync(
// NB: this is stored as a machine address.
entry->writable_pl1e =
- l2e_get_phys(sl2e) | (sizeof(l1_pgentry_t) * l1_table_offset(va));
+ l2e_get_paddr(sl2e) | (sizeof(l1_pgentry_t) * l1_table_offset(va));
ASSERT( !(entry->writable_pl1e & (sizeof(l1_pgentry_t)-1)) );
// Increment shadow's page count to represent the reference
@@ -1920,7 +1920,7 @@ static int snapshot_entry_matches(
// This could probably be smarter, but this is sufficent for
// our current needs.
//
- entries_match = !l1e_has_changed(&guest_pt[index], &snapshot[index],
+ entries_match = !l1e_has_changed(guest_pt[index], snapshot[index],
PAGE_FLAG_MASK);
unmap_domain_mem(snapshot);
@@ -2074,7 +2074,7 @@ static u32 remove_all_write_access_in_ptpage(
((frame_table[pt_mfn].u.inuse.type_info & PGT_type_mask) ==
PGT_l1_shadow);
- match = l1e_create_pfn(readonly_gmfn, flags);
+ match = l1e_from_pfn(readonly_gmfn, flags);
// returns true if all refs have been found and fixed.
//
@@ -2083,7 +2083,7 @@ static u32 remove_all_write_access_in_ptpage(
l1_pgentry_t old = pt[i];
l1_pgentry_t new = old;
- l1e_remove_flags(&new,_PAGE_RW);
+ l1e_remove_flags(new,_PAGE_RW);
if ( is_l1_shadow && !shadow_get_page_from_l1e(new, d) )
BUG();
found++;
@@ -2101,7 +2101,7 @@ static u32 remove_all_write_access_in_ptpage(
}
i = readonly_gpfn & (L1_PAGETABLE_ENTRIES - 1);
- if ( !l1e_has_changed(&pt[i], &match, flags) && fix_entry(i) )
+ if ( !l1e_has_changed(pt[i], match, flags) && fix_entry(i) )
{
perfc_incrc(remove_write_fast_exit);
increase_writable_pte_prediction(d, readonly_gpfn, prediction);
@@ -2111,7 +2111,7 @@ static u32 remove_all_write_access_in_ptpage(
for (i = 0; i < L1_PAGETABLE_ENTRIES; i++)
{
- if ( unlikely(!l1e_has_changed(&pt[i], &match, flags)) && fix_entry(i) )
+ if ( unlikely(!l1e_has_changed(pt[i], match, flags)) && fix_entry(i) )
break;
}
@@ -2216,11 +2216,11 @@ static u32 remove_all_access_in_page(
((frame_table[l1mfn].u.inuse.type_info & PGT_type_mask) ==
PGT_l1_shadow);
- match = l1e_create_pfn(forbidden_gmfn, flags);
+ match = l1e_from_pfn(forbidden_gmfn, flags);
for (i = 0; i < L1_PAGETABLE_ENTRIES; i++)
{
- if ( unlikely(!l1e_has_changed(&pl1e[i], &match, flags) == 0) )
+ if ( unlikely(!l1e_has_changed(pl1e[i], match, flags) == 0) )
{
l1_pgentry_t ol2e = pl1e[i];
pl1e[i] = l1e_empty();
@@ -2361,7 +2361,7 @@ static int resync_all(struct domain *d, u32 stype)
for ( i = min_shadow; i <= max_shadow; i++ )
{
if ( (i < min_snapshot) || (i > max_snapshot) ||
- l1e_has_changed(&guest1[i], &snapshot1[i], PAGE_FLAG_MASK) )
+ l1e_has_changed(guest1[i], snapshot1[i], PAGE_FLAG_MASK) )
{
need_flush |= validate_pte_change(d, guest1[i], &shadow1[i]);
@@ -2399,7 +2399,7 @@ static int resync_all(struct domain *d, u32 stype)
continue;
l2_pgentry_t new_pde = guest2[i];
- if ( l2e_has_changed(&new_pde, &snapshot2[i], PAGE_FLAG_MASK))
+ if ( l2e_has_changed(new_pde, snapshot2[i], PAGE_FLAG_MASK))
{
need_flush |= validate_pde_change(d, new_pde, &shadow2[i]);
@@ -2410,13 +2410,13 @@ static int resync_all(struct domain *d, u32 stype)
changed++;
}
- if ( l2e_get_value(new_pde) != 0 ) /* FIXME: check flags? */
+ if ( l2e_get_intpte(new_pde) != 0 ) /* FIXME: check flags? */
max = i;
// XXX - This hack works for linux guests.
// Need a better solution long term.
if ( !(l2e_get_flags(new_pde) & _PAGE_PRESENT) &&
- unlikely(l2e_get_value(new_pde) != 0) &&
+ unlikely(l2e_get_intpte(new_pde) != 0) &&
!unshadow && MFN_PINNED(smfn) )
unshadow = 1;
}
@@ -2445,7 +2445,7 @@ static int resync_all(struct domain *d, u32 stype)
continue;
l2_pgentry_t new_pde = guest2[i];
- if ( l2e_has_changed(&new_pde, &snapshot2[i], PAGE_FLAG_MASK) )
+ if ( l2e_has_changed(new_pde, snapshot2[i], PAGE_FLAG_MASK) )
{
need_flush |= validate_hl2e_change(d, new_pde, &shadow2[i]);
@@ -2510,7 +2510,7 @@ void __shadow_sync_all(struct domain *d)
l1_pgentry_t *ppte = map_domain_mem(entry->writable_pl1e);
l1_pgentry_t opte = *ppte;
l1_pgentry_t npte = opte;
- l1e_remove_flags(&npte, _PAGE_RW);
+ l1e_remove_flags(npte, _PAGE_RW);
if ( (l1e_get_flags(npte) & _PAGE_PRESENT) &&
!shadow_get_page_from_l1e(npte, d) )
@@ -2595,7 +2595,7 @@ int shadow_fault(unsigned long va, struct cpu_user_regs *regs)
if ( unlikely(!(l1e_get_flags(gpte) & _PAGE_PRESENT)) )
{
SH_VVLOG("shadow_fault - EXIT: gpte not present (%lx)",
- l1e_get_value(gpte));
+ l1e_get_intpte(gpte));
perfc_incrc(shadow_fault_bail_pte_not_present);
goto fail;
}
@@ -2610,13 +2610,13 @@ int shadow_fault(unsigned long va, struct cpu_user_regs *regs)
if ( shadow_mode_page_writable(d, l1e_get_pfn(gpte)) )
{
allow_writes = 1;
- l1e_add_flags(&gpte, _PAGE_RW);
+ l1e_add_flags(gpte, _PAGE_RW);
}
else
{
/* Write fault on a read-only mapping. */
SH_VVLOG("shadow_fault - EXIT: wr fault on RO page (%lx)",
- l1e_get_value(gpte));
+ l1e_get_intpte(gpte));
perfc_incrc(shadow_fault_bail_ro_mapping);
goto fail;
}
@@ -2631,7 +2631,7 @@ int shadow_fault(unsigned long va, struct cpu_user_regs *regs)
}
if ( allow_writes )
- l1e_remove_flags(&gpte, _PAGE_RW);
+ l1e_remove_flags(gpte, _PAGE_RW);
}
else
{
@@ -2647,7 +2647,7 @@ int shadow_fault(unsigned long va, struct cpu_user_regs *regs)
/*
* STEP 3. Write the modified shadow PTE and guest PTE back to the tables.
*/
- if ( l1e_has_changed(&orig_gpte, &gpte, PAGE_FLAG_MASK) )
+ if ( l1e_has_changed(orig_gpte, gpte, PAGE_FLAG_MASK) )
{
/* XXX Watch out for read-only L2 entries! (not used in Linux). */
if ( unlikely(__copy_to_user(&linear_pg_table[l1_linear_offset(va)],
@@ -2655,7 +2655,7 @@ int shadow_fault(unsigned long va, struct cpu_user_regs *regs)
{
printk("%s() failed, crashing domain %d "
"due to a read-only L2 page table (gpde=%" PRIpte "), va=%lx\n",
- __func__,d->domain_id, l2e_get_value(gpde), va);
+ __func__,d->domain_id, l2e_get_intpte(gpde), va);
domain_crash_synchronous();
}
@@ -2693,7 +2693,7 @@ void shadow_l1_normal_pt_update(
if ( sl1mfn )
{
SH_VVLOG("shadow_l1_normal_pt_update pa=%p, gpte=%08lx",
- (void *)pa, l1e_get_value(gpte));
+ (void *)pa, l1e_get_intpte(gpte));
l1pte_propagate_from_guest(current->domain, gpte, &spte);
spl1e = map_domain_mem_with_cache(sl1mfn << PAGE_SHIFT, cache);
@@ -2718,7 +2718,7 @@ void shadow_l2_normal_pt_update(
if ( sl2mfn )
{
SH_VVLOG("shadow_l2_normal_pt_update pa=%p, gpde=%08lx",
- (void *)pa, l2e_get_value(gpde));
+ (void *)pa, l2e_get_intpte(gpde));
spl2e = map_domain_mem_with_cache(sl2mfn << PAGE_SHIFT, cache);
validate_pde_change(d, gpde,
&spl2e[(pa & ~PAGE_MASK) / sizeof(l2_pgentry_t)]);
@@ -2758,7 +2758,7 @@ int shadow_do_update_va_mapping(unsigned long va,
shadow_lock(d);
- //printk("%s(va=%p, val=%p)\n", __func__, (void *)va, (void *)l1e_get_value(val));
+ //printk("%s(va=%p, val=%p)\n", __func__, (void *)va, (void *)l1e_get_intpte(val));
// This is actually overkill - we don't need to sync the L1 itself,
// just everything involved in getting to this L1 (i.e. we need
@@ -2889,14 +2889,14 @@ void __update_pagetables(struct exec_domain *ed)
if ( !get_shadow_ref(hl2mfn) )
BUG();
mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
- l2e_create_pfn(hl2mfn, __PAGE_HYPERVISOR);
+ l2e_from_pfn(hl2mfn, __PAGE_HYPERVISOR);
if ( l2e_get_flags(old_hl2e) & _PAGE_PRESENT )
put_shadow_ref(l2e_get_pfn(old_hl2e));
if ( !get_shadow_ref(smfn) )
BUG();
mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] =
- l2e_create_pfn(smfn, __PAGE_HYPERVISOR);
+ l2e_from_pfn(smfn, __PAGE_HYPERVISOR);
if ( l2e_get_flags(old_sl2e) & _PAGE_PRESENT )
put_shadow_ref(l2e_get_pfn(old_sl2e));
@@ -2938,7 +2938,7 @@ mark_shadows_as_reflecting_snapshot(struct domain *d, unsigned long gpfn)
for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
if ( is_guest_l1_slot(i) &&
(l1e_get_flags(l1e[i]) & _PAGE_PRESENT) )
- l1e_add_flags(&l1e[i], SHADOW_REFLECTS_SNAPSHOT);
+ l1e_add_flags(l1e[i], SHADOW_REFLECTS_SNAPSHOT);
unmap_domain_mem(l1e);
}
@@ -2948,7 +2948,7 @@ mark_shadows_as_reflecting_snapshot(struct domain *d, unsigned long gpfn)
for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
if ( is_guest_l2_slot(i) &&
(l2e_get_flags(l2e[i]) & _PAGE_PRESENT) )
- l2e_add_flags(&l2e[i], SHADOW_REFLECTS_SNAPSHOT);
+ l2e_add_flags(l2e[i], SHADOW_REFLECTS_SNAPSHOT);
unmap_domain_mem(l2e);
}
}
@@ -2968,7 +2968,7 @@ int shadow_status_noswap;
l1_pgentry_t _pte; \
_pte = shadow_linear_pg_table[l1_linear_offset(_a)]; \
if ( l1e_get_flags(_pte) & _PAGE_PRESENT ) \
- _pa = l1e_get_phys(_pte); \
+ _pa = l1e_get_paddr(_pte); \
} \
_pa | (_a & ~PAGE_MASK); \
})
@@ -2981,8 +2981,8 @@ int shadow_status_noswap;
printk("guest_pte=%lx eff_guest_pte=%lx shadow_pte=%lx " \
"snapshot_pte=%lx &guest=%p &shadow=%p &snap=%p " \
"v2m(&guest)=%p v2m(&shadow)=%p v2m(&snap)=%p ea=%08x\n", \
- l1e_get_value(guest_pte), l1e_get_value(eff_guest_pte), \
- l1e_get_value(shadow_pte), l1e_get_value(snapshot_pte), \
+ l1e_get_intpte(guest_pte), l1e_get_intpte(eff_guest_pte), \
+ l1e_get_intpte(shadow_pte), l1e_get_intpte(snapshot_pte), \
p_guest_pte, p_shadow_pte, p_snapshot_pte, \
(void *)v2m(ed, p_guest_pte), (void *)v2m(ed, p_shadow_pte), \
(void *)v2m(ed, p_snapshot_pte), \
@@ -3007,9 +3007,9 @@ static int check_pte(
int errors = 0, guest_writable;
int page_table_page;
- if ( (l1e_get_value(shadow_pte) == 0) ||
- (l1e_get_value(shadow_pte) == 0xdeadface) ||
- (l1e_get_value(shadow_pte) == 0x00000E00) )
+ if ( (l1e_get_intpte(shadow_pte) == 0) ||
+ (l1e_get_intpte(shadow_pte) == 0xdeadface) ||
+ (l1e_get_intpte(shadow_pte) == 0x00000E00) )
return errors; /* always safe */
if ( !(l1e_get_flags(shadow_pte) & _PAGE_PRESENT) )
@@ -3028,7 +3028,7 @@ static int check_pte(
mask = ~(_PAGE_GLOBAL|_PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_RW|_PAGE_AVAIL|PAGE_MASK);
- if ( ((l1e_get_value(shadow_pte) & mask) != (l1e_get_value(eff_guest_pte) & mask)) )
+ if ( ((l1e_get_intpte(shadow_pte) & mask) != (l1e_get_intpte(eff_guest_pte) & mask)) )
FAIL("Corrupt?");
if ( (level == 1) &&
@@ -3049,7 +3049,7 @@ static int check_pte(
if ( !VALID_MFN(eff_guest_mfn) && !shadow_mode_refcounts(d) )
FAIL("%s: invalid eff_guest_pfn=%lx eff_guest_pte=%lx\n", __func__, eff_guest_pfn,
- l1e_get_value(eff_guest_pte));
+ l1e_get_intpte(eff_guest_pte));
page_table_page = mfn_is_page_table(eff_guest_mfn);
@@ -3179,26 +3179,26 @@ int check_l2_table(
FAILPT("hypervisor linear map inconsistent");
#endif
- match = l2e_create_pfn(smfn, __PAGE_HYPERVISOR);
+ match = l2e_from_pfn(smfn, __PAGE_HYPERVISOR);
if ( !shadow_mode_external(d) &&
- l2e_has_changed(&spl2e[SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT],
- &match, PAGE_FLAG_MASK))
+ l2e_has_changed(spl2e[SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT],
+ match, PAGE_FLAG_MASK))
{
FAILPT("hypervisor shadow linear map inconsistent %lx %lx",
- l2e_get_value(spl2e[SH_LINEAR_PT_VIRT_START >>
+ l2e_get_intpte(spl2e[SH_LINEAR_PT_VIRT_START >>
L2_PAGETABLE_SHIFT]),
- l2e_get_value(match));
+ l2e_get_intpte(match));
}
- match = l2e_create_phys(__pa(d->arch.mm_perdomain_pt), __PAGE_HYPERVISOR);
+ match = l2e_from_paddr(__pa(d->arch.mm_perdomain_pt), __PAGE_HYPERVISOR);
if ( !shadow_mode_external(d) &&
- l2e_has_changed(&spl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT],
- &match, PAGE_FLAG_MASK))
+ l2e_has_changed(spl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT],
+ match, PAGE_FLAG_MASK))
{
FAILPT("hypervisor per-domain map inconsistent saw %lx, expected (va=%p) %lx",
- l2e_get_value(spl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT]),
+ l2e_get_intpte(spl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT]),
d->arch.mm_perdomain_pt,
- l2e_get_value(match));
+ l2e_get_intpte(match));
}
#ifdef __i386__
@@ -3285,7 +3285,7 @@ int _check_pagetable(struct exec_domain *ed, char *s)
unsigned long gl1mfn = __gpfn_to_mfn(d, gl1pfn);
unsigned long sl1mfn = l2e_get_pfn(spl2e[i]);
- if ( l2e_get_value(spl2e[i]) != 0 ) /* FIXME: check flags? */
+ if ( l2e_get_intpte(spl2e[i]) != 0 ) /* FIXME: check flags? */
{
errors += check_l1_table(ed, gl1pfn, gl1mfn, sl1mfn, i);
}
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 989a07a381..f9d0b1893e 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -797,7 +797,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
break;
case 3: /* Read CR3 */
- *reg = pagetable_get_phys(ed->arch.guest_table);
+ *reg = pagetable_get_paddr(ed->arch.guest_table);
break;
default:
diff --git a/xen/arch/x86/vmx.c b/xen/arch/x86/vmx.c
index ef51424275..09088ab2de 100644
--- a/xen/arch/x86/vmx.c
+++ b/xen/arch/x86/vmx.c
@@ -142,7 +142,7 @@ static int vmx_do_page_fault(unsigned long va, struct cpu_user_regs *regs)
gpte = gva_to_gpte(va);
if (!(l1e_get_flags(gpte) & _PAGE_PRESENT) )
return 0;
- gpa = l1e_get_phys(gpte) + (va & ~PAGE_MASK);
+ gpa = l1e_get_paddr(gpte) + (va & ~PAGE_MASK);
/* Use 1:1 page table to identify MMIO address space */
if (mmio_space(gpa))
@@ -567,7 +567,7 @@ vmx_world_restore(struct exec_domain *d, struct vmx_assist_context *c)
if (!vmx_paging_enabled(d)) {
VMX_DBG_LOG(DBG_LEVEL_VMMU, "switching to vmxassist. use phys table");
- __vmwrite(GUEST_CR3, pagetable_get_phys(d->domain->arch.phys_table));
+ __vmwrite(GUEST_CR3, pagetable_get_paddr(d->domain->arch.phys_table));
goto skip_cr3;
}
@@ -603,7 +603,7 @@ vmx_world_restore(struct exec_domain *d, struct vmx_assist_context *c)
*/
d->arch.arch_vmx.cpu_cr3 = c->cr3;
VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", c->cr3);
- __vmwrite(GUEST_CR3, pagetable_get_phys(d->arch.shadow_table));
+ __vmwrite(GUEST_CR3, pagetable_get_paddr(d->arch.shadow_table));
}
skip_cr3:
@@ -769,7 +769,7 @@ static int vmx_set_cr0(unsigned long value)
VMX_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
(unsigned long) (mfn << PAGE_SHIFT));
- __vmwrite(GUEST_CR3, pagetable_get_phys(d->arch.shadow_table));
+ __vmwrite(GUEST_CR3, pagetable_get_paddr(d->arch.shadow_table));
/*
* arch->shadow_table should hold the next CR3 for shadow
*/
@@ -896,7 +896,7 @@ static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
d->arch.arch_vmx.cpu_cr3 = value;
VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx",
value);
- __vmwrite(GUEST_CR3, pagetable_get_phys(d->arch.shadow_table));
+ __vmwrite(GUEST_CR3, pagetable_get_paddr(d->arch.shadow_table));
}
break;
}
diff --git a/xen/arch/x86/vmx_io.c b/xen/arch/x86/vmx_io.c
index 3c49b7b905..5010468567 100644
--- a/xen/arch/x86/vmx_io.c
+++ b/xen/arch/x86/vmx_io.c
@@ -466,12 +466,12 @@ void vmx_do_resume(struct exec_domain *d)
{
vmx_stts();
if ( vmx_paging_enabled(d) )
- __vmwrite(GUEST_CR3, pagetable_get_phys(d->arch.shadow_table));
+ __vmwrite(GUEST_CR3, pagetable_get_paddr(d->arch.shadow_table));
else
// paging is not enabled in the guest
- __vmwrite(GUEST_CR3, pagetable_get_phys(d->domain->arch.phys_table));
+ __vmwrite(GUEST_CR3, pagetable_get_paddr(d->domain->arch.phys_table));
- __vmwrite(HOST_CR3, pagetable_get_phys(d->arch.monitor_table));
+ __vmwrite(HOST_CR3, pagetable_get_paddr(d->arch.monitor_table));
__vmwrite(HOST_ESP, (unsigned long)get_stack_bottom());
if (event_pending(d)) {
diff --git a/xen/arch/x86/vmx_vmcs.c b/xen/arch/x86/vmx_vmcs.c
index b21db2a40a..472b1afbe5 100644
--- a/xen/arch/x86/vmx_vmcs.c
+++ b/xen/arch/x86/vmx_vmcs.c
@@ -196,8 +196,8 @@ void vmx_do_launch(struct exec_domain *ed)
error |= __vmwrite(GUEST_TR_BASE, 0);
error |= __vmwrite(GUEST_TR_LIMIT, 0xff);
- __vmwrite(GUEST_CR3, pagetable_get_phys(ed->arch.guest_table));
- __vmwrite(HOST_CR3, pagetable_get_phys(ed->arch.monitor_table));
+ __vmwrite(GUEST_CR3, pagetable_get_paddr(ed->arch.guest_table));
+ __vmwrite(HOST_CR3, pagetable_get_paddr(ed->arch.monitor_table));
__vmwrite(HOST_ESP, (unsigned long)get_stack_bottom());
ed->arch.schedule_tail = arch_vmx_do_resume;
diff --git a/xen/arch/x86/x86_32/domain_page.c b/xen/arch/x86/x86_32/domain_page.c
index 8f3a813f5e..6322f5d411 100644
--- a/xen/arch/x86/x86_32/domain_page.c
+++ b/xen/arch/x86/x86_32/domain_page.c
@@ -74,7 +74,7 @@ void *map_domain_mem(unsigned long pa)
}
while ( l1e_get_flags(cache[idx]) & _PAGE_PRESENT );
- cache[idx] = l1e_create_phys(pa, __PAGE_HYPERVISOR);
+ cache[idx] = l1e_from_paddr(pa, __PAGE_HYPERVISOR);
spin_unlock(&map_lock);
@@ -88,5 +88,5 @@ void unmap_domain_mem(void *va)
ASSERT((void *)MAPCACHE_VIRT_START <= va);
ASSERT(va < (void *)MAPCACHE_VIRT_END);
idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
- l1e_add_flags(&mapcache[idx], READY_FOR_TLB_FLUSH);
+ l1e_add_flags(mapcache[idx], READY_FOR_TLB_FLUSH);
}
diff --git a/xen/arch/x86/x86_32/mm.c b/xen/arch/x86/x86_32/mm.c
index 0bbdabb1d6..507407ffc0 100644
--- a/xen/arch/x86/x86_32/mm.c
+++ b/xen/arch/x86/x86_32/mm.c
@@ -85,9 +85,9 @@ void __init paging_init(void)
if ( (pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER)) == NULL )
panic("Not enough memory to bootstrap Xen.\n");
idle_pg_table_l2[l2_linear_offset(v)] =
- l2e_create_page(pg, __PAGE_HYPERVISOR | _PAGE_PSE);
+ l2e_from_page(pg, __PAGE_HYPERVISOR | _PAGE_PSE);
idle_pg_table_l2[l2_linear_offset(v2)] =
- l2e_create_page(pg, (__PAGE_HYPERVISOR | _PAGE_PSE) & ~_PAGE_RW);
+ l2e_from_page(pg, (__PAGE_HYPERVISOR | _PAGE_PSE) & ~_PAGE_RW);
}
memset((void *)RDWR_MPT_VIRT_START, 0x55, mpt_size);
@@ -99,7 +99,7 @@ void __init paging_init(void)
continue;
if (v >= RO_MPT_VIRT_START && v < RO_MPT_VIRT_END)
continue;
- l2e_add_flags(&idle_pg_table_l2[l2_linear_offset(v)],
+ l2e_add_flags(idle_pg_table_l2[l2_linear_offset(v)],
_PAGE_GLOBAL);
}
}
@@ -109,7 +109,7 @@ void __init paging_init(void)
ioremap_pt = (void *)alloc_xenheap_page();
clear_page(ioremap_pt);
idle_pg_table_l2[l2_linear_offset(v)] =
- l2e_create_page(virt_to_page(ioremap_pt), __PAGE_HYPERVISOR);
+ l2e_from_page(virt_to_page(ioremap_pt), __PAGE_HYPERVISOR);
}
/* Set up mapping cache for domain pages. */
@@ -119,13 +119,13 @@ void __init paging_init(void)
v += (1 << L2_PAGETABLE_SHIFT), i++) {
clear_page(mapcache + i*L1_PAGETABLE_ENTRIES);
idle_pg_table_l2[l2_linear_offset(v)] =
- l2e_create_page(virt_to_page(mapcache + i*L1_PAGETABLE_ENTRIES),
+ l2e_from_page(virt_to_page(mapcache + i*L1_PAGETABLE_ENTRIES),
__PAGE_HYPERVISOR);
}
for (v = LINEAR_PT_VIRT_START; v != LINEAR_PT_VIRT_END; v += (1 << L2_PAGETABLE_SHIFT)) {
idle_pg_table_l2[l2_linear_offset(v)] =
- l2e_create_page(virt_to_page(idle_pg_table_l2 + ((v-RDWR_MPT_VIRT_START) >> PAGETABLE_ORDER)),
+ l2e_from_page(virt_to_page(idle_pg_table_l2 + ((v-RDWR_MPT_VIRT_START) >> PAGETABLE_ORDER)),
__PAGE_HYPERVISOR);
}
}
@@ -139,7 +139,7 @@ void __init zap_low_mappings(l2_pgentry_t *base)
addr = (i << L2_PAGETABLE_SHIFT);
if (addr >= HYPERVISOR_VIRT_START)
break;
- if (l2e_get_phys(base[i]) != addr)
+ if (l2e_get_paddr(base[i]) != addr)
continue;
base[i] = l2e_empty();
}
diff --git a/xen/arch/x86/x86_32/traps.c b/xen/arch/x86/x86_32/traps.c
index 8bb876f3f3..98b6036c76 100644
--- a/xen/arch/x86/x86_32/traps.c
+++ b/xen/arch/x86/x86_32/traps.c
@@ -101,15 +101,15 @@ void show_page_walk(unsigned long addr)
printk("Pagetable walk from %08lx:\n", addr);
pmd = idle_pg_table_l2[l2_linear_offset(addr)];
- printk(" L2 = %08llx %s\n", (u64)l2e_get_value(pmd),
+ printk(" L2 = %"PRIpte" %s\n", l2e_get_intpte(pmd),
(l2e_get_flags(pmd) & _PAGE_PSE) ? "(2/4MB)" : "");
if ( !(l2e_get_flags(pmd) & _PAGE_PRESENT) ||
(l2e_get_flags(pmd) & _PAGE_PSE) )
return;
- pte = __va(l2e_get_phys(pmd));
+ pte = __va(l2e_get_paddr(pmd));
pte += l1_table_offset(addr);
- printk(" L1 = %08llx\n", (u64)l1e_get_value(*pte));
+ printk(" L1 = %"PRIpte"\n", l1e_get_intpte(*pte));
}
#define DOUBLEFAULT_STACK_SIZE 1024
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index 147d4eed31..28c7da6590 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -1,21 +1,20 @@
/******************************************************************************
* arch/x86/x86_64/mm.c
*
- * Modifications to Linux original are copyright (c) 2004, K A Fraser
+ * Modifications to Linux original are copyright (c) 2004, K A Fraser tr This
+ * program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
*
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <xen/config.h>
@@ -57,7 +56,7 @@ l2_pgentry_t *virt_to_xen_l2e(unsigned long v)
{
pl3e = page_to_virt(alloc_xen_pagetable());
clear_page(pl3e);
- *pl4e = l4e_create_phys(__pa(pl3e), __PAGE_HYPERVISOR);
+ *pl4e = l4e_from_paddr(__pa(pl3e), __PAGE_HYPERVISOR);
}
pl3e = l4e_to_l3e(*pl4e) + l3_table_offset(v);
@@ -65,7 +64,7 @@ l2_pgentry_t *virt_to_xen_l2e(unsigned long v)
{
pl2e = page_to_virt(alloc_xen_pagetable());
clear_page(pl2e);
- *pl3e = l3e_create_phys(__pa(pl2e), __PAGE_HYPERVISOR);
+ *pl3e = l3e_from_paddr(__pa(pl2e), __PAGE_HYPERVISOR);
}
pl2e = l3e_to_l2e(*pl3e) + l2_table_offset(v);
@@ -85,12 +84,12 @@ void __init paging_init(void)
l3_ro_mpt = (l3_pgentry_t *)alloc_xenheap_page();
clear_page(l3_ro_mpt);
idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)] =
- l4e_create_page(
+ l4e_from_page(
virt_to_page(l3_ro_mpt), __PAGE_HYPERVISOR | _PAGE_USER);
l2_ro_mpt = (l2_pgentry_t *)alloc_xenheap_page();
clear_page(l2_ro_mpt);
l3_ro_mpt[l3_table_offset(RO_MPT_VIRT_START)] =
- l3e_create_page(
+ l3e_from_page(
virt_to_page(l2_ro_mpt), __PAGE_HYPERVISOR | _PAGE_USER);
l2_ro_mpt += l2_table_offset(RO_MPT_VIRT_START);
@@ -109,14 +108,14 @@ void __init paging_init(void)
PAGE_HYPERVISOR);
memset((void *)(RDWR_MPT_VIRT_START + i*8), 0x55,
1UL << L2_PAGETABLE_SHIFT);
- *l2_ro_mpt++ = l2e_create_page(
+ *l2_ro_mpt++ = l2e_from_page(
pg, _PAGE_GLOBAL|_PAGE_PSE|_PAGE_USER|_PAGE_PRESENT);
BUG_ON(((unsigned long)l2_ro_mpt & ~PAGE_MASK) == 0);
}
/* Set up linear page table mapping. */
idle_pg_table[l4_table_offset(LINEAR_PT_VIRT_START)] =
- l4e_create_phys(__pa(idle_pg_table), __PAGE_HYPERVISOR);
+ l4e_from_paddr(__pa(idle_pg_table), __PAGE_HYPERVISOR);
}
void __init zap_low_mappings(void)
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index 97467983de..4efd650546 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -257,9 +257,9 @@ __gnttab_activate_grant_ref(
{
/* Write update into the pagetable. */
l1_pgentry_t pte;
- pte = l1e_create_pfn(frame, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
+ pte = l1e_from_pfn(frame, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
if ( !(dev_hst_ro_flags & GNTMAP_readonly) )
- l1e_add_flags(&pte,_PAGE_RW);
+ l1e_add_flags(pte,_PAGE_RW);
rc = update_grant_va_mapping( host_virt_addr, pte,
mapping_d, mapping_ed );
diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
index e82303b6e7..258ec2df85 100644
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -20,11 +20,11 @@
# include <asm/x86_64/page.h>
#endif
-/* Get pte contents as an integer (intpte_t). */
-#define l1e_get_value(x) ((x).l1)
-#define l2e_get_value(x) ((x).l2)
-#define l3e_get_value(x) ((x).l3)
-#define l4e_get_value(x) ((x).l4)
+/* Get direct integer representation of a pte's contents (intpte_t). */
+#define l1e_get_intpte(x) ((x).l1)
+#define l2e_get_intpte(x) ((x).l2)
+#define l3e_get_intpte(x) ((x).l3)
+#define l4e_get_intpte(x) ((x).l4)
/* Get pfn mapped by pte (unsigned long). */
#define l1e_get_pfn(x) \
@@ -37,15 +37,21 @@
((unsigned long)(((x).l4 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))
/* Get physical address of page mapped by pte (physaddr_t). */
-#define l1e_get_phys(x) \
+#define l1e_get_paddr(x) \
((physaddr_t)(((x).l1 & (PADDR_MASK&PAGE_MASK))))
-#define l2e_get_phys(x) \
+#define l2e_get_paddr(x) \
((physaddr_t)(((x).l2 & (PADDR_MASK&PAGE_MASK))))
-#define l3e_get_phys(x) \
+#define l3e_get_paddr(x) \
((physaddr_t)(((x).l3 & (PADDR_MASK&PAGE_MASK))))
-#define l4e_get_phys(x) \
+#define l4e_get_paddr(x) \
((physaddr_t)(((x).l4 & (PADDR_MASK&PAGE_MASK))))
+/* Get pointer to info structure of page mapped by pte (struct pfn_info *). */
+#define l1e_get_page(x) (pfn_to_page(l1e_get_pfn(x)))
+#define l2e_get_page(x) (pfn_to_page(l2e_get_pfn(x)))
+#define l3e_get_page(x) (pfn_to_page(l3e_get_pfn(x)))
+#define l4e_get_page(x) (pfn_to_page(l4e_get_pfn(x)))
+
/* Get pte access flags (unsigned int). */
#define l1e_get_flags(x) (get_pte_flags((x).l1))
#define l2e_get_flags(x) (get_pte_flags((x).l2))
@@ -59,51 +65,63 @@
#define l4e_empty() ((l4_pgentry_t) { 0 })
/* Construct a pte from a pfn and access flags. */
-#define l1e_create_pfn(pfn, flags) \
+#define l1e_from_pfn(pfn, flags) \
((l1_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
-#define l2e_create_pfn(pfn, flags) \
+#define l2e_from_pfn(pfn, flags) \
((l2_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
-#define l3e_create_pfn(pfn, flags) \
+#define l3e_from_pfn(pfn, flags) \
((l3_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
-#define l4e_create_pfn(pfn, flags) \
+#define l4e_from_pfn(pfn, flags) \
((l4_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
/* Construct a pte from a physical address and access flags. */
-#define l1e_create_phys(pa, flags) \
+#define l1e_from_paddr(pa, flags) \
((l1_pgentry_t) { (pa) | put_pte_flags(flags) })
-#define l2e_create_phys(pa, flags) \
+#define l2e_from_paddr(pa, flags) \
((l2_pgentry_t) { (pa) | put_pte_flags(flags) })
-#define l3e_create_phys(pa, flags) \
+#define l3e_from_paddr(pa, flags) \
((l3_pgentry_t) { (pa) | put_pte_flags(flags) })
-#define l4e_create_phys(pa, flags) \
+#define l4e_from_paddr(pa, flags) \
((l4_pgentry_t) { (pa) | put_pte_flags(flags) })
+/* Construct a pte from its direct integer representation. */
+#define l1e_from_intpte(intpte) ((l1_pgentry_t) { (intpte_t)(intpte) })
+#define l2e_from_intpte(intpte) ((l2_pgentry_t) { (intpte_t)(intpte) })
+#define l3e_from_intpte(intpte) ((l3_pgentry_t) { (intpte_t)(intpte) })
+#define l4e_from_intpte(intpte) ((l4_pgentry_t) { (intpte_t)(intpte) })
+
+/* Construct a pte from a page pointer and access flags. */
+#define l1e_from_page(page, flags) (l1e_from_pfn(page_to_pfn(page),(flags)))
+#define l2e_from_page(page, flags) (l2e_from_pfn(page_to_pfn(page),(flags)))
+#define l3e_from_page(page, flags) (l3e_from_pfn(page_to_pfn(page),(flags)))
+#define l4e_from_page(page, flags) (l4e_from_pfn(page_to_pfn(page),(flags)))
+
/* Add extra flags to an existing pte. */
-#define l1e_add_flags(x, flags) ((x)->l1 |= put_pte_flags(flags))
-#define l2e_add_flags(x, flags) ((x)->l2 |= put_pte_flags(flags))
-#define l3e_add_flags(x, flags) ((x)->l3 |= put_pte_flags(flags))
-#define l4e_add_flags(x, flags) ((x)->l4 |= put_pte_flags(flags))
+#define l1e_add_flags(x, flags) ((x).l1 |= put_pte_flags(flags))
+#define l2e_add_flags(x, flags) ((x).l2 |= put_pte_flags(flags))
+#define l3e_add_flags(x, flags) ((x).l3 |= put_pte_flags(flags))
+#define l4e_add_flags(x, flags) ((x).l4 |= put_pte_flags(flags))
/* Remove flags from an existing pte. */
-#define l1e_remove_flags(x, flags) ((x)->l1 &= ~put_pte_flags(flags))
-#define l2e_remove_flags(x, flags) ((x)->l2 &= ~put_pte_flags(flags))
-#define l3e_remove_flags(x, flags) ((x)->l3 &= ~put_pte_flags(flags))
-#define l4e_remove_flags(x, flags) ((x)->l4 &= ~put_pte_flags(flags))
+#define l1e_remove_flags(x, flags) ((x).l1 &= ~put_pte_flags(flags))
+#define l2e_remove_flags(x, flags) ((x).l2 &= ~put_pte_flags(flags))
+#define l3e_remove_flags(x, flags) ((x).l3 &= ~put_pte_flags(flags))
+#define l4e_remove_flags(x, flags) ((x).l4 &= ~put_pte_flags(flags))
/* Check if a pte's page mapping or significant access flags have changed. */
#define l1e_has_changed(x,y,flags) \
- ( !!(((x)->l1 ^ (y)->l1) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
+ ( !!(((x).l1 ^ (y).l1) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
#define l2e_has_changed(x,y,flags) \
- ( !!(((x)->l2 ^ (y)->l2) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
+ ( !!(((x).l2 ^ (y).l2) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
#define l3e_has_changed(x,y,flags) \
- ( !!(((x)->l3 ^ (y)->l3) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
+ ( !!(((x).l3 ^ (y).l3) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
#define l4e_has_changed(x,y,flags) \
- ( !!(((x)->l4 ^ (y)->l4) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
+ ( !!(((x).l4 ^ (y).l4) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
/* Pagetable walking. */
-#define l2e_to_l1e(x) ((l1_pgentry_t *)__va(l2e_get_phys(x)))
-#define l3e_to_l2e(x) ((l2_pgentry_t *)__va(l3e_get_phys(x)))
-#define l4e_to_l3e(x) ((l3_pgentry_t *)__va(l4e_get_phys(x)))
+#define l2e_to_l1e(x) ((l1_pgentry_t *)__va(l2e_get_paddr(x)))
+#define l3e_to_l2e(x) ((l2_pgentry_t *)__va(l3e_get_paddr(x)))
+#define l4e_to_l3e(x) ((l3_pgentry_t *)__va(l4e_get_paddr(x)))
/* Given a virtual address, get an entry offset into a page table. */
#define l1_table_offset(a) \
@@ -116,7 +134,7 @@
(((a) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1))
/* Convert a pointer to a page-table entry into pagetable slot index. */
-#define pgentry_ptr_to_slot(_p) \
+#define pgentry_ptr_to_slot(_p) \
(((unsigned long)(_p) & ~PAGE_MASK) / sizeof(*(_p)))
/* Page-table type. */
@@ -131,9 +149,10 @@ typedef struct { u32 pfn; } pagetable_t;
/* x86_64 */
typedef struct { u64 pfn; } pagetable_t;
#endif
-#define pagetable_get_phys(_x) ((physaddr_t)(_x).pfn << PAGE_SHIFT)
-#define pagetable_get_pfn(_x) ((_x).pfn)
-#define mk_pagetable(_phys) ({ pagetable_t __p; __p.pfn = _phys >> PAGE_SHIFT; __p; })
+#define pagetable_get_paddr(x) ((physaddr_t)(x).pfn << PAGE_SHIFT)
+#define pagetable_get_pfn(x) ((x).pfn)
+#define mk_pagetable(pa) \
+ ({ pagetable_t __p; __p.pfn = (pa) >> PAGE_SHIFT; __p; })
#endif
#define clear_page(_p) memset((void *)(_p), 0, PAGE_SIZE)
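/* Illustrative sketch (not from the Xen tree): round trip through the
 * pagetable_t helpers renamed in the hunk above. physaddr_t and
 * PAGE_SHIFT are simplified stand-ins; mk_pagetable() relies on a GCC
 * statement expression, as the real macro does. */
#include <stdint.h>
#include <assert.h>

typedef uint64_t physaddr_t;
typedef struct { uint64_t pfn; } pagetable_t;
#define PAGE_SHIFT 12

#define pagetable_get_paddr(x) ((physaddr_t)(x).pfn << PAGE_SHIFT)
#define pagetable_get_pfn(x)   ((x).pfn)
#define mk_pagetable(pa) \
    ({ pagetable_t __p; __p.pfn = (pa) >> PAGE_SHIFT; __p; })

int main(void)
{
    pagetable_t pt = mk_pagetable((physaddr_t)0x12345000ULL);
    assert(pagetable_get_pfn(pt)   == 0x12345);
    assert(pagetable_get_paddr(pt) == 0x12345000ULL);
    return 0;
}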
@@ -147,16 +166,6 @@ typedef struct { u64 pfn; } pagetable_t;
#define virt_to_page(kaddr) (frame_table + (__pa(kaddr) >> PAGE_SHIFT))
#define pfn_valid(_pfn) ((_pfn) < max_page)
-#define l1e_get_page(_x) (pfn_to_page(l1e_get_pfn(_x)))
-#define l2e_get_page(_x) (pfn_to_page(l2e_get_pfn(_x)))
-#define l3e_get_page(_x) (pfn_to_page(l3e_get_pfn(_x)))
-#define l4e_get_page(_x) (pfn_to_page(l4e_get_pfn(_x)))
-
-#define l1e_create_page(_x,_y) (l1e_create_pfn(page_to_pfn(_x),(_y)))
-#define l2e_create_page(_x,_y) (l2e_create_pfn(page_to_pfn(_x),(_y)))
-#define l3e_create_page(_x,_y) (l3e_create_pfn(page_to_pfn(_x),(_y)))
-#define l4e_create_page(_x,_y) (l4e_create_pfn(page_to_pfn(_x),(_y)))
-
/* High table entries are reserved by the hypervisor. */
/* FIXME: this breaks with PAE -- kraxel */
#define DOMAIN_ENTRIES_PER_L2_PAGETABLE \
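The three constructor families above differ in what they trust: l?e_from_pfn() and l?e_from_paddr() rebuild the flag encoding through put_pte_flags(), while l?e_from_intpte() takes the 64-bit entry verbatim. A standalone sketch of why the verbatim form matters once flag bits live above the address bits (NX is bit 63 on x86/64); put_pte_flags() here is a deliberately simplified stand-in that keeps only the low 12 bits:

#include <stdint.h>
#include <assert.h>

typedef uint64_t intpte_t;
typedef struct { intpte_t l1; } l1_pgentry_t;

#define PADDR_MASK       ((1ULL << 52) - 1)      /* assumption: 52 paddr bits */
#define PAGE_MASK        (~0xfffULL)
#define put_pte_flags(x) ((intpte_t)(x) & 0xfff) /* simplified: low bits only */

#define l1e_from_paddr(pa, flags) \
    ((l1_pgentry_t) { (pa) | put_pte_flags(flags) })
#define l1e_from_intpte(v) ((l1_pgentry_t) { (intpte_t)(v) })

int main(void)
{
    /* NX (bit 63) + pfn 2 + PRESENT|RW */
    intpte_t raw = (1ULL << 63) | 0x2000 | 0x3;

    /* The verbatim constructor preserves every bit, including NX. */
    assert(l1e_from_intpte(raw).l1 == raw);

    /* Rebuilding from paddr + 12-bit flags silently drops bit 63 here. */
    l1_pgentry_t rebuilt =
        l1e_from_paddr(raw & (PADDR_MASK & PAGE_MASK), raw & 0xfff);
    assert(rebuilt.l1 != raw);
    return 0;
}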
diff --git a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
index 665e5e9026..d68ba114eb 100644
--- a/xen/include/asm-x86/shadow.h
+++ b/xen/include/asm-x86/shadow.h
@@ -377,7 +377,7 @@ shadow_get_page_from_l1e(l1_pgentry_t l1e, struct domain *d)
return 1;
nl1e = l1e;
- l1e_remove_flags(&nl1e, _PAGE_GLOBAL);
+ l1e_remove_flags(nl1e, _PAGE_GLOBAL);
res = get_page_from_l1e(nl1e, d);
if ( unlikely(!res) && IS_PRIV(d) && !shadow_mode_translate(d) &&
@@ -398,7 +398,7 @@ shadow_get_page_from_l1e(l1_pgentry_t l1e, struct domain *d)
{
perfc_incrc(shadow_get_page_fail);
FSH_LOG("%s failed to get ref l1e=%lx\n",
- __func__, l1e_get_value(l1e));
+ __func__, l1e_get_intpte(l1e));
}
return res;
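/* Illustrative sketch (not from the Xen tree): the new mutators take the
 * entry itself rather than a pointer, but as macros they still update the
 * named lvalue in place -- which is why the code above copies (nl1e = l1e)
 * before stripping _PAGE_GLOBAL. The flag value is the usual x86 global
 * bit; put_pte_flags() is simplified. */
#include <stdint.h>
#include <assert.h>

typedef uint64_t intpte_t;
typedef struct { intpte_t l1; } l1_pgentry_t;

#define _PAGE_GLOBAL     0x100U
#define put_pte_flags(x) ((intpte_t)(x))
#define l1e_remove_flags(x, flags) ((x).l1 &= ~put_pte_flags(flags))

int main(void)
{
    l1_pgentry_t l1e  = { 0x2000 | _PAGE_GLOBAL };
    l1_pgentry_t nl1e = l1e;               /* copy first, as the caller does */

    l1e_remove_flags(nl1e, _PAGE_GLOBAL);  /* macro mutates nl1e in place */
    assert(!(nl1e.l1 & _PAGE_GLOBAL));
    assert(l1e.l1 & _PAGE_GLOBAL);         /* original entry untouched */
    return 0;
}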
@@ -558,13 +558,13 @@ update_hl2e(struct exec_domain *ed, unsigned long va)
if ( (l2e_get_flags(gl2e) & _PAGE_PRESENT) &&
VALID_MFN(mfn = phys_to_machine_mapping(l2e_get_pfn(gl2e))) )
- new_hl2e = l1e_create_pfn(mfn, __PAGE_HYPERVISOR);
+ new_hl2e = l1e_from_pfn(mfn, __PAGE_HYPERVISOR);
else
new_hl2e = l1e_empty();
// only do the ref counting if something has changed.
//
- if ( (l1e_has_changed(&old_hl2e, &new_hl2e, PAGE_FLAG_MASK)) )
+ if ( (l1e_has_changed(old_hl2e, new_hl2e, PAGE_FLAG_MASK)) )
{
if ( (l1e_get_flags(new_hl2e) & _PAGE_PRESENT) &&
!shadow_get_page(ed->domain, pfn_to_page(l1e_get_pfn(new_hl2e)),
@@ -735,11 +735,11 @@ static inline int l1pte_write_fault(
}
ASSERT(l1e_get_flags(gpte) & _PAGE_RW);
- l1e_add_flags(&gpte, _PAGE_DIRTY | _PAGE_ACCESSED);
- spte = l1e_create_pfn(gmfn, l1e_get_flags(gpte) & ~_PAGE_GLOBAL);
+ l1e_add_flags(gpte, _PAGE_DIRTY | _PAGE_ACCESSED);
+ spte = l1e_from_pfn(gmfn, l1e_get_flags(gpte) & ~_PAGE_GLOBAL);
SH_VVLOG("l1pte_write_fault: updating spte=0x%lx gpte=0x%lx",
- l1e_get_value(spte), l1e_get_value(gpte));
+ l1e_get_intpte(spte), l1e_get_intpte(gpte));
if ( shadow_mode_log_dirty(d) )
__mark_dirty(d, gmfn);
@@ -768,17 +768,17 @@ static inline int l1pte_read_fault(
return 0;
}
- l1e_add_flags(&gpte, _PAGE_ACCESSED);
- spte = l1e_create_pfn(mfn, l1e_get_flags(gpte) & ~_PAGE_GLOBAL);
+ l1e_add_flags(gpte, _PAGE_ACCESSED);
+ spte = l1e_from_pfn(mfn, l1e_get_flags(gpte) & ~_PAGE_GLOBAL);
if ( shadow_mode_log_dirty(d) || !(l1e_get_flags(gpte) & _PAGE_DIRTY) ||
mfn_is_page_table(mfn) )
{
- l1e_remove_flags(&spte, _PAGE_RW);
+ l1e_remove_flags(spte, _PAGE_RW);
}
SH_VVLOG("l1pte_read_fault: updating spte=0x%lx gpte=0x%lx",
- l1e_get_value(spte), l1e_get_value(gpte));
+ l1e_get_intpte(spte), l1e_get_intpte(gpte));
*gpte_p = gpte;
*spte_p = spte;
@@ -797,21 +797,20 @@ static inline void l1pte_propagate_from_guest(
(_PAGE_PRESENT|_PAGE_ACCESSED)) &&
VALID_MFN(mfn = __gpfn_to_mfn(d, l1e_get_pfn(gpte))) )
{
- spte = l1e_create_pfn(mfn,
- l1e_get_flags(gpte) &
- ~(_PAGE_GLOBAL | _PAGE_AVAIL));
+ spte = l1e_from_pfn(
+ mfn, l1e_get_flags(gpte) & ~(_PAGE_GLOBAL | _PAGE_AVAIL));
if ( shadow_mode_log_dirty(d) ||
!(l1e_get_flags(gpte) & _PAGE_DIRTY) ||
mfn_is_page_table(mfn) )
{
- l1e_remove_flags(&spte, _PAGE_RW);
+ l1e_remove_flags(spte, _PAGE_RW);
}
}
- if ( l1e_get_value(spte) || l1e_get_value(gpte) )
+ if ( l1e_get_intpte(spte) || l1e_get_intpte(gpte) )
SH_VVVLOG("%s: gpte=%lx, new spte=%lx",
- __func__, l1e_get_value(gpte), l1e_get_value(spte));
+ __func__, l1e_get_intpte(gpte), l1e_get_intpte(spte));
*spte_p = spte;
}
@@ -840,12 +839,12 @@ static inline void hl2e_propagate_from_guest(
mfn = __gpfn_to_mfn(d, pfn);
if ( VALID_MFN(mfn) && (mfn < max_page) )
- hl2e = l1e_create_pfn(mfn, __PAGE_HYPERVISOR);
+ hl2e = l1e_from_pfn(mfn, __PAGE_HYPERVISOR);
}
- if ( l1e_get_value(hl2e) || l2e_get_value(gpde) )
+ if ( l1e_get_intpte(hl2e) || l2e_get_intpte(gpde) )
SH_VVLOG("%s: gpde=%lx hl2e=%lx", __func__,
- l2e_get_value(gpde), l1e_get_value(hl2e));
+ l2e_get_intpte(gpde), l1e_get_intpte(hl2e));
*hl2e_p = hl2e;
}
@@ -862,19 +861,19 @@ static inline void l2pde_general(
spde = l2e_empty();
if ( (l2e_get_flags(gpde) & _PAGE_PRESENT) && (sl1mfn != 0) )
{
- spde = l2e_create_pfn(sl1mfn,
- (l2e_get_flags(gpde) | _PAGE_RW | _PAGE_ACCESSED)
- & ~(_PAGE_AVAIL));
+ spde = l2e_from_pfn(
+ sl1mfn,
+ (l2e_get_flags(gpde) | _PAGE_RW | _PAGE_ACCESSED) & ~_PAGE_AVAIL);
/* N.B. PDEs do not have a dirty bit. */
- l2e_add_flags(&gpde, _PAGE_ACCESSED);
+ l2e_add_flags(gpde, _PAGE_ACCESSED);
*gpde_p = gpde;
}
- if ( l2e_get_value(spde) || l2e_get_value(gpde) )
+ if ( l2e_get_intpte(spde) || l2e_get_intpte(gpde) )
SH_VVLOG("%s: gpde=%lx, new spde=%lx", __func__,
- l2e_get_value(gpde), l2e_get_value(spde));
+ l2e_get_intpte(gpde), l2e_get_intpte(spde));
*spde_p = spde;
}
@@ -911,13 +910,13 @@ validate_pte_change(
{
old_spte = *shadow_pte_p;
- if ( l1e_get_value(old_spte) == l1e_get_value(new_spte) )
+ if ( l1e_get_intpte(old_spte) == l1e_get_intpte(new_spte) )
{
// No accounting required...
//
perfc_incrc(validate_pte_changes1);
}
- else if ( l1e_get_value(old_spte) == (l1e_get_value(new_spte)|_PAGE_RW) )
+ else if ( l1e_get_intpte(old_spte) == (l1e_get_intpte(new_spte)|_PAGE_RW) )
{
// Fast path for PTEs that have merely been write-protected
// (e.g., during a Unix fork()). A strict reduction in privilege.
@@ -928,7 +927,7 @@ validate_pte_change(
}
else if ( ((l1e_get_flags(old_spte) | l1e_get_flags(new_spte)) &
_PAGE_PRESENT ) &&
- l1e_has_changed(&old_spte, &new_spte, _PAGE_RW | _PAGE_PRESENT) )
+ l1e_has_changed(old_spte, new_spte, _PAGE_RW | _PAGE_PRESENT) )
{
// only do the ref counting if something important changed.
//
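/* Illustrative sketch (not from the Xen tree): the fast path above rests
 * on integer equality -- if old_spte equals new_spte with _PAGE_RW OR-ed
 * back in, the only difference is that new_spte dropped RW, i.e. a pure
 * write-protection. (The old == new case is filtered by the first
 * branch.) Flag values are the usual x86 low bits. */
#include <stdint.h>
#include <assert.h>

typedef uint64_t intpte_t;
typedef struct { intpte_t l1; } l1_pgentry_t;

#define _PAGE_PRESENT 0x001U
#define _PAGE_RW      0x002U
#define l1e_get_intpte(x) ((x).l1)

int main(void)
{
    l1_pgentry_t old_spte = { 0x2000 | _PAGE_PRESENT | _PAGE_RW };
    l1_pgentry_t new_spte = { 0x2000 | _PAGE_PRESENT };  /* RW stripped */

    /* Identical apart from RW: old == (new | RW) detects write-protection. */
    assert(l1e_get_intpte(old_spte) ==
           (l1e_get_intpte(new_spte) | _PAGE_RW));
    return 0;
}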
@@ -973,7 +972,7 @@ validate_hl2e_change(
// Only do the ref counting if something important changed.
//
if ( ((l1e_get_flags(old_hl2e) | l1e_get_flags(new_hl2e)) & _PAGE_PRESENT) &&
- l1e_has_changed(&old_hl2e, &new_hl2e, _PAGE_PRESENT) )
+ l1e_has_changed(old_hl2e, new_hl2e, _PAGE_PRESENT) )
{
perfc_incrc(validate_hl2e_changes);
@@ -1010,8 +1009,8 @@ validate_pde_change(
// Only do the ref counting if something important changed.
//
- if ( ((l2e_get_value(old_spde) | l2e_get_value(new_spde)) & _PAGE_PRESENT) &&
- l2e_has_changed(&old_spde, &new_spde, _PAGE_PRESENT) )
+ if ( ((l2e_get_intpte(old_spde) | l2e_get_intpte(new_spde)) & _PAGE_PRESENT) &&
+ l2e_has_changed(old_spde, new_spde, _PAGE_PRESENT) )
{
perfc_incrc(validate_pde_changes);
@@ -1590,7 +1589,7 @@ shadow_set_l1e(unsigned long va, l1_pgentry_t new_spte, int create_l1_shadow)
// only do the ref counting if something important changed.
//
- if ( l1e_has_changed(&old_spte, &new_spte, _PAGE_RW | _PAGE_PRESENT) )
+ if ( l1e_has_changed(old_spte, new_spte, _PAGE_RW | _PAGE_PRESENT) )
{
if ( (l1e_get_flags(new_spte) & _PAGE_PRESENT) &&
!shadow_get_page_from_l1e(new_spte, d) )
@@ -1664,7 +1663,7 @@ static inline unsigned long gva_to_gpa(unsigned long gva)
if ( !(l1e_get_flags(gpte) & _PAGE_PRESENT) )
return 0;
- return l1e_get_phys(gpte) + (gva & ~PAGE_MASK);
+ return l1e_get_paddr(gpte) + (gva & ~PAGE_MASK);
}
/************************************************************************/
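/* Illustrative sketch (not from the Xen tree): l?e_has_changed() follows
 * the same by-value calling convention seen in shadow_set_l1e() above,
 * and only the address bits plus the caller-selected flags take part in
 * the comparison. Masks are simplified (52 paddr bits assumed). */
#include <stdint.h>
#include <assert.h>

typedef uint64_t intpte_t;
typedef struct { intpte_t l1; } l1_pgentry_t;

#define PADDR_MASK       ((1ULL << 52) - 1)
#define PAGE_MASK        (~0xfffULL)
#define _PAGE_PRESENT    0x001U
#define _PAGE_RW         0x002U
#define _PAGE_ACCESSED   0x020U
#define put_pte_flags(x) ((intpte_t)(x))   /* simplified */

#define l1e_has_changed(x, y, flags) \
    ( !!(((x).l1 ^ (y).l1) & ((PADDR_MASK & PAGE_MASK) | put_pte_flags(flags))) )

int main(void)
{
    l1_pgentry_t a = { 0x2000 | _PAGE_PRESENT | _PAGE_RW };
    l1_pgentry_t b = { 0x2000 | _PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED };

    /* Only RW|PRESENT are significant here, so ACCESSED alone: no change. */
    assert(!l1e_has_changed(a, b, _PAGE_RW | _PAGE_PRESENT));

    b.l1 &= ~(intpte_t)_PAGE_RW;           /* write-protect the mapping */
    assert(l1e_has_changed(a, b, _PAGE_RW | _PAGE_PRESENT));
    return 0;
}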
@@ -1684,7 +1683,7 @@ static inline void update_pagetables(struct exec_domain *ed)
// HACK ALERT: there's currently no easy way to figure out if a domU
// has set its arch.guest_table to zero, vs not yet initialized it.
//
- paging_enabled = !!pagetable_get_phys(ed->arch.guest_table);
+ paging_enabled = !!pagetable_get_paddr(ed->arch.guest_table);
/*
* We don't call __update_pagetables() when vmx guest paging is
diff --git a/xen/include/asm-x86/x86_32/page-2level.h b/xen/include/asm-x86/x86_32/page-2level.h
index 7321782805..9659ce0430 100644
--- a/xen/include/asm-x86/x86_32/page-2level.h
+++ b/xen/include/asm-x86/x86_32/page-2level.h
@@ -31,9 +31,9 @@ typedef l2_pgentry_t root_pgentry_t;
/* root table */
#define root_get_pfn l2e_get_pfn
#define root_get_flags l2e_get_flags
-#define root_get_value l2e_get_value
+#define root_get_intpte l2e_get_intpte
#define root_empty l2e_empty
-#define root_create_phys l2e_create_phys
+#define root_from_paddr l2e_from_paddr
#define PGT_root_page_table PGT_l2_page_table
/* misc */
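The root_* aliases above let level-independent code compile against whichever top level a given build uses; with 2-level paging they resolve to the l2e_* family. A minimal sketch of the indirection (types and values simplified, not the real definitions):

#include <stdint.h>
#include <assert.h>

typedef uint64_t intpte_t;
typedef struct { intpte_t l2; } l2_pgentry_t;
typedef l2_pgentry_t root_pgentry_t;   /* 2-level: the root table is the L2 */

#define PAGE_SHIFT     12
#define l2e_get_pfn(x) ((unsigned long)((x).l2 >> PAGE_SHIFT))
#define root_get_pfn   l2e_get_pfn     /* generic code only sees root_* */

int main(void)
{
    root_pgentry_t e = { 0x5000 };
    assert(root_get_pfn(e) == 5);
    return 0;
}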
diff --git a/xen/include/asm-x86/x86_32/page-3level.h b/xen/include/asm-x86/x86_32/page-3level.h
index a83f27f1f3..0033759b99 100644
--- a/xen/include/asm-x86/x86_32/page-3level.h
+++ b/xen/include/asm-x86/x86_32/page-3level.h
@@ -41,9 +41,9 @@ typedef l3_pgentry_t root_pgentry_t;
/* root table */
#define root_get_pfn l3e_get_pfn
#define root_get_flags l3e_get_flags
-#define root_get_value l3e_get_value
+#define root_get_intpte l3e_get_intpte
#define root_empty l3e_empty
-#define root_init_phys l3e_create_phys
+#define root_from_paddr l3e_from_paddr
#define PGT_root_page_table PGT_l3_page_table
/* misc */
diff --git a/xen/include/asm-x86/x86_64/page.h b/xen/include/asm-x86/x86_64/page.h
index b27e9076fd..21712d19d9 100644
--- a/xen/include/asm-x86/x86_64/page.h
+++ b/xen/include/asm-x86/x86_64/page.h
@@ -53,10 +53,10 @@ typedef l4_pgentry_t root_pgentry_t;
#define root_get_pfn l4e_get_pfn
#define root_get_flags l4e_get_flags
-#define root_get_value l4e_get_value
+#define root_get_intpte l4e_get_intpte
#define root_empty l4e_empty
-#define root_create_phys l4e_create_phys
-#define PGT_root_page_table PGT_l4_page_table
+#define root_from_paddr l4e_from_paddr
+#define PGT_root_page_table PGT_l4_page_table
/*
* PTE pfn and flags: