aboutsummaryrefslogtreecommitdiffstats
path: root/xen/include/asm-x86/x86_64
diff options
context:
space:
mode:
authorKeir Fraser <keir.fraser@citrix.com>2009-09-22 08:16:49 +0100
committerKeir Fraser <keir.fraser@citrix.com>2009-09-22 08:16:49 +0100
commitbac2000063ba239d33b631f6edda48cc6b57425b (patch)
tree1202e917a4903bdf3975f4ba018c8a4233c80973 /xen/include/asm-x86/x86_64
parent615588563e99a23aaf37037c3fee0c413b051f4d (diff)
downloadxen-bac2000063ba239d33b631f6edda48cc6b57425b.tar.gz
xen-bac2000063ba239d33b631f6edda48cc6b57425b.tar.bz2
xen-bac2000063ba239d33b631f6edda48cc6b57425b.zip
x86-64: reduce range spanned by 1:1 mapping and frame table indexes
Introduces a virtual space conserving transformation on the MFN thus far used to index 1:1 mapping and frame table, removing the largest range of contiguous bits (below the most significant one) which are zero for all valid MFNs from the MFN representation, to be used to index into those arrays, thereby cutting the virtual range these tables must cover approximately by half with each bit removed. Since this should account for hotpluggable memory (in order not to require a re-write when that gets supported), the determination of which bits are candidates for removal must not be based on the E820 information, but instead has to use the SRAT. That in turn requires a change to the ordering of steps done during early boot. Signed-off-by: Jan Beulich <jbeulich@novell.com>
Diffstat (limited to 'xen/include/asm-x86/x86_64')
-rw-r--r--xen/include/asm-x86/x86_64/page.h54
1 file changed, 46 insertions, 8 deletions
diff --git a/xen/include/asm-x86/x86_64/page.h b/xen/include/asm-x86/x86_64/page.h
index 5b88a3c198..8bdf9b7365 100644
--- a/xen/include/asm-x86/x86_64/page.h
+++ b/xen/include/asm-x86/x86_64/page.h
@@ -35,25 +35,63 @@
/* Physical address where Xen was relocated to. */
extern unsigned long xen_phys_start;
+extern unsigned long max_page, max_pdx;
+extern unsigned long pfn_pdx_bottom_mask, ma_va_bottom_mask;
+extern unsigned int pfn_pdx_hole_shift;
+extern unsigned long pfn_hole_mask;
+extern unsigned long pfn_top_mask, ma_top_mask;
+extern void pfn_pdx_hole_setup(unsigned long);
+
+#define page_to_pdx(pg) ((pg) - frame_table)
+#define pdx_to_page(pdx) (frame_table + (pdx))
+/*
+ * Note: These are solely for the use by page_{get,set}_owner(), and
+ * therefore don't need to handle the XEN_VIRT_{START,END} range.
+ */
+#define virt_to_pdx(va) (((unsigned long)(va) - DIRECTMAP_VIRT_START) >> \
+ PAGE_SHIFT)
+#define pdx_to_virt(pdx) ((void *)(DIRECTMAP_VIRT_START + \
+ ((unsigned long)(pdx) << PAGE_SHIFT)))
+
+static inline int __mfn_valid(unsigned long mfn)
+{
+ return mfn < max_page && !(mfn & pfn_hole_mask);
+}
+
+static inline unsigned long pfn_to_pdx(unsigned long pfn)
+{
+ return (pfn & pfn_pdx_bottom_mask) |
+ ((pfn & pfn_top_mask) >> pfn_pdx_hole_shift);
+}
+
+static inline unsigned long pdx_to_pfn(unsigned long pdx)
+{
+ return (pdx & pfn_pdx_bottom_mask) |
+ ((pdx << pfn_pdx_hole_shift) & pfn_top_mask);
+}
+
static inline unsigned long __virt_to_maddr(unsigned long va)
{
ASSERT(va >= XEN_VIRT_START);
ASSERT(va < DIRECTMAP_VIRT_END);
- ASSERT((va < XEN_VIRT_END) || (va >= DIRECTMAP_VIRT_START));
if ( va >= DIRECTMAP_VIRT_START )
- return va - DIRECTMAP_VIRT_START;
- return va - XEN_VIRT_START + xen_phys_start;
+ va -= DIRECTMAP_VIRT_START;
+ else
+ {
+ ASSERT(va < XEN_VIRT_END);
+ va += xen_phys_start - XEN_VIRT_START;
+ }
+ return (va & ma_va_bottom_mask) |
+ ((va << pfn_pdx_hole_shift) & ma_top_mask);
}
-#define virt_to_maddr(va) \
- (__virt_to_maddr((unsigned long)(va)))
static inline void *__maddr_to_virt(unsigned long ma)
{
ASSERT(ma < DIRECTMAP_VIRT_END - DIRECTMAP_VIRT_START);
- return (void *)(ma + DIRECTMAP_VIRT_START);
+ return (void *)(DIRECTMAP_VIRT_START +
+ ((ma & ma_va_bottom_mask) |
+ ((ma & ma_top_mask) >> pfn_pdx_hole_shift)));
}
-#define maddr_to_virt(ma) \
- (__maddr_to_virt((unsigned long)(ma)))
/* read access (should only be used for debug printk's) */
typedef u64 intpte_t;