diff options
author | Tim Deegan <Tim.Deegan@citrix.com> | 2011-04-07 15:19:52 +0100 |
---|---|---|
committer | Tim Deegan <Tim.Deegan@citrix.com> | 2011-04-07 15:19:52 +0100 |
commit | 6de19d13bdc66f068d7b03896fe4850fcc693b83 (patch) | |
tree | ab0e529bfc23af01c52e861fd17d5688145c71a0 | |
parent | c5acb1ae35409d705390da969af0e8860c9b2ab7 (diff) | |
download | xen-6de19d13bdc66f068d7b03896fe4850fcc693b83.tar.gz xen-6de19d13bdc66f068d7b03896fe4850fcc693b83.tar.bz2 xen-6de19d13bdc66f068d7b03896fe4850fcc693b83.zip |
x86/mm: fix PSE alignment checks in pagetable walker.
The PSE handling doesn't check that bits 1...8 (or 1...9 for non-PAE
guests) are zero, thus allowing badly aligned (not 2Mb/4Mb-aligned)
large pages to be handled.
Signed-off-by: Jan Beulich <jbeulich@novell.com>
Acked-by: Tim Deegan <Tim.Deegan@citrix.com>
xen-unstable changeset: 23138:777aaa2172c8
xen-unstable date: Mon Apr 04 10:38:27 2011 +0100
-rw-r--r-- | xen/arch/x86/mm/guest_walk.c | 23 |
1 file changed, 21 insertions, 2 deletions
diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c index 0551579e75..b20fdaff8b 100644 --- a/xen/arch/x86/mm/guest_walk.c +++ b/xen/arch/x86/mm/guest_walk.c @@ -231,9 +231,28 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m, /* _PAGE_PSE_PAT not set: remove _PAGE_PAT from flags. */ flags &= ~_PAGE_PAT; +#define GUEST_L2_GFN_ALIGN (1 << (GUEST_L2_PAGETABLE_SHIFT - \ + GUEST_L1_PAGETABLE_SHIFT)) + if ( gfn_x(start) & (GUEST_L2_GFN_ALIGN - 1) & ~0x1 ) + { +#if GUEST_PAGING_LEVELS == 2 + /* + * Note that _PAGE_INVALID_BITS is zero in this case, yielding a + * no-op here. + * + * Architecturally, the walk should fail if bit 21 is set (others + * aren't being checked at least in PSE36 mode), but we'll ignore + * this here in order to avoid specifying a non-natural, non-zero + * _PAGE_INVALID_BITS value just for that case. + */ +#endif + rc |= _PAGE_INVALID_BITS; + } + /* Increment the pfn by the right number of 4k pages. - * The ~0x1 is to mask out the PAT bit mentioned above. */ - start = _gfn((gfn_x(start) & ~0x1) + guest_l1_table_offset(va)); + * Mask out PAT and invalid bits. */ + start = _gfn((gfn_x(start) & ~(GUEST_L2_GFN_ALIGN - 1)) + + guest_l1_table_offset(va)); gw->l1e = guest_l1e_from_gfn(start, flags); gw->l1mfn = _mfn(INVALID_MFN); } |