author    Tim Deegan <tim@xen.org>    2013-05-02 11:37:56 +0100
committer Tim Deegan <tim@xen.org>    2013-05-09 13:00:26 +0100
commit    078654af2de6733fb5b3da1e07c1962832e998c1
tree      500f15c3b082450f5e7b8643c1e29f3cf2cabb03
parent    3b38da86878c36a7ec3e020879a0de3f5b293d95
x86/mm/shadow: remove dead code for avoiding Xen entries on 32-bit tables.
All non-external-mode (==PV) guests have 4-level pagetables now that the PAE build of Xen is gone. This patch should have no effect, since the condition it removes could never be true anyway: the l2 offset of HYPERVISOR_VIRT_START on 64-bit Xen is much higher than any l2 offset we could have seen in the tables (and indeed too large to fit in an 'int', which clang was complaining about). Actual compat PV guest Xen entries are handled by the equivalent test in the 64-bit SHADOW_FOREACH_L2E() below.

Reported-by: Julien Grall <julien.grall@linaro.org>
Signed-off-by: Tim Deegan <tim@xen.org>
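A back-of-the-envelope check of that claim, as a standalone sketch: the constants below are illustrative stand-ins for the Xen header values (2-level guests are shadowed with PAE-format tables, hence the 21-bit l2 shift), not copied from the tree at this commit.

#include <stdio.h>

/* Illustrative stand-ins for the Xen header values (assumptions). */
#define HYPERVISOR_VIRT_START        0xFFFF800000000000UL /* 64-bit Xen */
#define SHADOW_L2_PAGETABLE_SHIFT    21
#define SHADOW_L2_PAGETABLE_ENTRIES  512

int main(void)
{
    /* Highest index the 2-level loop can form: _j up to 3, _i up to 510. */
    unsigned long max_index = 3UL * SHADOW_L2_PAGETABLE_ENTRIES
                              + (SHADOW_L2_PAGETABLE_ENTRIES - 2);

    /* The bound the removed test compared against: roughly 2^42, far above
     * any reachable index and far above INT_MAX (clang's complaint). */
    unsigned long xen_l2_offset = HYPERVISOR_VIRT_START
                                  >> SHADOW_L2_PAGETABLE_SHIFT;

    printf("max loop index = %lu\n", max_index);       /* 2046 */
    printf("Xen l2 offset  = %#lx\n", xen_l2_offset);  /* 0x7fffc000000 */
    printf("always visited = %d\n", max_index < xen_l2_offset);
    return 0;
}

So the guard admitted every entry the loop could ever name, which is why removing it changes nothing.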
-rw-r--r--  xen/arch/x86/mm/shadow/multi.c | 49
1 file changed, 21 insertions(+), 28 deletions(-)
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index a8ef75eb14..e2160636ba 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -1305,26 +1305,23 @@ do { \
#if GUEST_PAGING_LEVELS == 2
-/* 32-bit l2 on PAE/64: four pages, touch every second entry, and avoid Xen */
+/* 32-bit l2 on PAE/64: four pages, touch every second entry */
#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _dom, _code) \
do { \
int _i, _j, __done = 0; \
- int _xen = !shadow_mode_external(_dom); \
- ASSERT(mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2_32_shadow);\
+ ASSERT(shadow_mode_external(_dom)); \
+ ASSERT(mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2_32_shadow); \
for ( _j = 0; _j < 4 && !__done; _j++ ) \
{ \
shadow_l2e_t *_sp = sh_map_domain_page(_sl2mfn); \
for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i += 2 ) \
- if ( (!(_xen)) \
- || ((_j * SHADOW_L2_PAGETABLE_ENTRIES) + _i) \
- < (HYPERVISOR_VIRT_START >> SHADOW_L2_PAGETABLE_SHIFT) ) \
- { \
- (_sl2e) = _sp + _i; \
- if ( shadow_l2e_get_flags(*(_sl2e)) & _PAGE_PRESENT ) \
- {_code} \
- if ( (__done = (_done)) ) break; \
- increment_ptr_to_guest_entry(_gl2p); \
- } \
+ { \
+ (_sl2e) = _sp + _i; \
+ if ( shadow_l2e_get_flags(*(_sl2e)) & _PAGE_PRESENT ) \
+ {_code} \
+ if ( (__done = (_done)) ) break; \
+ increment_ptr_to_guest_entry(_gl2p); \
+ } \
sh_unmap_domain_page(_sp); \
if ( _j < 3 ) _sl2mfn = sh_next_page(_sl2mfn); \
} \
@@ -1332,26 +1329,22 @@ do { \
#elif GUEST_PAGING_LEVELS == 3
-/* PAE: if it's an l2h, don't touch Xen mappings */
+/* PAE: touch all entries */
#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _dom, _code) \
do { \
int _i; \
- int _xen = !shadow_mode_external(_dom); \
shadow_l2e_t *_sp = sh_map_domain_page((_sl2mfn)); \
- ASSERT(mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2_pae_shadow \
- || mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2h_pae_shadow);\
+ ASSERT(shadow_mode_external(_dom)); \
+ ASSERT(mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2_pae_shadow \
+ || mfn_to_page(_sl2mfn)->u.sh.type == SH_type_l2h_pae_shadow); \
for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ ) \
- if ( (!(_xen)) \
- || mfn_to_page(_sl2mfn)->u.sh.type != SH_type_l2h_pae_shadow\
- || ((_i + (3 * SHADOW_L2_PAGETABLE_ENTRIES)) \
- < (HYPERVISOR_VIRT_START >> SHADOW_L2_PAGETABLE_SHIFT)) ) \
- { \
- (_sl2e) = _sp + _i; \
- if ( shadow_l2e_get_flags(*(_sl2e)) & _PAGE_PRESENT ) \
- {_code} \
- if ( _done ) break; \
- increment_ptr_to_guest_entry(_gl2p); \
- } \
+ { \
+ (_sl2e) = _sp + _i; \
+ if ( shadow_l2e_get_flags(*(_sl2e)) & _PAGE_PRESENT ) \
+ {_code} \
+ if ( _done ) break; \
+ increment_ptr_to_guest_entry(_gl2p); \
+ } \
sh_unmap_domain_page(_sp); \
} while (0)
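The same arithmetic covers the PAE hunk above. A sketch under the assumption that the old 32-bit PAE build of Xen placed HYPERVISOR_VIRT_START at 0xF5800000 (an illustrative value, not checked against the removed headers):

#include <stdio.h>

#define SHADOW_L2_PAGETABLE_SHIFT    21
#define SHADOW_L2_PAGETABLE_ENTRIES  512

int main(void)
{
    /* Largest value of (_i + 3 * SHADOW_L2_PAGETABLE_ENTRIES). */
    unsigned long lhs = (SHADOW_L2_PAGETABLE_ENTRIES - 1)
                        + 3UL * SHADOW_L2_PAGETABLE_ENTRIES;  /* 2047 */

    /* 64-bit Xen: bound ~2^42, so the "skip Xen entries" branch is dead. */
    unsigned long bound64 = 0xFFFF800000000000UL >> SHADOW_L2_PAGETABLE_SHIFT;

    /* Assumed PAE-Xen HYPERVISOR_VIRT_START: bound 1964, so l2h slots
     * 1964..2047 really were Xen mappings and the test used to matter. */
    unsigned long boundpae = 0xF5800000UL >> SHADOW_L2_PAGETABLE_SHIFT;

    printf("64-bit: skip possible = %d\n", lhs >= bound64);   /* 0 */
    printf("PAE:    skip possible = %d\n", lhs >= boundpae);  /* 1 */
    return 0;
}

Only the PAE build could make the removed comparison bite; with that build gone, both guards were dead code.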