author     Jan Beulich <jbeulich@suse.com>    2013-10-11 09:28:26 +0200
committer  Jan Beulich <jbeulich@suse.com>    2013-10-11 09:28:26 +0200
commit     40d66baa46ca8a9ffa6df3e063a967d08ec92bcf (patch)
tree       551daf93c5e8b6a8bd50cf58f5a30eb1da8ec1dc /xen/arch/x86/traps.c
parent     4c37ed562224295c0f8b00211287d57cae629782 (diff)
x86: correct LDT checks
- MMUEXT_SET_LDT should behave as similarly to the LLDT instruction as
  possible: fail only if the base address is non-canonical
- instead, LDT descriptor accesses should fault if the descriptor address
  ends up being non-canonical (by ensuring this we at once avoid reading an
  entry from the mach-to-phys table and considering it a page table entry)
- fault propagation on using LDT selectors must distinguish #PF and #GP
  (the latter must be raised for a non-canonical descriptor address, which
  also applies to several other uses of propagate_page_fault(), and hence
  the problem is being fixed there)
- map_ldt_shadow_page() should properly wrap addresses for 32-bit VMs

At once, remove the odd invocation of map_ldt_shadow_page() from the
MMUEXT_SET_LDT handler: there is nothing really telling us that the first
LDT page is going to be preferred over others.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Keir Fraser <keir@xen.org>
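For the canonicality reasoning above, the following is a minimal stand-alone
sketch of the test involved, assuming the conventional x86-64 layout with 48
implemented virtual-address bits. The canonical() helper and the tiny test
harness are illustrative only; they merely mirror the decision a check such
as Xen's is_canonical_address() has to make.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Sketch only (not Xen's actual macro): with 48-bit virtual addressing,
 * an address is canonical when bits 63:47 are a sign-extension of bit 47,
 * i.e. arithmetic right shifts by 47 and by 63 yield the same value.
 */
static bool canonical(uint64_t addr)
{
    return ((int64_t)addr >> 47) == ((int64_t)addr >> 63);
}

int main(void)
{
    printf("%d\n", canonical(0xffff800000000000ULL)); /* 1: upper bits sign-extended */
    printf("%d\n", canonical(0x0000800000000000ULL)); /* 0: bit 47 set, bits 63:48 clear */
    return 0;
}

A non-canonical LDT base or descriptor address is what steers execution into
the new #GP bounce path in propagate_page_fault() below, instead of the
normal #PF one.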
Diffstat (limited to 'xen/arch/x86/traps.c')
-rw-r--r--  xen/arch/x86/traps.c | 27 ++++++++++++++++++++++-----
1 file changed, 22 insertions(+), 5 deletions(-)
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 4a0950280f..35be017dc8 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1065,12 +1065,24 @@ static void reserved_bit_page_fault(
     show_execution_state(regs);
 }
 
-void propagate_page_fault(unsigned long addr, u16 error_code)
+struct trap_bounce *propagate_page_fault(unsigned long addr, u16 error_code)
 {
     struct trap_info *ti;
     struct vcpu *v = current;
     struct trap_bounce *tb = &v->arch.pv_vcpu.trap_bounce;
 
+    if ( unlikely(!is_canonical_address(addr)) )
+    {
+        ti = &v->arch.pv_vcpu.trap_ctxt[TRAP_gp_fault];
+        tb->flags = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE;
+        tb->error_code = 0;
+        tb->cs = ti->cs;
+        tb->eip = ti->address;
+        if ( TI_GET_IF(ti) )
+            tb->flags |= TBF_INTERRUPT;
+        return tb;
+    }
+
     v->arch.pv_vcpu.ctrlreg[2] = addr;
     arch_set_cr2(v, addr);
 
@@ -1097,6 +1109,8 @@ void propagate_page_fault(unsigned long addr, u16 error_code)
 
     if ( unlikely(error_code & PFEC_reserved_bit) )
         reserved_bit_page_fault(addr, guest_cpu_user_regs());
+
+    return NULL;
 }
 
 static int handle_gdt_ldt_mapping_fault(
@@ -1130,13 +1144,16 @@ static int handle_gdt_ldt_mapping_fault(
         }
         else
         {
+            struct trap_bounce *tb;
+
             /* In hypervisor mode? Leave it to the #PF handler to fix up. */
             if ( !guest_mode(regs) )
                 return 0;
-            /* In guest mode? Propagate #PF to guest, with adjusted %cr2. */
-            propagate_page_fault(
-                curr->arch.pv_vcpu.ldt_base + offset,
-                regs->error_code);
+            /* In guest mode? Propagate fault to guest, with adjusted %cr2. */
+            tb = propagate_page_fault(curr->arch.pv_vcpu.ldt_base + offset,
+                                      regs->error_code);
+            if ( tb )
+                tb->error_code = ((u16)offset & ~3) | 4;
         }
     }
     else