diff options
author | Jan Beulich <jbeulich@suse.com> | 2013-09-30 14:17:46 +0200 |
---|---|---|
committer | Jan Beulich <jbeulich@suse.com> | 2013-09-30 14:17:46 +0200 |
commit | 6bb838e7375f5b031e9ac346b353775c90de45dc (patch) | |
tree | 9a3a447fa05f3e3462b8c1cbc279aa7229a4ca98 | |
parent | 0a6b415d5212af68249ddf41a20dfc3998c8d670 (diff) | |
download | xen-6bb838e7375f5b031e9ac346b353775c90de45dc.tar.gz xen-6bb838e7375f5b031e9ac346b353775c90de45dc.tar.bz2 xen-6bb838e7375f5b031e9ac346b353775c90de45dc.zip |
x86: properly handle hvm_copy_from_guest_{phys,virt}() errors
Ignoring them generally implies using uninitialized data and, in all
but two of the cases dealt with here, potentially leaking hypervisor
stack contents to guests.
This is CVE-2013-4355 / XSA-63.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Tim Deegan <tim@xen.org>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
-rw-r--r-- | xen/arch/x86/hvm/hvm.c | 18 | ||||
-rw-r--r-- | xen/arch/x86/hvm/intercept.c | 49 | ||||
-rw-r--r-- | xen/arch/x86/hvm/io.c | 24 | ||||
-rw-r--r-- | xen/arch/x86/hvm/vmx/realmode.c | 6 |
4 files changed, 66 insertions(+), 31 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index 150b0ec460..bf807bf7c8 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -2310,11 +2310,7 @@ void hvm_task_switch( rc = hvm_copy_from_guest_virt( &tss, prev_tr.base, sizeof(tss), PFEC_page_present); - if ( rc == HVMCOPY_bad_gva_to_gfn ) - goto out; - if ( rc == HVMCOPY_gfn_paged_out ) - goto out; - if ( rc == HVMCOPY_gfn_shared ) + if ( rc != HVMCOPY_okay ) goto out; eflags = regs->eflags; @@ -2359,13 +2355,11 @@ void hvm_task_switch( rc = hvm_copy_from_guest_virt( &tss, tr.base, sizeof(tss), PFEC_page_present); - if ( rc == HVMCOPY_bad_gva_to_gfn ) - goto out; - if ( rc == HVMCOPY_gfn_paged_out ) - goto out; - /* Note: this could be optimised, if the callee functions knew we want RO - * access */ - if ( rc == HVMCOPY_gfn_shared ) + /* + * Note: The HVMCOPY_gfn_shared case could be optimised, if the callee + * functions knew we want RO access. + */ + if ( rc != HVMCOPY_okay ) goto out; diff --git a/xen/arch/x86/hvm/intercept.c b/xen/arch/x86/hvm/intercept.c index 85caa0cf10..5bb1c17190 100644 --- a/xen/arch/x86/hvm/intercept.c +++ b/xen/arch/x86/hvm/intercept.c @@ -87,17 +87,28 @@ static int hvm_mmio_access(struct vcpu *v, { for ( i = 0; i < p->count; i++ ) { - int ret; - - ret = hvm_copy_from_guest_phys(&data, - p->data + (sign * i * p->size), - p->size); - if ( (ret == HVMCOPY_gfn_paged_out) || - (ret == HVMCOPY_gfn_shared) ) + switch ( hvm_copy_from_guest_phys(&data, + p->data + sign * i * p->size, + p->size) ) { + case HVMCOPY_okay: + break; + case HVMCOPY_gfn_paged_out: + case HVMCOPY_gfn_shared: rc = X86EMUL_RETRY; break; + case HVMCOPY_bad_gfn_to_mfn: + data = ~0; + break; + case HVMCOPY_bad_gva_to_gfn: + ASSERT(0); + /* fall through */ + default: + rc = X86EMUL_UNHANDLEABLE; + break; } + if ( rc != X86EMUL_OKAY ) + break; rc = write_handler(v, p->addr + (sign * i * p->size), p->size, data); if ( rc != X86EMUL_OKAY ) @@ -165,8 +176,28 @@ static int 
process_portio_intercept(portio_action_t action, ioreq_t *p) for ( i = 0; i < p->count; i++ ) { data = 0; - (void)hvm_copy_from_guest_phys(&data, p->data + sign*i*p->size, - p->size); + switch ( hvm_copy_from_guest_phys(&data, + p->data + sign * i * p->size, + p->size) ) + { + case HVMCOPY_okay: + break; + case HVMCOPY_gfn_paged_out: + case HVMCOPY_gfn_shared: + rc = X86EMUL_RETRY; + break; + case HVMCOPY_bad_gfn_to_mfn: + data = ~0; + break; + case HVMCOPY_bad_gva_to_gfn: + ASSERT(0); + /* fall through */ + default: + rc = X86EMUL_UNHANDLEABLE; + break; + } + if ( rc != X86EMUL_OKAY ) + break; rc = action(IOREQ_WRITE, p->addr, p->size, &data); if ( rc != X86EMUL_OKAY ) break; diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c index 4ae2c0c342..5f5009a179 100644 --- a/xen/arch/x86/hvm/io.c +++ b/xen/arch/x86/hvm/io.c @@ -340,14 +340,24 @@ static int dpci_ioport_write(uint32_t mport, ioreq_t *p) data = p->data; if ( p->data_is_ptr ) { - int ret; - - ret = hvm_copy_from_guest_phys(&data, - p->data + (sign * i * p->size), - p->size); - if ( (ret == HVMCOPY_gfn_paged_out) && - (ret == HVMCOPY_gfn_shared) ) + switch ( hvm_copy_from_guest_phys(&data, + p->data + sign * i * p->size, + p->size) ) + { + case HVMCOPY_okay: + break; + case HVMCOPY_gfn_paged_out: + case HVMCOPY_gfn_shared: return X86EMUL_RETRY; + case HVMCOPY_bad_gfn_to_mfn: + data = ~0; + break; + case HVMCOPY_bad_gva_to_gfn: + ASSERT(0); + /* fall through */ + default: + return X86EMUL_UNHANDLEABLE; + } } switch ( p->size ) diff --git a/xen/arch/x86/hvm/vmx/realmode.c b/xen/arch/x86/hvm/vmx/realmode.c index 2e12e24192..45066b270b 100644 --- a/xen/arch/x86/hvm/vmx/realmode.c +++ b/xen/arch/x86/hvm/vmx/realmode.c @@ -39,7 +39,9 @@ static void realmode_deliver_exception( again: last_byte = (vector * 4) + 3; - if ( idtr->limit < last_byte ) + if ( idtr->limit < last_byte || + hvm_copy_from_guest_phys(&cs_eip, idtr->base + vector * 4, 4) != + HVMCOPY_okay ) { /* Software interrupt? 
*/ if ( insn_len != 0 ) @@ -64,8 +66,6 @@ static void realmode_deliver_exception( } } - (void)hvm_copy_from_guest_phys(&cs_eip, idtr->base + vector * 4, 4); - frame[0] = regs->eip + insn_len; frame[1] = csr->sel; frame[2] = regs->eflags & ~X86_EFLAGS_RF; |