author     Jan Beulich <jbeulich@suse.com>   2013-06-26 15:32:58 +0200
committer  Jan Beulich <jbeulich@suse.com>   2013-06-26 15:32:58 +0200
commit     9b167bd2f394f821ae3252d74a15704a4bf91f6d (patch)
tree       bf628c65f125463b064fd11988135b8233609b35 /xen/arch/x86/mm.c
parent     cb9ac9fda371beee0ec7753841684063cebe1da3 (diff)
x86: fix page refcount handling in page table pin error path
In the original patch 7 of the series addressing XSA-45 I mistakenly took the addition of the call to get_page_light() in alloc_page_type() to cover two decrements that would happen: one for the PGT_partial bit that is getting set along with the call, and the other for the page reference the caller holds (and would be dropping on its error path). But of course the additional page reference is tied to the PGT_partial bit, and hence any caller of a function that may leave ->arch.old_guest_table non-NULL for error cleanup purposes has to make sure a respective page reference gets retained.

Similar issues were then also spotted elsewhere: in effect all callers of get_page_type_preemptible() need to deal with errors in similar ways. To make sure error handling can work this way without leaking page references, a respective assertion gets added to that function.

This is CVE-2013-1432 / XSA-58.

Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Tested-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Tim Deegan <tim@xen.org>
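To illustrate the rule described above, here is a minimal sketch, assuming a simplified caller (pin_table_sketch() and its surrounding structure are hypothetical, not code from the tree): once get_page_type_preemptible() may park the page in ->arch.old_guest_table on error, the caller's own reference is handed over to that deferred cleanup and must not be dropped a second time.

    /*
     * Hedged sketch, not actual Xen source.  It shows the invariant the
     * patch enforces: if a preemptible type-get fails and the page was
     * stashed in curr->arch.old_guest_table, the reference taken below
     * now belongs to the deferred cleanup and must not be dropped here.
     */
    static int pin_table_sketch(struct page_info *page, unsigned long type)
    {
        struct vcpu *curr = current;
        int rc;

        if ( !get_page(page, curr->domain) )        /* take a reference */
            return -EINVAL;

        rc = get_page_type_preemptible(page, type);
        if ( rc && page != curr->arch.old_guest_table )
            put_page(page);             /* not parked: drop it ourselves */
        /* else on error: put_old_guest_table() drops it later */

        return rc;
    }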
Diffstat (limited to 'xen/arch/x86/mm.c')
-rw-r--r--  xen/arch/x86/mm.c  9
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 5123860ffd..77dcafc261 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -640,7 +640,8 @@ static int get_page_and_type_from_pagenr(unsigned long page_nr,
           get_page_type_preemptible(page, type) :
           (get_page_type(page, type) ? 0 : -EINVAL));
 
-    if ( unlikely(rc) && partial >= 0 )
+    if ( unlikely(rc) && partial >= 0 &&
+         (!preemptible || page != current->arch.old_guest_table) )
         put_page(page);
 
     return rc;
@@ -2427,6 +2428,7 @@ int put_page_type_preemptible(struct page_info *page)
 
 int get_page_type_preemptible(struct page_info *page, unsigned long type)
 {
+    ASSERT(!current->arch.old_guest_table);
     return __get_page_type(page, type, 1);
 }
 
@@ -2617,7 +2619,7 @@ static void put_superpage(unsigned long mfn)
     return;
 }
 
-static int put_old_guest_table(struct vcpu *v)
+int put_old_guest_table(struct vcpu *v)
 {
     int rc;
 
@@ -2988,7 +2990,8 @@ long do_mmuext_op(
                     rc = -EAGAIN;
                 else if ( rc != -EAGAIN )
                     MEM_LOG("Error while pinning mfn %lx", page_to_mfn(page));
-                put_page(page);
+                if ( page != curr->arch.old_guest_table )
+                    put_page(page);
                 break;
             }
 
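Finally, a short hedged sketch of why put_old_guest_table() loses its static qualifier and why the new assertion is safe to rely on (retry_pin_sketch() and the exact error handling are assumptions for illustration): before starting another preemptible type acquisition, a caller first drains any cleanup still pending from an earlier, interrupted operation.

    /*
     * Hedged sketch, not actual Xen source.  put_old_guest_table() is the
     * real (now non-static) helper; everything else here is illustrative.
     */
    static int retry_pin_sketch(struct page_info *page, unsigned long type)
    {
        struct vcpu *curr = current;
        int rc = put_old_guest_table(curr);  /* drain pending cleanup */

        if ( rc )
            return rc;  /* e.g. arrange a hypercall continuation and retry */

        /* ASSERT(!current->arch.old_guest_table) in the callee now holds. */
        return get_page_type_preemptible(page, type);
    }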