author    Keir Fraser <keir.fraser@citrix.com>  2009-06-18 10:04:57 +0100
committer Keir Fraser <keir.fraser@citrix.com>  2009-06-18 10:04:57 +0100
commit    cffdda11c15bd98dc6db8f1c3d917fc7eb521431 (patch)
tree      5a4751b8db459e69822da680e73dc875538c47db
parent    cc7728d8f79ea6cd2be3744a63c2b36033a5efd9 (diff)
x86: properly handle vcpu-to-pcpu mask conversion
This is one of the prerequisites for extending the number of vCPUs the hypervisor can support per guest.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
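The core of the change: instead of having callers copy a single guest longword (which caps the mask at 32 vCPUs in the worst case) and pass it by value, the conversion routine now takes a guest handle and reads the bitmap itself, one guest-sized word at a time (BITS_PER_LONG bits for native guests, 32 for 32-on-64 compat guests). Below is a minimal, self-contained sketch of that chunked walk; the demo_* names, fixed-width masks, and array bounds are invented for illustration and only approximate Xen's guest-access and cpumask machinery.

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_MAX_VCPUS 128                    /* stand-in for MAX_VIRT_CPUS */
    #define BITS_PER_WORD  (8 * sizeof(unsigned long))

    /* Stand-in for each vCPU's v->vcpu_dirty_cpumask (bit n = pCPU n). */
    static uint64_t demo_vcpu_dirty_mask[DEMO_MAX_VCPUS];

    /*
     * Chunked conversion, mirroring the patch: walk the guest bitmap one
     * word at a time so any number of vCPUs can be covered, instead of
     * relying on a single pre-copied longword. The real routine copies
     * each word from guest memory and returns -EFAULT (clearing *pmask)
     * if that copy faults.
     */
    static int demo_vcpumask_to_pcpumask(const unsigned long *bmap,
                                         unsigned int nr_words, uint64_t *pmask)
    {
        unsigned int offs, vcpu_id, vcpu_bias;
        unsigned long vmask;

        *pmask = 0;
        for ( offs = 0; offs < nr_words; ++offs )
        {
            vcpu_bias = offs * BITS_PER_WORD;     /* first vCPU ID in this word */
            if ( vcpu_bias >= DEMO_MAX_VCPUS )
                return 0;

            vmask = bmap[offs];                   /* real code: copy_from_guest_offset() */

            while ( vmask )
            {
                vcpu_id = __builtin_ctzl(vmask);  /* GCC/Clang builtin standing in
                                                     for find_first_set_bit() */
                vmask &= ~(1UL << vcpu_id);
                vcpu_id += vcpu_bias;
                if ( vcpu_id >= DEMO_MAX_VCPUS )
                    return 0;
                /* OR in the pCPUs this vCPU may have state on. */
                *pmask |= demo_vcpu_dirty_mask[vcpu_id];
            }
        }
        return 0;
    }

    int main(void)
    {
        unsigned long bmap[DEMO_MAX_VCPUS / BITS_PER_WORD] = { 0 };
        uint64_t pmask;

        demo_vcpu_dirty_mask[1]  = 1ull << 3;     /* vCPU 1 dirty on pCPU 3 */
        demo_vcpu_dirty_mask[70] = 1ull << 12;    /* vCPU 70 sits beyond word 0 */

        bmap[1 / BITS_PER_WORD]  |= 1UL << (1 % BITS_PER_WORD);
        bmap[70 / BITS_PER_WORD] |= 1UL << (70 % BITS_PER_WORD);

        demo_vcpumask_to_pcpumask(bmap, DEMO_MAX_VCPUS / BITS_PER_WORD, &pmask);
        printf("pCPU mask: %#llx\n", (unsigned long long)pmask);  /* 0x1008 */
        return 0;
    }

In the actual patch, a copy fault clears the output mask and returns -EFAULT, which the callers below turn into okay = 0 or rc.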
-rw-r--r--  xen/arch/x86/mm.c                  | 71
-rw-r--r--  xen/arch/x86/x86_64/compat/mm.c    | 13
-rw-r--r--  xen/include/asm-x86/guest_access.h |  2
3 files changed, 42 insertions(+), 44 deletions(-)
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index c1928450d2..c737d8058b 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2565,29 +2565,41 @@ static int set_foreigndom(domid_t domid)
return okay;
}
-static inline cpumask_t vcpumask_to_pcpumask(
- struct domain *d, unsigned long vmask)
+static inline int vcpumask_to_pcpumask(
+ struct domain *d, XEN_GUEST_HANDLE(const_void) bmap, cpumask_t *pmask)
{
- unsigned int vcpu_id;
- cpumask_t pmask = CPU_MASK_NONE;
+ unsigned int vcpu_id, vcpu_bias, offs;
+ unsigned long vmask;
struct vcpu *v;
+ bool_t is_native = !is_pv_32on64_domain(d);
- /*
- * Callers copy only a single guest-sized longword from the guest.
- * This must be wide enough to reference all VCPUs. Worst case is 32 bits.
- */
- BUILD_BUG_ON(MAX_VIRT_CPUS > 32);
-
- while ( vmask != 0 )
+ cpus_clear(*pmask);
+ for ( vmask = 0, offs = 0; ; ++offs )
{
- vcpu_id = find_first_set_bit(vmask);
- vmask &= ~(1UL << vcpu_id);
- if ( (vcpu_id < MAX_VIRT_CPUS) &&
- ((v = d->vcpu[vcpu_id]) != NULL) )
- cpus_or(pmask, pmask, v->vcpu_dirty_cpumask);
- }
+ vcpu_bias = offs * (is_native ? BITS_PER_LONG : 32);
+ if ( vcpu_bias >= MAX_VIRT_CPUS )
+ return 0;
- return pmask;
+ if ( unlikely(is_native ?
+ copy_from_guest_offset(&vmask, bmap, offs, 1) :
+ copy_from_guest_offset((unsigned int *)&vmask, bmap,
+ offs, 1)) )
+ {
+ cpus_clear(*pmask);
+ return -EFAULT;
+ }
+
+ while ( vmask )
+ {
+ vcpu_id = find_first_set_bit(vmask);
+ vmask &= ~(1UL << vcpu_id);
+ vcpu_id += vcpu_bias;
+ if ( (vcpu_id >= MAX_VIRT_CPUS) )
+ return 0;
+ if ( ((v = d->vcpu[vcpu_id]) != NULL) )
+ cpus_or(*pmask, *pmask, v->vcpu_dirty_cpumask);
+ }
+ }
}
#ifdef __i386__
@@ -2816,14 +2828,13 @@ int do_mmuext_op(
case MMUEXT_TLB_FLUSH_MULTI:
case MMUEXT_INVLPG_MULTI:
{
- unsigned long vmask;
cpumask_t pmask;
- if ( unlikely(copy_from_guest(&vmask, op.arg2.vcpumask, 1)) )
+
+ if ( unlikely(vcpumask_to_pcpumask(d, op.arg2.vcpumask, &pmask)) )
{
okay = 0;
break;
}
- pmask = vcpumask_to_pcpumask(d, vmask);
if ( op.cmd == MMUEXT_TLB_FLUSH_MULTI )
flush_tlb_mask(&pmask);
else
@@ -3630,7 +3641,7 @@ int do_update_va_mapping(unsigned long va, u64 val64,
struct domain *d = v->domain;
struct page_info *gl1pg;
l1_pgentry_t *pl1e;
- unsigned long vmask, bmap_ptr, gl1mfn;
+ unsigned long bmap_ptr, gl1mfn;
cpumask_t pmask;
int rc;
@@ -3682,11 +3693,9 @@ int do_update_va_mapping(unsigned long va, u64 val64,
default:
if ( this_cpu(percpu_mm_info).deferred_ops & DOP_FLUSH_ALL_TLBS )
break;
- if ( unlikely(!is_pv_32on64_domain(d) ?
- get_user(vmask, (unsigned long *)bmap_ptr) :
- get_user(vmask, (unsigned int *)bmap_ptr)) )
- rc = -EFAULT, vmask = 0;
- pmask = vcpumask_to_pcpumask(d, vmask);
+ rc = vcpumask_to_pcpumask(d, const_guest_handle_from_ptr(bmap_ptr,
+ void),
+ &pmask);
if ( cpu_isset(smp_processor_id(), pmask) )
this_cpu(percpu_mm_info).deferred_ops &= ~DOP_FLUSH_TLB;
flush_tlb_mask(&pmask);
@@ -3710,11 +3719,9 @@ int do_update_va_mapping(unsigned long va, u64 val64,
flush_tlb_one_mask(&d->domain_dirty_cpumask, va);
break;
default:
- if ( unlikely(!is_pv_32on64_domain(d) ?
- get_user(vmask, (unsigned long *)bmap_ptr) :
- get_user(vmask, (unsigned int *)bmap_ptr)) )
- rc = -EFAULT, vmask = 0;
- pmask = vcpumask_to_pcpumask(d, vmask);
+ rc = vcpumask_to_pcpumask(d, const_guest_handle_from_ptr(bmap_ptr,
+ void),
+ &pmask);
if ( this_cpu(percpu_mm_info).deferred_ops & DOP_FLUSH_TLB )
cpu_clear(smp_processor_id(), pmask);
flush_tlb_one_mask(&pmask, va);
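Both do_update_va_mapping() hunks above wrap the raw bmap_ptr address in a typed handle via const_guest_handle_from_ptr(), which the guest_access.h hunk at the end of this patch introduces. A hedged sketch of the same token-pasting pattern follows, with all demo_* names hypothetical:

    #include <stdio.h>

    /* Hypothetical miniature of Xen's typed guest handles. */
    typedef struct { const void *p; } demo_handle_const_void;

    #define DEMO_HANDLE(type) demo_handle_##type

    /* Same shape as the new const_guest_handle_from_ptr(): paste "const_"
     * onto the type name and wrap the raw address in a compound literal. */
    #define demo_const_handle_from_ptr(ptr, type) \
        ((DEMO_HANDLE(const_##type)) { (const type *)(ptr) })

    int main(void)
    {
        unsigned long bmap_ptr = 0x1000;  /* raw guest address, as in the hunks */
        demo_handle_const_void h = demo_const_handle_from_ptr(bmap_ptr, void);

        printf("%p\n", (void *)h.p);
        return 0;
    }

The const_## paste selects the const-qualified handle type, and the compound literal wraps the address without an intermediate variable.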
diff --git a/xen/arch/x86/x86_64/compat/mm.c b/xen/arch/x86/x86_64/compat/mm.c
index 0ac685f366..4ececde057 100644
--- a/xen/arch/x86/x86_64/compat/mm.c
+++ b/xen/arch/x86/x86_64/compat/mm.c
@@ -278,18 +278,7 @@ int compat_mmuext_op(XEN_GUEST_HANDLE(mmuext_op_compat_t) cmp_uops,
}
#define XLAT_mmuext_op_HNDL_arg2_vcpumask(_d_, _s_) \
- do \
- { \
- unsigned int vcpumask; \
- if ( i < --limit ) \
- { \
- (_d_)->arg2.vcpumask.p = (void *)(nat_ops.p + limit); \
- if ( copy_from_compat(&vcpumask, (_s_)->arg2.vcpumask, 1) == 0 ) \
- *(unsigned long *)(_d_)->arg2.vcpumask.p = vcpumask; \
- else \
- rc = -EFAULT; \
- } \
- } while(0)
+ guest_from_compat_handle((_d_)->arg2.vcpumask, (_s_)->arg2.vcpumask)
XLAT_mmuext_op(nat_op, &cmp_op);
#undef XLAT_mmuext_op_HNDL_arg2_vcpumask
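The eleven-line scratch-buffer translation above collapses to a single guest_from_compat_handle() because the reworked vcpumask_to_pcpumask() now reads 32-bit words itself for compat guests; the translation layer merely forwards the handle. As a rough, hedged illustration (demo_* types invented, not Xen's definitions), that forwarding amounts to zero-extending the 32-bit guest pointer into a native-width handle:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical miniatures of a compat (32-bit) and a native handle. */
    typedef struct { uint32_t c; } demo_compat_handle;   /* 32-bit guest pointer */
    typedef struct { const void *p; } demo_guest_handle; /* native-width pointer */

    /* Zero-extend the compat pointer into a native handle. */
    #define demo_guest_from_compat_handle(dst, src) \
        ((dst).p = (const void *)(uintptr_t)(src).c)

    int main(void)
    {
        demo_compat_handle ch = { 0x1000u };
        demo_guest_handle gh;

        demo_guest_from_compat_handle(gh, ch);
        printf("%p\n", (void *)gh.p);  /* now usable by native copy routines */
        return 0;
    }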
diff --git a/xen/include/asm-x86/guest_access.h b/xen/include/asm-x86/guest_access.h
index ab9cb752c0..99ea64dced 100644
--- a/xen/include/asm-x86/guest_access.h
+++ b/xen/include/asm-x86/guest_access.h
@@ -45,6 +45,8 @@
#define guest_handle_from_ptr(ptr, type) \
((XEN_GUEST_HANDLE(type)) { (type *)ptr })
+#define const_guest_handle_from_ptr(ptr, type) \
+ ((XEN_GUEST_HANDLE(const_##type)) { (const type *)ptr })
/*
* Copy an array of objects to guest context via a guest handle,