author     Jan Beulich <jbeulich@suse.com>    2013-10-04 12:32:25 +0200
committer  Jan Beulich <jbeulich@suse.com>    2013-10-04 12:32:25 +0200
commit     11b85dbd0ab068bad3beadda3aee2298205a3c01 (patch)
tree       987f35bfb9ea63de8e3e955a22b11afc0db310af
parent     c6f92aed0e209df823d2cb5780dbb1ea12fc6d4a (diff)
x86: make hvm_cpuid() tolerate NULL pointers
Now that other HVM code started making more extensive use of hvm_cpuid(),
let's not force every caller to declare dummy variables for output not
cared about.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Acked-by: Jun Nakajima <jun.nakajima@intel.com>
-rw-r--r--   xen/arch/x86/hvm/hvm.c       | 30
-rw-r--r--   xen/arch/x86/hvm/svm/svm.c   |  8
-rw-r--r--   xen/arch/x86/hvm/vmx/vvmx.c  | 10
3 files changed, 29 insertions, 19 deletions
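For illustration, here is a minimal sketch of the calling convention this change enables. It is not part of the patch; it simply mirrors the MTRR check in the hvm.c hunk below and uses only names that already appear in the patch (hvm_cpuid(), cpufeat_mask(), X86_FEATURE_MTRR). A caller that only needs the EDX output of CPUID leaf 1 can now pass NULL for the outputs it does not care about:

    unsigned int edx;

    /* Before this change, a caller had to declare dummies for every output:
     *     unsigned int eax, ebx, ecx, edx;
     *     hvm_cpuid(1, &eax, &ebx, &ecx, &edx);
     * With NULL tolerance, only the wanted output needs a variable. */
    hvm_cpuid(1, NULL, NULL, NULL, &edx);

    if ( edx & cpufeat_mask(X86_FEATURE_MTRR) )
        /* the guest sees MTRR support */;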
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index bf807bf7c8..a1a7780fcb 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2764,7 +2764,17 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
 {
     struct vcpu *v = current;
     struct domain *d = v->domain;
-    unsigned int count = *ecx;
+    unsigned int count, dummy = 0;
+
+    if ( !eax )
+        eax = &dummy;
+    if ( !ebx )
+        ebx = &dummy;
+    if ( !ecx )
+        ecx = &dummy;
+    count = *ecx;
+    if ( !edx )
+        edx = &dummy;

     if ( cpuid_viridian_leaves(input, eax, ebx, ecx, edx) )
         return;
@@ -2772,7 +2782,7 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
     if ( cpuid_hypervisor_leaves(input, count, eax, ebx, ecx, edx) )
         return;

-    domain_cpuid(d, input, *ecx, eax, ebx, ecx, edx);
+    domain_cpuid(d, input, count, eax, ebx, ecx, edx);

     switch ( input )
     {
@@ -2860,15 +2870,15 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
 {
     struct vcpu *v = current;
     uint64_t *var_range_base, *fixed_range_base;
-    int index, mtrr;
-    uint32_t cpuid[4];
+    bool_t mtrr;
+    unsigned int edx, index;
     int ret = X86EMUL_OKAY;

     var_range_base = (uint64_t *)v->arch.hvm_vcpu.mtrr.var_ranges;
     fixed_range_base = (uint64_t *)v->arch.hvm_vcpu.mtrr.fixed_ranges;

-    hvm_cpuid(1, &cpuid[0], &cpuid[1], &cpuid[2], &cpuid[3]);
-    mtrr = !!(cpuid[3] & cpufeat_mask(X86_FEATURE_MTRR));
+    hvm_cpuid(1, NULL, NULL, NULL, &edx);
+    mtrr = !!(edx & cpufeat_mask(X86_FEATURE_MTRR));

     switch ( msr )
     {
@@ -2976,15 +2986,15 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
 int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
 {
     struct vcpu *v = current;
-    int index, mtrr;
-    uint32_t cpuid[4];
+    bool_t mtrr;
+    unsigned int edx, index;
     int ret = X86EMUL_OKAY;

     HVMTRACE_3D(MSR_WRITE, msr,
                (uint32_t)msr_content, (uint32_t)(msr_content >> 32));

-    hvm_cpuid(1, &cpuid[0], &cpuid[1], &cpuid[2], &cpuid[3]);
-    mtrr = !!(cpuid[3] & cpufeat_mask(X86_FEATURE_MTRR));
+    hvm_cpuid(1, NULL, NULL, NULL, &edx);
+    mtrr = !!(edx & cpufeat_mask(X86_FEATURE_MTRR));

     hvm_memory_event_msr(msr, msr_content);
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 695b53a928..22a63a7939 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -806,13 +806,13 @@ static inline void svm_lwp_load(struct vcpu *v)
 /* Update LWP_CFG MSR (0xc0000105). Return -1 if error; otherwise returns 0. */
 static int svm_update_lwp_cfg(struct vcpu *v, uint64_t msr_content)
 {
-    unsigned int eax, ebx, ecx, edx;
+    unsigned int edx;
     uint32_t msr_low;
     static uint8_t lwp_intr_vector;

     if ( xsave_enabled(v) && cpu_has_lwp )
     {
-        hvm_cpuid(0x8000001c, &eax, &ebx, &ecx, &edx);
+        hvm_cpuid(0x8000001c, NULL, NULL, NULL, &edx);
         msr_low = (uint32_t)msr_content;

         /* generate #GP if guest tries to turn on unsupported features. */
@@ -1163,10 +1163,10 @@ static void svm_init_erratum_383(struct cpuinfo_x86 *c)
 static int svm_handle_osvw(struct vcpu *v, uint32_t msr, uint64_t *val, bool_t read)
 {
-    uint eax, ebx, ecx, edx;
+    unsigned int ecx;

     /* Guest OSVW support */
-    hvm_cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
+    hvm_cpuid(0x80000001, NULL, NULL, &ecx, NULL);
     if ( !test_bit((X86_FEATURE_OSVW & 31), &ecx) )
         return -1;
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index eb0218e0db..2b2de77441 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1815,7 +1815,7 @@ int nvmx_handle_invvpid(struct cpu_user_regs *regs)
 int nvmx_msr_read_intercept(unsigned int msr, u64 *msr_content)
 {
     struct vcpu *v = current;
-    unsigned int eax, ebx, ecx, edx, dummy;
+    unsigned int eax, ebx, ecx, edx;
     u64 data = 0, host_data = 0;
     int r = 1;
@@ -1823,7 +1823,7 @@ int nvmx_msr_read_intercept(unsigned int msr, u64 *msr_content)
         return 0;

     /* VMX capablity MSRs are available only when guest supports VMX. */
-    hvm_cpuid(0x1, &dummy, &dummy, &ecx, &edx);
+    hvm_cpuid(0x1, NULL, NULL, &ecx, &edx);
     if ( !(ecx & cpufeat_mask(X86_FEATURE_VMXE)) )
         return 0;
@@ -1975,18 +1975,18 @@ int nvmx_msr_read_intercept(unsigned int msr, u64 *msr_content)
         if ( ecx & cpufeat_mask(X86_FEATURE_XSAVE) )
             data |= X86_CR4_OSXSAVE;

-        hvm_cpuid(0x0, &eax, &dummy, &dummy, &dummy);
+        hvm_cpuid(0x0, &eax, NULL, NULL, NULL);
         switch ( eax )
         {
         default:
-            hvm_cpuid(0xa, &eax, &dummy, &dummy, &dummy);
+            hvm_cpuid(0xa, &eax, NULL, NULL, NULL);
             /* Check whether guest has the perf monitor feature. */
             if ( (eax & 0xff) && (eax & 0xff00) )
                 data |= X86_CR4_PCE;
             /* fall through */
         case 0x7 ... 0x9:
             ecx = 0;
-            hvm_cpuid(0x7, &dummy, &ebx, &ecx, &dummy);
+            hvm_cpuid(0x7, NULL, &ebx, &ecx, NULL);
             if ( ebx & cpufeat_mask(X86_FEATURE_FSGSBASE) )
                 data |= X86_CR4_FSGSBASE;
             if ( ebx & cpufeat_mask(X86_FEATURE_SMEP) )