about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author    Keir Fraser <keir.fraser@citrix.com>  2009-08-14 12:26:35 +0100
committer Keir Fraser <keir.fraser@citrix.com>  2009-08-14 12:26:35 +0100
commit fe3d9dce10a7618f941bdc7a906d023af53efa60 (patch)
tree   ffda53749876e4f862a97aa5bc036802886036c8
parent bb782513017665df4a258c8e57f3fa14d327debb (diff)
downloadxen-fe3d9dce10a7618f941bdc7a906d023af53efa60.tar.gz
xen-fe3d9dce10a7618f941bdc7a906d023af53efa60.tar.bz2
xen-fe3d9dce10a7618f941bdc7a906d023af53efa60.zip
x86: cleanup rdmsr/wrmsr
Use a 64bit value instead of extracting/merging two 32bit values.

Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
-rw-r--r--  xen/arch/x86/cpu/mcheck/mce.c        | 81
-rw-r--r--  xen/arch/x86/cpu/mcheck/mce.h        |  4
-rw-r--r--  xen/arch/x86/cpu/mcheck/mce_intel.c  |  4
-rw-r--r--  xen/arch/x86/hvm/hvm.c               |  6
-rw-r--r--  xen/arch/x86/hvm/svm/svm.c           | 25
-rw-r--r--  xen/arch/x86/hvm/viridian.c          | 23
-rw-r--r--  xen/arch/x86/hvm/vmx/vmx.c           | 17
-rw-r--r--  xen/arch/x86/traps.c                 | 27
-rw-r--r--  xen/include/asm-x86/hvm/viridian.h   |  6
-rw-r--r--  xen/include/asm-x86/processor.h      |  6
-rw-r--r--  xen/include/asm-x86/traps.h          |  4
11 files changed, 95 insertions(+), 108 deletions(-)
diff --git a/xen/arch/x86/cpu/mcheck/mce.c b/xen/arch/x86/cpu/mcheck/mce.c
index 770b9d2894..e20898ed52 100644
--- a/xen/arch/x86/cpu/mcheck/mce.c
+++ b/xen/arch/x86/cpu/mcheck/mce.c
@@ -670,34 +670,33 @@ void mce_init_msr(struct domain *d)
spin_lock_init(&d->arch.vmca_msrs.lock);
}
-int mce_rdmsr(u32 msr, u32 *lo, u32 *hi)
+int mce_rdmsr(uint32_t msr, uint64_t *val)
{
struct domain *d = current->domain;
int ret = 1;
unsigned int bank;
struct bank_entry *entry = NULL;
- *lo = *hi = 0x0;
+ *val = 0;
spin_lock(&d->arch.vmca_msrs.lock);
switch ( msr )
{
case MSR_IA32_MCG_STATUS:
- *lo = (u32)d->arch.vmca_msrs.mcg_status;
- *hi = (u32)(d->arch.vmca_msrs.mcg_status >> 32);
- if (*lo || *hi)
+ *val = d->arch.vmca_msrs.mcg_status;
+ if (*val)
gdprintk(XENLOG_DEBUG,
- "MCE: rdmsr MCG_STATUS lo %x hi %x\n", *lo, *hi);
+ "MCE: rdmsr MCG_STATUS 0x%"PRIx64"\n", *val);
break;
case MSR_IA32_MCG_CAP:
- *lo = (u32)d->arch.vmca_msrs.mcg_cap;
- *hi = (u32)(d->arch.vmca_msrs.mcg_cap >> 32);
- gdprintk(XENLOG_DEBUG, "MCE: rdmsr MCG_CAP lo %x hi %x\n", *lo, *hi);
+ *val = d->arch.vmca_msrs.mcg_cap;
+ gdprintk(XENLOG_DEBUG, "MCE: rdmsr MCG_CAP 0x%"PRIx64"\n",
+ *val);
break;
case MSR_IA32_MCG_CTL:
- *lo = (u32)d->arch.vmca_msrs.mcg_ctl;
- *hi = (u32)(d->arch.vmca_msrs.mcg_ctl >> 32);
- gdprintk(XENLOG_DEBUG, "MCE: rdmsr MCG_CTL lo %x hi %x\n", *lo, *hi);
+ *val = d->arch.vmca_msrs.mcg_ctl;
+ gdprintk(XENLOG_DEBUG, "MCE: rdmsr MCG_CTL 0x%"PRIx64"\n",
+ *val);
break;
case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * MAX_NR_BANKS - 1:
bank = (msr - MSR_IA32_MC0_CTL) / 4;
@@ -710,10 +709,9 @@ int mce_rdmsr(u32 msr, u32 *lo, u32 *hi)
switch (msr & (MSR_IA32_MC0_CTL | 3))
{
case MSR_IA32_MC0_CTL:
- *lo = (u32)d->arch.vmca_msrs.mci_ctl[bank];
- *hi = (u32)(d->arch.vmca_msrs.mci_ctl[bank] >> 32);
- gdprintk(XENLOG_DEBUG, "MCE: rdmsr MC%u_CTL lo %x hi %x\n",
- bank, *lo, *hi);
+ *val = d->arch.vmca_msrs.mci_ctl[bank];
+ gdprintk(XENLOG_DEBUG, "MCE: rdmsr MC%u_CTL 0x%"PRIx64"\n",
+ bank, *val);
break;
case MSR_IA32_MC0_STATUS:
/* Only error bank is read. Non-error banks simply return. */
@@ -722,11 +720,10 @@ int mce_rdmsr(u32 msr, u32 *lo, u32 *hi)
entry = list_entry(d->arch.vmca_msrs.impact_header.next,
struct bank_entry, list);
if (entry->bank == bank) {
- *lo = entry->mci_status;
- *hi = entry->mci_status >> 32;
+ *val = entry->mci_status;
gdprintk(XENLOG_DEBUG,
"MCE: rd MC%u_STATUS in vMCE# context "
- "lo %x hi %x\n", bank, *lo, *hi);
+ "value 0x%"PRIx64"\n", bank, *val);
}
else
entry = NULL;
@@ -739,11 +736,10 @@ int mce_rdmsr(u32 msr, u32 *lo, u32 *hi)
struct bank_entry, list);
if ( entry->bank == bank )
{
- *lo = entry->mci_addr;
- *hi = entry->mci_addr >> 32;
+ *val = entry->mci_addr;
gdprintk(XENLOG_DEBUG,
- "MCE: rd MC%u_ADDR in vMCE# context lo %x hi %x\n",
- bank, *lo, *hi);
+ "MCE: rdmsr MC%u_ADDR in vMCE# context "
+ "0x%"PRIx64"\n", bank, *val);
}
}
break;
@@ -754,11 +750,10 @@ int mce_rdmsr(u32 msr, u32 *lo, u32 *hi)
struct bank_entry, list);
if ( entry->bank == bank )
{
- *lo = entry->mci_misc;
- *hi = entry->mci_misc >> 32;
+ *val = entry->mci_misc;
gdprintk(XENLOG_DEBUG,
- "MCE: rd MC%u_MISC in vMCE# context lo %x hi %x\n",
- bank, *lo, *hi);
+ "MCE: rd MC%u_MISC in vMCE# context "
+ "0x%"PRIx64"\n", bank, *val);
}
}
break;
@@ -768,7 +763,7 @@ int mce_rdmsr(u32 msr, u32 *lo, u32 *hi)
switch ( boot_cpu_data.x86_vendor )
{
case X86_VENDOR_INTEL:
- ret = intel_mce_rdmsr(msr, lo, hi);
+ ret = intel_mce_rdmsr(msr, val);
break;
default:
ret = 0;
@@ -781,7 +776,7 @@ int mce_rdmsr(u32 msr, u32 *lo, u32 *hi)
return ret;
}
-int mce_wrmsr(u32 msr, u64 value)
+int mce_wrmsr(u32 msr, u64 val)
{
struct domain *d = current->domain;
struct bank_entry *entry = NULL;
@@ -796,18 +791,18 @@ int mce_wrmsr(u32 msr, u64 value)
switch ( msr )
{
case MSR_IA32_MCG_CTL:
- if ( value && (value + 1) )
+ if ( val && (val + 1) )
{
- gdprintk(XENLOG_WARNING, "MCE: value \"%"PRIx64"\" written "
- "to MCG_CTL should be all 0s or 1s\n", value);
+ gdprintk(XENLOG_WARNING, "MCE: val \"%"PRIx64"\" written "
+ "to MCG_CTL should be all 0s or 1s\n", val);
ret = -1;
break;
}
- d->arch.vmca_msrs.mcg_ctl = value;
+ d->arch.vmca_msrs.mcg_ctl = val;
break;
case MSR_IA32_MCG_STATUS:
- d->arch.vmca_msrs.mcg_status = value;
- gdprintk(XENLOG_DEBUG, "MCE: wrmsr MCG_STATUS %"PRIx64"\n", value);
+ d->arch.vmca_msrs.mcg_status = val;
+ gdprintk(XENLOG_DEBUG, "MCE: wrmsr MCG_STATUS %"PRIx64"\n", val);
/* For HVM guest, this is the point for deleting vMCE injection node */
if ( d->is_hvm && (d->arch.vmca_msrs.nr_injection > 0) )
{
@@ -845,15 +840,15 @@ int mce_wrmsr(u32 msr, u64 value)
switch ( msr & (MSR_IA32_MC0_CTL | 3) )
{
case MSR_IA32_MC0_CTL:
- if ( value && (value + 1) )
+ if ( val && (val + 1) )
{
- gdprintk(XENLOG_WARNING, "MCE: value written to MC%u_CTL "
+ gdprintk(XENLOG_WARNING, "MCE: val written to MC%u_CTL "
"should be all 0s or 1s (is %"PRIx64")\n",
- bank, value);
+ bank, val);
ret = -1;
break;
}
- d->arch.vmca_msrs.mci_ctl[bank] = value;
+ d->arch.vmca_msrs.mci_ctl[bank] = val;
break;
case MSR_IA32_MC0_STATUS:
/* Give the first entry of the list, it corresponds to current
@@ -866,14 +861,14 @@ int mce_wrmsr(u32 msr, u64 value)
entry = list_entry(d->arch.vmca_msrs.impact_header.next,
struct bank_entry, list);
if ( entry->bank == bank )
- entry->mci_status = value;
+ entry->mci_status = val;
gdprintk(XENLOG_DEBUG,
"MCE: wr MC%u_STATUS %"PRIx64" in vMCE#\n",
- bank, value);
+ bank, val);
}
else
gdprintk(XENLOG_DEBUG,
- "MCE: wr MC%u_STATUS %"PRIx64"\n", bank, value);
+ "MCE: wr MC%u_STATUS %"PRIx64"\n", bank, val);
break;
case MSR_IA32_MC0_ADDR:
gdprintk(XENLOG_WARNING, "MCE: MC%u_ADDR is read-only\n", bank);
@@ -889,7 +884,7 @@ int mce_wrmsr(u32 msr, u64 value)
switch ( boot_cpu_data.x86_vendor )
{
case X86_VENDOR_INTEL:
- ret = intel_mce_wrmsr(msr, value);
+ ret = intel_mce_wrmsr(msr, val);
break;
default:
ret = 0;
diff --git a/xen/arch/x86/cpu/mcheck/mce.h b/xen/arch/x86/cpu/mcheck/mce.h
index 4de9ddf053..bee5687ee8 100644
--- a/xen/arch/x86/cpu/mcheck/mce.h
+++ b/xen/arch/x86/cpu/mcheck/mce.h
@@ -25,8 +25,8 @@ void amd_nonfatal_mcheck_init(struct cpuinfo_x86 *c);
u64 mce_cap_init(void);
-int intel_mce_rdmsr(u32 msr, u32 *lo, u32 *hi);
-int intel_mce_wrmsr(u32 msr, u64 value);
+int intel_mce_rdmsr(uint32_t msr, uint64_t *val);
+int intel_mce_wrmsr(uint32_t msr, uint64_t val);
int mce_available(struct cpuinfo_x86 *c);
int mce_firstbank(struct cpuinfo_x86 *c);
diff --git a/xen/arch/x86/cpu/mcheck/mce_intel.c b/xen/arch/x86/cpu/mcheck/mce_intel.c
index 68500d395a..8b6c563b2d 100644
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c
@@ -1080,7 +1080,7 @@ int intel_mcheck_init(struct cpuinfo_x86 *c)
return 1;
}
-int intel_mce_wrmsr(u32 msr, u64 value)
+int intel_mce_wrmsr(uint32_t msr, uint64_t val)
{
int ret = 1;
@@ -1098,7 +1098,7 @@ int intel_mce_wrmsr(u32 msr, u64 value)
return ret;
}
-int intel_mce_rdmsr(u32 msr, u32 *lo, u32 *hi)
+int intel_mce_rdmsr(uint32_t msr, uint64_t *val)
{
int ret = 1;
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 650c29af23..15d2e32da5 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1782,7 +1782,6 @@ int hvm_msr_read_intercept(struct cpu_user_regs *regs)
uint64_t *var_range_base, *fixed_range_base;
int index, mtrr;
uint32_t cpuid[4];
- uint32_t lo, hi;
int ret;
var_range_base = (uint64_t *)v->arch.hvm_vcpu.mtrr.var_ranges;
@@ -1852,14 +1851,11 @@ int hvm_msr_read_intercept(struct cpu_user_regs *regs)
break;
default:
- ret = mce_rdmsr(ecx, &lo, &hi);
+ ret = mce_rdmsr(ecx, &msr_content);
if ( ret < 0 )
goto gp_fault;
else if ( ret )
- {
- msr_content = ((u64)hi << 32) | lo;
break;
- }
/* ret == 0, This is not an MCE MSR, see other MSRs */
else if (!ret)
return hvm_funcs.msr_read_intercept(regs);
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 619ff70e0c..ead3eeca31 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1085,20 +1085,23 @@ static int svm_msr_read_intercept(struct cpu_user_regs *regs)
break;
default:
- if ( rdmsr_viridian_regs(ecx, &eax, &edx) ||
- rdmsr_hypervisor_regs(ecx, &eax, &edx) ||
- rdmsr_safe(ecx, eax, edx) == 0 )
+
+ if ( rdmsr_viridian_regs(ecx, &msr_content) ||
+ rdmsr_hypervisor_regs(ecx, &msr_content) )
+ break;
+
+ if ( rdmsr_safe(ecx, eax, edx) == 0 )
{
- regs->eax = eax;
- regs->edx = edx;
- goto done;
+ msr_content = ((uint64_t)edx << 32) | eax;
+ break;
}
+
goto gpf;
}
- regs->eax = msr_content & 0xFFFFFFFF;
- regs->edx = msr_content >> 32;
- done:
+ regs->eax = (uint32_t)msr_content;
+ regs->edx = (uint32_t)(msr_content >> 32);
+
HVMTRACE_3D (MSR_READ, ecx, regs->eax, regs->edx);
HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
ecx, (unsigned long)regs->eax, (unsigned long)regs->edx);
@@ -1164,13 +1167,13 @@ static int svm_msr_write_intercept(struct cpu_user_regs *regs)
break;
default:
- if ( wrmsr_viridian_regs(ecx, regs->eax, regs->edx) )
+ if ( wrmsr_viridian_regs(ecx, msr_content) )
break;
switch ( long_mode_do_msr_write(regs) )
{
case HNDL_unhandled:
- wrmsr_hypervisor_regs(ecx, regs->eax, regs->edx);
+ wrmsr_hypervisor_regs(ecx, msr_content);
break;
case HNDL_exception_raised:
return X86EMUL_EXCEPTION;
diff --git a/xen/arch/x86/hvm/viridian.c b/xen/arch/x86/hvm/viridian.c
index 37d2615e86..ef48dfa3e9 100644
--- a/xen/arch/x86/hvm/viridian.c
+++ b/xen/arch/x86/hvm/viridian.c
@@ -129,10 +129,9 @@ static void enable_hypercall_page(void)
put_page_and_type(mfn_to_page(mfn));
}
-int wrmsr_viridian_regs(uint32_t idx, uint32_t eax, uint32_t edx)
+int wrmsr_viridian_regs(uint32_t idx, uint64_t val)
{
struct domain *d = current->domain;
- uint64_t val = ((uint64_t)edx << 32) | eax;
if ( !is_viridian_domain(d) )
return 0;
@@ -178,6 +177,7 @@ int wrmsr_viridian_regs(uint32_t idx, uint32_t eax, uint32_t edx)
break;
case VIRIDIAN_MSR_ICR: {
+ u32 eax = (u32)val, edx = (u32)(val >> 32);
struct vlapic *vlapic = vcpu_vlapic(current);
perfc_incr(mshv_wrmsr_icr);
eax &= ~(1 << 12);
@@ -190,7 +190,7 @@ int wrmsr_viridian_regs(uint32_t idx, uint32_t eax, uint32_t edx)
case VIRIDIAN_MSR_TPR:
perfc_incr(mshv_wrmsr_tpr);
- vlapic_set_reg(vcpu_vlapic(current), APIC_TASKPRI, eax & 0xff);
+ vlapic_set_reg(vcpu_vlapic(current), APIC_TASKPRI, (uint8_t)val);
break;
case VIRIDIAN_MSR_APIC_ASSIST:
@@ -224,9 +224,8 @@ int wrmsr_viridian_regs(uint32_t idx, uint32_t eax, uint32_t edx)
return 1;
}
-int rdmsr_viridian_regs(uint32_t idx, uint32_t *eax, uint32_t *edx)
+int rdmsr_viridian_regs(uint32_t idx, uint64_t *val)
{
- uint64_t val;
struct vcpu *v = current;
if ( !is_viridian_domain(v->domain) )
@@ -236,36 +235,34 @@ int rdmsr_viridian_regs(uint32_t idx, uint32_t *eax, uint32_t *edx)
{
case VIRIDIAN_MSR_GUEST_OS_ID:
perfc_incr(mshv_rdmsr_osid);
- val = v->domain->arch.hvm_domain.viridian.guest_os_id.raw;
+ *val = v->domain->arch.hvm_domain.viridian.guest_os_id.raw;
break;
case VIRIDIAN_MSR_HYPERCALL:
perfc_incr(mshv_rdmsr_hc_page);
- val = v->domain->arch.hvm_domain.viridian.hypercall_gpa.raw;
+ *val = v->domain->arch.hvm_domain.viridian.hypercall_gpa.raw;
break;
case VIRIDIAN_MSR_VP_INDEX:
perfc_incr(mshv_rdmsr_vp_index);
- val = v->vcpu_id;
+ *val = v->vcpu_id;
break;
case VIRIDIAN_MSR_ICR:
perfc_incr(mshv_rdmsr_icr);
- val = (((uint64_t)vlapic_get_reg(vcpu_vlapic(v), APIC_ICR2) << 32) |
- vlapic_get_reg(vcpu_vlapic(v), APIC_ICR));
+ *val = (((uint64_t)vlapic_get_reg(vcpu_vlapic(v), APIC_ICR2) << 32) |
+ vlapic_get_reg(vcpu_vlapic(v), APIC_ICR));
break;
case VIRIDIAN_MSR_TPR:
perfc_incr(mshv_rdmsr_tpr);
- val = vlapic_get_reg(vcpu_vlapic(v), APIC_TASKPRI);
+ *val = vlapic_get_reg(vcpu_vlapic(v), APIC_TASKPRI);
break;
default:
return 0;
}
- *eax = val;
- *edx = val >> 32;
return 1;
}
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 6a1a93394f..1bc8f4509c 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1849,13 +1849,14 @@ static int vmx_msr_read_intercept(struct cpu_user_regs *regs)
break;
}
- if ( rdmsr_viridian_regs(ecx, &eax, &edx) ||
- rdmsr_hypervisor_regs(ecx, &eax, &edx) ||
- rdmsr_safe(ecx, eax, edx) == 0 )
+ if ( rdmsr_viridian_regs(ecx, &msr_content) ||
+ rdmsr_hypervisor_regs(ecx, &msr_content) )
+ break;
+
+ if ( rdmsr_safe(ecx, eax, edx) == 0 )
{
- regs->eax = eax;
- regs->edx = edx;
- goto done;
+ msr_content = ((uint64_t)edx << 32) | eax;
+ break;
}
goto gp_fault;
@@ -2029,7 +2030,7 @@ static int vmx_msr_write_intercept(struct cpu_user_regs *regs)
if ( passive_domain_do_wrmsr(regs) )
return X86EMUL_OKAY;
- if ( wrmsr_viridian_regs(ecx, regs->eax, regs->edx) )
+ if ( wrmsr_viridian_regs(ecx, msr_content) )
break;
switch ( long_mode_do_msr_write(regs) )
@@ -2037,7 +2038,7 @@ static int vmx_msr_write_intercept(struct cpu_user_regs *regs)
case HNDL_unhandled:
if ( (vmx_write_guest_msr(ecx, msr_content) != 0) &&
!is_last_branch_msr(ecx) )
- wrmsr_hypervisor_regs(ecx, regs->eax, regs->edx);
+ wrmsr_hypervisor_regs(ecx, msr_content);
break;
case HNDL_exception_raised:
return X86EMUL_EXCEPTION;
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index b26e89f0b9..61e3407f0e 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -603,8 +603,7 @@ DO_ERROR_NOCODE(TRAP_copro_error, coprocessor_error)
DO_ERROR( TRAP_alignment_check, alignment_check)
DO_ERROR_NOCODE(TRAP_simd_error, simd_coprocessor_error)
-int rdmsr_hypervisor_regs(
- uint32_t idx, uint32_t *eax, uint32_t *edx)
+int rdmsr_hypervisor_regs(uint32_t idx, uint64_t *val)
{
struct domain *d = current->domain;
/* Optionally shift out of the way of Viridian architectural MSRs. */
@@ -618,7 +617,7 @@ int rdmsr_hypervisor_regs(
{
case 0:
{
- *eax = *edx = 0;
+ *val = 0;
break;
}
default:
@@ -628,8 +627,7 @@ int rdmsr_hypervisor_regs(
return 1;
}
-int wrmsr_hypervisor_regs(
- uint32_t idx, uint32_t eax, uint32_t edx)
+int wrmsr_hypervisor_regs(uint32_t idx, uint64_t val)
{
struct domain *d = current->domain;
/* Optionally shift out of the way of Viridian architectural MSRs. */
@@ -643,10 +641,10 @@ int wrmsr_hypervisor_regs(
{
case 0:
{
- void *hypercall_page;
+ void *hypercall_page;
unsigned long mfn;
- unsigned long gmfn = ((unsigned long)edx << 20) | (eax >> 12);
- unsigned int idx = eax & 0xfff;
+ unsigned long gmfn = val >> 12;
+ unsigned int idx = val & 0xfff;
if ( idx > 0 )
{
@@ -1696,7 +1694,8 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
unsigned long code_base, code_limit;
char io_emul_stub[32];
void (*io_emul)(struct cpu_user_regs *) __attribute__((__regparm__(1)));
- u32 l, h;
+ uint32_t l, h;
+ uint64_t val;
if ( !read_descriptor(regs->cs, v, regs,
&code_base, &code_limit, &ar,
@@ -2246,7 +2245,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
goto fail;
break;
default:
- if ( wrmsr_hypervisor_regs(regs->ecx, eax, edx) )
+ if ( wrmsr_hypervisor_regs(regs->ecx, val) )
break;
rc = mce_wrmsr(regs->ecx, val);
@@ -2328,15 +2327,15 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
case MSR_EFER:
case MSR_AMD_PATCHLEVEL:
default:
- if ( rdmsr_hypervisor_regs(regs->ecx, &l, &h) )
+ if ( rdmsr_hypervisor_regs(regs->ecx, &val) )
{
rdmsr_writeback:
- regs->eax = l;
- regs->edx = h;
+ regs->eax = (uint32_t)val;
+ regs->edx = (uint32_t)(val >> 32);
break;
}
- rc = mce_rdmsr(regs->ecx, &l, &h);
+ rc = mce_rdmsr(regs->ecx, &val);
if ( rc < 0 )
goto fail;
if ( rc )
diff --git a/xen/include/asm-x86/hvm/viridian.h b/xen/include/asm-x86/hvm/viridian.h
index ac16966172..d8509cb081 100644
--- a/xen/include/asm-x86/hvm/viridian.h
+++ b/xen/include/asm-x86/hvm/viridian.h
@@ -50,14 +50,12 @@ cpuid_viridian_leaves(
int
wrmsr_viridian_regs(
uint32_t idx,
- uint32_t eax,
- uint32_t edx);
+ uint64_t val);
int
rdmsr_viridian_regs(
uint32_t idx,
- uint32_t *eax,
- uint32_t *edx);
+ uint64_t *val);
int
viridian_hypercall(struct cpu_user_regs *regs);
diff --git a/xen/include/asm-x86/processor.h b/xen/include/asm-x86/processor.h
index 9d0e77460c..61c9dde637 100644
--- a/xen/include/asm-x86/processor.h
+++ b/xen/include/asm-x86/processor.h
@@ -551,10 +551,8 @@ void cpu_mcheck_disable(void);
int cpuid_hypervisor_leaves(
uint32_t idx, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
-int rdmsr_hypervisor_regs(
- uint32_t idx, uint32_t *eax, uint32_t *edx);
-int wrmsr_hypervisor_regs(
- uint32_t idx, uint32_t eax, uint32_t edx);
+int rdmsr_hypervisor_regs(uint32_t idx, uint64_t *val);
+int wrmsr_hypervisor_regs(uint32_t idx, uint64_t val);
int microcode_update(XEN_GUEST_HANDLE(const_void), unsigned long len);
int microcode_resume_cpu(int cpu);
diff --git a/xen/include/asm-x86/traps.h b/xen/include/asm-x86/traps.h
index 780de94a9a..7c2daf5877 100644
--- a/xen/include/asm-x86/traps.h
+++ b/xen/include/asm-x86/traps.h
@@ -49,7 +49,7 @@ extern int send_guest_trap(struct domain *d, uint16_t vcpuid,
/* Guest vMCE MSRs virtualization */
extern void mce_init_msr(struct domain *d);
-extern int mce_wrmsr(u32 msr, u64 value);
-extern int mce_rdmsr(u32 msr, u32 *lo, u32 *hi);
+extern int mce_wrmsr(uint32_t msr, uint64_t val);
+extern int mce_rdmsr(uint32_t msr, uint64_t *val);
#endif /* ASM_TRAP_H */