aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKeir Fraser <keir.fraser@citrix.com>2008-10-14 10:45:29 +0100
committerKeir Fraser <keir.fraser@citrix.com>2008-10-14 10:45:29 +0100
commit39f97ffa298bba63e727ecce0117db95f0bb17f7 (patch)
treecde5f544991c3de8f56384cf12ce139fe8c87ee5
parentb525c05cf95fa9528cfb83ecc120087881013980 (diff)
downloadxen-39f97ffa298bba63e727ecce0117db95f0bb17f7.tar.gz
xen-39f97ffa298bba63e727ecce0117db95f0bb17f7.tar.bz2
xen-39f97ffa298bba63e727ecce0117db95f0bb17f7.zip
x86, hvm: Hyper-V guest interface support with small set of enlightenments
A minimal implementation of the Viridian (Hyper-V) guest interface. The only enlightenments advertised and supported are vAPIC MSRs and long-spin-wait notifications. The set of enlightenments can easily be extended in future, as they are found to provide a performance win, and configured via an extended HVM_PARAM_VIRIDIAN hvm parameter. Signed-off-by: Peter Johnston <peter.johnston@citrix.com> Signed-off-by: Tim Deegan <tim.deegan@citrix.com> Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
-rw-r--r--tools/firmware/hvmloader/hvmloader.c26
-rw-r--r--tools/python/xen/xend/XendConfig.py3
-rw-r--r--tools/python/xen/xend/XendConstants.py7
-rw-r--r--tools/python/xen/xend/XendDomainInfo.py5
-rw-r--r--tools/python/xen/xm/create.py8
-rw-r--r--tools/python/xen/xm/xenapi_create.py1
-rw-r--r--unmodified_drivers/linux-2.6/platform-pci/platform-pci.c51
-rw-r--r--xen/arch/x86/hvm/Makefile1
-rw-r--r--xen/arch/x86/hvm/hvm.c10
-rw-r--r--xen/arch/x86/hvm/svm/svm.c6
-rw-r--r--xen/arch/x86/hvm/viridian.c350
-rw-r--r--xen/arch/x86/hvm/vlapic.c2
-rw-r--r--xen/arch/x86/hvm/vmx/vmx.c7
-rw-r--r--xen/arch/x86/traps.c24
-rw-r--r--xen/arch/x86/x86_emulate/x86_emulate.c1517
-rw-r--r--xen/include/asm-x86/hvm/domain.h3
-rw-r--r--xen/include/asm-x86/hvm/hvm.h3
-rw-r--r--xen/include/asm-x86/hvm/viridian.h65
-rw-r--r--xen/include/asm-x86/hvm/vlapic.h4
-rw-r--r--xen/include/asm-x86/perfc_defn.h16
-rw-r--r--xen/include/public/arch-x86/hvm/save.h13
-rw-r--r--xen/include/public/hvm/params.h7
22 files changed, 1317 insertions, 812 deletions
diff --git a/tools/firmware/hvmloader/hvmloader.c b/tools/firmware/hvmloader/hvmloader.c
index 050a6266f6..9dff7cc08d 100644
--- a/tools/firmware/hvmloader/hvmloader.c
+++ b/tools/firmware/hvmloader/hvmloader.c
@@ -101,30 +101,36 @@ asm (
static enum { VGA_none, VGA_std, VGA_cirrus } virtual_vga = VGA_none;
-static void
-init_hypercalls(void)
+static void init_hypercalls(void)
{
uint32_t eax, ebx, ecx, edx;
unsigned long i;
char signature[13];
xen_extraversion_t extraversion;
+ uint32_t base;
- cpuid(0x40000000, &eax, &ebx, &ecx, &edx);
+ for ( base = 0x40000000; base < 0x40001000; base += 0x100 )
+ {
+ cpuid(base, &eax, &ebx, &ecx, &edx);
+
+ *(uint32_t *)(signature + 0) = ebx;
+ *(uint32_t *)(signature + 4) = ecx;
+ *(uint32_t *)(signature + 8) = edx;
+ signature[12] = '\0';
- *(uint32_t *)(signature + 0) = ebx;
- *(uint32_t *)(signature + 4) = ecx;
- *(uint32_t *)(signature + 8) = edx;
- signature[12] = '\0';
+ if ( !strcmp("XenVMMXenVMM", signature) )
+ break;
+ }
- BUG_ON(strcmp("XenVMMXenVMM", signature) || (eax < 0x40000002));
+ BUG_ON(strcmp("XenVMMXenVMM", signature) || ((eax - base) < 2));
/* Fill in hypercall transfer pages. */
- cpuid(0x40000002, &eax, &ebx, &ecx, &edx);
+ cpuid(base + 2, &eax, &ebx, &ecx, &edx);
for ( i = 0; i < eax; i++ )
wrmsr(ebx, HYPERCALL_PHYSICAL_ADDRESS + (i << 12) + i);
/* Print version information. */
- cpuid(0x40000001, &eax, &ebx, &ecx, &edx);
+ cpuid(base + 1, &eax, &ebx, &ecx, &edx);
hypercall_xen_version(XENVER_extraversion, extraversion);
printf("Detected Xen v%u.%u%s\n", eax >> 16, eax & 0xffff, extraversion);
}
diff --git a/tools/python/xen/xend/XendConfig.py b/tools/python/xen/xend/XendConfig.py
index bf7c6dcdc9..a7431bd7ef 100644
--- a/tools/python/xen/xend/XendConfig.py
+++ b/tools/python/xen/xend/XendConfig.py
@@ -155,6 +155,7 @@ XENAPI_PLATFORM_CFG_TYPES = {
'vncdisplay': int,
'vnclisten': str,
'timer_mode': int,
+ 'viridian': int,
'vncpasswd': str,
'vncunused': int,
'xauthority': str,
@@ -442,6 +443,8 @@ class XendConfig(dict):
if self.is_hvm():
if 'timer_mode' not in self['platform']:
self['platform']['timer_mode'] = 1
+ if 'viridian' not in self['platform']:
+ self['platform']['viridian'] = 0
if 'rtc_timeoffset' not in self['platform']:
self['platform']['rtc_timeoffset'] = 0
if 'hpet' not in self['platform']:
diff --git a/tools/python/xen/xend/XendConstants.py b/tools/python/xen/xend/XendConstants.py
index 8b98286841..13e046a086 100644
--- a/tools/python/xen/xend/XendConstants.py
+++ b/tools/python/xen/xend/XendConstants.py
@@ -43,9 +43,10 @@ HVM_PARAM_STORE_EVTCHN = 2
HVM_PARAM_PAE_ENABLED = 4
HVM_PARAM_IOREQ_PFN = 5
HVM_PARAM_BUFIOREQ_PFN = 6
-HVM_PARAM_NVRAM_FD = 7
-HVM_PARAM_VHPT_SIZE = 8
-HVM_PARAM_BUFPIOREQ_PFN = 9
+HVM_PARAM_NVRAM_FD = 7 # ia64
+HVM_PARAM_VHPT_SIZE = 8 # ia64
+HVM_PARAM_BUFPIOREQ_PFN = 9 # ia64
+HVM_PARAM_VIRIDIAN = 9 # x86
HVM_PARAM_TIMER_MODE = 10
HVM_PARAM_HPET_ENABLED = 11
HVM_PARAM_ACPI_S_STATE = 14
diff --git a/tools/python/xen/xend/XendDomainInfo.py b/tools/python/xen/xend/XendDomainInfo.py
index 77b2598b1c..faf96a79c6 100644
--- a/tools/python/xen/xend/XendDomainInfo.py
+++ b/tools/python/xen/xend/XendDomainInfo.py
@@ -2078,6 +2078,11 @@ class XendDomainInfo:
xc.hvm_set_param(self.domid, HVM_PARAM_TIMER_MODE,
long(timer_mode))
+ # Set Viridian interface configuration of domain
+ viridian = self.info["platform"].get("viridian")
+ if arch.type == "x86" and hvm and viridian is not None:
+ xc.hvm_set_param(self.domid, HVM_PARAM_VIRIDIAN, long(viridian))
+
# Optionally enable virtual HPET
hpet = self.info["platform"].get("hpet")
if hvm and hpet is not None:
diff --git a/tools/python/xen/xm/create.py b/tools/python/xen/xm/create.py
index dc333e4707..6c4753be5e 100644
--- a/tools/python/xen/xm/create.py
+++ b/tools/python/xen/xm/create.py
@@ -218,6 +218,11 @@ gopts.var('timer_mode', val='TIMER_MODE',
use="""Timer mode (0=delay virtual time when ticks are missed;
1=virtual time is always wallclock time.""")
+gopts.var('viridian', val='VIRIDIAN',
+ fn=set_int, default=0,
+ use="""Expose Viridian interface to x86 HVM guest?
+ (Default is 0).""")
+
gopts.var('acpi', val='ACPI',
fn=set_int, default=1,
use="Disable or enable ACPI of HVM domain.")
@@ -856,7 +861,8 @@ def configure_hvm(config_image, vals):
'vnc', 'vncdisplay', 'vncunused', 'vncconsole', 'vnclisten',
'sdl', 'display', 'xauthority', 'rtc_timeoffset', 'monitor',
'acpi', 'apic', 'usb', 'usbdevice', 'keymap', 'pci', 'hpet',
- 'guest_os_type', 'hap', 'opengl', 'cpuid', 'cpuid_check']
+ 'guest_os_type', 'hap', 'opengl', 'cpuid', 'cpuid_check',
+ 'viridian' ]
for a in args:
if a in vals.__dict__ and vals.__dict__[a] is not None:
diff --git a/tools/python/xen/xm/xenapi_create.py b/tools/python/xen/xm/xenapi_create.py
index ceac76f1e1..60992566b0 100644
--- a/tools/python/xen/xm/xenapi_create.py
+++ b/tools/python/xen/xm/xenapi_create.py
@@ -969,6 +969,7 @@ class sxp2xml:
'usbdevice',
'hpet',
'timer_mode',
+ 'viridian',
'vhpt',
'guest_os_type',
'hap',
diff --git a/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c b/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c
index 3d3a4c79a1..a1c17dd7db 100644
--- a/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c
+++ b/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c
@@ -111,26 +111,37 @@ unsigned long alloc_xen_mmio(unsigned long len)
#ifndef __ia64__
-static int init_hypercall_stubs(void)
+static uint32_t xen_cpuid_base(void)
{
- uint32_t eax, ebx, ecx, edx, pages, msr, i;
+ uint32_t base, eax, ebx, ecx, edx;
char signature[13];
- cpuid(0x40000000, &eax, &ebx, &ecx, &edx);
- *(uint32_t*)(signature + 0) = ebx;
- *(uint32_t*)(signature + 4) = ecx;
- *(uint32_t*)(signature + 8) = edx;
- signature[12] = 0;
+ for (base = 0x40000000; base < 0x40001000; base += 0x100) {
+ cpuid(base, &eax, &ebx, &ecx, &edx);
+ *(uint32_t*)(signature + 0) = ebx;
+ *(uint32_t*)(signature + 4) = ecx;
+ *(uint32_t*)(signature + 8) = edx;
+ signature[12] = 0;
+
+ if (!strcmp("XenVMMXenVMM", signature) && ((eax - base) >= 2))
+ return base;
+ }
+
+ return 0;
+}
- if (strcmp("XenVMMXenVMM", signature) || (eax < 0x40000002)) {
+static int init_hypercall_stubs(void)
+{
+ uint32_t eax, ebx, ecx, edx, pages, msr, i, base;
+
+ base = xen_cpuid_base();
+ if (base == 0) {
printk(KERN_WARNING
- "Detected Xen platform device but not Xen VMM?"
- " (sig %s, eax %x)\n",
- signature, eax);
+ "Detected Xen platform device but not Xen VMM?\n");
return -EINVAL;
}
- cpuid(0x40000001, &eax, &ebx, &ecx, &edx);
+ cpuid(base + 1, &eax, &ebx, &ecx, &edx);
printk(KERN_INFO "Xen version %d.%d.\n", eax >> 16, eax & 0xffff);
@@ -138,7 +149,7 @@ static int init_hypercall_stubs(void)
* Find largest supported number of hypercall pages.
* We'll create as many as possible up to this number.
*/
- cpuid(0x40000002, &pages, &msr, &ecx, &edx);
+ cpuid(base + 2, &pages, &msr, &ecx, &edx);
/*
* Use __vmalloc() because vmalloc_exec() is not an exported symbol.
@@ -174,18 +185,12 @@ static int init_hypercall_stubs(void)
static void resume_hypercall_stubs(void)
{
- uint32_t eax, ebx, ecx, edx, pages, msr, i;
- char signature[13];
-
- cpuid(0x40000000, &eax, &ebx, &ecx, &edx);
- *(uint32_t*)(signature + 0) = ebx;
- *(uint32_t*)(signature + 4) = ecx;
- *(uint32_t*)(signature + 8) = edx;
- signature[12] = 0;
+ uint32_t base, ecx, edx, pages, msr, i;
- BUG_ON(strcmp("XenVMMXenVMM", signature) || (eax < 0x40000002));
+ base = xen_cpuid_base();
+ BUG_ON(base == 0);
- cpuid(0x40000002, &pages, &msr, &ecx, &edx);
+ cpuid(base + 2, &pages, &msr, &ecx, &edx);
if (pages > max_hypercall_stub_pages)
pages = max_hypercall_stub_pages;
diff --git a/xen/arch/x86/hvm/Makefile b/xen/arch/x86/hvm/Makefile
index 39d2f72919..f950f1d2a5 100644
--- a/xen/arch/x86/hvm/Makefile
+++ b/xen/arch/x86/hvm/Makefile
@@ -18,3 +18,4 @@ obj-y += vpic.o
obj-y += save.o
obj-y += vmsi.o
obj-y += stdvga.o
+obj-y += viridian.o
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 6c6b0db13b..64116f2bf3 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1636,6 +1636,9 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
{
struct vcpu *v = current;
+ if ( cpuid_viridian_leaves(input, eax, ebx, ecx, edx) )
+ return;
+
if ( cpuid_hypervisor_leaves(input, eax, ebx, ecx, edx) )
return;
@@ -1954,6 +1957,9 @@ int hvm_do_hypercall(struct cpu_user_regs *regs)
break;
}
+ if ( (eax & 0x80000000) && is_viridian_domain(curr->domain) )
+ return viridian_hypercall(regs);
+
if ( (eax >= NR_hypercalls) || !hvm_hypercall32_table[eax] )
{
regs->eax = -ENOSYS;
@@ -2380,6 +2386,10 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
if ( a.value > HVMPTM_one_missed_tick_pending )
rc = -EINVAL;
break;
+ case HVM_PARAM_VIRIDIAN:
+ if ( a.value > 1 )
+ rc = -EINVAL;
+ break;
case HVM_PARAM_IDENT_PT:
rc = -EPERM;
if ( !IS_PRIV(current->domain) )
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index a2179d6ebc..c635f5204a 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1005,7 +1005,8 @@ static int svm_msr_read_intercept(struct cpu_user_regs *regs)
break;
default:
- if ( rdmsr_hypervisor_regs(ecx, &eax, &edx) ||
+ if ( rdmsr_viridian_regs(ecx, &eax, &edx) ||
+ rdmsr_hypervisor_regs(ecx, &eax, &edx) ||
rdmsr_safe(ecx, eax, edx) == 0 )
{
regs->eax = eax;
@@ -1073,6 +1074,9 @@ static int svm_msr_write_intercept(struct cpu_user_regs *regs)
break;
default:
+ if ( wrmsr_viridian_regs(ecx, regs->eax, regs->edx) )
+ break;
+
switch ( long_mode_do_msr_write(regs) )
{
case HNDL_unhandled:
diff --git a/xen/arch/x86/hvm/viridian.c b/xen/arch/x86/hvm/viridian.c
new file mode 100644
index 0000000000..ca3dd1378c
--- /dev/null
+++ b/xen/arch/x86/hvm/viridian.c
@@ -0,0 +1,350 @@
+/******************************************************************************
+ * viridian.c
+ *
+ * An implementation of the Viridian hypercall interface.
+ */
+
+#include <xen/sched.h>
+#include <xen/version.h>
+#include <xen/perfc.h>
+#include <xen/hypercall.h>
+#include <xen/domain_page.h>
+#include <asm/paging.h>
+#include <asm/p2m.h>
+#include <asm/hvm/support.h>
+#include <public/sched.h>
+#include <public/hvm/hvm_op.h>
+
+/* Viridian MSR numbers. */
+#define VIRIDIAN_MSR_GUEST_OS_ID 0x40000000
+#define VIRIDIAN_MSR_HYPERCALL 0x40000001
+#define VIRIDIAN_MSR_VP_INDEX 0x40000002
+#define VIRIDIAN_MSR_EOI 0x40000070
+#define VIRIDIAN_MSR_ICR 0x40000071
+#define VIRIDIAN_MSR_TPR 0x40000072
+
+/* Viridian Hypercall Status Codes. */
+#define HV_STATUS_SUCCESS 0x0000
+#define HV_STATUS_INVALID_HYPERCALL_CODE 0x0002
+
+/* Viridian Hypercall Codes and Parameters. */
+#define HvNotifyLongSpinWait 8
+
+/* Viridian CPUID 4000003, Viridian MSR availability. */
+#define CPUID3A_MSR_APIC_ACCESS (1 << 4)
+#define CPUID3A_MSR_HYPERCALL (1 << 5)
+#define CPUID3A_MSR_VP_INDEX (1 << 6)
+
+/* Viridian CPUID 4000004, Implementation Recommendations. */
+#define CPUID4A_MSR_BASED_APIC (1 << 3)
+
+int cpuid_viridian_leaves(unsigned int leaf, unsigned int *eax,
+ unsigned int *ebx, unsigned int *ecx,
+ unsigned int *edx)
+{
+ struct domain *d = current->domain;
+
+ if ( !is_viridian_domain(d) )
+ return 0;
+
+ leaf -= 0x40000000;
+ if ( leaf > 5 )
+ return 0;
+
+ *eax = *ebx = *ecx = *edx = 0;
+ switch ( leaf )
+ {
+ case 0:
+ *eax = 0x40000005; /* Maximum leaf */
+ *ebx = 0x7263694d; /* Magic numbers */
+ *ecx = 0x666F736F;
+ *edx = 0x76482074;
+ break;
+ case 1:
+ *eax = 0x31237648; /* Version number */
+ break;
+ case 2:
+ /* Hypervisor information, but only if the guest has set its
+ own version number. */
+ if ( d->arch.hvm_domain.viridian.guest_os_id.raw == 0 )
+ break;
+ *eax = 1; /* Build number */
+ *ebx = (xen_major_version() << 16) | xen_minor_version();
+ *ecx = 0; /* SP */
+ *edx = 0; /* Service branch and number */
+ break;
+ case 3:
+ /* Which hypervisor MSRs are available to the guest */
+ *eax = (CPUID3A_MSR_APIC_ACCESS |
+ CPUID3A_MSR_HYPERCALL |
+ CPUID3A_MSR_VP_INDEX);
+ break;
+ case 4:
+ /* Recommended hypercall usage. */
+ if ( (d->arch.hvm_domain.viridian.guest_os_id.raw == 0) ||
+ (d->arch.hvm_domain.viridian.guest_os_id.fields.os < 4) )
+ break;
+ *eax = CPUID4A_MSR_BASED_APIC;
+ *ebx = 2047; /* long spin count */
+ break;
+ }
+
+ return 1;
+}
+
+static void enable_hypercall_page(void)
+{
+ struct domain *d = current->domain;
+ unsigned long gmfn = d->arch.hvm_domain.viridian.hypercall_gpa.fields.pfn;
+ unsigned long mfn = gmfn_to_mfn(d, gmfn);
+ uint8_t *p;
+
+ if ( !mfn_valid(mfn) ||
+ !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
+ {
+ gdprintk(XENLOG_WARNING, "Bad GMFN %lx (MFN %lx)\n", gmfn, mfn);
+ return;
+ }
+
+ p = map_domain_page(mfn);
+
+ /*
+ * We set the bit 31 in %eax (reserved field in the Viridian hypercall
+ * calling convention) to differentiate Xen and Viridian hypercalls.
+ */
+ *(u8 *)(p + 0) = 0x0d; /* orl $0x80000000, %eax */
+ *(u32 *)(p + 1) = 0x80000000;
+ *(u8 *)(p + 5) = 0x0f; /* vmcall/vmmcall */
+ *(u8 *)(p + 6) = 0x01;
+ *(u8 *)(p + 7) = ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+ ? 0xc1 : 0xd9);
+ *(u8 *)(p + 8) = 0xc3; /* ret */
+ memset(p + 9, 0xcc, PAGE_SIZE - 9); /* int3, int3, ... */
+
+ unmap_domain_page(p);
+
+ put_page_and_type(mfn_to_page(mfn));
+}
+
+int wrmsr_viridian_regs(uint32_t idx, uint32_t eax, uint32_t edx)
+{
+ struct domain *d = current->domain;
+ uint64_t val = ((uint64_t)edx << 32) | eax;
+
+ if ( !is_viridian_domain(d) )
+ return 0;
+
+ switch ( idx )
+ {
+ case VIRIDIAN_MSR_GUEST_OS_ID:
+ perfc_incr(mshv_wrmsr_osid);
+ d->arch.hvm_domain.viridian.guest_os_id.raw = val;
+ gdprintk(XENLOG_INFO, "Guest os:\n");
+ gdprintk(XENLOG_INFO, "\tvendor: %x\n",
+ d->arch.hvm_domain.viridian.guest_os_id.fields.vendor);
+ gdprintk(XENLOG_INFO, "\tos: %x\n",
+ d->arch.hvm_domain.viridian.guest_os_id.fields.os);
+ gdprintk(XENLOG_INFO, "\tmajor: %x\n",
+ d->arch.hvm_domain.viridian.guest_os_id.fields.major);
+ gdprintk(XENLOG_INFO, "\tminor: %x\n",
+ d->arch.hvm_domain.viridian.guest_os_id.fields.minor);
+ gdprintk(XENLOG_INFO, "\tsp: %x\n",
+ d->arch.hvm_domain.viridian.guest_os_id.fields.service_pack);
+ gdprintk(XENLOG_INFO, "\tbuild: %x\n",
+ d->arch.hvm_domain.viridian.guest_os_id.fields.build_number);
+ break;
+
+ case VIRIDIAN_MSR_HYPERCALL:
+ perfc_incr(mshv_wrmsr_hc_page);
+ gdprintk(XENLOG_INFO, "Set hypercall page %"PRIx64".\n", val);
+ if ( d->arch.hvm_domain.viridian.guest_os_id.raw == 0 )
+ break;
+ d->arch.hvm_domain.viridian.hypercall_gpa.raw = val;
+ if ( d->arch.hvm_domain.viridian.hypercall_gpa.fields.enabled )
+ enable_hypercall_page();
+ break;
+
+ case VIRIDIAN_MSR_VP_INDEX:
+ perfc_incr(mshv_wrmsr_vp_index);
+ gdprintk(XENLOG_INFO, "Set VP index %"PRIu64".\n", val);
+ break;
+
+ case VIRIDIAN_MSR_EOI:
+ perfc_incr(mshv_wrmsr_eoi);
+ vlapic_EOI_set(vcpu_vlapic(current));
+ break;
+
+ case VIRIDIAN_MSR_ICR: {
+ struct vlapic *vlapic = vcpu_vlapic(current);
+ perfc_incr(mshv_wrmsr_icr);
+ eax &= ~(1 << 12);
+ edx &= 0xff000000;
+ vlapic_set_reg(vlapic, APIC_ICR2, edx);
+ if ( vlapic_ipi(vlapic, eax, edx) == X86EMUL_OKAY )
+ vlapic_set_reg(vlapic, APIC_ICR, eax);
+ break;
+ }
+
+ case VIRIDIAN_MSR_TPR:
+ perfc_incr(mshv_wrmsr_tpr);
+ vlapic_set_reg(vcpu_vlapic(current), APIC_TASKPRI, eax & 0xff);
+ break;
+
+ default:
+ return 0;
+ }
+
+ return 1;
+}
+
+int rdmsr_viridian_regs(uint32_t idx, uint32_t *eax, uint32_t *edx)
+{
+ uint64_t val;
+ struct vcpu *v = current;
+
+ if ( !is_viridian_domain(v->domain) )
+ return 0;
+
+ switch ( idx )
+ {
+ case VIRIDIAN_MSR_GUEST_OS_ID:
+ perfc_incr(mshv_rdmsr_osid);
+ val = v->domain->arch.hvm_domain.viridian.guest_os_id.raw;
+ break;
+
+ case VIRIDIAN_MSR_HYPERCALL:
+ perfc_incr(mshv_rdmsr_hc_page);
+ val = v->domain->arch.hvm_domain.viridian.hypercall_gpa.raw;
+ break;
+
+ case VIRIDIAN_MSR_VP_INDEX:
+ perfc_incr(mshv_rdmsr_vp_index);
+ val = v->vcpu_id;
+ break;
+
+ case VIRIDIAN_MSR_ICR:
+ perfc_incr(mshv_rdmsr_icr);
+ val = (((uint64_t)vlapic_get_reg(vcpu_vlapic(v), APIC_ICR2) << 32) |
+ vlapic_get_reg(vcpu_vlapic(v), APIC_ICR));
+ break;
+
+ case VIRIDIAN_MSR_TPR:
+ perfc_incr(mshv_rdmsr_tpr);
+ val = vlapic_get_reg(vcpu_vlapic(v), APIC_TASKPRI);
+ break;
+
+ default:
+ return 0;
+ }
+
+ *eax = val;
+ *edx = val >> 32;
+ return 1;
+}
+
+int viridian_hypercall(struct cpu_user_regs *regs)
+{
+ struct domain *d = current->domain;
+ int mode = hvm_guest_x86_mode(current);
+ unsigned long input_params_gpa, output_params_gpa;
+ uint16_t status = HV_STATUS_SUCCESS;
+
+ union hypercall_input {
+ uint64_t raw;
+ struct {
+ uint16_t call_code;
+ uint16_t rsvd1;
+ unsigned rep_count:12;
+ unsigned rsvd2:4;
+ unsigned rep_start:12;
+ unsigned rsvd3:4;
+ };
+ } input;
+
+ union hypercall_output {
+ uint64_t raw;
+ struct {
+ uint16_t result;
+ uint16_t rsvd1;
+ unsigned rep_complete:12;
+ unsigned rsvd2:20;
+ };
+ } output = { 0 };
+
+ ASSERT(is_viridian_domain(d));
+
+ switch ( mode )
+ {
+#ifdef __x86_64__
+ case 8:
+ input.raw = regs->rcx;
+ input_params_gpa = regs->rdx;
+ output_params_gpa = regs->r8;
+ break;
+#endif
+ case 4:
+ input.raw = ((uint64_t)regs->edx << 32) | regs->eax;
+ input_params_gpa = ((uint64_t)regs->ebx << 32) | regs->ecx;
+ output_params_gpa = ((uint64_t)regs->edi << 32) | regs->esi;
+ break;
+ default:
+ goto out;
+ }
+
+ switch ( input.call_code )
+ {
+ case HvNotifyLongSpinWait:
+ perfc_incr(mshv_call_long_wait);
+ do_sched_op_compat(SCHEDOP_yield, 0);
+ status = HV_STATUS_SUCCESS;
+ break;
+ default:
+ status = HV_STATUS_INVALID_HYPERCALL_CODE;
+ break;
+ }
+
+out:
+ output.result = status;
+ switch (mode) {
+#ifdef __x86_64__
+ case 8:
+ regs->rax = output.raw;
+ break;
+#endif
+ default:
+ regs->edx = output.raw >> 32;
+ regs->eax = output.raw;
+ break;
+ }
+
+ return HVM_HCALL_completed;
+}
+
+static int viridian_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
+{
+ struct hvm_viridian_context ctxt;
+
+ if ( !is_viridian_domain(d) )
+ return 0;
+
+ ctxt.hypercall_gpa = d->arch.hvm_domain.viridian.hypercall_gpa.raw;
+ ctxt.guest_os_id = d->arch.hvm_domain.viridian.guest_os_id.raw;
+
+ return (hvm_save_entry(VIRIDIAN, 0, h, &ctxt) != 0);
+}
+
+static int viridian_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
+{
+ struct hvm_viridian_context ctxt;
+
+ if ( hvm_load_entry(VIRIDIAN, h, &ctxt) != 0 )
+ return -EINVAL;
+
+ d->arch.hvm_domain.viridian.hypercall_gpa.raw = ctxt.hypercall_gpa;
+ d->arch.hvm_domain.viridian.guest_os_id.raw = ctxt.guest_os_id;
+
+ return 0;
+}
+
+HVM_REGISTER_SAVE_RESTORE(VIRIDIAN, viridian_save_cpu_ctxt,
+ viridian_load_cpu_ctxt, 1, HVMSR_PER_DOM);
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index b8dd3ce904..2da4b7fb73 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -417,7 +417,7 @@ void vlapic_EOI_set(struct vlapic *vlapic)
hvm_dpci_msi_eoi(current->domain, vector);
}
-static int vlapic_ipi(
+int vlapic_ipi(
struct vlapic *vlapic, uint32_t icr_low, uint32_t icr_high)
{
unsigned int dest = GET_xAPIC_DEST_FIELD(icr_high);
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 1aba4c4d4d..140066bf97 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1677,7 +1677,8 @@ static int vmx_msr_read_intercept(struct cpu_user_regs *regs)
break;
}
- if ( rdmsr_hypervisor_regs(ecx, &eax, &edx) ||
+ if ( rdmsr_viridian_regs(ecx, &eax, &edx) ||
+ rdmsr_hypervisor_regs(ecx, &eax, &edx) ||
rdmsr_safe(ecx, eax, edx) == 0 )
{
regs->eax = eax;
@@ -1852,6 +1853,10 @@ static int vmx_msr_write_intercept(struct cpu_user_regs *regs)
default:
if ( vpmu_do_wrmsr(regs) )
return X86EMUL_OKAY;
+
+ if ( wrmsr_viridian_regs(ecx, regs->eax, regs->edx) )
+ break;
+
switch ( long_mode_do_msr_write(regs) )
{
case HNDL_unhandled:
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 35db4efd42..a9bbc27e77 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -577,7 +577,11 @@ DO_ERROR_NOCODE(TRAP_simd_error, simd_coprocessor_error)
int rdmsr_hypervisor_regs(
uint32_t idx, uint32_t *eax, uint32_t *edx)
{
- idx -= 0x40000000;
+ struct domain *d = current->domain;
+ /* Optionally shift out of the way of Viridian architectural MSRs. */
+ uint32_t base = is_viridian_domain(d) ? 0x40000200 : 0x40000000;
+
+ idx -= base;
if ( idx > 0 )
return 0;
@@ -599,8 +603,10 @@ int wrmsr_hypervisor_regs(
uint32_t idx, uint32_t eax, uint32_t edx)
{
struct domain *d = current->domain;
+ /* Optionally shift out of the way of Viridian architectural MSRs. */
+ uint32_t base = is_viridian_domain(d) ? 0x40000200 : 0x40000000;
- idx -= 0x40000000;
+ idx -= base;
if ( idx > 0 )
return 0;
@@ -628,7 +634,7 @@ int wrmsr_hypervisor_regs(
{
gdprintk(XENLOG_WARNING,
"Bad GMFN %lx (MFN %lx) to MSR %08x\n",
- gmfn, mfn, 0x40000000);
+ gmfn, mfn, base + idx);
return 0;
}
@@ -650,14 +656,18 @@ int wrmsr_hypervisor_regs(
int cpuid_hypervisor_leaves(
uint32_t idx, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
- idx -= 0x40000000;
- if ( idx > 2 )
+ struct domain *d = current->domain;
+ /* Optionally shift out of the way of Viridian architectural leaves. */
+ uint32_t base = is_viridian_domain(d) ? 0x40000100 : 0x40000000;
+
+ idx -= base;
+ if ( idx > 2 )
return 0;
switch ( idx )
{
case 0:
- *eax = 0x40000002; /* Largest leaf */
+ *eax = base + 2; /* Largest leaf */
*ebx = XEN_CPUID_SIGNATURE_EBX;
*ecx = XEN_CPUID_SIGNATURE_ECX;
*edx = XEN_CPUID_SIGNATURE_EDX;
@@ -673,6 +683,8 @@ int cpuid_hypervisor_leaves(
case 2:
*eax = 1; /* Number of hypercall-transfer pages */
*ebx = 0x40000000; /* MSR base address */
+ if ( is_viridian_domain(d) )
+ *ebx = 0x40000200;
*ecx = 0; /* Features 1 */
*edx = 0; /* Features 2 */
if ( !is_hvm_vcpu(current) )
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
index b307114dba..a7de706506 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -24,8 +24,9 @@
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp (1<<0) /* 8-bit operands. */
/* Destination operand type. */
-#define DstBitBase (0<<1) /* Memory operand, bit string. */
-#define ImplicitOps (1<<1) /* Implicit in opcode. No generic decode. */
+#define DstNone (0<<1) /* No destination operand. */
+#define DstImplicit (0<<1) /* Destination operand is implicit in the opcode. */
+#define DstBitBase (1<<1) /* Memory operand, bit string. */
#define DstReg (2<<1) /* Register operand. */
#define DstMem (3<<1) /* Memory operand. */
#define DstMask (3<<1)
@@ -42,6 +43,8 @@
#define ModRM (1<<6)
/* Destination is only written; never read. */
#define Mov (1<<7)
+/* All operands are implicit in the opcode. */
+#define ImplicitOps (DstImplicit|SrcImplicit)
static uint8_t opcode_table[256] = {
/* 0x00 - 0x07 */
@@ -1174,13 +1177,12 @@ x86_emulate(
int override_seg = -1, rc = X86EMUL_OKAY;
struct operand src, dst;
- /* Data operand effective address (usually computed from ModRM). */
- struct operand ea;
-
- /* Default is a memory operand relative to segment DS. */
- ea.type = OP_MEM;
- ea.mem.seg = x86_seg_ds;
- ea.mem.off = 0;
+ /*
+ * Data operand effective address (usually computed from ModRM).
+ * Default is a memory operand relative to segment DS.
+ */
+ struct operand ea = { .type = OP_MEM };
+ ea.mem.seg = x86_seg_ds; /* gcc may reject anon union initializer */
ctxt->retire.byte = 0;
@@ -1408,14 +1410,11 @@ x86_emulate(
if ( override_seg != -1 )
ea.mem.seg = override_seg;
- /* Special instructions do their own operand decoding. */
- if ( (d & DstMask) == ImplicitOps )
- goto special_insn;
-
/* Decode and fetch the source operand: register, memory or immediate. */
switch ( d & SrcMask )
{
- case SrcNone:
+ case SrcNone: /* case SrcImplicit: */
+ src.type = OP_NONE;
break;
case SrcReg:
src.type = OP_REG;
@@ -1479,7 +1478,21 @@ x86_emulate(
/* Decode and fetch the destination operand: register or memory. */
switch ( d & DstMask )
{
+ case DstNone: /* case DstImplicit: */
+ /*
+ * The only implicit-operands instructions allowed a LOCK prefix are
+ * CMPXCHG{8,16}B, MOV CRn, MOV DRn.
+ */
+ generate_exception_if(
+ lock_prefix &&
+ ((b < 0x20) || (b > 0x23)) && /* MOV CRn/DRn */
+ (b != 0xc7), /* CMPXCHG{8,16}B */
+ EXC_GP, 0);
+ dst.type = OP_NONE;
+ break;
+
case DstReg:
+ generate_exception_if(lock_prefix, EXC_GP, 0);
dst.type = OP_REG;
if ( d & ByteOp )
{
@@ -1535,6 +1548,7 @@ x86_emulate(
dst = ea;
if ( dst.type == OP_REG )
{
+ generate_exception_if(lock_prefix, EXC_GP, 0);
switch ( dst.bytes )
{
case 1: dst.val = *(uint8_t *)dst.reg; break;
@@ -1553,9 +1567,6 @@ x86_emulate(
break;
}
- /* LOCK prefix allowed only on instructions with memory destination. */
- generate_exception_if(lock_prefix && (dst.type != OP_MEM), EXC_GP, 0);
-
if ( twobyte )
goto twobyte_insn;
@@ -1618,583 +1629,6 @@ x86_emulate(
dst.type = OP_NONE;
break;
- case 0x62: /* bound */ {
- unsigned long src_val2;
- int lb, ub, idx;
- generate_exception_if(mode_64bit() || (src.type != OP_MEM),
- EXC_UD, -1);
- if ( (rc = read_ulong(src.mem.seg, src.mem.off + op_bytes,
- &src_val2, op_bytes, ctxt, ops)) )
- goto done;
- ub = (op_bytes == 2) ? (int16_t)src_val2 : (int32_t)src_val2;
- lb = (op_bytes == 2) ? (int16_t)src.val : (int32_t)src.val;
- idx = (op_bytes == 2) ? (int16_t)dst.val : (int32_t)dst.val;
- generate_exception_if((idx < lb) || (idx > ub), EXC_BR, -1);
- dst.type = OP_NONE;
- break;
- }
-
- case 0x63: /* movsxd (x86/64) / arpl (x86/32) */
- if ( mode_64bit() )
- {
- /* movsxd */
- if ( src.type == OP_REG )
- src.val = *(int32_t *)src.reg;
- else if ( (rc = read_ulong(src.mem.seg, src.mem.off,
- &src.val, 4, ctxt, ops)) )
- goto done;
- dst.val = (int32_t)src.val;
- }
- else
- {
- /* arpl */
- uint16_t src_val = dst.val;
- dst = src;
- _regs.eflags &= ~EFLG_ZF;
- _regs.eflags |= ((src_val & 3) > (dst.val & 3)) ? EFLG_ZF : 0;
- if ( _regs.eflags & EFLG_ZF )
- dst.val = (dst.val & ~3) | (src_val & 3);
- else
- dst.type = OP_NONE;
- generate_exception_if(!in_protmode(ctxt, ops), EXC_UD, -1);
- }
- break;
-
- case 0x69: /* imul imm16/32 */
- case 0x6b: /* imul imm8 */ {
- unsigned long src1; /* ModR/M source operand */
- if ( ea.type == OP_REG )
- src1 = *ea.reg;
- else if ( (rc = read_ulong(ea.mem.seg, ea.mem.off,
- &src1, op_bytes, ctxt, ops)) )
- goto done;
- _regs.eflags &= ~(EFLG_OF|EFLG_CF);
- switch ( dst.bytes )
- {
- case 2:
- dst.val = ((uint32_t)(int16_t)src.val *
- (uint32_t)(int16_t)src1);
- if ( (int16_t)dst.val != (uint32_t)dst.val )
- _regs.eflags |= EFLG_OF|EFLG_CF;
- break;
-#ifdef __x86_64__
- case 4:
- dst.val = ((uint64_t)(int32_t)src.val *
- (uint64_t)(int32_t)src1);
- if ( (int32_t)dst.val != dst.val )
- _regs.eflags |= EFLG_OF|EFLG_CF;
- break;
-#endif
- default: {
- unsigned long m[2] = { src.val, src1 };
- if ( imul_dbl(m) )
- _regs.eflags |= EFLG_OF|EFLG_CF;
- dst.val = m[0];
- break;
- }
- }
- break;
- }
-
- case 0x82: /* Grp1 (x86/32 only) */
- generate_exception_if(mode_64bit(), EXC_UD, -1);
- case 0x80: case 0x81: case 0x83: /* Grp1 */
- switch ( modrm_reg & 7 )
- {
- case 0: goto add;
- case 1: goto or;
- case 2: goto adc;
- case 3: goto sbb;
- case 4: goto and;
- case 5: goto sub;
- case 6: goto xor;
- case 7: goto cmp;
- }
- break;
-
- case 0xa8 ... 0xa9: /* test imm,%%eax */
- dst.reg = (unsigned long *)&_regs.eax;
- dst.val = _regs.eax;
- case 0x84 ... 0x85: test: /* test */
- emulate_2op_SrcV("test", src, dst, _regs.eflags);
- dst.type = OP_NONE;
- break;
-
- case 0x86 ... 0x87: xchg: /* xchg */
- /* Write back the register source. */
- switch ( dst.bytes )
- {
- case 1: *(uint8_t *)src.reg = (uint8_t)dst.val; break;
- case 2: *(uint16_t *)src.reg = (uint16_t)dst.val; break;
- case 4: *src.reg = (uint32_t)dst.val; break; /* 64b reg: zero-extend */
- case 8: *src.reg = dst.val; break;
- }
- /* Write back the memory destination with implicit LOCK prefix. */
- dst.val = src.val;
- lock_prefix = 1;
- break;
-
- case 0xc6 ... 0xc7: /* mov (sole member of Grp11) */
- generate_exception_if((modrm_reg & 7) != 0, EXC_UD, -1);
- case 0x88 ... 0x8b: /* mov */
- dst.val = src.val;
- break;
-
- case 0x8c: /* mov Sreg,r/m */ {
- struct segment_register reg;
- enum x86_segment seg = decode_segment(modrm_reg);
- generate_exception_if(seg == decode_segment_failed, EXC_UD, -1);
- fail_if(ops->read_segment == NULL);
- if ( (rc = ops->read_segment(seg, &reg, ctxt)) != 0 )
- goto done;
- dst.val = reg.sel;
- if ( dst.type == OP_MEM )
- dst.bytes = 2;
- break;
- }
-
- case 0x8e: /* mov r/m,Sreg */ {
- enum x86_segment seg = decode_segment(modrm_reg);
- generate_exception_if(seg == decode_segment_failed, EXC_UD, -1);
- if ( (rc = load_seg(seg, (uint16_t)src.val, ctxt, ops)) != 0 )
- goto done;
- if ( seg == x86_seg_ss )
- ctxt->retire.flags.mov_ss = 1;
- dst.type = OP_NONE;
- break;
- }
-
- case 0x8d: /* lea */
- dst.val = ea.mem.off;
- break;
-
- case 0x8f: /* pop (sole member of Grp1a) */
- generate_exception_if((modrm_reg & 7) != 0, EXC_UD, -1);
- /* 64-bit mode: POP defaults to a 64-bit operand. */
- if ( mode_64bit() && (dst.bytes == 4) )
- dst.bytes = 8;
- if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(dst.bytes),
- &dst.val, dst.bytes, ctxt, ops)) != 0 )
- goto done;
- break;
-
- case 0xb0 ... 0xb7: /* mov imm8,r8 */
- dst.reg = decode_register(
- (b & 7) | ((rex_prefix & 1) << 3), &_regs, (rex_prefix == 0));
- dst.val = src.val;
- break;
-
- case 0xb8 ... 0xbf: /* mov imm{16,32,64},r{16,32,64} */
- if ( dst.bytes == 8 ) /* Fetch more bytes to obtain imm64 */
- src.val = ((uint32_t)src.val |
- ((uint64_t)insn_fetch_type(uint32_t) << 32));
- dst.reg = decode_register(
- (b & 7) | ((rex_prefix & 1) << 3), &_regs, 0);
- dst.val = src.val;
- break;
-
- case 0xc0 ... 0xc1: grp2: /* Grp2 */
- switch ( modrm_reg & 7 )
- {
- case 0: /* rol */
- emulate_2op_SrcB("rol", src, dst, _regs.eflags);
- break;
- case 1: /* ror */
- emulate_2op_SrcB("ror", src, dst, _regs.eflags);
- break;
- case 2: /* rcl */
- emulate_2op_SrcB("rcl", src, dst, _regs.eflags);
- break;
- case 3: /* rcr */
- emulate_2op_SrcB("rcr", src, dst, _regs.eflags);
- break;
- case 4: /* sal/shl */
- case 6: /* sal/shl */
- emulate_2op_SrcB("sal", src, dst, _regs.eflags);
- break;
- case 5: /* shr */
- emulate_2op_SrcB("shr", src, dst, _regs.eflags);
- break;
- case 7: /* sar */
- emulate_2op_SrcB("sar", src, dst, _regs.eflags);
- break;
- }
- break;
-
- case 0xc4: /* les */ {
- unsigned long sel;
- dst.val = x86_seg_es;
- les: /* dst.val identifies the segment */
- generate_exception_if(src.type != OP_MEM, EXC_UD, -1);
- if ( (rc = read_ulong(src.mem.seg, src.mem.off + src.bytes,
- &sel, 2, ctxt, ops)) != 0 )
- goto done;
- if ( (rc = load_seg(dst.val, (uint16_t)sel, ctxt, ops)) != 0 )
- goto done;
- dst.val = src.val;
- break;
- }
-
- case 0xc5: /* lds */
- dst.val = x86_seg_ds;
- goto les;
-
- case 0xd0 ... 0xd1: /* Grp2 */
- src.val = 1;
- goto grp2;
-
- case 0xd2 ... 0xd3: /* Grp2 */
- src.val = _regs.ecx;
- goto grp2;
-
- case 0xf6 ... 0xf7: /* Grp3 */
- switch ( modrm_reg & 7 )
- {
- case 0 ... 1: /* test */
- /* Special case in Grp3: test has an immediate source operand. */
- src.type = OP_IMM;
- src.bytes = (d & ByteOp) ? 1 : op_bytes;
- if ( src.bytes == 8 ) src.bytes = 4;
- switch ( src.bytes )
- {
- case 1: src.val = insn_fetch_type(int8_t); break;
- case 2: src.val = insn_fetch_type(int16_t); break;
- case 4: src.val = insn_fetch_type(int32_t); break;
- }
- goto test;
- case 2: /* not */
- dst.val = ~dst.val;
- break;
- case 3: /* neg */
- emulate_1op("neg", dst, _regs.eflags);
- break;
- case 4: /* mul */
- src = dst;
- dst.type = OP_REG;
- dst.reg = (unsigned long *)&_regs.eax;
- dst.val = *dst.reg;
- _regs.eflags &= ~(EFLG_OF|EFLG_CF);
- switch ( src.bytes )
- {
- case 1:
- dst.val = (uint8_t)dst.val;
- dst.val *= src.val;
- if ( (uint8_t)dst.val != (uint16_t)dst.val )
- _regs.eflags |= EFLG_OF|EFLG_CF;
- dst.bytes = 2;
- break;
- case 2:
- dst.val = (uint16_t)dst.val;
- dst.val *= src.val;
- if ( (uint16_t)dst.val != (uint32_t)dst.val )
- _regs.eflags |= EFLG_OF|EFLG_CF;
- *(uint16_t *)&_regs.edx = dst.val >> 16;
- break;
-#ifdef __x86_64__
- case 4:
- dst.val = (uint32_t)dst.val;
- dst.val *= src.val;
- if ( (uint32_t)dst.val != dst.val )
- _regs.eflags |= EFLG_OF|EFLG_CF;
- _regs.edx = (uint32_t)(dst.val >> 32);
- break;
-#endif
- default: {
- unsigned long m[2] = { src.val, dst.val };
- if ( mul_dbl(m) )
- _regs.eflags |= EFLG_OF|EFLG_CF;
- _regs.edx = m[1];
- dst.val = m[0];
- break;
- }
- }
- break;
- case 5: /* imul */
- src = dst;
- dst.type = OP_REG;
- dst.reg = (unsigned long *)&_regs.eax;
- dst.val = *dst.reg;
- _regs.eflags &= ~(EFLG_OF|EFLG_CF);
- switch ( src.bytes )
- {
- case 1:
- dst.val = ((uint16_t)(int8_t)src.val *
- (uint16_t)(int8_t)dst.val);
- if ( (int8_t)dst.val != (uint16_t)dst.val )
- _regs.eflags |= EFLG_OF|EFLG_CF;
- dst.bytes = 2;
- break;
- case 2:
- dst.val = ((uint32_t)(int16_t)src.val *
- (uint32_t)(int16_t)dst.val);
- if ( (int16_t)dst.val != (uint32_t)dst.val )
- _regs.eflags |= EFLG_OF|EFLG_CF;
- *(uint16_t *)&_regs.edx = dst.val >> 16;
- break;
-#ifdef __x86_64__
- case 4:
- dst.val = ((uint64_t)(int32_t)src.val *
- (uint64_t)(int32_t)dst.val);
- if ( (int32_t)dst.val != dst.val )
- _regs.eflags |= EFLG_OF|EFLG_CF;
- _regs.edx = (uint32_t)(dst.val >> 32);
- break;
-#endif
- default: {
- unsigned long m[2] = { src.val, dst.val };
- if ( imul_dbl(m) )
- _regs.eflags |= EFLG_OF|EFLG_CF;
- _regs.edx = m[1];
- dst.val = m[0];
- break;
- }
- }
- break;
- case 6: /* div */ {
- unsigned long u[2], v;
- src = dst;
- dst.type = OP_REG;
- dst.reg = (unsigned long *)&_regs.eax;
- switch ( src.bytes )
- {
- case 1:
- u[0] = (uint16_t)_regs.eax;
- u[1] = 0;
- v = (uint8_t)src.val;
- generate_exception_if(
- div_dbl(u, v) || ((uint8_t)u[0] != (uint16_t)u[0]),
- EXC_DE, -1);
- dst.val = (uint8_t)u[0];
- ((uint8_t *)&_regs.eax)[1] = u[1];
- break;
- case 2:
- u[0] = ((uint32_t)_regs.edx << 16) | (uint16_t)_regs.eax;
- u[1] = 0;
- v = (uint16_t)src.val;
- generate_exception_if(
- div_dbl(u, v) || ((uint16_t)u[0] != (uint32_t)u[0]),
- EXC_DE, -1);
- dst.val = (uint16_t)u[0];
- *(uint16_t *)&_regs.edx = u[1];
- break;
-#ifdef __x86_64__
- case 4:
- u[0] = (_regs.edx << 32) | (uint32_t)_regs.eax;
- u[1] = 0;
- v = (uint32_t)src.val;
- generate_exception_if(
- div_dbl(u, v) || ((uint32_t)u[0] != u[0]),
- EXC_DE, -1);
- dst.val = (uint32_t)u[0];
- _regs.edx = (uint32_t)u[1];
- break;
-#endif
- default:
- u[0] = _regs.eax;
- u[1] = _regs.edx;
- v = src.val;
- generate_exception_if(div_dbl(u, v), EXC_DE, -1);
- dst.val = u[0];
- _regs.edx = u[1];
- break;
- }
- break;
- }
- case 7: /* idiv */ {
- unsigned long u[2], v;
- src = dst;
- dst.type = OP_REG;
- dst.reg = (unsigned long *)&_regs.eax;
- switch ( src.bytes )
- {
- case 1:
- u[0] = (int16_t)_regs.eax;
- u[1] = ((long)u[0] < 0) ? ~0UL : 0UL;
- v = (int8_t)src.val;
- generate_exception_if(
- idiv_dbl(u, v) || ((int8_t)u[0] != (int16_t)u[0]),
- EXC_DE, -1);
- dst.val = (int8_t)u[0];
- ((int8_t *)&_regs.eax)[1] = u[1];
- break;
- case 2:
- u[0] = (int32_t)((_regs.edx << 16) | (uint16_t)_regs.eax);
- u[1] = ((long)u[0] < 0) ? ~0UL : 0UL;
- v = (int16_t)src.val;
- generate_exception_if(
- idiv_dbl(u, v) || ((int16_t)u[0] != (int32_t)u[0]),
- EXC_DE, -1);
- dst.val = (int16_t)u[0];
- *(int16_t *)&_regs.edx = u[1];
- break;
-#ifdef __x86_64__
- case 4:
- u[0] = (_regs.edx << 32) | (uint32_t)_regs.eax;
- u[1] = ((long)u[0] < 0) ? ~0UL : 0UL;
- v = (int32_t)src.val;
- generate_exception_if(
- idiv_dbl(u, v) || ((int32_t)u[0] != u[0]),
- EXC_DE, -1);
- dst.val = (int32_t)u[0];
- _regs.edx = (uint32_t)u[1];
- break;
-#endif
- default:
- u[0] = _regs.eax;
- u[1] = _regs.edx;
- v = src.val;
- generate_exception_if(idiv_dbl(u, v), EXC_DE, -1);
- dst.val = u[0];
- _regs.edx = u[1];
- break;
- }
- break;
- }
- default:
- goto cannot_emulate;
- }
- break;
-
- case 0xfe: /* Grp4 */
- generate_exception_if((modrm_reg & 7) >= 2, EXC_UD, -1);
- case 0xff: /* Grp5 */
- switch ( modrm_reg & 7 )
- {
- case 0: /* inc */
- emulate_1op("inc", dst, _regs.eflags);
- break;
- case 1: /* dec */
- emulate_1op("dec", dst, _regs.eflags);
- break;
- case 2: /* call (near) */
- case 4: /* jmp (near) */
- if ( (dst.bytes != 8) && mode_64bit() )
- {
- dst.bytes = op_bytes = 8;
- if ( dst.type == OP_REG )
- dst.val = *dst.reg;
- else if ( (rc = read_ulong(dst.mem.seg, dst.mem.off,
- &dst.val, 8, ctxt, ops)) != 0 )
- goto done;
- }
- src.val = _regs.eip;
- _regs.eip = dst.val;
- if ( (modrm_reg & 7) == 2 )
- goto push; /* call */
- dst.type = OP_NONE;
- break;
- case 3: /* call (far, absolute indirect) */
- case 5: /* jmp (far, absolute indirect) */ {
- unsigned long sel;
-
- generate_exception_if(dst.type != OP_MEM, EXC_UD, -1);
-
- if ( (rc = read_ulong(dst.mem.seg, dst.mem.off+dst.bytes,
- &sel, 2, ctxt, ops)) )
- goto done;
-
- if ( (modrm_reg & 7) == 3 ) /* call */
- {
- struct segment_register reg;
- fail_if(ops->read_segment == NULL);
- if ( (rc = ops->read_segment(x86_seg_cs, &reg, ctxt)) ||
- (rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes),
- &reg.sel, op_bytes, ctxt)) ||
- (rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes),
- &_regs.eip, op_bytes, ctxt)) )
- goto done;
- }
-
- if ( (rc = load_seg(x86_seg_cs, sel, ctxt, ops)) != 0 )
- goto done;
- _regs.eip = dst.val;
-
- dst.type = OP_NONE;
- break;
- }
- case 6: /* push */
- /* 64-bit mode: PUSH defaults to a 64-bit operand. */
- if ( mode_64bit() && (dst.bytes == 4) )
- {
- dst.bytes = 8;
- if ( dst.type == OP_REG )
- dst.val = *dst.reg;
- else if ( (rc = read_ulong(dst.mem.seg, dst.mem.off,
- &dst.val, 8, ctxt, ops)) != 0 )
- goto done;
- }
- if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(dst.bytes),
- &dst.val, dst.bytes, ctxt)) != 0 )
- goto done;
- dst.type = OP_NONE;
- break;
- case 7:
- generate_exception_if(1, EXC_UD, -1);
- default:
- goto cannot_emulate;
- }
- break;
- }
-
- writeback:
- switch ( dst.type )
- {
- case OP_REG:
- /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
- switch ( dst.bytes )
- {
- case 1: *(uint8_t *)dst.reg = (uint8_t)dst.val; break;
- case 2: *(uint16_t *)dst.reg = (uint16_t)dst.val; break;
- case 4: *dst.reg = (uint32_t)dst.val; break; /* 64b: zero-ext */
- case 8: *dst.reg = dst.val; break;
- }
- break;
- case OP_MEM:
- if ( !(d & Mov) && (dst.orig_val == dst.val) &&
- !ctxt->force_writeback )
- /* nothing to do */;
- else if ( lock_prefix )
- rc = ops->cmpxchg(
- dst.mem.seg, dst.mem.off, &dst.orig_val,
- &dst.val, dst.bytes, ctxt);
- else
- rc = ops->write(
- dst.mem.seg, dst.mem.off, &dst.val, dst.bytes, ctxt);
- if ( rc != 0 )
- goto done;
- default:
- break;
- }
-
- /* Inject #DB if single-step tracing was enabled at instruction start. */
- if ( (ctxt->regs->eflags & EFLG_TF) && (rc == X86EMUL_OKAY) &&
- (ops->inject_hw_exception != NULL) )
- rc = ops->inject_hw_exception(EXC_DB, -1, ctxt) ? : X86EMUL_EXCEPTION;
-
- /* Commit shadow register state. */
- _regs.eflags &= ~EFLG_RF;
- *ctxt->regs = _regs;
-
- done:
- return rc;
-
- special_insn:
- dst.type = OP_NONE;
-
- /*
- * The only implicit-operands instructions allowed a LOCK prefix are
- * CMPXCHG{8,16}B, MOV CRn, MOV DRn.
- */
- generate_exception_if(lock_prefix &&
- ((b < 0x20) || (b > 0x23)) && /* MOV CRn/DRn */
- (b != 0xc7), /* CMPXCHG{8,16}B */
- EXC_GP, 0);
-
- if ( twobyte )
- goto twobyte_special_insn;
-
- switch ( b )
- {
case 0x06: /* push %%es */ {
struct segment_register reg;
src.val = x86_seg_es;
@@ -2370,12 +1804,90 @@ x86_emulate(
break;
}
+ case 0x62: /* bound */ {
+ unsigned long src_val2;
+ int lb, ub, idx;
+ generate_exception_if(mode_64bit() || (src.type != OP_MEM),
+ EXC_UD, -1);
+ if ( (rc = read_ulong(src.mem.seg, src.mem.off + op_bytes,
+ &src_val2, op_bytes, ctxt, ops)) )
+ goto done;
+ ub = (op_bytes == 2) ? (int16_t)src_val2 : (int32_t)src_val2;
+ lb = (op_bytes == 2) ? (int16_t)src.val : (int32_t)src.val;
+ idx = (op_bytes == 2) ? (int16_t)dst.val : (int32_t)dst.val;
+ generate_exception_if((idx < lb) || (idx > ub), EXC_BR, -1);
+ dst.type = OP_NONE;
+ break;
+ }
+
+ case 0x63: /* movsxd (x86/64) / arpl (x86/32) */
+ if ( mode_64bit() )
+ {
+ /* movsxd */
+ if ( src.type == OP_REG )
+ src.val = *(int32_t *)src.reg;
+ else if ( (rc = read_ulong(src.mem.seg, src.mem.off,
+ &src.val, 4, ctxt, ops)) )
+ goto done;
+ dst.val = (int32_t)src.val;
+ }
+ else
+ {
+ /* arpl */
+ uint16_t src_val = dst.val;
+ dst = src;
+ _regs.eflags &= ~EFLG_ZF;
+ _regs.eflags |= ((src_val & 3) > (dst.val & 3)) ? EFLG_ZF : 0;
+ if ( _regs.eflags & EFLG_ZF )
+ dst.val = (dst.val & ~3) | (src_val & 3);
+ else
+ dst.type = OP_NONE;
+ generate_exception_if(!in_protmode(ctxt, ops), EXC_UD, -1);
+ }
+ break;
+
case 0x68: /* push imm{16,32,64} */
src.val = ((op_bytes == 2)
? (int32_t)insn_fetch_type(int16_t)
: insn_fetch_type(int32_t));
goto push;
+ case 0x69: /* imul imm16/32 */
+ case 0x6b: /* imul imm8 */ {
+ unsigned long src1; /* ModR/M source operand */
+ if ( ea.type == OP_REG )
+ src1 = *ea.reg;
+ else if ( (rc = read_ulong(ea.mem.seg, ea.mem.off,
+ &src1, op_bytes, ctxt, ops)) )
+ goto done;
+ _regs.eflags &= ~(EFLG_OF|EFLG_CF);
+ switch ( dst.bytes )
+ {
+ case 2:
+ dst.val = ((uint32_t)(int16_t)src.val *
+ (uint32_t)(int16_t)src1);
+ if ( (int16_t)dst.val != (uint32_t)dst.val )
+ _regs.eflags |= EFLG_OF|EFLG_CF;
+ break;
+#ifdef __x86_64__
+ case 4:
+ dst.val = ((uint64_t)(int32_t)src.val *
+ (uint64_t)(int32_t)src1);
+ if ( (int32_t)dst.val != dst.val )
+ _regs.eflags |= EFLG_OF|EFLG_CF;
+ break;
+#endif
+ default: {
+ unsigned long m[2] = { src.val, src1 };
+ if ( imul_dbl(m) )
+ _regs.eflags |= EFLG_OF|EFLG_CF;
+ dst.val = m[0];
+ break;
+ }
+ }
+ break;
+ }
+
case 0x6a: /* push imm8 */
src.val = insn_fetch_type(int8_t);
push:
@@ -2457,6 +1969,88 @@ x86_emulate(
break;
}
+ case 0x82: /* Grp1 (x86/32 only) */
+ generate_exception_if(mode_64bit(), EXC_UD, -1);
+ case 0x80: case 0x81: case 0x83: /* Grp1 */
+ switch ( modrm_reg & 7 )
+ {
+ case 0: goto add;
+ case 1: goto or;
+ case 2: goto adc;
+ case 3: goto sbb;
+ case 4: goto and;
+ case 5: goto sub;
+ case 6: goto xor;
+ case 7: goto cmp;
+ }
+ break;
+
+ case 0xa8 ... 0xa9: /* test imm,%%eax */
+ dst.reg = (unsigned long *)&_regs.eax;
+ dst.val = _regs.eax;
+ case 0x84 ... 0x85: test: /* test */
+ emulate_2op_SrcV("test", src, dst, _regs.eflags);
+ dst.type = OP_NONE;
+ break;
+
+ case 0x86 ... 0x87: xchg: /* xchg */
+ /* Write back the register source. */
+ switch ( dst.bytes )
+ {
+ case 1: *(uint8_t *)src.reg = (uint8_t)dst.val; break;
+ case 2: *(uint16_t *)src.reg = (uint16_t)dst.val; break;
+ case 4: *src.reg = (uint32_t)dst.val; break; /* 64b reg: zero-extend */
+ case 8: *src.reg = dst.val; break;
+ }
+ /* Write back the memory destination with implicit LOCK prefix. */
+ dst.val = src.val;
+ lock_prefix = 1;
+ break;
+
+ case 0xc6 ... 0xc7: /* mov (sole member of Grp11) */
+ generate_exception_if((modrm_reg & 7) != 0, EXC_UD, -1);
+ case 0x88 ... 0x8b: /* mov */
+ dst.val = src.val;
+ break;
+
+ case 0x8c: /* mov Sreg,r/m */ {
+ struct segment_register reg;
+ enum x86_segment seg = decode_segment(modrm_reg);
+ generate_exception_if(seg == decode_segment_failed, EXC_UD, -1);
+ fail_if(ops->read_segment == NULL);
+ if ( (rc = ops->read_segment(seg, &reg, ctxt)) != 0 )
+ goto done;
+ dst.val = reg.sel;
+ if ( dst.type == OP_MEM )
+ dst.bytes = 2;
+ break;
+ }
+
+ case 0x8e: /* mov r/m,Sreg */ {
+ enum x86_segment seg = decode_segment(modrm_reg);
+ generate_exception_if(seg == decode_segment_failed, EXC_UD, -1);
+ if ( (rc = load_seg(seg, (uint16_t)src.val, ctxt, ops)) != 0 )
+ goto done;
+ if ( seg == x86_seg_ss )
+ ctxt->retire.flags.mov_ss = 1;
+ dst.type = OP_NONE;
+ break;
+ }
+
+ case 0x8d: /* lea */
+ dst.val = ea.mem.off;
+ break;
+
+ case 0x8f: /* pop (sole member of Grp1a) */
+ generate_exception_if((modrm_reg & 7) != 0, EXC_UD, -1);
+ /* 64-bit mode: POP defaults to a 64-bit operand. */
+ if ( mode_64bit() && (dst.bytes == 4) )
+ dst.bytes = 8;
+ if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(dst.bytes),
+ &dst.val, dst.bytes, ctxt, ops)) != 0 )
+ goto done;
+ break;
+
case 0x90: /* nop / xchg %%r8,%%rax */
if ( !(rex_prefix & 1) )
break; /* nop */
@@ -2673,6 +2267,49 @@ x86_emulate(
break;
}
+ case 0xb0 ... 0xb7: /* mov imm8,r8 */
+ dst.reg = decode_register(
+ (b & 7) | ((rex_prefix & 1) << 3), &_regs, (rex_prefix == 0));
+ dst.val = src.val;
+ break;
+
+ case 0xb8 ... 0xbf: /* mov imm{16,32,64},r{16,32,64} */
+ if ( dst.bytes == 8 ) /* Fetch more bytes to obtain imm64 */
+ src.val = ((uint32_t)src.val |
+ ((uint64_t)insn_fetch_type(uint32_t) << 32));
+ dst.reg = decode_register(
+ (b & 7) | ((rex_prefix & 1) << 3), &_regs, 0);
+ dst.val = src.val;
+ break;
+
+ case 0xc0 ... 0xc1: grp2: /* Grp2 */
+ switch ( modrm_reg & 7 )
+ {
+ case 0: /* rol */
+ emulate_2op_SrcB("rol", src, dst, _regs.eflags);
+ break;
+ case 1: /* ror */
+ emulate_2op_SrcB("ror", src, dst, _regs.eflags);
+ break;
+ case 2: /* rcl */
+ emulate_2op_SrcB("rcl", src, dst, _regs.eflags);
+ break;
+ case 3: /* rcr */
+ emulate_2op_SrcB("rcr", src, dst, _regs.eflags);
+ break;
+ case 4: /* sal/shl */
+ case 6: /* sal/shl */
+ emulate_2op_SrcB("sal", src, dst, _regs.eflags);
+ break;
+ case 5: /* shr */
+ emulate_2op_SrcB("shr", src, dst, _regs.eflags);
+ break;
+ case 7: /* sar */
+ emulate_2op_SrcB("sar", src, dst, _regs.eflags);
+ break;
+ }
+ break;
+
case 0xc2: /* ret imm16 (near) */
case 0xc3: /* ret (near) */ {
int offset = (b == 0xc2) ? insn_fetch_type(uint16_t) : 0;
@@ -2684,6 +2321,24 @@ x86_emulate(
break;
}
+ case 0xc4: /* les */ {
+ unsigned long sel;
+ dst.val = x86_seg_es;
+ les: /* dst.val identifies the segment */
+ generate_exception_if(src.type != OP_MEM, EXC_UD, -1);
+ if ( (rc = read_ulong(src.mem.seg, src.mem.off + src.bytes,
+ &sel, 2, ctxt, ops)) != 0 )
+ goto done;
+ if ( (rc = load_seg(dst.val, (uint16_t)sel, ctxt, ops)) != 0 )
+ goto done;
+ dst.val = src.val;
+ break;
+ }
+
+ case 0xc5: /* lds */
+ dst.val = x86_seg_ds;
+ goto les;
+
case 0xc8: /* enter imm16,imm8 */ {
uint16_t size = insn_fetch_type(uint16_t);
uint8_t depth = insn_fetch_type(uint8_t) & 31;
@@ -2800,6 +2455,14 @@ x86_emulate(
break;
}
+ case 0xd0 ... 0xd1: /* Grp2 */
+ src.val = 1;
+ goto grp2;
+
+ case 0xd2 ... 0xd3: /* Grp2 */
+ src.val = _regs.ecx;
+ goto grp2;
+
case 0xd4: /* aam */ {
unsigned int base = insn_fetch_type(uint8_t);
uint8_t al = _regs.eax;
@@ -3436,6 +3099,214 @@ x86_emulate(
_regs.eflags ^= EFLG_CF;
break;
+ case 0xf6 ... 0xf7: /* Grp3 */
+ switch ( modrm_reg & 7 )
+ {
+ case 0 ... 1: /* test */
+ /* Special case in Grp3: test has an immediate source operand. */
+ src.type = OP_IMM;
+ src.bytes = (d & ByteOp) ? 1 : op_bytes;
+ if ( src.bytes == 8 ) src.bytes = 4;
+ switch ( src.bytes )
+ {
+ case 1: src.val = insn_fetch_type(int8_t); break;
+ case 2: src.val = insn_fetch_type(int16_t); break;
+ case 4: src.val = insn_fetch_type(int32_t); break;
+ }
+ goto test;
+ case 2: /* not */
+ dst.val = ~dst.val;
+ break;
+ case 3: /* neg */
+ emulate_1op("neg", dst, _regs.eflags);
+ break;
+ case 4: /* mul */
+ src = dst;
+ dst.type = OP_REG;
+ dst.reg = (unsigned long *)&_regs.eax;
+ dst.val = *dst.reg;
+ _regs.eflags &= ~(EFLG_OF|EFLG_CF);
+ switch ( src.bytes )
+ {
+ case 1:
+ dst.val = (uint8_t)dst.val;
+ dst.val *= src.val;
+ if ( (uint8_t)dst.val != (uint16_t)dst.val )
+ _regs.eflags |= EFLG_OF|EFLG_CF;
+ dst.bytes = 2;
+ break;
+ case 2:
+ dst.val = (uint16_t)dst.val;
+ dst.val *= src.val;
+ if ( (uint16_t)dst.val != (uint32_t)dst.val )
+ _regs.eflags |= EFLG_OF|EFLG_CF;
+ *(uint16_t *)&_regs.edx = dst.val >> 16;
+ break;
+#ifdef __x86_64__
+ case 4:
+ dst.val = (uint32_t)dst.val;
+ dst.val *= src.val;
+ if ( (uint32_t)dst.val != dst.val )
+ _regs.eflags |= EFLG_OF|EFLG_CF;
+ _regs.edx = (uint32_t)(dst.val >> 32);
+ break;
+#endif
+ default: {
+ unsigned long m[2] = { src.val, dst.val };
+ if ( mul_dbl(m) )
+ _regs.eflags |= EFLG_OF|EFLG_CF;
+ _regs.edx = m[1];
+ dst.val = m[0];
+ break;
+ }
+ }
+ break;
+ case 5: /* imul */
+ src = dst;
+ dst.type = OP_REG;
+ dst.reg = (unsigned long *)&_regs.eax;
+ dst.val = *dst.reg;
+ _regs.eflags &= ~(EFLG_OF|EFLG_CF);
+ switch ( src.bytes )
+ {
+ case 1:
+ dst.val = ((uint16_t)(int8_t)src.val *
+ (uint16_t)(int8_t)dst.val);
+ if ( (int8_t)dst.val != (uint16_t)dst.val )
+ _regs.eflags |= EFLG_OF|EFLG_CF;
+ dst.bytes = 2;
+ break;
+ case 2:
+ dst.val = ((uint32_t)(int16_t)src.val *
+ (uint32_t)(int16_t)dst.val);
+ if ( (int16_t)dst.val != (uint32_t)dst.val )
+ _regs.eflags |= EFLG_OF|EFLG_CF;
+ *(uint16_t *)&_regs.edx = dst.val >> 16;
+ break;
+#ifdef __x86_64__
+ case 4:
+ dst.val = ((uint64_t)(int32_t)src.val *
+ (uint64_t)(int32_t)dst.val);
+ if ( (int32_t)dst.val != dst.val )
+ _regs.eflags |= EFLG_OF|EFLG_CF;
+ _regs.edx = (uint32_t)(dst.val >> 32);
+ break;
+#endif
+ default: {
+ unsigned long m[2] = { src.val, dst.val };
+ if ( imul_dbl(m) )
+ _regs.eflags |= EFLG_OF|EFLG_CF;
+ _regs.edx = m[1];
+ dst.val = m[0];
+ break;
+ }
+ }
+ break;
+ case 6: /* div */ {
+ unsigned long u[2], v;
+ src = dst;
+ dst.type = OP_REG;
+ dst.reg = (unsigned long *)&_regs.eax;
+ switch ( src.bytes )
+ {
+ case 1:
+ u[0] = (uint16_t)_regs.eax;
+ u[1] = 0;
+ v = (uint8_t)src.val;
+ generate_exception_if(
+ div_dbl(u, v) || ((uint8_t)u[0] != (uint16_t)u[0]),
+ EXC_DE, -1);
+ dst.val = (uint8_t)u[0];
+ ((uint8_t *)&_regs.eax)[1] = u[1];
+ break;
+ case 2:
+ u[0] = ((uint32_t)_regs.edx << 16) | (uint16_t)_regs.eax;
+ u[1] = 0;
+ v = (uint16_t)src.val;
+ generate_exception_if(
+ div_dbl(u, v) || ((uint16_t)u[0] != (uint32_t)u[0]),
+ EXC_DE, -1);
+ dst.val = (uint16_t)u[0];
+ *(uint16_t *)&_regs.edx = u[1];
+ break;
+#ifdef __x86_64__
+ case 4:
+ u[0] = (_regs.edx << 32) | (uint32_t)_regs.eax;
+ u[1] = 0;
+ v = (uint32_t)src.val;
+ generate_exception_if(
+ div_dbl(u, v) || ((uint32_t)u[0] != u[0]),
+ EXC_DE, -1);
+ dst.val = (uint32_t)u[0];
+ _regs.edx = (uint32_t)u[1];
+ break;
+#endif
+ default:
+ u[0] = _regs.eax;
+ u[1] = _regs.edx;
+ v = src.val;
+ generate_exception_if(div_dbl(u, v), EXC_DE, -1);
+ dst.val = u[0];
+ _regs.edx = u[1];
+ break;
+ }
+ break;
+ }
+ case 7: /* idiv */ {
+ unsigned long u[2], v;
+ src = dst;
+ dst.type = OP_REG;
+ dst.reg = (unsigned long *)&_regs.eax;
+ switch ( src.bytes )
+ {
+ case 1:
+ u[0] = (int16_t)_regs.eax;
+ u[1] = ((long)u[0] < 0) ? ~0UL : 0UL;
+ v = (int8_t)src.val;
+ generate_exception_if(
+ idiv_dbl(u, v) || ((int8_t)u[0] != (int16_t)u[0]),
+ EXC_DE, -1);
+ dst.val = (int8_t)u[0];
+ ((int8_t *)&_regs.eax)[1] = u[1];
+ break;
+ case 2:
+ u[0] = (int32_t)((_regs.edx << 16) | (uint16_t)_regs.eax);
+ u[1] = ((long)u[0] < 0) ? ~0UL : 0UL;
+ v = (int16_t)src.val;
+ generate_exception_if(
+ idiv_dbl(u, v) || ((int16_t)u[0] != (int32_t)u[0]),
+ EXC_DE, -1);
+ dst.val = (int16_t)u[0];
+ *(int16_t *)&_regs.edx = u[1];
+ break;
+#ifdef __x86_64__
+ case 4:
+ u[0] = (_regs.edx << 32) | (uint32_t)_regs.eax;
+ u[1] = ((long)u[0] < 0) ? ~0UL : 0UL;
+ v = (int32_t)src.val;
+ generate_exception_if(
+ idiv_dbl(u, v) || ((int32_t)u[0] != u[0]),
+ EXC_DE, -1);
+ dst.val = (int32_t)u[0];
+ _regs.edx = (uint32_t)u[1];
+ break;
+#endif
+ default:
+ u[0] = _regs.eax;
+ u[1] = _regs.edx;
+ v = src.val;
+ generate_exception_if(idiv_dbl(u, v), EXC_DE, -1);
+ dst.val = u[0];
+ _regs.edx = u[1];
+ break;
+ }
+ break;
+ }
+ default:
+ goto cannot_emulate;
+ }
+ break;
+
case 0xf8: /* clc */
_regs.eflags &= ~EFLG_CF;
break;
@@ -3465,201 +3336,131 @@ x86_emulate(
case 0xfd: /* std */
_regs.eflags |= EFLG_DF;
break;
- }
- goto writeback;
-
- twobyte_insn:
- switch ( b )
- {
- case 0x40 ... 0x4f: /* cmovcc */
- dst.val = src.val;
- if ( !test_cc(b, _regs.eflags) )
- dst.type = OP_NONE;
- break;
-
- case 0x90 ... 0x9f: /* setcc */
- dst.val = test_cc(b, _regs.eflags);
- break;
-
- case 0xb0 ... 0xb1: /* cmpxchg */
- /* Save real source value, then compare EAX against destination. */
- src.orig_val = src.val;
- src.val = _regs.eax;
- emulate_2op_SrcV("cmp", src, dst, _regs.eflags);
- if ( _regs.eflags & EFLG_ZF )
- {
- /* Success: write back to memory. */
- dst.val = src.orig_val;
- }
- else
- {
- /* Failure: write the value we saw to EAX. */
- dst.type = OP_REG;
- dst.reg = (unsigned long *)&_regs.eax;
- }
- break;
-
- case 0xa3: bt: /* bt */
- emulate_2op_SrcV_nobyte("bt", src, dst, _regs.eflags);
- dst.type = OP_NONE;
- break;
-
- case 0xa4: /* shld imm8,r,r/m */
- case 0xa5: /* shld %%cl,r,r/m */
- case 0xac: /* shrd imm8,r,r/m */
- case 0xad: /* shrd %%cl,r,r/m */ {
- uint8_t shift, width = dst.bytes << 3;
- shift = (b & 1) ? (uint8_t)_regs.ecx : insn_fetch_type(uint8_t);
- if ( (shift &= width - 1) == 0 )
- break;
- dst.orig_val = truncate_word(dst.val, dst.bytes);
- dst.val = ((shift == width) ? src.val :
- (b & 8) ?
- /* shrd */
- ((dst.orig_val >> shift) |
- truncate_word(src.val << (width - shift), dst.bytes)) :
- /* shld */
- ((dst.orig_val << shift) |
- ((src.val >> (width - shift)) & ((1ull << shift) - 1))));
- dst.val = truncate_word(dst.val, dst.bytes);
- _regs.eflags &= ~(EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_PF|EFLG_CF);
- if ( (dst.val >> ((b & 8) ? (shift - 1) : (width - shift))) & 1 )
- _regs.eflags |= EFLG_CF;
- if ( ((dst.val ^ dst.orig_val) >> (width - 1)) & 1 )
- _regs.eflags |= EFLG_OF;
- _regs.eflags |= ((dst.val >> (width - 1)) & 1) ? EFLG_SF : 0;
- _regs.eflags |= (dst.val == 0) ? EFLG_ZF : 0;
- _regs.eflags |= even_parity(dst.val) ? EFLG_PF : 0;
- break;
- }
-
- case 0xb3: btr: /* btr */
- emulate_2op_SrcV_nobyte("btr", src, dst, _regs.eflags);
- break;
-
- case 0xab: bts: /* bts */
- emulate_2op_SrcV_nobyte("bts", src, dst, _regs.eflags);
- break;
- case 0xaf: /* imul */
- _regs.eflags &= ~(EFLG_OF|EFLG_CF);
- switch ( dst.bytes )
+ case 0xfe: /* Grp4 */
+ generate_exception_if((modrm_reg & 7) >= 2, EXC_UD, -1);
+ case 0xff: /* Grp5 */
+ switch ( modrm_reg & 7 )
{
- case 2:
- dst.val = ((uint32_t)(int16_t)src.val *
- (uint32_t)(int16_t)dst.val);
- if ( (int16_t)dst.val != (uint32_t)dst.val )
- _regs.eflags |= EFLG_OF|EFLG_CF;
+ case 0: /* inc */
+ emulate_1op("inc", dst, _regs.eflags);
break;
-#ifdef __x86_64__
- case 4:
- dst.val = ((uint64_t)(int32_t)src.val *
- (uint64_t)(int32_t)dst.val);
- if ( (int32_t)dst.val != dst.val )
- _regs.eflags |= EFLG_OF|EFLG_CF;
+ case 1: /* dec */
+ emulate_1op("dec", dst, _regs.eflags);
break;
-#endif
- default: {
- unsigned long m[2] = { src.val, dst.val };
- if ( imul_dbl(m) )
- _regs.eflags |= EFLG_OF|EFLG_CF;
- dst.val = m[0];
+ case 2: /* call (near) */
+ case 4: /* jmp (near) */
+ if ( (dst.bytes != 8) && mode_64bit() )
+ {
+ dst.bytes = op_bytes = 8;
+ if ( dst.type == OP_REG )
+ dst.val = *dst.reg;
+ else if ( (rc = read_ulong(dst.mem.seg, dst.mem.off,
+ &dst.val, 8, ctxt, ops)) != 0 )
+ goto done;
+ }
+ src.val = _regs.eip;
+ _regs.eip = dst.val;
+ if ( (modrm_reg & 7) == 2 )
+ goto push; /* call */
+ dst.type = OP_NONE;
break;
- }
- }
- break;
+ case 3: /* call (far, absolute indirect) */
+ case 5: /* jmp (far, absolute indirect) */ {
+ unsigned long sel;
- case 0xb2: /* lss */
- dst.val = x86_seg_ss;
- goto les;
+ generate_exception_if(dst.type != OP_MEM, EXC_UD, -1);
- case 0xb4: /* lfs */
- dst.val = x86_seg_fs;
- goto les;
+ if ( (rc = read_ulong(dst.mem.seg, dst.mem.off+dst.bytes,
+ &sel, 2, ctxt, ops)) )
+ goto done;
- case 0xb5: /* lgs */
- dst.val = x86_seg_gs;
- goto les;
+ if ( (modrm_reg & 7) == 3 ) /* call */
+ {
+ struct segment_register reg;
+ fail_if(ops->read_segment == NULL);
+ if ( (rc = ops->read_segment(x86_seg_cs, &reg, ctxt)) ||
+ (rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes),
+ &reg.sel, op_bytes, ctxt)) ||
+ (rc = ops->write(x86_seg_ss, sp_pre_dec(op_bytes),
+ &_regs.eip, op_bytes, ctxt)) )
+ goto done;
+ }
- case 0xb6: /* movzx rm8,r{16,32,64} */
- /* Recompute DstReg as we may have decoded AH/BH/CH/DH. */
- dst.reg = decode_register(modrm_reg, &_regs, 0);
- dst.bytes = op_bytes;
- dst.val = (uint8_t)src.val;
- break;
+ if ( (rc = load_seg(x86_seg_cs, sel, ctxt, ops)) != 0 )
+ goto done;
+ _regs.eip = dst.val;
- case 0xbc: /* bsf */ {
- int zf;
- asm ( "bsf %2,%0; setz %b1"
- : "=r" (dst.val), "=q" (zf)
- : "r" (src.val), "1" (0) );
- _regs.eflags &= ~EFLG_ZF;
- if ( zf )
- {
- _regs.eflags |= EFLG_ZF;
dst.type = OP_NONE;
+ break;
}
- break;
- }
-
- case 0xbd: /* bsr */ {
- int zf;
- asm ( "bsr %2,%0; setz %b1"
- : "=r" (dst.val), "=q" (zf)
- : "r" (src.val), "1" (0) );
- _regs.eflags &= ~EFLG_ZF;
- if ( zf )
- {
- _regs.eflags |= EFLG_ZF;
+ case 6: /* push */
+ /* 64-bit mode: PUSH defaults to a 64-bit operand. */
+ if ( mode_64bit() && (dst.bytes == 4) )
+ {
+ dst.bytes = 8;
+ if ( dst.type == OP_REG )
+ dst.val = *dst.reg;
+ else if ( (rc = read_ulong(dst.mem.seg, dst.mem.off,
+ &dst.val, 8, ctxt, ops)) != 0 )
+ goto done;
+ }
+ if ( (rc = ops->write(x86_seg_ss, sp_pre_dec(dst.bytes),
+ &dst.val, dst.bytes, ctxt)) != 0 )
+ goto done;
dst.type = OP_NONE;
+ break;
+ case 7:
+ generate_exception_if(1, EXC_UD, -1);
+ default:
+ goto cannot_emulate;
}
break;
}
- case 0xb7: /* movzx rm16,r{16,32,64} */
- dst.val = (uint16_t)src.val;
- break;
-
- case 0xbb: btc: /* btc */
- emulate_2op_SrcV_nobyte("btc", src, dst, _regs.eflags);
- break;
-
- case 0xba: /* Grp8 */
- switch ( modrm_reg & 7 )
+ writeback:
+ switch ( dst.type )
+ {
+ case OP_REG:
+ /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
+ switch ( dst.bytes )
{
- case 4: goto bt;
- case 5: goto bts;
- case 6: goto btr;
- case 7: goto btc;
- default: generate_exception_if(1, EXC_UD, -1);
+ case 1: *(uint8_t *)dst.reg = (uint8_t)dst.val; break;
+ case 2: *(uint16_t *)dst.reg = (uint16_t)dst.val; break;
+ case 4: *dst.reg = (uint32_t)dst.val; break; /* 64b: zero-ext */
+ case 8: *dst.reg = dst.val; break;
}
break;
-
- case 0xbe: /* movsx rm8,r{16,32,64} */
- /* Recompute DstReg as we may have decoded AH/BH/CH/DH. */
- dst.reg = decode_register(modrm_reg, &_regs, 0);
- dst.bytes = op_bytes;
- dst.val = (int8_t)src.val;
+ case OP_MEM:
+ if ( !(d & Mov) && (dst.orig_val == dst.val) &&
+ !ctxt->force_writeback )
+ /* nothing to do */;
+ else if ( lock_prefix )
+ rc = ops->cmpxchg(
+ dst.mem.seg, dst.mem.off, &dst.orig_val,
+ &dst.val, dst.bytes, ctxt);
+ else
+ rc = ops->write(
+ dst.mem.seg, dst.mem.off, &dst.val, dst.bytes, ctxt);
+ if ( rc != 0 )
+ goto done;
+ default:
break;
+ }
- case 0xbf: /* movsx rm16,r{16,32,64} */
- dst.val = (int16_t)src.val;
- break;
+ /* Inject #DB if single-step tracing was enabled at instruction start. */
+ if ( (ctxt->regs->eflags & EFLG_TF) && (rc == X86EMUL_OKAY) &&
+ (ops->inject_hw_exception != NULL) )
+ rc = ops->inject_hw_exception(EXC_DB, -1, ctxt) ? : X86EMUL_EXCEPTION;
- case 0xc0 ... 0xc1: /* xadd */
- /* Write back the register source. */
- switch ( dst.bytes )
- {
- case 1: *(uint8_t *)src.reg = (uint8_t)dst.val; break;
- case 2: *(uint16_t *)src.reg = (uint16_t)dst.val; break;
- case 4: *src.reg = (uint32_t)dst.val; break; /* 64b reg: zero-extend */
- case 8: *src.reg = dst.val; break;
- }
- goto add;
- }
- goto writeback;
+ /* Commit shadow register state. */
+ _regs.eflags &= ~EFLG_RF;
+ *ctxt->regs = _regs;
- twobyte_special_insn:
+ done:
+ return rc;
+
+ twobyte_insn:
switch ( b )
{
case 0x01: /* Grp7 */ {
@@ -3846,6 +3647,12 @@ x86_emulate(
break;
}
+ case 0x40 ... 0x4f: /* cmovcc */
+ dst.val = src.val;
+ if ( !test_cc(b, _regs.eflags) )
+ dst.type = OP_NONE;
+ break;
+
case 0x6f: /* movq mm/m64,mm */ {
uint8_t stub[] = { 0x0f, 0x6f, modrm, 0xc3 };
struct fpu_insn_ctxt fic = { .insn_bytes = sizeof(stub)-1 };
@@ -3895,6 +3702,10 @@ x86_emulate(
break;
}
+ case 0x90 ... 0x9f: /* setcc */
+ dst.val = test_cc(b, _regs.eflags);
+ break;
+
case 0xa0: /* push %%fs */
src.val = x86_seg_fs;
goto push_seg;
@@ -3922,6 +3733,182 @@ x86_emulate(
src.val = x86_seg_gs;
goto pop_seg;
+ case 0xb0 ... 0xb1: /* cmpxchg */
+ /* Save real source value, then compare EAX against destination. */
+ src.orig_val = src.val;
+ src.val = _regs.eax;
+ emulate_2op_SrcV("cmp", src, dst, _regs.eflags);
+ if ( _regs.eflags & EFLG_ZF )
+ {
+ /* Success: write back to memory. */
+ dst.val = src.orig_val;
+ }
+ else
+ {
+ /* Failure: write the value we saw to EAX. */
+ dst.type = OP_REG;
+ dst.reg = (unsigned long *)&_regs.eax;
+ }
+ break;
+
+ case 0xa3: bt: /* bt */
+ emulate_2op_SrcV_nobyte("bt", src, dst, _regs.eflags);
+ dst.type = OP_NONE;
+ break;
+
+ case 0xa4: /* shld imm8,r,r/m */
+ case 0xa5: /* shld %%cl,r,r/m */
+ case 0xac: /* shrd imm8,r,r/m */
+ case 0xad: /* shrd %%cl,r,r/m */ {
+ uint8_t shift, width = dst.bytes << 3;
+ shift = (b & 1) ? (uint8_t)_regs.ecx : insn_fetch_type(uint8_t);
+ if ( (shift &= width - 1) == 0 )
+ break;
+ dst.orig_val = truncate_word(dst.val, dst.bytes);
+ dst.val = ((shift == width) ? src.val :
+ (b & 8) ?
+ /* shrd */
+ ((dst.orig_val >> shift) |
+ truncate_word(src.val << (width - shift), dst.bytes)) :
+ /* shld */
+ ((dst.orig_val << shift) |
+ ((src.val >> (width - shift)) & ((1ull << shift) - 1))));
+ dst.val = truncate_word(dst.val, dst.bytes);
+ _regs.eflags &= ~(EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_PF|EFLG_CF);
+ if ( (dst.val >> ((b & 8) ? (shift - 1) : (width - shift))) & 1 )
+ _regs.eflags |= EFLG_CF;
+ if ( ((dst.val ^ dst.orig_val) >> (width - 1)) & 1 )
+ _regs.eflags |= EFLG_OF;
+ _regs.eflags |= ((dst.val >> (width - 1)) & 1) ? EFLG_SF : 0;
+ _regs.eflags |= (dst.val == 0) ? EFLG_ZF : 0;
+ _regs.eflags |= even_parity(dst.val) ? EFLG_PF : 0;
+ break;
+ }
+
+ case 0xb3: btr: /* btr */
+ emulate_2op_SrcV_nobyte("btr", src, dst, _regs.eflags);
+ break;
+
+ case 0xab: bts: /* bts */
+ emulate_2op_SrcV_nobyte("bts", src, dst, _regs.eflags);
+ break;
+
+ case 0xaf: /* imul */
+ _regs.eflags &= ~(EFLG_OF|EFLG_CF);
+ switch ( dst.bytes )
+ {
+ case 2:
+ dst.val = ((uint32_t)(int16_t)src.val *
+ (uint32_t)(int16_t)dst.val);
+ if ( (int16_t)dst.val != (uint32_t)dst.val )
+ _regs.eflags |= EFLG_OF|EFLG_CF;
+ break;
+#ifdef __x86_64__
+ case 4:
+ dst.val = ((uint64_t)(int32_t)src.val *
+ (uint64_t)(int32_t)dst.val);
+ if ( (int32_t)dst.val != dst.val )
+ _regs.eflags |= EFLG_OF|EFLG_CF;
+ break;
+#endif
+ default: {
+ unsigned long m[2] = { src.val, dst.val };
+ if ( imul_dbl(m) )
+ _regs.eflags |= EFLG_OF|EFLG_CF;
+ dst.val = m[0];
+ break;
+ }
+ }
+ break;
+
+ case 0xb2: /* lss */
+ dst.val = x86_seg_ss;
+ goto les;
+
+ case 0xb4: /* lfs */
+ dst.val = x86_seg_fs;
+ goto les;
+
+ case 0xb5: /* lgs */
+ dst.val = x86_seg_gs;
+ goto les;
+
+ case 0xb6: /* movzx rm8,r{16,32,64} */
+ /* Recompute DstReg as we may have decoded AH/BH/CH/DH. */
+ dst.reg = decode_register(modrm_reg, &_regs, 0);
+ dst.bytes = op_bytes;
+ dst.val = (uint8_t)src.val;
+ break;
+
+ case 0xbc: /* bsf */ {
+ int zf;
+ asm ( "bsf %2,%0; setz %b1"
+ : "=r" (dst.val), "=q" (zf)
+ : "r" (src.val), "1" (0) );
+ _regs.eflags &= ~EFLG_ZF;
+ if ( zf )
+ {
+ _regs.eflags |= EFLG_ZF;
+ dst.type = OP_NONE;
+ }
+ break;
+ }
+
+ case 0xbd: /* bsr */ {
+ int zf;
+ asm ( "bsr %2,%0; setz %b1"
+ : "=r" (dst.val), "=q" (zf)
+ : "r" (src.val), "1" (0) );
+ _regs.eflags &= ~EFLG_ZF;
+ if ( zf )
+ {
+ _regs.eflags |= EFLG_ZF;
+ dst.type = OP_NONE;
+ }
+ break;
+ }
+
+ case 0xb7: /* movzx rm16,r{16,32,64} */
+ dst.val = (uint16_t)src.val;
+ break;
+
+ case 0xbb: btc: /* btc */
+ emulate_2op_SrcV_nobyte("btc", src, dst, _regs.eflags);
+ break;
+
+ case 0xba: /* Grp8 */
+ switch ( modrm_reg & 7 )
+ {
+ case 4: goto bt;
+ case 5: goto bts;
+ case 6: goto btr;
+ case 7: goto btc;
+ default: generate_exception_if(1, EXC_UD, -1);
+ }
+ break;
+
+ case 0xbe: /* movsx rm8,r{16,32,64} */
+ /* Recompute DstReg as we may have decoded AH/BH/CH/DH. */
+ dst.reg = decode_register(modrm_reg, &_regs, 0);
+ dst.bytes = op_bytes;
+ dst.val = (int8_t)src.val;
+ break;
+
+ case 0xbf: /* movsx rm16,r{16,32,64} */
+ dst.val = (int16_t)src.val;
+ break;
+
+ case 0xc0 ... 0xc1: /* xadd */
+ /* Write back the register source. */
+ switch ( dst.bytes )
+ {
+ case 1: *(uint8_t *)src.reg = (uint8_t)dst.val; break;
+ case 2: *(uint16_t *)src.reg = (uint16_t)dst.val; break;
+ case 4: *src.reg = (uint32_t)dst.val; break; /* 64b reg: zero-extend */
+ case 8: *src.reg = dst.val; break;
+ }
+ goto add;
+
case 0xc7: /* Grp9 (cmpxchg8b/cmpxchg16b) */ {
unsigned long old[2], exp[2], new[2];
unsigned int i;
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index ed4de3f5cd..475c031df0 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -28,6 +28,7 @@
#include <asm/hvm/vioapic.h>
#include <asm/hvm/io.h>
#include <xen/hvm/iommu.h>
+#include <asm/hvm/viridian.h>
#include <asm/hvm/vmx/vmcs.h>
#include <asm/hvm/svm/vmcb.h>
#include <public/hvm/params.h>
@@ -74,6 +75,8 @@ struct hvm_domain {
/* Pass-through */
struct hvm_iommu hvm_iommu;
+ struct viridian_domain viridian;
+
bool_t hap_enabled;
bool_t qemu_mapcache_invalidate;
bool_t is_s3_suspended;
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 0d7791ec39..aadc025e46 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -219,6 +219,9 @@ hvm_set_segment_register(struct vcpu *v, enum x86_segment seg,
hvm_funcs.set_segment_register(v, seg, reg);
}
+#define is_viridian_domain(_d) \
+ (is_hvm_domain(_d) && ((_d)->arch.hvm_domain.params[HVM_PARAM_VIRIDIAN]))
+
void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx);
void hvm_migrate_timers(struct vcpu *v);
diff --git a/xen/include/asm-x86/hvm/viridian.h b/xen/include/asm-x86/hvm/viridian.h
new file mode 100644
index 0000000000..ac16966172
--- /dev/null
+++ b/xen/include/asm-x86/hvm/viridian.h
@@ -0,0 +1,65 @@
+/*****************************************************************************
+ *
+ * include/asm-x86/hvm/viridian.h
+ *
+ * Copyright (c) 2008 Citrix Corp.
+ *
+ */
+
+#ifndef __ASM_X86_HVM_VIRIDIAN_H__
+#define __ASM_X86_HVM_VIRIDIAN_H__
+
+union viridian_guest_os_id
+{
+ uint64_t raw;
+ struct
+ {
+ uint64_t build_number:16;
+ uint64_t service_pack:8;
+ uint64_t minor:8;
+ uint64_t major:8;
+ uint64_t os:8;
+ uint64_t vendor:16;
+ } fields;
+};
+
+union viridian_hypercall_gpa
+{ uint64_t raw;
+ struct
+ {
+ uint64_t enabled:1;
+ uint64_t reserved_preserved:11;
+ uint64_t pfn:48;
+ } fields;
+};
+
+struct viridian_domain
+{
+ union viridian_guest_os_id guest_os_id;
+ union viridian_hypercall_gpa hypercall_gpa;
+};
+
+int
+cpuid_viridian_leaves(
+ unsigned int leaf,
+ unsigned int *eax,
+ unsigned int *ebx,
+ unsigned int *ecx,
+ unsigned int *edx);
+
+int
+wrmsr_viridian_regs(
+ uint32_t idx,
+ uint32_t eax,
+ uint32_t edx);
+
+int
+rdmsr_viridian_regs(
+ uint32_t idx,
+ uint32_t *eax,
+ uint32_t *edx);
+
+int
+viridian_hypercall(struct cpu_user_regs *regs);
+
+#endif /* __ASM_X86_HVM_VIRIDIAN_H__ */
diff --git a/xen/include/asm-x86/hvm/vlapic.h b/xen/include/asm-x86/hvm/vlapic.h
index e0ef14205a..3f34e47950 100644
--- a/xen/include/asm-x86/hvm/vlapic.h
+++ b/xen/include/asm-x86/hvm/vlapic.h
@@ -98,4 +98,8 @@ struct vlapic *apic_round_robin(
int vlapic_match_logical_addr(struct vlapic *vlapic, uint8_t mda);
+void vlapic_EOI_set(struct vlapic *vlapic);
+
+int vlapic_ipi(struct vlapic *vlapic, uint32_t icr_low, uint32_t icr_high);
+
#endif /* __ASM_X86_HVM_VLAPIC_H__ */
diff --git a/xen/include/asm-x86/perfc_defn.h b/xen/include/asm-x86/perfc_defn.h
index 73e9d9be2a..784aa9eb5a 100644
--- a/xen/include/asm-x86/perfc_defn.h
+++ b/xen/include/asm-x86/perfc_defn.h
@@ -111,4 +111,20 @@ PERFCOUNTER(shadow_unsync, "shadow OOS unsyncs")
PERFCOUNTER(shadow_unsync_evict, "shadow OOS evictions")
PERFCOUNTER(shadow_resync, "shadow OOS resyncs")
+PERFCOUNTER(mshv_call_sw_addr_space, "MS Hv Switch Address Space")
+PERFCOUNTER(mshv_call_flush_tlb_list, "MS Hv Flush TLB list")
+PERFCOUNTER(mshv_call_flush_tlb_all, "MS Hv Flush TLB all")
+PERFCOUNTER(mshv_call_long_wait, "MS Hv Notify long wait")
+PERFCOUNTER(mshv_rdmsr_osid, "MS Hv rdmsr Guest OS ID")
+PERFCOUNTER(mshv_rdmsr_hc_page, "MS Hv rdmsr hypercall page")
+PERFCOUNTER(mshv_rdmsr_vp_index, "MS Hv rdmsr vp index")
+PERFCOUNTER(mshv_rdmsr_icr, "MS Hv rdmsr icr")
+PERFCOUNTER(mshv_rdmsr_tpr, "MS Hv rdmsr tpr")
+PERFCOUNTER(mshv_wrmsr_osid, "MS Hv wrmsr Guest OS ID")
+PERFCOUNTER(mshv_wrmsr_hc_page, "MS Hv wrmsr hypercall page")
+PERFCOUNTER(mshv_wrmsr_vp_index, "MS Hv wrmsr vp index")
+PERFCOUNTER(mshv_wrmsr_icr, "MS Hv wrmsr icr")
+PERFCOUNTER(mshv_wrmsr_tpr, "MS Hv wrmsr tpr")
+PERFCOUNTER(mshv_wrmsr_eoi, "MS Hv wrmsr eoi")
+
/*#endif*/ /* __XEN_PERFC_DEFN_H__ */
diff --git a/xen/include/public/arch-x86/hvm/save.h b/xen/include/public/arch-x86/hvm/save.h
index 73b390998e..9b78787b44 100644
--- a/xen/include/public/arch-x86/hvm/save.h
+++ b/xen/include/public/arch-x86/hvm/save.h
@@ -421,9 +421,20 @@ struct hvm_hw_mtrr {
DECLARE_HVM_SAVE_TYPE(MTRR, 14, struct hvm_hw_mtrr);
+/*
+ * Viridian hypervisor context.
+ */
+
+struct hvm_viridian_context {
+ uint64_t hypercall_gpa;
+ uint64_t guest_os_id;
+};
+
+DECLARE_HVM_SAVE_TYPE(VIRIDIAN, 15, struct hvm_viridian_context);
+
/*
* Largest type-code in use
*/
-#define HVM_SAVE_CODE_MAX 14
+#define HVM_SAVE_CODE_MAX 15
#endif /* __XEN_PUBLIC_HVM_SAVE_X86_H__ */
diff --git a/xen/include/public/hvm/params.h b/xen/include/public/hvm/params.h
index 5f75ed78e8..62b0d858a8 100644
--- a/xen/include/public/hvm/params.h
+++ b/xen/include/public/hvm/params.h
@@ -51,9 +51,16 @@
#define HVM_PARAM_BUFIOREQ_PFN 6
#ifdef __ia64__
+
#define HVM_PARAM_NVRAM_FD 7
#define HVM_PARAM_VHPT_SIZE 8
#define HVM_PARAM_BUFPIOREQ_PFN 9
+
+#elif defined(__i386__) || defined(__x86_64__)
+
+/* Expose Viridian interfaces to this HVM guest? */
+#define HVM_PARAM_VIRIDIAN 9
+
#endif
/*