aboutsummaryrefslogtreecommitdiffstats
path: root/xen
diff options
context:
space:
mode:
authorKeir Fraser <keir.fraser@citrix.com>2009-10-15 09:36:40 +0100
committerKeir Fraser <keir.fraser@citrix.com>2009-10-15 09:36:40 +0100
commit61f2a440299cdf5ff809cd330a6400f6a5cdf910 (patch)
tree227d1b51b47a9bddf6dec437371eabad8d346221 /xen
parentf8e61527db79a580d9d9d5602059ed4db6c06b2d (diff)
downloadxen-61f2a440299cdf5ff809cd330a6400f6a5cdf910.tar.gz
xen-61f2a440299cdf5ff809cd330a6400f6a5cdf910.tar.bz2
xen-61f2a440299cdf5ff809cd330a6400f6a5cdf910.zip
gdbsx: a gdbserver stub for xen.
It should be run in dom0 on a gdbsx-enabled hypervisor. For details, please see tools/debugger/gdbsx/README. Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com> Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
Diffstat (limited to 'xen')
-rw-r--r--xen/Rules.mk2
-rw-r--r--xen/arch/x86/Makefile1
-rw-r--r--xen/arch/x86/debug.c264
-rw-r--r--xen/arch/x86/domctl.c131
-rw-r--r--xen/arch/x86/hvm/svm/svm.c4
-rw-r--r--xen/arch/x86/hvm/vmx/vmx.c4
-rw-r--r--xen/include/asm-x86/debugger.h4
-rw-r--r--xen/include/asm-x86/domain.h4
-rw-r--r--xen/include/public/domctl.h26
9 files changed, 440 insertions, 0 deletions
diff --git a/xen/Rules.mk b/xen/Rules.mk
index a28efaf902..731e0cc88c 100644
--- a/xen/Rules.mk
+++ b/xen/Rules.mk
@@ -8,6 +8,7 @@ perfc ?= n
perfc_arrays ?= n
lock_profile ?= n
crash_debug ?= n
+gdbsx ?= n
frame_pointer ?= n
# Allow some delicate passthrough related hypercalls to be made from a stubdom
@@ -56,6 +57,7 @@ CFLAGS-$(perfc_arrays) += -DPERF_ARRAYS
CFLAGS-$(lock_profile) += -DLOCK_PROFILE
CFLAGS-$(frame_pointer) += -fno-omit-frame-pointer -DCONFIG_FRAME_POINTER
CFLAGS-$(privileged_stubdoms) += -DPRIVILEGED_STUBDOMS
+CFLAGS-$(gdbsx) += -DXEN_GDBSX_CONFIG
ifneq ($(max_phys_cpus),)
CFLAGS-y += -DMAX_PHYS_CPUS=$(max_phys_cpus)
diff --git a/xen/arch/x86/Makefile b/xen/arch/x86/Makefile
index 8c3809213e..ba5da39e4b 100644
--- a/xen/arch/x86/Makefile
+++ b/xen/arch/x86/Makefile
@@ -56,6 +56,7 @@ obj-y += hpet.o
obj-y += bzimage.o
obj-$(crash_debug) += gdbstub.o
+obj-$(gdbsx) += debug.o
x86_emulate.o: x86_emulate/x86_emulate.c x86_emulate/x86_emulate.h
diff --git a/xen/arch/x86/debug.c b/xen/arch/x86/debug.c
new file mode 100644
index 0000000000..51193dd40b
--- /dev/null
+++ b/xen/arch/x86/debug.c
@@ -0,0 +1,264 @@
+/*
+ * Copyright (C) 2009, Mukesh Rathor, Oracle Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#include <xen/config.h>
+#include <xen/sched.h>
+#include <xen/compile.h>
+#include <xen/mm.h>
+#include <xen/domain_page.h>
+#include <xen/guest_access.h>
+#include <asm/p2m.h>
+
+/*
+ * This file for general routines common to more than one debugger, like kdb,
+ * gdbsx, etc..
+ */
+
+#ifdef XEN_KDB_CONFIG
+/* kdb supplies the console printer and a runtime verbosity knob. */
+extern volatile int kdbdbg;
+extern void kdbp(const char *fmt, ...);
+#define DBGP(...) {(kdbdbg) ? kdbp(__VA_ARGS__):0;}
+#define DBGP1(...) {(kdbdbg>1) ? kdbp(__VA_ARGS__):0;}
+#define DBGP2(...) {(kdbdbg>2) ? kdbp(__VA_ARGS__):0;}
+#else
+/* NOTE(review): DBGP() itself is not defined in this branch; any use of
+ * DBGP in a build without XEN_KDB_CONFIG will fail to compile. */
+#define DBGP1(...) {0;}
+#define DBGP2(...) {0;}
+#endif
+
+/* Debugger-facing guest virtual address and byte types. */
+typedef unsigned long dbgva_t;
+typedef unsigned char dbgbyte_t;
+
+
+/*
+ * Returns: mfn for the given (hvm guest) vaddr, or INVALID_MFN if the
+ * address does not translate (or translates to read-only memory while
+ * toaddr != 0, i.e. the caller intends to write).
+ */
+static unsigned long
+dbg_hvm_va2mfn(dbgva_t vaddr, struct domain *dp, int toaddr)
+{
+    unsigned long mfn, gfn;
+    uint32_t pfec = PFEC_page_present;
+    p2m_type_t gfntype;
+
+    DBGP2("vaddr:%lx domid:%d\n", vaddr, dp->domain_id);
+
+    /* NOTE(review): always translates with vcpu[0]'s paging context;
+     * assumes vcpu[0] exists and is representative — TODO confirm. */
+    gfn = paging_gva_to_gfn(dp->vcpu[0], vaddr, &pfec);
+    if ( gfn == INVALID_GFN )
+    {
+        DBGP2("kdb:bad gfn from gva_to_gfn\n");
+        return INVALID_MFN;
+    }
+
+    mfn = mfn_x(gfn_to_mfn(dp, gfn, &gfntype));
+    /* Refuse to hand out a frame for writing if the p2m says read-only. */
+    if ( p2m_is_readonly(gfntype) && toaddr )
+    {
+        DBGP2("kdb:p2m_is_readonly: gfntype:%x\n", gfntype);
+        return INVALID_MFN;
+    }
+
+    DBGP2("X: vaddr:%lx domid:%d mfn:%lx\n", vaddr, dp->domain_id, mfn);
+    return mfn;
+}
+
+#if defined(__x86_64__)
+
+/*
+ * pgd3val: this is the value of init_mm.pgd[3] in a PV guest. It is optional.
+ *          This assists debug of modules in the guest. The kernel address
+ *          space seems to always be mapped, but modules are not necessarily
+ *          mapped in any arbitrary guest cr3 that we pick if pgd3val is 0.
+ *          Modules should always be addressable if we use cr3 from init_mm.
+ *          Since pgd3val is already a pgd value, cr3->pgd[3], we just need to
+ *          do 2 level lookups.
+ *
+ * NOTE: 4 level paging works for 32 PAE guests also because cpu runs in IA32-e
+ *       mode.
+ * Returns: mfn for the given (pv guest) vaddr, or INVALID_MFN.
+ */
+static unsigned long
+dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
+{
+    l4_pgentry_t l4e, *l4t;
+    l3_pgentry_t l3e, *l3t;
+    l2_pgentry_t l2e, *l2t;
+    l1_pgentry_t l1e, *l1t;
+    /* A non-zero pgd3val already names the L3 table, so skip L4/L3. */
+    unsigned long cr3 = (pgd3val ? pgd3val : dp->vcpu[0]->arch.cr3);
+    unsigned long mfn = cr3 >> PAGE_SHIFT;
+
+    DBGP2("vaddr:%lx domid:%d cr3:%lx pgd3:%lx\n", vaddr, dp->domain_id,
+          cr3, pgd3val);
+
+    if ( pgd3val == 0 )
+    {
+        /* NOTE(review): mfn_to_virt() assumes every page-table mfn is
+         * covered by the hypervisor direct map — confirm for all guests. */
+        l4t = mfn_to_virt(mfn);
+        l4e = l4t[l4_table_offset(vaddr)];
+        mfn = l4e_get_pfn(l4e);
+        DBGP2("l4t:%p l4to:%lx l4e:%lx mfn:%lx\n", l4t,
+              l4_table_offset(vaddr), l4e, mfn);
+        if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) )
+        {
+            DBGP1("l4 PAGE not present. vaddr:%lx cr3:%lx\n", vaddr, cr3);
+            return INVALID_MFN;
+        }
+
+        l3t = mfn_to_virt(mfn);
+        l3e = l3t[l3_table_offset(vaddr)];
+        mfn = l3e_get_pfn(l3e);
+        DBGP2("l3t:%p l3to:%lx l3e:%lx mfn:%lx\n", l3t,
+              l3_table_offset(vaddr), l3e, mfn);
+        if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
+        {
+            DBGP1("l3 PAGE not present. vaddr:%lx cr3:%lx\n", vaddr, cr3);
+            return INVALID_MFN;
+        }
+    }
+
+    l2t = mfn_to_virt(mfn);
+    l2e = l2t[l2_table_offset(vaddr)];
+    mfn = l2e_get_pfn(l2e);
+    DBGP2("l2t:%p l2to:%lx l2e:%lx mfn:%lx\n", l2t, l2_table_offset(vaddr),
+          l2e, mfn);
+    /* 2MB superpages (_PAGE_PSE) are not walked: treated as a failure. */
+    if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) ||
+         (l2e_get_flags(l2e) & _PAGE_PSE) )
+    {
+        DBGP1("l2 PAGE not present. vaddr:%lx cr3:%lx\n", vaddr, cr3);
+        return INVALID_MFN;
+    }
+    l1t = mfn_to_virt(mfn);
+    l1e = l1t[l1_table_offset(vaddr)];
+    mfn = l1e_get_pfn(l1e);
+    DBGP2("l1t:%p l1to:%lx l1e:%lx mfn:%lx\n", l1t, l1_table_offset(vaddr),
+          l1e, mfn);
+
+    return mfn_valid(mfn) ? mfn : INVALID_MFN;
+}
+
+#else
+
+/*
+ * Returns: mfn for the given (pv guest) vaddr, or INVALID_MFN.
+ * 32-bit PAE build: 3-level walk; a non-zero pgd3val already names the
+ * L3 entry so only the L2/L1 levels are walked (see comment above).
+ */
+static unsigned long
+dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
+{
+    l3_pgentry_t l3e, *l3t;
+    l2_pgentry_t l2e, *l2t;
+    l1_pgentry_t l1e, *l1t;
+    unsigned long cr3 = (pgd3val ? pgd3val : dp->vcpu[0]->arch.cr3);
+    unsigned long mfn = cr3 >> PAGE_SHIFT;
+
+    DBGP2("vaddr:%lx domid:%d cr3:%lx pgd3:%lx\n", vaddr, dp->domain_id,
+          cr3, pgd3val);
+
+    if ( pgd3val == 0 )
+    {
+        l3t = map_domain_page(mfn);
+        /* PAE: cr3 points at a 32-byte-aligned PDPT within the page;
+         * presumably (cr3 & 0xFE0) >> 3 is its entry offset — confirm. */
+        l3t += (cr3 & 0xFE0UL) >> 3;
+        l3e = l3t[l3_table_offset(vaddr)];
+        mfn = l3e_get_pfn(l3e);
+        unmap_domain_page(l3t);
+        if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
+            return INVALID_MFN;
+    }
+
+    l2t = map_domain_page(mfn);
+    l2e = l2t[l2_table_offset(vaddr)];
+    mfn = l2e_get_pfn(l2e);
+    unmap_domain_page(l2t);
+    /* Superpages (_PAGE_PSE) are not supported by this walk. */
+    if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) ||
+         (l2e_get_flags(l2e) & _PAGE_PSE) )
+        return INVALID_MFN;
+
+    l1t = map_domain_page(mfn);
+    l1e = l1t[l1_table_offset(vaddr)];
+    mfn = l1e_get_pfn(l1e);
+    unmap_domain_page(l1t);
+
+    return mfn_valid(mfn) ? mfn : INVALID_MFN;
+}
+#endif /* defined(__x86_64__) */
+
+/*
+ * Copy up to 'len' bytes between debugger buffer 'buf' and guest memory at
+ * guest virtual address 'addr', one page at a time.
+ * toaddr != 0: write buf -> guest; else read guest -> buf.
+ * Stops at the first address that fails to translate.
+ * Returns: number of bytes remaining to be copied (0 on full success).
+ */
+static int
+dbg_rw_guest_mem(dbgva_t addr, dbgbyte_t *buf, int len, struct domain *dp,
+                 int toaddr, uint64_t pgd3)
+{
+    while ( len > 0 )
+    {
+        char *va;
+        unsigned long mfn, pagecnt;
+
+        /* Bytes copyable without crossing the current page boundary. */
+        pagecnt = min_t(long, PAGE_SIZE - (addr & ~PAGE_MASK), len);
+
+        mfn = (dp->is_hvm
+               ? dbg_hvm_va2mfn(addr, dp, toaddr)
+               : dbg_pv_va2mfn(addr, dp, pgd3));
+        if ( mfn == INVALID_MFN )
+            break;
+
+        va = map_domain_page(mfn);
+        va = va + (addr & (PAGE_SIZE-1));
+
+        if ( toaddr )
+        {
+            memcpy(va, buf, pagecnt);    /* va = buf */
+            /* Keep log-dirty / shadow state coherent with our write. */
+            paging_mark_dirty(dp, mfn);
+        }
+        else
+        {
+            memcpy(buf, va, pagecnt);    /* buf = va */
+        }
+
+        /* NOTE(review): va was advanced past the page base; relies on
+         * unmap_domain_page() masking back to the page start — confirm. */
+        unmap_domain_page(va);
+
+        addr += pagecnt;
+        buf += pagecnt;
+        len -= pagecnt;
+    }
+
+    return len;
+}
+
+/*
+ * addr is hypervisor addr if domid == IDLE_DOMAIN_ID, else it's guest addr
+ * buf is debugger buffer.
+ * if toaddr, then addr = buf (write to addr), else buf = addr (rd from guest)
+ * pgd3: value of init_mm.pgd[3] in guest. see above.
+ * Returns: number of bytes remaining to be copied.
+ */
+int
+dbg_rw_mem(dbgva_t addr, dbgbyte_t *buf, int len, domid_t domid, int toaddr,
+           uint64_t pgd3)
+{
+    /* get_domain_by_id() takes a domain reference; it MUST be dropped with
+     * put_domain() on every exit path (the original leaked it). */
+    struct domain *dp = get_domain_by_id(domid);
+    int hyp = (domid == IDLE_DOMAIN_ID);
+
+    DBGP2("gmem:addr:%lx buf:%p len:$%d domid:%x toaddr:%x dp:%p\n",
+          addr, buf, len, domid, toaddr, dp);
+    if ( hyp )
+    {
+        /* Hypervisor-address access: copy directly, no translation. */
+        if ( toaddr )
+            len = __copy_to_user((void *)addr, buf, len);
+        else
+            len = __copy_from_user(buf, (void *)addr, len);
+    }
+    else
+    {
+        if ( dp && !dp->is_dying )   /* make sure guest is still there */
+            len = dbg_rw_guest_mem(addr, buf, len, dp, toaddr, pgd3);
+    }
+
+    /* Drop the reference taken by get_domain_by_id() above. */
+    if ( dp )
+        put_domain(dp);
+
+    DBGP2("gmem:exit:len:$%d\n", len);
+    return len;
+}
+
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 6025475a30..581fcb3295 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -31,6 +31,26 @@
#include <xsm/xsm.h>
#include <xen/iommu.h>
+#ifdef XEN_GDBSX_CONFIG
+#ifdef XEN_KDB_CONFIG
+#include "../kdb/include/kdbdefs.h"
+#include "../kdb/include/kdbproto.h"
+#else
+/* Without kdb, provide the minimal types and prototype for dbg_rw_mem(). */
+typedef unsigned long kdbva_t;
+typedef unsigned char kdbbyt_t;
+extern int dbg_rw_mem(kdbva_t, kdbbyt_t *, int, domid_t, int, uint64_t);
+#endif
+/*
+ * Perform the guest-memory read/write described by *iop on domain 'domid'.
+ * On return, iop->remain holds the number of bytes NOT copied.
+ * Returns 0 on full success, -EFAULT on a partial or failed copy.
+ */
+static int
+gdbsx_guest_mem_io(domid_t domid, struct xen_domctl_gdbsx_memio *iop)
+{
+    /* NOTE(review): iop->uva is used directly as a dom0 user-space buffer;
+     * assumes the caller's address space is current, and iop->len is not
+     * range-checked here — TODO confirm both at the domctl entry point. */
+    ulong l_uva = (ulong)iop->uva;
+    iop->remain = dbg_rw_mem(
+        (kdbva_t)iop->gva, (kdbbyt_t *)l_uva, iop->len, domid,
+        iop->gwr, iop->pgd3val);
+    return (iop->remain ? -EFAULT : 0);
+}
+#endif /* XEN_GDBSX_CONFIG */
+
long arch_do_domctl(
struct xen_domctl *domctl,
XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
@@ -1121,6 +1141,117 @@ long arch_do_domctl(
}
break;
+#ifdef XEN_GDBSX_CONFIG
+ case XEN_DOMCTL_gdbsx_guestmemio:
+ {
+ struct domain *d;
+
+ ret = -ESRCH;
+ if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
+ break;
+
+ domctl->u.gdbsx_guest_memio.remain =
+ domctl->u.gdbsx_guest_memio.len;
+
+ ret = gdbsx_guest_mem_io(domctl->domain, &domctl->u.gdbsx_guest_memio);
+ if ( !ret && copy_to_guest(u_domctl, domctl, 1) )
+ ret = -EFAULT;
+
+ rcu_unlock_domain(d);
+ }
+ break;
+
+ case XEN_DOMCTL_gdbsx_pausevcpu:
+ {
+ struct domain *d;
+ struct vcpu *v;
+
+ ret = -ESRCH;
+ if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
+ break;
+
+ ret = -EBUSY;
+ if ( !d->is_paused_by_controller )
+ {
+ rcu_unlock_domain(d);
+ break;
+ }
+ ret = -EINVAL;
+ if ( domctl->u.gdbsx_pauseunp_vcpu.vcpu >= MAX_VIRT_CPUS ||
+ (v = d->vcpu[domctl->u.gdbsx_pauseunp_vcpu.vcpu]) == NULL )
+ {
+ rcu_unlock_domain(d);
+ break;
+ }
+ vcpu_pause(v);
+ ret = 0;
+ rcu_unlock_domain(d);
+ }
+ break;
+
+ case XEN_DOMCTL_gdbsx_unpausevcpu:
+ {
+ struct domain *d;
+ struct vcpu *v;
+
+ ret = -ESRCH;
+ if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
+ break;
+
+ ret = -EBUSY;
+ if ( !d->is_paused_by_controller )
+ {
+ rcu_unlock_domain(d);
+ break;
+ }
+ ret = -EINVAL;
+ if ( domctl->u.gdbsx_pauseunp_vcpu.vcpu >= MAX_VIRT_CPUS ||
+ (v = d->vcpu[domctl->u.gdbsx_pauseunp_vcpu.vcpu]) == NULL )
+ {
+ rcu_unlock_domain(d);
+ break;
+ }
+ if ( !atomic_read(&v->pause_count) )
+ printk("WARN: Unpausing vcpu:%d which is not paused\n", v->vcpu_id);
+ vcpu_unpause(v);
+ ret = 0;
+ rcu_unlock_domain(d);
+ }
+ break;
+
+ case XEN_DOMCTL_gdbsx_domstatus:
+ {
+ struct domain *d;
+ struct vcpu *v;
+
+ ret = -ESRCH;
+ if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
+ break;
+
+ domctl->u.gdbsx_domstatus.vcpu_id = -1;
+ domctl->u.gdbsx_domstatus.paused = d->is_paused_by_controller;
+ if ( domctl->u.gdbsx_domstatus.paused )
+ {
+ for_each_vcpu ( d, v )
+ {
+ if ( v->arch.gdbsx_vcpu_event )
+ {
+ domctl->u.gdbsx_domstatus.vcpu_id = v->vcpu_id;
+ domctl->u.gdbsx_domstatus.vcpu_ev =
+ v->arch.gdbsx_vcpu_event;
+ v->arch.gdbsx_vcpu_event = 0;
+ break;
+ }
+ }
+ }
+ ret = 0;
+ if ( copy_to_guest(u_domctl, domctl, 1) )
+ ret = -EFAULT;
+ rcu_unlock_domain(d);
+ }
+ break;
+#endif /* XEN_GDBSX_CONFIG */
+
default:
ret = -ENOSYS;
break;
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index dde588db2f..c5fc6a9c9e 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -51,6 +51,7 @@
#include <asm/hvm/vpt.h>
#include <asm/hvm/trace.h>
#include <asm/hap.h>
+#include <asm/debugger.h>
u32 svm_feature_flags;
@@ -1370,6 +1371,9 @@ asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs)
if ( (inst_len = __get_instruction_length(v, INSTR_INT3)) == 0 )
break;
__update_guest_eip(regs, inst_len);
+#ifdef XEN_GDBSX_CONFIG
+ current->arch.gdbsx_vcpu_event = TRAP_int3;
+#endif
domain_pause_for_debugger();
break;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 3559d8a961..e187677dd3 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -51,6 +51,7 @@
#include <public/hvm/save.h>
#include <asm/hvm/trace.h>
#include <asm/xenoprof.h>
+#include <asm/debugger.h>
enum handler_return { HNDL_done, HNDL_unhandled, HNDL_exception_raised };
@@ -2475,6 +2476,9 @@ asmlinkage void vmx_vmexit_handler(struct cpu_user_regs *regs)
goto exit_and_crash;
inst_len = __get_instruction_length(); /* Safe: INT3 */
__update_guest_eip(inst_len);
+#ifdef XEN_GDBSX_CONFIG
+ current->arch.gdbsx_vcpu_event = TRAP_int3;
+#endif
domain_pause_for_debugger();
break;
case TRAP_no_device:
diff --git a/xen/include/asm-x86/debugger.h b/xen/include/asm-x86/debugger.h
index 68019dfaf5..318b38a3e7 100644
--- a/xen/include/asm-x86/debugger.h
+++ b/xen/include/asm-x86/debugger.h
@@ -68,6 +68,10 @@ static inline int debugger_trap_entry(
if ( guest_kernel_mode(v, regs) && v->domain->debugger_attached &&
((vector == TRAP_int3) || (vector == TRAP_debug)) )
{
+#ifdef XEN_GDBSX_CONFIG
+ if ( vector != TRAP_debug ) /* domain pause is good enough */
+ current->arch.gdbsx_vcpu_event = vector;
+#endif
domain_pause_for_debugger();
return 1;
}
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index b5888505eb..3ecc3bf873 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -413,6 +413,10 @@ struct arch_vcpu
struct mapcache_vcpu mapcache;
#endif
+#ifdef XEN_GDBSX_CONFIG
+    /* Pending debugger event (e.g. TRAP_int3) reported via
+     * XEN_DOMCTL_gdbsx_domstatus; 0 when no event is pending.
+     * Guarded with #ifdef for consistency with every other
+     * XEN_GDBSX_CONFIG test in this patch (#if breaks if the
+     * macro is ever defined empty). */
+    uint32_t gdbsx_vcpu_event;
+#endif
+
} __cacheline_aligned;
/* Shorthands to improve code legibility. */
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 3b4cb51796..0a8c372796 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -650,6 +650,29 @@ typedef struct xen_domctl_hvmcontext_partial {
} xen_domctl_hvmcontext_partial_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_partial_t);
+#define XEN_DOMCTL_gdbsx_guestmemio 1000 /* guest mem io */
+struct xen_domctl_gdbsx_memio {
+    /* IN */
+    uint64_aligned_t pgd3val;  /* optional: init_mm.pgd[3] value */
+    uint64_aligned_t gva;      /* guest virtual address */
+    uint64_aligned_t uva;      /* user buffer virtual address */
+    int len;                   /* number of bytes to read/write */
+    int gwr;                   /* 0 = read from guest. 1 = write to guest */
+    /* OUT */
+    int remain;                /* bytes remaining to be copied */
+};
+
+#define XEN_DOMCTL_gdbsx_pausevcpu   1001
+#define XEN_DOMCTL_gdbsx_unpausevcpu 1002
+struct xen_domctl_gdbsx_pauseunp_vcpu { /* pause/unpause a vcpu */
+    uint32_t vcpu;             /* which vcpu */
+};
+
+#define XEN_DOMCTL_gdbsx_domstatus   1003
+struct xen_domctl_gdbsx_domstatus {
+    /* OUT */
+    int paused;                /* is the domain paused */
+    uint32_t vcpu_id;          /* any vcpu in an event? */
+    uint32_t vcpu_ev;          /* if yes, what event? */
+
+};
struct xen_domctl {
uint32_t cmd;
@@ -696,6 +719,9 @@ struct xen_domctl {
#if defined(__i386__) || defined(__x86_64__)
struct xen_domctl_cpuid cpuid;
#endif
+ struct xen_domctl_gdbsx_memio gdbsx_guest_memio;
+ struct xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
+ struct xen_domctl_gdbsx_domstatus gdbsx_domstatus;
uint8_t pad[128];
} u;
};