aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKeir Fraser <keir.fraser@citrix.com>2009-06-18 10:14:16 +0100
committerKeir Fraser <keir.fraser@citrix.com>2009-06-18 10:14:16 +0100
commitfb442e217186a5bc3ed7ec84f0b683b708609eac (patch)
treeb8f199e985285357b03ca679c4eb352c7000bb19
parenta5c98899b234c23f43dae0f7dd58649a666d070e (diff)
downloadxen-fb442e217186a5bc3ed7ec84f0b683b708609eac.tar.gz
xen-fb442e217186a5bc3ed7ec84f0b683b708609eac.tar.bz2
xen-fb442e217186a5bc3ed7ec84f0b683b708609eac.zip
x86_64: allow more vCPU-s per guest
Since the shared info layout is fixed, guests are required to use VCPUOP_register_vcpu_info prior to booting any vCPU beyond the traditional limit of 32. MAX_VIRT_CPUS, being an implementation detail of the hypervisor, is no longer being exposed in the public headers. The tools changes are clearly incomplete (and done only so things would build again), and the current state of the tools (using scalar variables all over the place to represent vCPU bitmaps) very likely doesn't permit booting DomU-s with more than the traditional number of vCPU-s. Testing of the extended functionality was done with Dom0 (96 vCPU-s, as well as 128 vCPU-s out of which the kernel elected - by way of a simple kernel side patch - to use only some, resulting in a sparse bitmap). ia64 changes only to make things build, and build-tested only (and the tools part only as far as the build would go without encountering unrelated problems in the blktap code). Signed-off-by: Jan Beulich <jbeulich@novell.com>
-rw-r--r--docs/src/interface.tex2
-rw-r--r--tools/debugger/xenitp/xenitp.c18
-rw-r--r--tools/include/xen-foreign/structs.py2
-rw-r--r--tools/libxc/ia64/xc_ia64_linux_restore.c2
-rw-r--r--tools/libxc/ia64/xc_ia64_linux_save.c4
-rw-r--r--tools/libxc/xc_core.c11
-rw-r--r--tools/libxc/xc_core_ia64.c21
-rw-r--r--tools/libxc/xc_core_ia64.h2
-rw-r--r--tools/libxc/xc_dom_ia64.c2
-rw-r--r--tools/libxc/xc_dom_x86.c4
-rw-r--r--tools/libxc/xc_domain_restore.c2
-rw-r--r--tools/libxc/xc_private.h5
-rw-r--r--tools/libxc/xc_ptrace.c27
-rw-r--r--tools/libxc/xc_ptrace.h5
-rw-r--r--tools/libxc/xc_ptrace_core.c105
-rw-r--r--xen/arch/ia64/xen/dom_fw_common.c1
-rw-r--r--xen/arch/ia64/xen/domain.c25
-rw-r--r--xen/arch/ia64/xen/xensetup.c8
-rw-r--r--xen/arch/x86/domain.c28
-rw-r--r--xen/arch/x86/domain_build.c24
-rw-r--r--xen/arch/x86/domctl.c7
-rw-r--r--xen/arch/x86/hvm/hvm.c8
-rw-r--r--xen/arch/x86/hvm/mtrr.c5
-rw-r--r--xen/arch/x86/hvm/vioapic.c12
-rw-r--r--xen/arch/x86/hvm/vlapic.c6
-rw-r--r--xen/arch/x86/hvm/vmx/vmx.c9
-rw-r--r--xen/arch/x86/hvm/vpic.c4
-rw-r--r--xen/arch/x86/mm.c6
-rw-r--r--xen/arch/x86/mm/paging.c2
-rw-r--r--xen/arch/x86/mm/shadow/common.c4
-rw-r--r--xen/arch/x86/nmi.c3
-rw-r--r--xen/arch/x86/setup.c12
-rw-r--r--xen/arch/x86/traps.c6
-rw-r--r--xen/common/compat/domain.c2
-rw-r--r--xen/common/domain.c32
-rw-r--r--xen/common/domctl.c38
-rw-r--r--xen/common/event_channel.c41
-rw-r--r--xen/common/keyhandler.c6
-rw-r--r--xen/common/sched_sedf.c3
-rw-r--r--xen/common/xenoprof.c13
-rw-r--r--xen/include/asm-ia64/config.h1
-rw-r--r--xen/include/asm-x86/config.h15
-rw-r--r--xen/include/asm-x86/paging.h2
-rw-r--r--xen/include/asm-x86/shared.h4
-rw-r--r--xen/include/public/arch-ia64.h2
-rw-r--r--xen/include/public/arch-x86/xen.h4
-rw-r--r--xen/include/public/xen.h2
-rw-r--r--xen/include/xen/domain.h1
-rw-r--r--xen/include/xen/numa.h3
-rw-r--r--xen/include/xen/sched.h10
-rw-r--r--xen/include/xen/shared.h17
-rw-r--r--xen/include/xen/xenoprof.h2
52 files changed, 393 insertions, 187 deletions
diff --git a/docs/src/interface.tex b/docs/src/interface.tex
index 5fa0c5fd68..458d0b4ae3 100644
--- a/docs/src/interface.tex
+++ b/docs/src/interface.tex
@@ -462,7 +462,7 @@ The structure is declared in {\bf xen/include/public/xen.h}:
\scriptsize
\begin{verbatim}
typedef struct shared_info {
- vcpu_info_t vcpu_info[MAX_VIRT_CPUS];
+ vcpu_info_t vcpu_info[XEN_LEGACY_MAX_VCPUS];
/*
* A domain can create "event channels" on which it can send and receive
diff --git a/tools/debugger/xenitp/xenitp.c b/tools/debugger/xenitp/xenitp.c
index 95d3820f94..847d7bd7a6 100644
--- a/tools/debugger/xenitp/xenitp.c
+++ b/tools/debugger/xenitp/xenitp.c
@@ -955,7 +955,7 @@ char *parse_arg (char **buf)
return res;
}
-vcpu_guest_context_any_t vcpu_ctx_any[MAX_VIRT_CPUS];
+vcpu_guest_context_any_t *vcpu_ctx_any;
int vcpu_setcontext (int vcpu)
{
@@ -1584,11 +1584,23 @@ void xenitp (int vcpu)
{
int ret;
struct sigaction sa;
-
- cur_ctx = &vcpu_ctx_any[vcpu].c;
+ xc_dominfo_t dominfo;
xc_handle = xc_interface_open (); /* for accessing control interface */
+ ret = xc_domain_getinfo (xc_handle, domid, 1, &dominfo);
+ if (ret < 0) {
+ perror ("xc_domain_getinfo");
+ exit (-1);
+ }
+
+ vcpu_ctx_any = calloc (sizeof(vcpu_ctx_any), dominfo.max_vcpu_id + 1);
+ if (!vcpu_ctx_any) {
+ perror ("vcpu context array alloc");
+ exit (-1);
+ }
+ cur_ctx = &vcpu_ctx_any[vcpu].c;
+
if (xc_domain_setdebugging (xc_handle, domid, 1) != 0)
perror ("setdebugging");
diff --git a/tools/include/xen-foreign/structs.py b/tools/include/xen-foreign/structs.py
index 7d51ca8b79..a1bc055a19 100644
--- a/tools/include/xen-foreign/structs.py
+++ b/tools/include/xen-foreign/structs.py
@@ -53,6 +53,6 @@ defines = [ "__i386__",
# all archs
"xen_pfn_to_cr3",
- "MAX_VIRT_CPUS",
+ "XEN_LEGACY_MAX_VCPUS",
"MAX_GUEST_CMDLINE" ];
diff --git a/tools/libxc/ia64/xc_ia64_linux_restore.c b/tools/libxc/ia64/xc_ia64_linux_restore.c
index 1e2d9a038f..ed62ab7d92 100644
--- a/tools/libxc/ia64/xc_ia64_linux_restore.c
+++ b/tools/libxc/ia64/xc_ia64_linux_restore.c
@@ -174,7 +174,7 @@ xc_ia64_recv_shared_info(int xc_handle, int io_fd, uint32_t dom,
/* clear any pending events and the selector */
memset(&(shared_info->evtchn_pending[0]), 0,
sizeof (shared_info->evtchn_pending));
- for (i = 0; i < MAX_VIRT_CPUS; i++)
+ for (i = 0; i < XEN_LEGACY_MAX_VCPUS; i++)
shared_info->vcpu_info[i].evtchn_pending_sel = 0;
if (start_info_pfn != NULL)
diff --git a/tools/libxc/ia64/xc_ia64_linux_save.c b/tools/libxc/ia64/xc_ia64_linux_save.c
index 3f19613b96..6c855eb4a6 100644
--- a/tools/libxc/ia64/xc_ia64_linux_save.c
+++ b/tools/libxc/ia64/xc_ia64_linux_save.c
@@ -238,7 +238,7 @@ xc_ia64_pv_send_context(int xc_handle, int io_fd, uint32_t dom,
/* vcpu map */
uint64_t *vcpumap = NULL;
- if (xc_ia64_send_vcpumap(xc_handle, io_fd, dom, info, MAX_VIRT_CPUS,
+ if (xc_ia64_send_vcpumap(xc_handle, io_fd, dom, info, XEN_LEGACY_MAX_VCPUS,
&vcpumap))
goto out;
@@ -308,7 +308,7 @@ xc_ia64_hvm_send_context(int xc_handle, int io_fd, uint32_t dom,
return -1;
/* vcpu map */
- if (xc_ia64_send_vcpumap(xc_handle, io_fd, dom, info, MAX_VIRT_CPUS,
+ if (xc_ia64_send_vcpumap(xc_handle, io_fd, dom, info, XEN_LEGACY_MAX_VCPUS,
&vcpumap))
goto out;
diff --git a/tools/libxc/xc_core.c b/tools/libxc/xc_core.c
index ef1e8dd7a1..8121a08bf0 100644
--- a/tools/libxc/xc_core.c
+++ b/tools/libxc/xc_core.c
@@ -430,7 +430,7 @@ xc_domain_dumpcore_via_callback(int xc_handle,
int nr_vcpus = 0;
char *dump_mem, *dump_mem_start = NULL;
- vcpu_guest_context_any_t ctxt[MAX_VIRT_CPUS];
+ vcpu_guest_context_any_t *ctxt = NULL;
struct xc_core_arch_context arch_ctxt;
char dummy[PAGE_SIZE];
int dummy_len;
@@ -495,6 +495,13 @@ xc_domain_dumpcore_via_callback(int xc_handle,
goto out;
}
+ ctxt = calloc(sizeof(*ctxt), info.max_vcpu_id + 1);
+ if ( !ctxt )
+ {
+ PERROR("Could not allocate vcpu context array", domid);
+ goto out;
+ }
+
for ( i = 0; i <= info.max_vcpu_id; i++ )
{
if ( xc_vcpu_getcontext(xc_handle, domid, i, &ctxt[nr_vcpus]) == 0 )
@@ -900,6 +907,8 @@ out:
xc_core_shdr_free(sheaders);
if ( strtab != NULL )
xc_core_strtab_free(strtab);
+ if ( ctxt != NULL )
+ free(ctxt);
if ( dump_mem_start != NULL )
free(dump_mem_start);
if ( live_shinfo != NULL )
diff --git a/tools/libxc/xc_core_ia64.c b/tools/libxc/xc_core_ia64.c
index b385d65f7d..77c8596605 100644
--- a/tools/libxc/xc_core_ia64.c
+++ b/tools/libxc/xc_core_ia64.c
@@ -251,13 +251,10 @@ xc_core_arch_map_p2m(int xc_handle, unsigned int guest_width, xc_dominfo_t *info
void
xc_core_arch_context_init(struct xc_core_arch_context* arch_ctxt)
{
- int i;
-
arch_ctxt->mapped_regs_size =
(XMAPPEDREGS_SIZE < PAGE_SIZE) ? PAGE_SIZE: XMAPPEDREGS_SIZE;
arch_ctxt->nr_vcpus = 0;
- for ( i = 0; i < MAX_VIRT_CPUS; i++ )
- arch_ctxt->mapped_regs[i] = NULL;
+ arch_ctxt->mapped_regs = NULL;
xc_ia64_p2m_init(&arch_ctxt->p2m_table);
}
@@ -269,6 +266,7 @@ xc_core_arch_context_free(struct xc_core_arch_context* arch_ctxt)
for ( i = 0; i < arch_ctxt->nr_vcpus; i++ )
if ( arch_ctxt->mapped_regs[i] != NULL )
munmap(arch_ctxt->mapped_regs[i], arch_ctxt->mapped_regs_size);
+ free(arch_ctxt->mapped_regs);
xc_ia64_p2m_unmap(&arch_ctxt->p2m_table);
}
@@ -289,6 +287,21 @@ xc_core_arch_context_get(struct xc_core_arch_context* arch_ctxt,
errno = ENOENT;
return -1;
}
+ if ( !(arch_ctxt->nr_vcpus & (arch_ctxt->nr_vcpus - 1)) ) {
+ unsigned int nr = arch_ctxt->nr_vcpus ? arch_ctxt->nr_vcpus << 1 : 1;
+ mapped_regs_t** new = realloc(arch_ctxt->mapped_regs,
+ nr * sizeof(*new));
+
+ if ( !new )
+ {
+ PERROR("Could not alloc mapped regs pointer array");
+ return -1;
+ }
+ memset(new + arch_ctxt->nr_vcpus, 0,
+ (nr - arch_ctxt->nr_vcpus) * sizeof(*new));
+ arch_ctxt->mapped_regs = new;
+ }
+
mapped_regs = xc_map_foreign_range(xc_handle, domid,
arch_ctxt->mapped_regs_size,
PROT_READ, ctxt->privregs_pfn);
diff --git a/tools/libxc/xc_core_ia64.h b/tools/libxc/xc_core_ia64.h
index 75dd40fdc7..89ffd6ec5d 100644
--- a/tools/libxc/xc_core_ia64.h
+++ b/tools/libxc/xc_core_ia64.h
@@ -29,7 +29,7 @@
struct xc_core_arch_context {
size_t mapped_regs_size;
int nr_vcpus;
- mapped_regs_t* mapped_regs[MAX_VIRT_CPUS];
+ mapped_regs_t** mapped_regs;
struct xen_ia64_p2m_table p2m_table;
};
diff --git a/tools/libxc/xc_dom_ia64.c b/tools/libxc/xc_dom_ia64.c
index 76f845ab1c..2d4086e41d 100644
--- a/tools/libxc/xc_dom_ia64.c
+++ b/tools/libxc/xc_dom_ia64.c
@@ -87,7 +87,7 @@ int shared_info_ia64(struct xc_dom_image *dom, void *ptr)
xc_dom_printf("%s: called\n", __FUNCTION__);
memset(shared_info, 0, sizeof(*shared_info));
- for (i = 0; i < MAX_VIRT_CPUS; i++)
+ for (i = 0; i < XEN_LEGACY_MAX_VCPUS; i++)
shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
shared_info->arch.start_info_pfn = dom->start_info_pfn;
shared_info->arch.memmap_info_num_pages = 1; //XXX
diff --git a/tools/libxc/xc_dom_x86.c b/tools/libxc/xc_dom_x86.c
index a89eb4fef7..30f13970ac 100644
--- a/tools/libxc/xc_dom_x86.c
+++ b/tools/libxc/xc_dom_x86.c
@@ -498,7 +498,7 @@ static int shared_info_x86_32(struct xc_dom_image *dom, void *ptr)
xc_dom_printf("%s: called\n", __FUNCTION__);
memset(shared_info, 0, sizeof(*shared_info));
- for ( i = 0; i < MAX_VIRT_CPUS; i++ )
+ for ( i = 0; i < XEN_LEGACY_MAX_VCPUS; i++ )
shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
return 0;
}
@@ -511,7 +511,7 @@ static int shared_info_x86_64(struct xc_dom_image *dom, void *ptr)
xc_dom_printf("%s: called\n", __FUNCTION__);
memset(shared_info, 0, sizeof(*shared_info));
- for ( i = 0; i < MAX_VIRT_CPUS; i++ )
+ for ( i = 0; i < XEN_LEGACY_MAX_VCPUS; i++ )
shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
return 0;
}
diff --git a/tools/libxc/xc_domain_restore.c b/tools/libxc/xc_domain_restore.c
index 9987d35dfa..13e83c3f13 100644
--- a/tools/libxc/xc_domain_restore.c
+++ b/tools/libxc/xc_domain_restore.c
@@ -1146,7 +1146,7 @@ int xc_domain_restore(int xc_handle, int io_fd, uint32_t dom,
/* clear any pending events and the selector */
MEMSET_ARRAY_FIELD(new_shared_info, evtchn_pending, 0);
- for ( i = 0; i < MAX_VIRT_CPUS; i++ )
+ for ( i = 0; i < XEN_LEGACY_MAX_VCPUS; i++ )
SET_FIELD(new_shared_info, vcpu_info[i].evtchn_pending_sel, 0);
/* mask event channels */
diff --git a/tools/libxc/xc_private.h b/tools/libxc/xc_private.h
index d95bd0675d..4676fe8ea2 100644
--- a/tools/libxc/xc_private.h
+++ b/tools/libxc/xc_private.h
@@ -191,11 +191,6 @@ void *xc_map_foreign_ranges(int xc_handle, uint32_t dom,
size_t size, int prot, size_t chunksize,
privcmd_mmap_entry_t entries[], int nentries);
-void *map_domain_va_core(unsigned long domfd, int cpu, void *guest_va,
- vcpu_guest_context_any_t *ctxt);
-int xc_waitdomain_core(int xc_handle, int domain, int *status,
- int options, vcpu_guest_context_any_t *ctxt);
-
void bitmap_64_to_byte(uint8_t *bp, const uint64_t *lp, int nbits);
void bitmap_byte_to_64(uint64_t *lp, const uint8_t *bp, int nbits);
diff --git a/tools/libxc/xc_ptrace.c b/tools/libxc/xc_ptrace.c
index 39f02198a9..725a022339 100644
--- a/tools/libxc/xc_ptrace.c
+++ b/tools/libxc/xc_ptrace.c
@@ -42,7 +42,8 @@ static int current_is_hvm;
static uint64_t online_cpumap;
static uint64_t regs_valid;
-static vcpu_guest_context_any_t ctxt[MAX_VIRT_CPUS];
+static unsigned int nr_vcpu_ids;
+static vcpu_guest_context_any_t *ctxt;
#define FOREACH_CPU(cpumap, i) for ( cpumap = online_cpumap; (i = xc_ffs64(cpumap)); cpumap &= ~(1 << (index - 1)) )
@@ -101,6 +102,21 @@ paging_enabled(vcpu_guest_context_any_t *v)
return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
}
+vcpu_guest_context_any_t *xc_ptrace_get_vcpu_ctxt(unsigned int nr_cpus)
+{
+ if (nr_cpus > nr_vcpu_ids) {
+ vcpu_guest_context_any_t *new;
+
+ new = realloc(ctxt, nr_cpus * sizeof(*ctxt));
+ if (!new)
+ return NULL;
+ ctxt = new;
+ nr_vcpu_ids = nr_cpus;
+ }
+
+ return ctxt;
+}
+
/*
* Fetch registers for all online cpus and set the cpumap
* to indicate which cpus are online
@@ -113,6 +129,9 @@ get_online_cpumap(int xc_handle, struct xen_domctl_getdomaininfo *d,
{
int i, online;
+ if (!xc_ptrace_get_vcpu_ctxt(d->max_vcpu_id + 1))
+ return -ENOMEM;
+
*cpumap = 0;
for (i = 0; i <= d->max_vcpu_id; i++) {
fetch_regs(xc_handle, i, &online);
@@ -261,7 +280,7 @@ xc_ptrace(
case PTRACE_PEEKDATA:
if (current_isfile)
guest_va = (unsigned long *)map_domain_va_core(
- current_domid, cpu, addr, ctxt);
+ current_domid, cpu, addr);
else
guest_va = (unsigned long *)map_domain_va(
xc_handle, cpu, addr, PROT_READ);
@@ -277,7 +296,7 @@ xc_ptrace(
/* XXX assume that all CPUs have the same address space */
if (current_isfile)
guest_va = (unsigned long *)map_domain_va_core(
- current_domid, cpu, addr, ctxt);
+ current_domid, cpu, addr);
else
guest_va = (unsigned long *)map_domain_va(
xc_handle, cpu, addr, PROT_READ|PROT_WRITE);
@@ -433,7 +452,7 @@ xc_waitdomain(
int options)
{
if (current_isfile)
- return xc_waitdomain_core(xc_handle, domain, status, options, ctxt);
+ return xc_waitdomain_core(xc_handle, domain, status, options);
return __xc_waitdomain(xc_handle, domain, status, options);
}
diff --git a/tools/libxc/xc_ptrace.h b/tools/libxc/xc_ptrace.h
index baaadb5b0b..c8ba4041e0 100644
--- a/tools/libxc/xc_ptrace.h
+++ b/tools/libxc/xc_ptrace.h
@@ -157,4 +157,9 @@ struct gdb_regs {
}
#endif
+void *map_domain_va_core(unsigned long domfd, int cpu, void *guest_va);
+int xc_waitdomain_core(int xc_handle, int domain, int *status, int options);
+vcpu_guest_context_any_t *xc_ptrace_get_vcpu_ctxt(unsigned int nr_cpus);
+
+
#endif /* XC_PTRACE */
diff --git a/tools/libxc/xc_ptrace_core.c b/tools/libxc/xc_ptrace_core.c
index a159591c4d..b6a5eb9ffd 100644
--- a/tools/libxc/xc_ptrace_core.c
+++ b/tools/libxc/xc_ptrace_core.c
@@ -12,6 +12,44 @@
#include <time.h>
#include <inttypes.h>
+static unsigned int max_nr_vcpus;
+static unsigned long *cr3;
+static unsigned long *cr3_phys;
+static unsigned long **cr3_virt;
+static unsigned long *pde_phys;
+static unsigned long **pde_virt;
+static unsigned long *page_phys;
+static unsigned long **page_virt;
+
+static vcpu_guest_context_t *
+ptrace_core_get_vcpu_ctxt(unsigned int nr_vcpus)
+{
+ if (nr_vcpus > max_nr_vcpus) {
+ void *new;
+
+#define REALLOC(what) \
+ new = realloc(what, nr_vcpus * sizeof(*what)); \
+ if (!new) \
+ return NULL; \
+ memset(what + max_nr_vcpus, 0, \
+ (nr_vcpus - max_nr_vcpus) * sizeof(*what)); \
+ what = new
+
+ REALLOC(cr3);
+ REALLOC(cr3_phys);
+ REALLOC(cr3_virt);
+ REALLOC(pde_phys);
+ REALLOC(pde_virt);
+ REALLOC(page_phys);
+ REALLOC(page_virt);
+
+#undef REALLOC
+ max_nr_vcpus = nr_vcpus;
+ }
+
+ return &xc_ptrace_get_vcpu_ctxt(nr_vcpus)->c;
+}
+
/* Leave the code for the old format as is. */
/* --- compatible layer for old format ------------------------------------- */
/* XXX application state */
@@ -21,7 +59,6 @@ static long nr_pages_compat = 0;
static unsigned long *p2m_array_compat = NULL;
static unsigned long *m2p_array_compat = NULL;
static unsigned long pages_offset_compat;
-static unsigned long cr3_compat[MAX_VIRT_CPUS];
/* --------------------- */
@@ -34,23 +71,15 @@ map_mtop_offset_compat(unsigned long ma)
static void *
-map_domain_va_core_compat(unsigned long domfd, int cpu, void *guest_va,
- vcpu_guest_context_t *ctxt)
+map_domain_va_core_compat(unsigned long domfd, int cpu, void *guest_va)
{
unsigned long pde, page;
unsigned long va = (unsigned long)guest_va;
void *v;
- static unsigned long cr3_phys[MAX_VIRT_CPUS];
- static unsigned long *cr3_virt[MAX_VIRT_CPUS];
- static unsigned long pde_phys[MAX_VIRT_CPUS];
- static unsigned long *pde_virt[MAX_VIRT_CPUS];
- static unsigned long page_phys[MAX_VIRT_CPUS];
- static unsigned long *page_virt[MAX_VIRT_CPUS];
-
- if (cr3_compat[cpu] != cr3_phys[cpu])
+ if (cr3[cpu] != cr3_phys[cpu])
{
- cr3_phys[cpu] = cr3_compat[cpu];
+ cr3_phys[cpu] = cr3[cpu];
if (cr3_virt[cpu])
munmap(cr3_virt[cpu], PAGE_SIZE);
v = mmap(
@@ -93,7 +122,7 @@ map_domain_va_core_compat(unsigned long domfd, int cpu, void *guest_va,
map_mtop_offset_compat(page_phys[cpu]));
if (v == MAP_FAILED)
{
- IPRINTF("cr3 %lx pde %lx page %lx pti %lx\n", cr3_compat[cpu], pde, page, l1_table_offset_i386(va));
+ IPRINTF("cr3 %lx pde %lx page %lx pti %lx\n", cr3[cpu], pde, page, l1_table_offset_i386(va));
page_phys[cpu] = 0;
return NULL;
}
@@ -107,11 +136,11 @@ xc_waitdomain_core_compat(
int xc_handle,
int domfd,
int *status,
- int options,
- vcpu_guest_context_t *ctxt)
+ int options)
{
int nr_vcpus;
int i;
+ vcpu_guest_context_t *ctxt;
xc_core_header_t header;
if ( nr_pages_compat == 0 )
@@ -132,12 +161,18 @@ xc_waitdomain_core_compat(
nr_vcpus = header.xch_nr_vcpus;
pages_offset_compat = header.xch_pages_offset;
+ if ((ctxt = ptrace_core_get_vcpu_ctxt(nr_vcpus)) == NULL)
+ {
+ IPRINTF("Could not allocate vcpu context array\n");
+ return -1;
+ }
+
if (read(domfd, ctxt, sizeof(vcpu_guest_context_t)*nr_vcpus) !=
sizeof(vcpu_guest_context_t)*nr_vcpus)
return -1;
for (i = 0; i < nr_vcpus; i++)
- cr3_compat[i] = ctxt[i].ctrlreg[3];
+ cr3[i] = ctxt[i].ctrlreg[3];
if ((p2m_array_compat = malloc(nr_pages_compat * sizeof(unsigned long))) == NULL)
{
@@ -375,7 +410,6 @@ static uint64_t* pfn_array = NULL; /* for auto translated physmap mode */
static uint64_t pfn_array_size = 0;
static long nr_pages = 0;
static uint64_t pages_offset;
-static unsigned long cr3[MAX_VIRT_CPUS];
static const struct xen_dumpcore_elfnote_format_version_desc
known_format_version[] =
@@ -413,21 +447,13 @@ map_gmfn_to_offset_elf(unsigned long gmfn)
}
static void *
-map_domain_va_core_elf(unsigned long domfd, int cpu, void *guest_va,
- vcpu_guest_context_t *ctxt)
+map_domain_va_core_elf(unsigned long domfd, int cpu, void *guest_va)
{
unsigned long pde, page;
unsigned long va = (unsigned long)guest_va;
unsigned long offset;
void *v;
- static unsigned long cr3_phys[MAX_VIRT_CPUS];
- static unsigned long *cr3_virt[MAX_VIRT_CPUS];
- static unsigned long pde_phys[MAX_VIRT_CPUS];
- static unsigned long *pde_virt[MAX_VIRT_CPUS];
- static unsigned long page_phys[MAX_VIRT_CPUS];
- static unsigned long *page_virt[MAX_VIRT_CPUS];
-
if (cr3[cpu] != cr3_phys[cpu])
{
if (cr3_virt[cpu])
@@ -498,10 +524,10 @@ xc_waitdomain_core_elf(
int xc_handle,
int domfd,
int *status,
- int options,
- vcpu_guest_context_t *ctxt)
+ int options)
{
int i;
+ vcpu_guest_context_t *ctxt;
struct elf_core ecore;
struct xen_dumpcore_elfnote_none *none;
@@ -527,14 +553,13 @@ xc_waitdomain_core_elf(
if ((header->header.xch_magic != XC_CORE_MAGIC &&
header->header.xch_magic != XC_CORE_MAGIC_HVM) ||
header->header.xch_nr_vcpus == 0 ||
- header->header.xch_nr_vcpus >= MAX_VIRT_CPUS ||
header->header.xch_nr_pages == 0 ||
header->header.xch_page_size != PAGE_SIZE)
goto out;
current_is_auto_translated_physmap =
(header->header.xch_magic == XC_CORE_MAGIC_HVM);
nr_pages = header->header.xch_nr_pages;
-
+
/* .note.Xen: xen_version */
if (elf_core_search_note(&ecore, XEN_DUMPCORE_ELFNOTE_NAME,
XEN_ELFNOTE_DUMPCORE_XEN_VERSION,
@@ -561,6 +586,9 @@ xc_waitdomain_core_elf(
format_version->format_version.version);
}
+ if ((ctxt = ptrace_core_get_vcpu_ctxt(header->header.xch_nr_vcpus)) == NULL)
+ goto out;
+
/* .xen_prstatus: read vcpu_guest_context_t*/
if (elf_core_read_sec_by_name(&ecore, XEN_DUMPCORE_SEC_PRSTATUS,
(char*)ctxt) < 0)
@@ -621,12 +649,10 @@ out:
typedef int (*xc_waitdomain_core_t)(int xc_handle,
int domfd,
int *status,
- int options,
- vcpu_guest_context_t *ctxt);
+ int options);
typedef void *(*map_domain_va_core_t)(unsigned long domfd,
int cpu,
- void *guest_va,
- vcpu_guest_context_t *ctxt);
+ void *guest_va);
struct xc_core_format_type {
xc_waitdomain_core_t waitdomain_core;
map_domain_va_core_t map_domain_va_core;
@@ -642,25 +668,22 @@ static const struct xc_core_format_type format_type[] = {
static const struct xc_core_format_type* current_format_type = NULL;
void *
-map_domain_va_core(unsigned long domfd, int cpu, void *guest_va,
- vcpu_guest_context_any_t *ctxt)
+map_domain_va_core(unsigned long domfd, int cpu, void *guest_va)
{
if (current_format_type == NULL)
return NULL;
- return (current_format_type->map_domain_va_core)(domfd, cpu, guest_va,
- &ctxt->c);
+ return (current_format_type->map_domain_va_core)(domfd, cpu, guest_va);
}
int
-xc_waitdomain_core(int xc_handle, int domfd, int *status, int options,
- vcpu_guest_context_any_t *ctxt)
+xc_waitdomain_core(int xc_handle, int domfd, int *status, int options)
{
int ret;
int i;
for (i = 0; i < NR_FORMAT_TYPE; i++) {
ret = (format_type[i].waitdomain_core)(xc_handle, domfd, status,
- options, &ctxt->c);
+ options);
if (ret == 0) {
current_format_type = &format_type[i];
break;
diff --git a/xen/arch/ia64/xen/dom_fw_common.c b/xen/arch/ia64/xen/dom_fw_common.c
index 01adf4d110..26318e740f 100644
--- a/xen/arch/ia64/xen/dom_fw_common.c
+++ b/xen/arch/ia64/xen/dom_fw_common.c
@@ -28,6 +28,7 @@
#include "ia64/xc_dom_ia64_util.h"
#define ia64_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory")
+#define MAX_VIRT_CPUS XEN_LEGACY_MAX_VCPUS /* XXX */
#endif /* __XEN__ */
#include <xen/acpi.h>
diff --git a/xen/arch/ia64/xen/domain.c b/xen/arch/ia64/xen/domain.c
index 33789f2b55..f69fdb820a 100644
--- a/xen/arch/ia64/xen/domain.c
+++ b/xen/arch/ia64/xen/domain.c
@@ -2225,13 +2225,6 @@ int __init construct_dom0(struct domain *d,
for ( i = 1; i < MAX_VIRT_CPUS; i++ )
d->shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
- if (dom0_max_vcpus == 0)
- dom0_max_vcpus = MAX_VIRT_CPUS;
- if (dom0_max_vcpus > num_online_cpus())
- dom0_max_vcpus = num_online_cpus();
- if (dom0_max_vcpus > MAX_VIRT_CPUS)
- dom0_max_vcpus = MAX_VIRT_CPUS;
-
printk ("Dom0 max_vcpus=%d\n", dom0_max_vcpus);
for ( i = 1; i < dom0_max_vcpus; i++ )
if (alloc_vcpu(d, i, i) == NULL)
@@ -2306,6 +2299,24 @@ int __init construct_dom0(struct domain *d,
return 0;
}
+struct vcpu *__init alloc_dom0_vcpu0(void)
+{
+ if (dom0_max_vcpus == 0)
+ dom0_max_vcpus = MAX_VIRT_CPUS;
+ if (dom0_max_vcpus > num_online_cpus())
+ dom0_max_vcpus = num_online_cpus();
+ if (dom0_max_vcpus > MAX_VIRT_CPUS)
+ dom0_max_vcpus = MAX_VIRT_CPUS;
+
+ dom0->vcpu = xmalloc_array(struct vcpu *, dom0_max_vcpus);
+ if ( !dom0->vcpu )
+ return NULL;
+ memset(dom0->vcpu, 0, dom0_max_vcpus * sizeof(*dom0->vcpu));
+ dom0->max_vcpus = dom0_max_vcpus;
+
+ return alloc_vcpu(dom0, 0, 0);
+}
+
void machine_restart(unsigned int delay_millisecs)
{
mdelay(delay_millisecs);
diff --git a/xen/arch/ia64/xen/xensetup.c b/xen/arch/ia64/xen/xensetup.c
index 61b1d11e1c..2d17de3ce5 100644
--- a/xen/arch/ia64/xen/xensetup.c
+++ b/xen/arch/ia64/xen/xensetup.c
@@ -570,7 +570,11 @@ skip_move:
scheduler_init();
idle_vcpu[0] = (struct vcpu*) ia64_r13;
idle_domain = domain_create(IDLE_DOMAIN_ID, 0, 0);
- if ( (idle_domain == NULL) || (alloc_vcpu(idle_domain, 0, 0) == NULL) )
+ if ( idle_domain == NULL )
+ BUG();
+ idle_domain->vcpu = idle_vcpu;
+ idle_domain->max_vcpus = NR_CPUS;
+ if ( alloc_vcpu(idle_domain, 0, 0) == NULL )
BUG();
alloc_dom_xen_and_dom_io();
@@ -657,7 +661,7 @@ printk("num_online_cpus=%d, max_cpus=%d\n",num_online_cpus(),max_cpus);
if (dom0 == NULL)
panic("Error creating domain 0\n");
domain_set_vhpt_size(dom0, dom0_vhpt_size_log2);
- dom0_vcpu0 = alloc_vcpu(dom0, 0, 0);
+ dom0_vcpu0 = alloc_dom0_vcpu0();
if (dom0_vcpu0 == NULL || vcpu_late_initialise(dom0_vcpu0) != 0)
panic("Cannot allocate dom0 vcpu 0\n");
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 46b573164e..201c9eb457 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -263,7 +263,7 @@ int switch_native(struct domain *d)
d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
- for ( vcpuid = 0; vcpuid < MAX_VIRT_CPUS; vcpuid++ )
+ for ( vcpuid = 0; vcpuid < d->max_vcpus; vcpuid++ )
{
if (d->vcpu[vcpuid])
release_compat_l4(d->vcpu[vcpuid]);
@@ -285,7 +285,7 @@ int switch_compat(struct domain *d)
d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 1;
- for ( vcpuid = 0; vcpuid < MAX_VIRT_CPUS; vcpuid++ )
+ for ( vcpuid = 0; vcpuid < d->max_vcpus; vcpuid++ )
{
if ( (d->vcpu[vcpuid] != NULL) &&
(setup_compat_l4(d->vcpu[vcpuid]) != 0) )
@@ -423,12 +423,13 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags)
#else /* __x86_64__ */
- d->arch.mm_perdomain_pt_pages = xmalloc_array(struct page_info *,
- PDPT_L2_ENTRIES);
- if ( !d->arch.mm_perdomain_pt_pages )
+ BUILD_BUG_ON(PDPT_L2_ENTRIES * sizeof(*d->arch.mm_perdomain_pt_pages)
+ != PAGE_SIZE);
+ pg = alloc_domheap_page(NULL, MEMF_node(domain_to_node(d)));
+ if ( !pg )
goto fail;
- memset(d->arch.mm_perdomain_pt_pages, 0,
- PDPT_L2_ENTRIES * sizeof(*d->arch.mm_perdomain_pt_pages));
+ d->arch.mm_perdomain_pt_pages = page_to_virt(pg);
+ clear_page(d->arch.mm_perdomain_pt_pages);
pg = alloc_domheap_page(NULL, MEMF_node(domain_to_node(d)));
if ( pg == NULL )
@@ -523,7 +524,8 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags)
free_domheap_page(virt_to_page(d->arch.mm_perdomain_l2));
if ( d->arch.mm_perdomain_l3 )
free_domheap_page(virt_to_page(d->arch.mm_perdomain_l3));
- xfree(d->arch.mm_perdomain_pt_pages);
+ if ( d->arch.mm_perdomain_pt_pages )
+ free_domheap_page(virt_to_page(d->arch.mm_perdomain_pt_pages));
#else
free_xenheap_pages(d->arch.mm_perdomain_pt, pdpt_order);
#endif
@@ -556,7 +558,7 @@ void arch_domain_destroy(struct domain *d)
if ( perdomain_pt_page(d, i) )
free_domheap_page(perdomain_pt_page(d, i));
}
- xfree(d->arch.mm_perdomain_pt_pages);
+ free_domheap_page(virt_to_page(d->arch.mm_perdomain_pt_pages));
free_domheap_page(virt_to_page(d->arch.mm_perdomain_l2));
free_domheap_page(virt_to_page(d->arch.mm_perdomain_l3));
#endif
@@ -872,7 +874,13 @@ map_vcpu_info(struct vcpu *v, unsigned long mfn, unsigned offset)
new_info = (vcpu_info_t *)(mapping + offset);
- memcpy(new_info, v->vcpu_info, sizeof(*new_info));
+ if ( v->vcpu_info )
+ memcpy(new_info, v->vcpu_info, sizeof(*new_info));
+ else
+ {
+ memset(new_info, 0, sizeof(*new_info));
+ __vcpu_info(v, new_info, evtchn_upcall_mask) = 1;
+ }
v->vcpu_info = new_info;
v->arch.vcpu_info_mfn = mfn;
diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
index 8b14e77fe9..91f2f91ee3 100644
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -82,9 +82,25 @@ static void __init parse_dom0_mem(const char *s)
}
custom_param("dom0_mem", parse_dom0_mem);
-static unsigned int opt_dom0_max_vcpus;
+static unsigned int __initdata opt_dom0_max_vcpus;
integer_param("dom0_max_vcpus", opt_dom0_max_vcpus);
+struct vcpu *__init alloc_dom0_vcpu0(void)
+{
+ if ( opt_dom0_max_vcpus == 0 )
+ opt_dom0_max_vcpus = num_online_cpus();
+ if ( opt_dom0_max_vcpus > MAX_VIRT_CPUS )
+ opt_dom0_max_vcpus = MAX_VIRT_CPUS;
+
+ dom0->vcpu = xmalloc_array(struct vcpu *, opt_dom0_max_vcpus);
+ if ( !dom0->vcpu )
+ return NULL;
+ memset(dom0->vcpu, 0, opt_dom0_max_vcpus * sizeof(*dom0->vcpu));
+ dom0->max_vcpus = opt_dom0_max_vcpus;
+
+ return alloc_vcpu(dom0, 0, 0);
+}
+
static unsigned int opt_dom0_shadow;
boolean_param("dom0_shadow", opt_dom0_shadow);
@@ -701,13 +717,9 @@ int __init construct_dom0(
#endif /* __x86_64__ */
/* Mask all upcalls... */
- for ( i = 0; i < MAX_VIRT_CPUS; i++ )
+ for ( i = 0; i < XEN_LEGACY_MAX_VCPUS; i++ )
shared_info(d, vcpu_info[i].evtchn_upcall_mask) = 1;
- if ( opt_dom0_max_vcpus == 0 )
- opt_dom0_max_vcpus = num_online_cpus();
- if ( opt_dom0_max_vcpus > MAX_VIRT_CPUS )
- opt_dom0_max_vcpus = MAX_VIRT_CPUS;
printk("Dom0 has maximum %u VCPUs\n", opt_dom0_max_vcpus);
for ( i = 1; i < opt_dom0_max_vcpus; i++ )
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 955c5e6088..6b36dd9f01 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -574,7 +574,8 @@ long arch_do_domctl(
goto sendtrigger_out;
ret = -ESRCH;
- if ( (v = d->vcpu[domctl->u.sendtrigger.vcpu]) == NULL )
+ if ( domctl->u.sendtrigger.vcpu >= d->max_vcpus ||
+ (v = d->vcpu[domctl->u.sendtrigger.vcpu]) == NULL )
goto sendtrigger_out;
switch ( domctl->u.sendtrigger.trigger )
@@ -963,7 +964,7 @@ long arch_do_domctl(
goto ext_vcpucontext_out;
ret = -ESRCH;
- if ( (evc->vcpu >= MAX_VIRT_CPUS) ||
+ if ( (evc->vcpu >= d->max_vcpus) ||
((v = d->vcpu[evc->vcpu]) == NULL) )
goto ext_vcpucontext_out;
@@ -1085,7 +1086,7 @@ long arch_do_domctl(
break;
ret = -EINVAL;
- if ( (domctl->u.debug_op.vcpu >= MAX_VIRT_CPUS) ||
+ if ( (domctl->u.debug_op.vcpu >= d->max_vcpus) ||
((v = d->vcpu[domctl->u.debug_op.vcpu]) == NULL) )
goto debug_op_out;
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index aa85502ebf..e584e0c00b 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -367,7 +367,7 @@ void hvm_domain_relinquish_resources(struct domain *d)
/* Stop all asynchronous timer actions. */
rtc_deinit(d);
- if ( d->vcpu[0] != NULL )
+ if ( d->vcpu != NULL && d->vcpu[0] != NULL )
{
pit_deinit(d);
pmtimer_deinit(d);
@@ -507,7 +507,7 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
/* Which vcpu is this? */
vcpuid = hvm_load_instance(h);
- if ( vcpuid >= MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL )
+ if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
{
gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
return -EINVAL;
@@ -2285,7 +2285,7 @@ static void hvm_s3_suspend(struct domain *d)
domain_pause(d);
domain_lock(d);
- if ( d->is_dying || (d->vcpu[0] == NULL) ||
+ if ( d->is_dying || (d->vcpu == NULL) || (d->vcpu[0] == NULL) ||
test_and_set_bool(d->arch.hvm_domain.is_s3_suspended) )
{
domain_unlock(d);
@@ -2660,7 +2660,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
goto param_fail2;
rc = -EINVAL;
- if ( d->vcpu[0] == NULL )
+ if ( d->vcpu == NULL || d->vcpu[0] == NULL )
goto param_fail2;
if ( shadow_mode_enabled(d) )
diff --git a/xen/arch/x86/hvm/mtrr.c b/xen/arch/x86/hvm/mtrr.c
index 2553f7c9a5..58e9cef2ce 100644
--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -676,7 +676,7 @@ static int hvm_load_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
struct hvm_hw_mtrr hw_mtrr;
vcpuid = hvm_load_instance(h);
- if ( vcpuid >= MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL )
+ if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
{
gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
return -EINVAL;
@@ -720,7 +720,8 @@ uint8_t epte_get_entry_emt(
*igmt = 0;
- if ( (current->domain != d) && ((v = d->vcpu[0]) == NULL) )
+ if ( (current->domain != d) &&
+ ((d->vcpu == NULL) || ((v = d->vcpu[0]) == NULL)) )
return MTRR_TYPE_WRBACK;
if ( !v->domain->arch.hvm_domain.params[HVM_PARAM_IDENT_PT] )
diff --git a/xen/arch/x86/hvm/vioapic.c b/xen/arch/x86/hvm/vioapic.c
index 7f63699ab2..4860b74941 100644
--- a/xen/arch/x86/hvm/vioapic.c
+++ b/xen/arch/x86/hvm/vioapic.c
@@ -339,7 +339,8 @@ static void vioapic_deliver(struct hvm_hw_vioapic *vioapic, int irq)
/* Force round-robin to pick VCPU 0 */
if ( (irq == hvm_isa_irq_to_gsi(0)) && pit_channel0_enabled() )
{
- v = vioapic_domain(vioapic)->vcpu[0];
+ v = vioapic_domain(vioapic)->vcpu ?
+ vioapic_domain(vioapic)->vcpu[0] : NULL;
target = v ? vcpu_vlapic(v) : NULL;
}
else
@@ -367,12 +368,14 @@ static void vioapic_deliver(struct hvm_hw_vioapic *vioapic, int irq)
if ( !(deliver_bitmask & (1 << bit)) )
continue;
deliver_bitmask &= ~(1 << bit);
+ if ( vioapic_domain(vioapic)->vcpu == NULL )
+ v = NULL;
#ifdef IRQ0_SPECIAL_ROUTING
/* Do not deliver timer interrupts to VCPU != 0 */
- if ( (irq == hvm_isa_irq_to_gsi(0)) && pit_channel0_enabled() )
+ else if ( (irq == hvm_isa_irq_to_gsi(0)) && pit_channel0_enabled() )
v = vioapic_domain(vioapic)->vcpu[0];
- else
#endif
+ else
v = vioapic_domain(vioapic)->vcpu[bit];
if ( v != NULL )
{
@@ -392,7 +395,8 @@ static void vioapic_deliver(struct hvm_hw_vioapic *vioapic, int irq)
if ( !(deliver_bitmask & (1 << bit)) )
continue;
deliver_bitmask &= ~(1 << bit);
- if ( ((v = vioapic_domain(vioapic)->vcpu[bit]) != NULL) &&
+ if ( (vioapic_domain(vioapic)->vcpu != NULL) &&
+ ((v = vioapic_domain(vioapic)->vcpu[bit]) != NULL) &&
!test_and_set_bool(v->nmi_pending) )
vcpu_kick(v);
}
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index 6e30a2e2af..fd353c9342 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -384,7 +384,7 @@ struct vlapic *apic_lowest_prio(struct domain *d, uint32_t bitmap)
struct vlapic *vlapic, *target = NULL;
struct vcpu *v;
- if ( unlikely((v = d->vcpu[old]) == NULL) )
+ if ( unlikely(!d->vcpu) || unlikely((v = d->vcpu[old]) == NULL) )
return NULL;
do {
@@ -913,7 +913,7 @@ static int lapic_load_hidden(struct domain *d, hvm_domain_context_t *h)
/* Which vlapic to load? */
vcpuid = hvm_load_instance(h);
- if ( vcpuid >= MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL )
+ if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
{
gdprintk(XENLOG_ERR, "HVM restore: domain has no vlapic %u\n", vcpuid);
return -EINVAL;
@@ -936,7 +936,7 @@ static int lapic_load_regs(struct domain *d, hvm_domain_context_t *h)
/* Which vlapic to load? */
vcpuid = hvm_load_instance(h);
- if ( vcpuid >= MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL )
+ if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
{
gdprintk(XENLOG_ERR, "HVM restore: domain has no vlapic %u\n", vcpuid);
return -EINVAL;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 322652cf2d..adab2a93e8 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1221,7 +1221,7 @@ static void __ept_sync_domain(void *info)
void ept_sync_domain(struct domain *d)
{
/* Only if using EPT and this domain has some VCPUs to dirty. */
- if ( d->arch.hvm_domain.hap_enabled && d->vcpu[0] )
+ if ( d->arch.hvm_domain.hap_enabled && d->vcpu && d->vcpu[0] )
{
ASSERT(local_irq_is_enabled());
on_each_cpu(__ept_sync_domain, d, 1);
@@ -1399,7 +1399,7 @@ static struct hvm_function_table vmx_function_table = {
};
static unsigned long *vpid_bitmap;
-#define VPID_BITMAP_SIZE ((1u << VMCS_VPID_WIDTH) / MAX_VIRT_CPUS)
+#define VPID_BITMAP_SIZE ((1u << VMCS_VPID_WIDTH) / XEN_LEGACY_MAX_VCPUS)
void start_vmx(void)
{
@@ -1921,7 +1921,7 @@ static int vmx_alloc_vpid(struct domain *d)
}
while ( test_and_set_bit(idx, vpid_bitmap) );
- d->arch.hvm_domain.vmx.vpid_base = idx * MAX_VIRT_CPUS;
+ d->arch.hvm_domain.vmx.vpid_base = idx * XEN_LEGACY_MAX_VCPUS;
return 0;
}
@@ -1930,7 +1930,8 @@ static void vmx_free_vpid(struct domain *d)
if ( !cpu_has_vmx_vpid )
return;
- clear_bit(d->arch.hvm_domain.vmx.vpid_base / MAX_VIRT_CPUS, vpid_bitmap);
+ clear_bit(d->arch.hvm_domain.vmx.vpid_base / XEN_LEGACY_MAX_VCPUS,
+ vpid_bitmap);
}
static void vmx_install_vlapic_mapping(struct vcpu *v)
diff --git a/xen/arch/x86/hvm/vpic.c b/xen/arch/x86/hvm/vpic.c
index cf43f1e544..5d320d4f82 100644
--- a/xen/arch/x86/hvm/vpic.c
+++ b/xen/arch/x86/hvm/vpic.c
@@ -110,7 +110,9 @@ static void vpic_update_int_output(struct hvm_hw_vpic *vpic)
if ( vpic->is_master )
{
/* Master INT line is connected to VCPU0's VLAPIC LVT0. */
- struct vcpu *v = vpic_domain(vpic)->vcpu[0];
+ struct vcpu *v = vpic_domain(vpic)->vcpu ?
+ vpic_domain(vpic)->vcpu[0] : NULL;
+
if ( (v != NULL) && vlapic_accept_pic_intr(v) )
vcpu_kick(v);
}
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 36462a334f..b8d71f59cf 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1336,7 +1336,7 @@ static int alloc_l3_table(struct page_info *page, int preemptible)
*/
if ( (pfn >= 0x100000) &&
unlikely(!VM_ASSIST(d, VMASST_TYPE_pae_extended_cr3)) &&
- d->vcpu[0] && d->vcpu[0]->is_initialised )
+ d->vcpu && d->vcpu[0] && d->vcpu[0]->is_initialised )
{
MEM_LOG("PAE pgd must be below 4GB (0x%lx >= 0x100000)", pfn);
return -EINVAL;
@@ -2575,7 +2575,7 @@ static inline int vcpumask_to_pcpumask(
for ( vmask = 0, offs = 0; ; ++offs)
{
vcpu_bias = offs * (is_native ? BITS_PER_LONG : 32);
- if ( vcpu_bias >= MAX_VIRT_CPUS )
+ if ( vcpu_bias >= d->max_vcpus )
return 0;
if ( unlikely(is_native ?
@@ -2592,7 +2592,7 @@ static inline int vcpumask_to_pcpumask(
vcpu_id = find_first_set_bit(vmask);
vmask &= ~(1UL << vcpu_id);
vcpu_id += vcpu_bias;
- if ( (vcpu_id >= MAX_VIRT_CPUS) )
+ if ( (vcpu_id >= d->max_vcpus) )
return 0;
if ( ((v = d->vcpu[vcpu_id]) != NULL) )
cpus_or(*pmask, *pmask, v->vcpu_dirty_cpumask);
diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
index 38f120212c..0a5daa9781 100644
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -684,7 +684,7 @@ int paging_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
return 0;
}
- if ( unlikely(d->vcpu[0] == NULL) )
+ if ( unlikely(d->vcpu == NULL) || unlikely(d->vcpu[0] == NULL) )
{
PAGING_ERROR("Paging op on a domain (%u) with no vcpus\n",
d->domain_id);
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 2ffce4d0d5..2ada90cff9 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -1452,7 +1452,7 @@ static void shadow_blow_tables(struct domain *d)
void shadow_blow_tables_per_domain(struct domain *d)
{
- if ( shadow_mode_enabled(d) && d->vcpu[0] != NULL ) {
+ if ( shadow_mode_enabled(d) && d->vcpu != NULL && d->vcpu[0] != NULL ) {
shadow_lock(d);
shadow_blow_tables(d);
shadow_unlock(d);
@@ -1470,7 +1470,7 @@ static void shadow_blow_all_tables(unsigned char c)
rcu_read_lock(&domlist_read_lock);
for_each_domain(d)
{
- if ( shadow_mode_enabled(d) && d->vcpu[0] != NULL )
+ if ( shadow_mode_enabled(d) && d->vcpu != NULL && d->vcpu[0] != NULL )
{
shadow_lock(d);
shadow_blow_tables(d);
diff --git a/xen/arch/x86/nmi.c b/xen/arch/x86/nmi.c
index 8a1f056baf..9db8999965 100644
--- a/xen/arch/x86/nmi.c
+++ b/xen/arch/x86/nmi.c
@@ -463,7 +463,8 @@ static void do_nmi_stats(unsigned char key)
for_each_cpu ( i )
printk("%3d\t%3d\n", i, nmi_count(i));
- if ( ((d = dom0) == NULL) || ((v = d->vcpu[0]) == NULL) )
+ if ( ((d = dom0) == NULL) || (d->vcpu == NULL) ||
+ ((v = d->vcpu[0]) == NULL) )
return;
if ( v->nmi_pending || (v->trap_priority >= VCPU_TRAP_NMI) )
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 026996ec09..a35dda5ae2 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -234,11 +234,15 @@ static void __init init_idle_domain(void)
scheduler_init();
idle_domain = domain_create(IDLE_DOMAIN_ID, 0, 0);
- if ( (idle_domain == NULL) || (alloc_vcpu(idle_domain, 0, 0) == NULL) )
+ if ( idle_domain == NULL )
+ BUG();
+ idle_domain->vcpu = idle_vcpu;
+ idle_domain->max_vcpus = NR_CPUS;
+ if ( alloc_vcpu(idle_domain, 0, 0) == NULL )
BUG();
- set_current(idle_domain->vcpu[0]);
- idle_vcpu[0] = this_cpu(curr_vcpu) = current;
+ set_current(idle_vcpu[0]);
+ this_cpu(curr_vcpu) = current;
setup_idle_pagetable();
}
@@ -998,7 +1002,7 @@ void __init __start_xen(unsigned long mbi_p)
/* Create initial domain 0. */
dom0 = domain_create(0, DOMCRF_s3_integrity, DOM0_SSIDREF);
- if ( (dom0 == NULL) || (alloc_vcpu(dom0, 0, 0) == NULL) )
+ if ( (dom0 == NULL) || (alloc_dom0_vcpu0() == NULL) )
panic("Error creating domain 0\n");
dom0->is_privileged = 1;
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 20e7356cd1..f39d371699 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2868,7 +2868,7 @@ static void nmi_dom0_report(unsigned int reason_idx)
{
struct domain *d = dom0;
- if ( (d == NULL) || (d->vcpu[0] == NULL) )
+ if ( (d == NULL) || (d->vcpu == NULL) || (d->vcpu[0] == NULL) )
return;
set_bit(reason_idx, nmi_reason(d));
@@ -3205,7 +3205,7 @@ int guest_has_trap_callback(struct domain *d, uint16_t vcpuid, unsigned int trap
struct trap_info *t;
BUG_ON(d == NULL);
- BUG_ON(vcpuid >= MAX_VIRT_CPUS);
+ BUG_ON(vcpuid >= d->max_vcpus);
/* Sanity check - XXX should be more fine grained. */
BUG_ON(trap_nr > TRAP_syscall);
@@ -3223,7 +3223,7 @@ int send_guest_trap(struct domain *d, uint16_t vcpuid, unsigned int trap_nr)
struct softirq_trap *st;
BUG_ON(d == NULL);
- BUG_ON(vcpuid >= MAX_VIRT_CPUS);
+ BUG_ON(vcpuid >= d->max_vcpus);
v = d->vcpu[vcpuid];
switch (trap_nr) {
diff --git a/xen/common/compat/domain.c b/xen/common/compat/domain.c
index cf2a13b4e9..48173decc6 100644
--- a/xen/common/compat/domain.c
+++ b/xen/common/compat/domain.c
@@ -24,7 +24,7 @@ int compat_vcpu_op(int cmd, int vcpuid, XEN_GUEST_HANDLE(void) arg)
if ( (vcpuid < 0) || (vcpuid >= MAX_VIRT_CPUS) )
return -EINVAL;
- if ( (v = d->vcpu[vcpuid]) == NULL )
+ if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
return -ENOENT;
switch ( cmd )
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 0bce563331..3b21006d36 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -134,7 +134,7 @@ struct vcpu *alloc_vcpu(
{
struct vcpu *v;
- BUG_ON(d->vcpu[vcpu_id] != NULL);
+ BUG_ON((!is_idle_domain(d) || vcpu_id) && d->vcpu[vcpu_id]);
if ( (v = alloc_vcpu_struct()) == NULL )
return NULL;
@@ -153,7 +153,8 @@ struct vcpu *alloc_vcpu(
v->runstate.state = RUNSTATE_offline;
v->runstate.state_entry_time = NOW();
set_bit(_VPF_down, &v->pause_flags);
- v->vcpu_info = (void *)&shared_info(d, vcpu_info[vcpu_id]);
+ if ( vcpu_id < XEN_LEGACY_MAX_VCPUS )
+ v->vcpu_info = (void *)&shared_info(d, vcpu_info[vcpu_id]);
}
if ( sched_init_vcpu(v, cpu_id) != 0 )
@@ -181,22 +182,8 @@ struct vcpu *alloc_vcpu(
struct vcpu *alloc_idle_vcpu(unsigned int cpu_id)
{
- struct domain *d;
- struct vcpu *v;
- unsigned int vcpu_id = cpu_id % MAX_VIRT_CPUS;
-
- if ( (v = idle_vcpu[cpu_id]) != NULL )
- return v;
-
- d = (vcpu_id == 0) ?
- domain_create(IDLE_DOMAIN_ID, 0, 0) :
- idle_vcpu[cpu_id - vcpu_id]->domain;
- BUG_ON(d == NULL);
-
- v = alloc_vcpu(d, vcpu_id, cpu_id);
- idle_vcpu[cpu_id] = v;
-
- return v;
+ return idle_vcpu[cpu_id] ?: alloc_vcpu(idle_vcpu[0]->domain,
+ cpu_id, cpu_id);
}
static unsigned int extra_dom0_irqs, extra_domU_irqs = 8;
@@ -575,7 +562,7 @@ static void complete_domain_destroy(struct rcu_head *head)
struct vcpu *v;
int i;
- for ( i = MAX_VIRT_CPUS-1; i >= 0; i-- )
+ for ( i = d->max_vcpus - 1; i >= 0; i-- )
{
if ( (v = d->vcpu[i]) == NULL )
continue;
@@ -594,7 +581,7 @@ static void complete_domain_destroy(struct rcu_head *head)
/* Free page used by xen oprofile buffer. */
free_xenoprof_pages(d);
- for ( i = MAX_VIRT_CPUS-1; i >= 0; i-- )
+ for ( i = d->max_vcpus - 1; i >= 0; i-- )
if ( (v = d->vcpu[i]) != NULL )
free_vcpu_struct(v);
@@ -742,12 +729,15 @@ long do_vcpu_op(int cmd, int vcpuid, XEN_GUEST_HANDLE(void) arg)
if ( (vcpuid < 0) || (vcpuid >= MAX_VIRT_CPUS) )
return -EINVAL;
- if ( (v = d->vcpu[vcpuid]) == NULL )
+ if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
return -ENOENT;
switch ( cmd )
{
case VCPUOP_initialise:
+ if ( !v->vcpu_info )
+ return -EINVAL;
+
if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL )
return -ENOMEM;
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index 29e3885de2..47a3a09e3f 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -253,7 +253,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
ret = -EINVAL;
if ( (d == current->domain) || /* no domain_pause() */
- (vcpu >= MAX_VIRT_CPUS) || ((v = d->vcpu[vcpu]) == NULL) )
+ (vcpu >= d->max_vcpus) || ((v = d->vcpu[vcpu]) == NULL) )
goto svc_out;
if ( guest_handle_is_null(op->u.vcpucontext.ctxt) )
@@ -433,7 +433,8 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
ret = -EINVAL;
if ( (d == current->domain) || /* no domain_pause() */
- (max > MAX_VIRT_CPUS) )
+ (max > MAX_VIRT_CPUS) ||
+ (is_hvm_domain(d) && max > XEN_LEGACY_MAX_VCPUS) )
{
rcu_unlock_domain(d);
break;
@@ -446,15 +447,40 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
break;
}
+ /* Until Xenoprof can dynamically grow its vcpu-s array... */
+ if ( d->xenoprof )
+ {
+ rcu_unlock_domain(d);
+ ret = -EAGAIN;
+ break;
+ }
+
/* Needed, for example, to ensure writable p.t. state is synced. */
domain_pause(d);
/* We cannot reduce maximum VCPUs. */
ret = -EINVAL;
- if ( (max != MAX_VIRT_CPUS) && (d->vcpu[max] != NULL) )
+ if ( (max < d->max_vcpus) && (d->vcpu[max] != NULL) )
goto maxvcpu_out;
ret = -ENOMEM;
+ if ( max > d->max_vcpus )
+ {
+ struct vcpu **vcpus = xmalloc_array(struct vcpu *, max);
+ void *ptr;
+
+ if ( !vcpus )
+ goto maxvcpu_out;
+ memcpy(vcpus, d->vcpu, d->max_vcpus * sizeof(*vcpus));
+ memset(vcpus + d->max_vcpus, 0,
+ (max - d->max_vcpus) * sizeof(*vcpus));
+
+ ptr = d->vcpu;
+ d->vcpu = vcpus;
+ wmb();
+ d->max_vcpus = max;
+ xfree(ptr);
+ }
for ( i = 0; i < max; i++ )
{
if ( d->vcpu[i] != NULL )
@@ -505,7 +531,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
goto vcpuaffinity_out;
ret = -EINVAL;
- if ( op->u.vcpuaffinity.vcpu >= MAX_VIRT_CPUS )
+ if ( op->u.vcpuaffinity.vcpu >= d->max_vcpus )
goto vcpuaffinity_out;
ret = -ESRCH;
@@ -599,7 +625,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
goto getvcpucontext_out;
ret = -EINVAL;
- if ( op->u.vcpucontext.vcpu >= MAX_VIRT_CPUS )
+ if ( op->u.vcpucontext.vcpu >= d->max_vcpus )
goto getvcpucontext_out;
ret = -ESRCH;
@@ -661,7 +687,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
goto getvcpuinfo_out;
ret = -EINVAL;
- if ( op->u.getvcpuinfo.vcpu >= MAX_VIRT_CPUS )
+ if ( op->u.getvcpuinfo.vcpu >= d->max_vcpus )
goto getvcpuinfo_out;
ret = -ESRCH;
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index c51fd10031..ccc709111f 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -240,10 +240,13 @@ static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
if ( virq_is_global(virq) && (vcpu != 0) )
return -EINVAL;
- if ( (vcpu < 0) || (vcpu >= ARRAY_SIZE(d->vcpu)) ||
+ if ( (vcpu < 0) || (vcpu >= d->max_vcpus) ||
((v = d->vcpu[vcpu]) == NULL) )
return -ENOENT;
+ if ( unlikely(!v->vcpu_info) )
+ return -EAGAIN;
+
spin_lock(&d->event_lock);
if ( v->virq_to_evtchn[virq] != 0 )
@@ -273,10 +276,13 @@ static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind)
int port, vcpu = bind->vcpu;
long rc = 0;
- if ( (vcpu < 0) || (vcpu >= ARRAY_SIZE(d->vcpu)) ||
+ if ( (vcpu < 0) || (vcpu >= d->max_vcpus) ||
(d->vcpu[vcpu] == NULL) )
return -ENOENT;
+ if ( unlikely(!d->vcpu[vcpu]->vcpu_info) )
+ return -EAGAIN;
+
spin_lock(&d->event_lock);
if ( (port = get_free_port(d)) < 0 )
@@ -555,13 +561,13 @@ static int evtchn_set_pending(struct vcpu *v, int port)
}
/* Check if some VCPU might be polling for this event. */
- if ( likely(bitmap_empty(d->poll_mask, MAX_VIRT_CPUS)) )
+ if ( likely(bitmap_empty(d->poll_mask, d->max_vcpus)) )
return 0;
/* Wake any interested (or potentially interested) pollers. */
- for ( vcpuid = find_first_bit(d->poll_mask, MAX_VIRT_CPUS);
- vcpuid < MAX_VIRT_CPUS;
- vcpuid = find_next_bit(d->poll_mask, MAX_VIRT_CPUS, vcpuid+1) )
+ for ( vcpuid = find_first_bit(d->poll_mask, d->max_vcpus);
+ vcpuid < d->max_vcpus;
+ vcpuid = find_next_bit(d->poll_mask, d->max_vcpus, vcpuid+1) )
{
v = d->vcpu[vcpuid];
if ( ((v->poll_evtchn <= 0) || (v->poll_evtchn == port)) &&
@@ -608,7 +614,7 @@ void send_guest_global_virq(struct domain *d, int virq)
ASSERT(virq_is_global(virq));
- if ( unlikely(d == NULL) )
+ if ( unlikely(d == NULL) || unlikely(d->vcpu == NULL) )
return;
v = d->vcpu[0];
@@ -717,9 +723,12 @@ long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
struct evtchn *chn;
long rc = 0;
- if ( (vcpu_id >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu_id] == NULL) )
+ if ( (vcpu_id >= d->max_vcpus) || (d->vcpu[vcpu_id] == NULL) )
return -ENOENT;
+ if ( unlikely(!d->vcpu[vcpu_id]->vcpu_info) )
+ return -EAGAIN;
+
spin_lock(&d->event_lock);
if ( !port_is_valid(d, port) )
@@ -943,6 +952,9 @@ int alloc_unbound_xen_event_channel(
struct domain *d = local_vcpu->domain;
int port;
+ if ( unlikely(!local_vcpu->vcpu_info) )
+ return -EAGAIN;
+
spin_lock(&d->event_lock);
if ( (port = get_free_port(d)) < 0 )
@@ -1016,6 +1028,14 @@ int evtchn_init(struct domain *d)
if ( get_free_port(d) != 0 )
return -EINVAL;
evtchn_from_port(d, 0)->state = ECS_RESERVED;
+
+#if MAX_VIRT_CPUS > BITS_PER_LONG
+ d->poll_mask = xmalloc_array(unsigned long, BITS_TO_LONGS(MAX_VIRT_CPUS));
+ if ( !d->poll_mask )
+ return -ENOMEM;
+ bitmap_zero(d->poll_mask, MAX_VIRT_CPUS);
+#endif
+
return 0;
}
@@ -1044,6 +1064,11 @@ void evtchn_destroy(struct domain *d)
d->evtchn[i] = NULL;
}
spin_unlock(&d->event_lock);
+
+#if MAX_VIRT_CPUS > BITS_PER_LONG
+ xfree(d->poll_mask);
+ d->poll_mask = NULL;
+#endif
}
static void domain_dump_evtchn_info(struct domain *d)
diff --git a/xen/common/keyhandler.c b/xen/common/keyhandler.c
index a1fecf23b0..7c703770be 100644
--- a/xen/common/keyhandler.c
+++ b/xen/common/keyhandler.c
@@ -209,8 +209,8 @@ static void dump_domains(unsigned char key)
v->vcpu_id, v->processor,
v->is_running ? 'T':'F',
v->pause_flags, v->poll_evtchn,
- vcpu_info(v, evtchn_upcall_pending),
- vcpu_info(v, evtchn_upcall_mask));
+ v->vcpu_info ? vcpu_info(v, evtchn_upcall_pending) : 0,
+ v->vcpu_info ? vcpu_info(v, evtchn_upcall_mask) : 1);
cpuset_print(tmpstr, sizeof(tmpstr), v->vcpu_dirty_cpumask);
printk("dirty_cpus=%s ", tmpstr);
cpuset_print(tmpstr, sizeof(tmpstr), v->cpu_affinity);
@@ -218,6 +218,8 @@ static void dump_domains(unsigned char key)
arch_dump_vcpu_info(v);
periodic_timer_print(tmpstr, sizeof(tmpstr), v->periodic_period);
printk(" %s\n", tmpstr);
+ if ( !v->vcpu_info )
+ continue;
printk(" Notifying guest (virq %d, port %d, stat %d/%d/%d)\n",
VIRQ_DEBUG, v->virq_to_evtchn[VIRQ_DEBUG],
test_bit(v->virq_to_evtchn[VIRQ_DEBUG],
diff --git a/xen/common/sched_sedf.c b/xen/common/sched_sedf.c
index 4bf3c80a89..25ea61584e 100644
--- a/xen/common/sched_sedf.c
+++ b/xen/common/sched_sedf.c
@@ -1376,6 +1376,9 @@ static int sedf_adjust(struct domain *p, struct xen_domctl_scheduler_op *op)
p->domain_id, op->u.sedf.period, op->u.sedf.slice,
op->u.sedf.latency, (op->u.sedf.extratime)?"yes":"no");
+ if ( !p->vcpu )
+ return -EINVAL;
+
if ( op->cmd == XEN_DOMCTL_SCHEDOP_putinfo )
{
/* Check for sane parameters. */
diff --git a/xen/common/xenoprof.c b/xen/common/xenoprof.c
index 0a33613caa..84e3437fa9 100644
--- a/xen/common/xenoprof.c
+++ b/xen/common/xenoprof.c
@@ -120,7 +120,7 @@ static void xenoprof_reset_buf(struct domain *d)
return;
}
- for ( j = 0; j < MAX_VIRT_CPUS; j++ )
+ for ( j = 0; j < d->max_vcpus; j++ )
{
buf = d->xenoprof->vcpu[j].buffer;
if ( buf != NULL )
@@ -201,6 +201,17 @@ static int alloc_xenoprof_struct(
memset(d->xenoprof, 0, sizeof(*d->xenoprof));
+ d->xenoprof->vcpu = xmalloc_array(struct xenoprof_vcpu, d->max_vcpus);
+ if ( d->xenoprof->vcpu == NULL )
+ {
+ xfree(d->xenoprof);
+ d->xenoprof = NULL;
+ printk("alloc_xenoprof_struct(): vcpu array allocation failed\n");
+ return -ENOMEM;
+ }
+
+ memset(d->xenoprof->vcpu, 0, d->max_vcpus * sizeof(*d->xenoprof->vcpu));
+
nvcpu = 0;
for_each_vcpu ( d, v )
nvcpu++;
diff --git a/xen/include/asm-ia64/config.h b/xen/include/asm-ia64/config.h
index 78ba3cb602..0d24feb19d 100644
--- a/xen/include/asm-ia64/config.h
+++ b/xen/include/asm-ia64/config.h
@@ -31,6 +31,7 @@
#else
#define NR_CPUS 64
#endif
+#define MAX_VIRT_CPUS XEN_LEGACY_MAX_VCPUS
#define CONFIG_NUMA
#define CONFIG_ACPI_NUMA
#define NODES_SHIFT 8 /* linux/asm/numnodes.h */
diff --git a/xen/include/asm-x86/config.h b/xen/include/asm-x86/config.h
index 2246145cd8..c10ce2905a 100644
--- a/xen/include/asm-x86/config.h
+++ b/xen/include/asm-x86/config.h
@@ -52,9 +52,13 @@
#define NR_CPUS 32
#endif
-#if defined(__i386__) && (NR_CPUS > 32)
+#ifdef __i386__
+#if NR_CPUS > 32
#error "Maximum of 32 physical processors supported by Xen on x86_32"
#endif
+/* Maximum number of virtual CPUs in multi-processor guests. */
+#define MAX_VIRT_CPUS XEN_LEGACY_MAX_VCPUS
+#endif
#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
# define supervisor_mode_kernel (1)
@@ -203,7 +207,7 @@ extern unsigned int video_mode, video_flags;
/* Slot 260: per-domain mappings. */
#define PERDOMAIN_VIRT_START (PML4_ADDR(260))
#define PERDOMAIN_VIRT_END (PERDOMAIN_VIRT_START + (PERDOMAIN_MBYTES<<20))
-#define PERDOMAIN_MBYTES ((unsigned long)GDT_LDT_MBYTES)
+#define PERDOMAIN_MBYTES (PML4_ENTRY_BYTES >> (20 + PAGETABLE_ORDER))
/* Slot 261: machine-to-phys conversion table (16GB). */
#define RDWR_MPT_VIRT_START (PML4_ADDR(261))
#define RDWR_MPT_VIRT_END (RDWR_MPT_VIRT_START + (16UL<<30))
@@ -242,6 +246,8 @@ extern unsigned int video_mode, video_flags;
#define COMPAT_L2_PAGETABLE_XEN_SLOTS(d) \
(COMPAT_L2_PAGETABLE_LAST_XEN_SLOT - COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(d) + 1)
+#define COMPAT_LEGACY_MAX_VCPUS XEN_LEGACY_MAX_VCPUS
+
#endif
#define PGT_base_page_table PGT_l4_page_table
@@ -347,7 +353,12 @@ extern unsigned long xenheap_phys_end;
/* GDT/LDT shadow mapping area. The first per-domain-mapping sub-area. */
#define GDT_LDT_VCPU_SHIFT 5
#define GDT_LDT_VCPU_VA_SHIFT (GDT_LDT_VCPU_SHIFT + PAGE_SHIFT)
+#ifdef MAX_VIRT_CPUS
#define GDT_LDT_MBYTES (MAX_VIRT_CPUS >> (20-GDT_LDT_VCPU_VA_SHIFT))
+#else
+#define GDT_LDT_MBYTES PERDOMAIN_MBYTES
+#define MAX_VIRT_CPUS (GDT_LDT_MBYTES << (20-GDT_LDT_VCPU_VA_SHIFT))
+#endif
#define GDT_LDT_VIRT_START PERDOMAIN_VIRT_START
#define GDT_LDT_VIRT_END (GDT_LDT_VIRT_START + (GDT_LDT_MBYTES << 20))
diff --git a/xen/include/asm-x86/paging.h b/xen/include/asm-x86/paging.h
index f26c5e83a7..a83772e85f 100644
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -332,7 +332,7 @@ static inline void paging_write_p2m_entry(struct domain *d, unsigned long gfn,
{
struct vcpu *v = current;
if ( v->domain != d )
- v = d->vcpu[0];
+ v = d->vcpu ? d->vcpu[0] : NULL;
if ( likely(v && paging_mode_enabled(d) && v->arch.paging.mode != NULL) )
{
return v->arch.paging.mode->write_p2m_entry(v, gfn, p, table_mfn,
diff --git a/xen/include/asm-x86/shared.h b/xen/include/asm-x86/shared.h
index ceca564d62..d3584c0ac3 100644
--- a/xen/include/asm-x86/shared.h
+++ b/xen/include/asm-x86/shared.h
@@ -26,6 +26,8 @@ static inline void arch_set_##field(struct domain *d, \
#define GET_SET_VCPU(type, field) \
static inline type arch_get_##field(const struct vcpu *v) \
{ \
+ if ( unlikely(!v->vcpu_info) ) \
+ return 0; \
return !has_32bit_shinfo(v->domain) ? \
v->vcpu_info->native.arch.field : \
v->vcpu_info->compat.arch.field; \
@@ -57,7 +59,7 @@ static inline void arch_set_##field(struct domain *d, \
#define GET_SET_VCPU(type, field) \
static inline type arch_get_##field(const struct vcpu *v) \
{ \
- return v->vcpu_info->arch.field; \
+ return v->vcpu_info ? v->vcpu_info->arch.field : 0; \
} \
static inline void arch_set_##field(struct vcpu *v, \
type val) \
diff --git a/xen/include/public/arch-ia64.h b/xen/include/public/arch-ia64.h
index f058df104b..8333a78fdc 100644
--- a/xen/include/public/arch-ia64.h
+++ b/xen/include/public/arch-ia64.h
@@ -66,7 +66,7 @@ typedef unsigned long xen_pfn_t;
/* Maximum number of virtual CPUs in multi-processor guests. */
/* WARNING: before changing this, check that shared_info fits on a page */
-#define MAX_VIRT_CPUS 64
+#define XEN_LEGACY_MAX_VCPUS 64
/* IO ports location for PV. */
#define IO_PORTS_PADDR 0x00000ffffc000000UL
diff --git a/xen/include/public/arch-x86/xen.h b/xen/include/public/arch-x86/xen.h
index 5f7579aab1..b08ac7a898 100644
--- a/xen/include/public/arch-x86/xen.h
+++ b/xen/include/public/arch-x86/xen.h
@@ -73,8 +73,8 @@ typedef unsigned long xen_pfn_t;
#define FIRST_RESERVED_GDT_BYTE (FIRST_RESERVED_GDT_PAGE * 4096)
#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
-/* Maximum number of virtual CPUs in multi-processor guests. */
-#define MAX_VIRT_CPUS 32
+/* Maximum number of virtual CPUs in legacy multi-processor guests. */
+#define XEN_LEGACY_MAX_VCPUS 32
#ifndef __ASSEMBLY__
diff --git a/xen/include/public/xen.h b/xen/include/public/xen.h
index 72aa667430..af355ed678 100644
--- a/xen/include/public/xen.h
+++ b/xen/include/public/xen.h
@@ -458,7 +458,7 @@ typedef struct vcpu_info vcpu_info_t;
* of this structure remaining constant.
*/
struct shared_info {
- struct vcpu_info vcpu_info[MAX_VIRT_CPUS];
+ struct vcpu_info vcpu_info[XEN_LEGACY_MAX_VCPUS];
/*
* A domain can create "event channels" on which it can send and receive
diff --git a/xen/include/xen/domain.h b/xen/include/xen/domain.h
index 282e5665d0..ff7a0ce4d6 100644
--- a/xen/include/xen/domain.h
+++ b/xen/include/xen/domain.h
@@ -14,6 +14,7 @@ struct vcpu *alloc_vcpu(
int boot_vcpu(
struct domain *d, int vcpuid, vcpu_guest_context_u ctxt);
struct vcpu *alloc_idle_vcpu(unsigned int cpu_id);
+struct vcpu *alloc_dom0_vcpu0(void);
void vcpu_reset(struct vcpu *v);
struct xen_domctl_getdomaininfo;
diff --git a/xen/include/xen/numa.h b/xen/include/xen/numa.h
index 80aa3586be..2be38349b5 100644
--- a/xen/include/xen/numa.h
+++ b/xen/include/xen/numa.h
@@ -15,6 +15,7 @@
#define vcpu_to_node(v) (cpu_to_node((v)->processor))
#define domain_to_node(d) \
- (((d)->vcpu[0] != NULL) ? vcpu_to_node((d)->vcpu[0]) : NUMA_NO_NODE)
+ (((d)->vcpu != NULL && (d)->vcpu[0] != NULL) \
+ ? vcpu_to_node((d)->vcpu[0]) : NUMA_NO_NODE)
#endif /* _XEN_NUMA_H */
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 953d3f06c0..be15f3307f 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -180,6 +180,8 @@ struct domain
unsigned int max_pages; /* maximum value for tot_pages */
unsigned int xenheap_pages; /* # pages allocated from Xen heap */
+ unsigned int max_vcpus;
+
/* Scheduling. */
void *sched_priv; /* scheduler-specific data */
@@ -226,7 +228,11 @@ struct domain
bool_t is_pinned;
/* Are any VCPUs polling event channels (SCHEDOP_poll)? */
+#if MAX_VIRT_CPUS <= BITS_PER_LONG
DECLARE_BITMAP(poll_mask, MAX_VIRT_CPUS);
+#else
+ unsigned long *poll_mask;
+#endif
/* Guest has shut down (inc. reason code)? */
spinlock_t shutdown_lock;
@@ -244,7 +250,7 @@ struct domain
atomic_t refcnt;
- struct vcpu *vcpu[MAX_VIRT_CPUS];
+ struct vcpu **vcpu;
/* Bitmask of CPUs which are holding onto this domain's state. */
cpumask_t domain_dirty_cpumask;
@@ -497,7 +503,7 @@ extern struct domain *domain_list;
(_d) = rcu_dereference((_d)->next_in_list )) \
#define for_each_vcpu(_d,_v) \
- for ( (_v) = (_d)->vcpu[0]; \
+ for ( (_v) = (_d)->vcpu ? (_d)->vcpu[0] : NULL; \
(_v) != NULL; \
(_v) = (_v)->next_in_list )
diff --git a/xen/include/xen/shared.h b/xen/include/xen/shared.h
index 9738a49621..3bbf133294 100644
--- a/xen/include/xen/shared.h
+++ b/xen/include/xen/shared.h
@@ -21,8 +21,6 @@ typedef union {
(*(!has_32bit_shinfo(d) ? \
(typeof(&(s)->compat.field))&(s)->native.field : \
(typeof(&(s)->compat.field))&(s)->compat.field))
-#define shared_info(d, field) \
- __shared_info(d, (d)->shared_info, field)
typedef union {
struct vcpu_info native;
@@ -30,19 +28,22 @@ typedef union {
} vcpu_info_t;
/* As above, cast to compat field type. */
-#define vcpu_info(v, field) \
- (*(!has_32bit_shinfo((v)->domain) ? \
- (typeof(&(v)->vcpu_info->compat.field))&(v)->vcpu_info->native.field : \
- (typeof(&(v)->vcpu_info->compat.field))&(v)->vcpu_info->compat.field))
+#define __vcpu_info(v, i, field) \
+ (*(!has_32bit_shinfo((v)->domain) ? \
+ (typeof(&(i)->compat.field))&(i)->native.field : \
+ (typeof(&(i)->compat.field))&(i)->compat.field))
#else
typedef struct shared_info shared_info_t;
-#define shared_info(d, field) ((d)->shared_info->field)
+#define __shared_info(d, s, field) ((s)->field)
typedef struct vcpu_info vcpu_info_t;
-#define vcpu_info(v, field) ((v)->vcpu_info->field)
+#define __vcpu_info(v, i, field) ((i)->field)
#endif
+#define shared_info(d, field) __shared_info(d, (d)->shared_info, field)
+#define vcpu_info(v, field) __vcpu_info(v, (v)->vcpu_info, field)
+
#endif /* __XEN_SHARED_H__ */
diff --git a/xen/include/xen/xenoprof.h b/xen/include/xen/xenoprof.h
index 525dbdedfa..40ab1cc386 100644
--- a/xen/include/xen/xenoprof.h
+++ b/xen/include/xen/xenoprof.h
@@ -50,7 +50,7 @@ struct xenoprof {
#ifdef CONFIG_COMPAT
int is_compat;
#endif
- struct xenoprof_vcpu vcpu [MAX_VIRT_CPUS];
+ struct xenoprof_vcpu *vcpu;
};
#ifndef CONFIG_COMPAT