author     kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>  2005-05-12 13:58:47 +0000
committer  kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>  2005-05-12 13:58:47 +0000
commit     3b2bf5dfb1110ffb1a5fb7a3d02b66315f9c552d (patch)
tree       74b075e518f21996d1c2583b1d885da694fc9900
parent     a6ba03510e86c088ef940c521781223c8dc2fc0d (diff)
bitkeeper revision 1.1389.20.1 (42836117f2yEkILParltXJcom3kTeA)

More descriptive 'flags' and 'id' field names for the exec_domain/domain
structures seem a good idea after all. At the same time, I've renamed the
flag macros to be a bit neater and more descriptive, and more in keeping
with the style of such definitions in asm/mm.h, for example.

Signed-off-by: Keir Fraser <keir@xensource.com>
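In the hunks that follow, the per-domain fields are renamed from d->id / d->flags
to d->domain_id / d->domain_flags, the per-vcpu fields from ed->id / ed->flags to
ed->vcpu_id / ed->vcpu_flags, and the old DF_* / EDF_* flag constants become
_DOMF_* / _VCPUF_* bit indices with matching DOMF_* / VCPUF_* masks. Below is a
minimal sketch of the new naming pattern, with hypothetical bit positions; the
actual definitions live in xen/include/xen/sched.h, which this patch also touches
but whose hunks are not shown in this excerpt.

/*
 * Illustrative sketch only -- not copied from the patch.  The bit
 * positions below are hypothetical; the real definitions added by this
 * change live in xen/include/xen/sched.h (see the diffstat above).  The
 * pattern -- an underscore-prefixed bit index paired with a derived
 * mask -- is inferred from how the hunks below use the new names.
 */

/* Per-domain flags, stored in d->domain_flags. */
#define _DOMF_constructed   0                          /* guest image built      */
#define DOMF_constructed    (1UL << _DOMF_constructed)
#define _DOMF_idle_domain   1                          /* this is an idle domain */
#define DOMF_idle_domain    (1UL << _DOMF_idle_domain)
#define _DOMF_dying         2                          /* teardown in progress   */
#define DOMF_dying          (1UL << _DOMF_dying)

/* Per-vcpu flags, stored in ed->vcpu_flags, follow the same pattern. */
#define _VCPUF_running      0
#define VCPUF_running       (1UL << _VCPUF_running)
#define _VCPUF_blocked      1
#define VCPUF_blocked       (1UL << _VCPUF_blocked)

/*
 * Usage, as seen throughout the hunks below: the bit index feeds the
 * atomic bitops, while the mask form is used in plain tests and in
 * static initialisers.
 *
 *     set_bit(_DOMF_constructed, &d->domain_flags);
 *     if ( ed->vcpu_flags & VCPUF_running ) { ... }
 *     domain_flags: DOMF_idle_domain,     // cf. idle0_task.c
 */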
-rw-r--r--  xen/arch/ia64/domain.c  |  18
-rw-r--r--  xen/arch/ia64/idle0_task.c  |  4
-rw-r--r--  xen/arch/ia64/patch/linux-2.6.7/init_task.h  |  4
-rw-r--r--  xen/arch/ia64/process.c  |  2
-rw-r--r--  xen/arch/ia64/xenmisc.c  |  18
-rw-r--r--  xen/arch/ia64/xensetup.c  |  2
-rw-r--r--  xen/arch/x86/audit.c  |  58
-rw-r--r--  xen/arch/x86/dom0_ops.c  |  2
-rw-r--r--  xen/arch/x86/domain.c  |  20
-rw-r--r--  xen/arch/x86/domain_build.c  |  6
-rw-r--r--  xen/arch/x86/i387.c  |  6
-rw-r--r--  xen/arch/x86/idle0_task.c  |  4
-rw-r--r--  xen/arch/x86/mm.c  |  28
-rw-r--r--  xen/arch/x86/physdev.c  |  2
-rw-r--r--  xen/arch/x86/setup.c  |  2
-rw-r--r--  xen/arch/x86/shadow.c  |  21
-rw-r--r--  xen/arch/x86/smpboot.c  |  2
-rw-r--r--  xen/arch/x86/traps.c  |  16
-rw-r--r--  xen/arch/x86/vmx.c  |  8
-rw-r--r--  xen/arch/x86/vmx_vmcs.c  |  2
-rw-r--r--  xen/common/dom0_ops.c  |  30
-rw-r--r--  xen/common/dom_mem_ops.c  |  6
-rw-r--r--  xen/common/domain.c  |  38
-rw-r--r--  xen/common/event_channel.c  |  14
-rw-r--r--  xen/common/grant_table.c  |  30
-rw-r--r--  xen/common/keyhandler.c  |  8
-rw-r--r--  xen/common/page_alloc.c  |  8
-rw-r--r--  xen/common/sched_bvt.c  |  21
-rw-r--r--  xen/common/sched_sedf.c  |  84
-rw-r--r--  xen/common/schedule.c  |  44
-rw-r--r--  xen/drivers/char/console.c  |  2
-rw-r--r--  xen/include/asm-x86/config.h  |  4
-rw-r--r--  xen/include/asm-x86/debugger.h  |  4
-rw-r--r--  xen/include/asm-x86/i387.h  |  6
-rw-r--r--  xen/include/asm-x86/shadow.h  |  15
-rw-r--r--  xen/include/xen/event.h  |  6
-rw-r--r--  xen/include/xen/sched.h  |  101
37 files changed, 345 insertions, 301 deletions
diff --git a/xen/arch/ia64/domain.c b/xen/arch/ia64/domain.c
index eb342e7021..23d95987c9 100644
--- a/xen/arch/ia64/domain.c
+++ b/xen/arch/ia64/domain.c
@@ -626,7 +626,7 @@ int construct_dom0(struct domain *d,
#ifndef CLONE_DOMAIN0
if ( d != dom0 )
BUG();
- if ( test_bit(DF_CONSTRUCTED, &d->flags) )
+ if ( test_bit(_DOMF_constructed, &d->domain_flags) )
BUG();
#endif
@@ -753,7 +753,7 @@ int construct_dom0(struct domain *d,
#endif
console_endboot(strstr(cmdline, "tty0") != NULL);
- set_bit(DF_CONSTRUCTED, &d->flags);
+ set_bit(_DOMF_constructed, &d->domain_flags);
new_thread(ed, pkern_entry, 0, 0);
// FIXME: Hack for keyboard input
@@ -783,10 +783,10 @@ int construct_domU(struct domain *d,
unsigned long pkern_entry;
#ifndef DOMU_AUTO_RESTART
- if ( test_bit(DF_CONSTRUCTED, &d->flags) ) BUG();
+ if ( test_bit(_DOMF_constructed, &d->domain_flags) ) BUG();
#endif
- printk("*** LOADING DOMAIN %d ***\n",d->id);
+ printk("*** LOADING DOMAIN %d ***\n",d->domain_id);
d->max_pages = dom0_size/PAGE_SIZE; // FIXME: use dom0 size
// FIXME: use domain0 command line
@@ -796,13 +796,13 @@ int construct_domU(struct domain *d,
d->arch.mm = xmalloc(struct mm_struct);
if (unlikely(!d->arch.mm)) {
- printk("Can't allocate mm_struct for domain %d\n",d->id);
+ printk("Can't allocate mm_struct for domain %d\n",d->domain_id);
return -ENOMEM;
}
memset(d->arch.mm, 0, sizeof(*d->arch.mm));
d->arch.mm->pgd = pgd_alloc(d->arch.mm);
if (unlikely(!d->arch.mm->pgd)) {
- printk("Can't allocate pgd for domain %d\n",d->id);
+ printk("Can't allocate pgd for domain %d\n",d->domain_id);
return -ENOMEM;
}
@@ -816,7 +816,7 @@ int construct_domU(struct domain *d,
loaddomainelfimage(d,image_start);
printk("loaddomainelfimage returns\n");
- set_bit(DF_CONSTRUCTED, &d->flags);
+ set_bit(_DOMF_constructed, &d->domain_flags);
printk("calling new_thread, entry=%p\n",pkern_entry);
#ifdef DOMU_AUTO_RESTART
@@ -836,7 +836,7 @@ void reconstruct_domU(struct exec_domain *ed)
{
/* re-copy the OS image to reset data values to original */
printk("reconstruct_domU: restarting domain %d...\n",
- ed->domain->id);
+ ed->domain->domain_id);
loaddomainelfimage(ed->domain,ed->domain->arch.image_start);
new_thread(ed, ed->domain->arch.entry, 0, 0);
}
@@ -859,7 +859,7 @@ int launch_domainU(unsigned long size)
else next++;
if (construct_domU(d, (unsigned long)domU_staging_area, size,0,0,0)) {
printf("launch_domainU: couldn't construct(id=%d,%lx,%lx)\n",
- d->id,domU_staging_area,size);
+ d->domain_id,domU_staging_area,size);
return 2;
}
domain_unpause_by_systemcontroller(d);
diff --git a/xen/arch/ia64/idle0_task.c b/xen/arch/ia64/idle0_task.c
index 4bc636ae6e..8aa41a131c 100644
--- a/xen/arch/ia64/idle0_task.c
+++ b/xen/arch/ia64/idle0_task.c
@@ -21,8 +21,8 @@
#define IDLE0_DOMAIN(_t) \
{ \
- id: IDLE_DOMAIN_ID, \
- flags: 1<<DF_IDLETASK, \
+ domain_id: IDLE_DOMAIN_ID, \
+ domain_flags:DOMF_idle_domain, \
refcnt: ATOMIC_INIT(1) \
}
diff --git a/xen/arch/ia64/patch/linux-2.6.7/init_task.h b/xen/arch/ia64/patch/linux-2.6.7/init_task.h
index 5ae96cb907..e1092f416a 100644
--- a/xen/arch/ia64/patch/linux-2.6.7/init_task.h
+++ b/xen/arch/ia64/patch/linux-2.6.7/init_task.h
@@ -35,8 +35,8 @@
+#define INIT_TASK(tsk) \
+{ \
+ /*processor: 0,*/ \
-+ /*id: IDLE_DOMAIN_ID,*/ \
-+ /*flags: 1<<DF_IDLETASK,*/ \
++ /*domain_id: IDLE_DOMAIN_ID,*/ \
++ /*domain_flags: DOMF_idle_domain,*/ \
+ refcnt: ATOMIC_INIT(1) \
+}
+#else
diff --git a/xen/arch/ia64/process.c b/xen/arch/ia64/process.c
index dd9e58071f..328467f655 100644
--- a/xen/arch/ia64/process.c
+++ b/xen/arch/ia64/process.c
@@ -214,7 +214,7 @@ void deliver_pending_interrupt(struct pt_regs *regs)
if (vcpu_deliverable_interrupts(ed)) {
unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
if (vcpu_timer_pending_early(ed))
-printf("*#*#*#* about to deliver early timer to domain %d!!!\n",ed->domain->id);
+printf("*#*#*#* about to deliver early timer to domain %d!!!\n",ed->domain->domain_id);
reflect_interruption(0,isr,0,regs,IA64_EXTINT_VECTOR);
}
}
diff --git a/xen/arch/ia64/xenmisc.c b/xen/arch/ia64/xenmisc.c
index 2f5562c46a..5e03a07493 100644
--- a/xen/arch/ia64/xenmisc.c
+++ b/xen/arch/ia64/xenmisc.c
@@ -234,21 +234,21 @@ void context_switch(struct exec_domain *prev, struct exec_domain *next)
{
//printk("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
//printk("@@@@@@ context switch from domain %d (%x) to domain %d (%x)\n",
-//prev->domain->id,(long)prev&0xffffff,next->domain->id,(long)next&0xffffff);
-//if (prev->domain->id == 1 && next->domain->id == 0) cs10foo();
-//if (prev->domain->id == 0 && next->domain->id == 1) cs01foo();
-//printk("@@sw %d->%d\n",prev->domain->id,next->domain->id);
+//prev->domain->domain_id,(long)prev&0xffffff,next->domain->domain_id,(long)next&0xffffff);
+//if (prev->domain->domain_id == 1 && next->domain->domain_id == 0) cs10foo();
+//if (prev->domain->domain_id == 0 && next->domain->domain_id == 1) cs01foo();
+//printk("@@sw %d->%d\n",prev->domain->domain_id,next->domain->domain_id);
switch_to(prev,next,prev);
// leave this debug for now: it acts as a heartbeat when more than
// one domain is active
{
static long cnt[16] = { 50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50};
static int i = 100;
-int id = ((struct exec_domain *)current)->domain->id & 0xf;
+int id = ((struct exec_domain *)current)->domain->domain_id & 0xf;
if (!cnt[id]--) { printk("%x",id); cnt[id] = 50; }
if (!i--) { printk("+",id); cnt[id] = 100; }
}
- clear_bit(EDF_RUNNING, &prev->flags);
+ clear_bit(_VCPUF_running, &prev->vcpu_flags);
//if (!is_idle_task(next->domain) )
//send_guest_virq(next, VIRQ_TIMER);
load_region_regs(current);
@@ -271,15 +271,15 @@ void panic_domain(struct pt_regs *regs, const char *fmt, ...)
loop:
printf("$$$$$ PANIC in domain %d (k6=%p): ",
- ed->domain->id, ia64_get_kr(IA64_KR_CURRENT));
+ ed->domain->domain_id, ia64_get_kr(IA64_KR_CURRENT));
va_start(args, fmt);
(void)vsnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
printf(buf);
if (regs) show_registers(regs);
domain_pause_by_systemcontroller(current->domain);
- set_bit(DF_CRASHED, ed->domain->flags);
- if (ed->domain->id == 0) {
+ set_bit(_DOMF_crashed, ed->domain->domain_flags);
+ if (ed->domain->domain_id == 0) {
int i = 1000000000L;
// if domain0 crashes, just periodically print out panic
// message to make post-mortem easier
diff --git a/xen/arch/ia64/xensetup.c b/xen/arch/ia64/xensetup.c
index 9de0a0eed5..c2ba2efac4 100644
--- a/xen/arch/ia64/xensetup.c
+++ b/xen/arch/ia64/xensetup.c
@@ -262,7 +262,7 @@ printk("About to call init_idle_task()\n");
if ( dom0 == NULL )
panic("Error creating domain 0\n");
- set_bit(DF_PRIVILEGED, &dom0->flags);
+ set_bit(_DOMF_privileged, &dom0->domain_flags);
/*
* We're going to setup domain0 using the module(s) that we stashed safely
diff --git a/xen/arch/x86/audit.c b/xen/arch/x86/audit.c
index b7e874c62d..c15c3de31e 100644
--- a/xen/arch/x86/audit.c
+++ b/xen/arch/x86/audit.c
@@ -74,7 +74,7 @@ int audit_adjust_pgtables(struct domain *d, int dir, int noisy)
{
APRINTK("Audit %d: type count went below zero "
"mfn=%lx t=%x ot=%x",
- d->id, page_to_pfn(page),
+ d->domain_id, page_to_pfn(page),
page->u.inuse.type_info,
page->tlbflush_timestamp);
errors++;
@@ -83,7 +83,7 @@ int audit_adjust_pgtables(struct domain *d, int dir, int noisy)
{
APRINTK("Audit %d: type count overflowed "
"mfn=%lx t=%x ot=%x",
- d->id, page_to_pfn(page),
+ d->domain_id, page_to_pfn(page),
page->u.inuse.type_info,
page->tlbflush_timestamp);
errors++;
@@ -102,7 +102,7 @@ int audit_adjust_pgtables(struct domain *d, int dir, int noisy)
{
APRINTK("Audit %d: general count went below zero "
"mfn=%lx t=%x ot=%x",
- d->id, page_to_pfn(page),
+ d->domain_id, page_to_pfn(page),
page->u.inuse.type_info,
page->tlbflush_timestamp);
errors++;
@@ -111,7 +111,7 @@ int audit_adjust_pgtables(struct domain *d, int dir, int noisy)
{
APRINTK("Audit %d: general count overflowed "
"mfn=%lx t=%x ot=%x",
- d->id, page_to_pfn(page),
+ d->domain_id, page_to_pfn(page),
page->u.inuse.type_info,
page->tlbflush_timestamp);
errors++;
@@ -142,7 +142,7 @@ int audit_adjust_pgtables(struct domain *d, int dir, int noisy)
"belonging to a domain %p (id=%d)\n",
l1mfn,
page_get_owner(l1page),
- page_get_owner(l1page)->id);
+ page_get_owner(l1page)->domain_id);
errors++;
continue;
}
@@ -153,7 +153,7 @@ int audit_adjust_pgtables(struct domain *d, int dir, int noisy)
{
printk("Audit %d: [Shadow L2 mfn=%lx i=%x] "
"Expected Shadow L1 t=%x mfn=%lx\n",
- d->id, mfn, i,
+ d->domain_id, mfn, i,
l1page->u.inuse.type_info, l1mfn);
errors++;
}
@@ -167,7 +167,7 @@ int audit_adjust_pgtables(struct domain *d, int dir, int noisy)
l1mfn,
page_get_owner(l1page),
(page_get_owner(l1page)
- ? page_get_owner(l1page)->id
+ ? page_get_owner(l1page)->domain_id
: -1));
errors++;
continue;
@@ -179,14 +179,14 @@ int audit_adjust_pgtables(struct domain *d, int dir, int noisy)
{
printk("Audit %d: [%x] Found %s Linear PT "
"t=%x mfn=%lx\n",
- d->id, i, (l1mfn==mfn) ? "Self" : "Other",
+ d->domain_id, i, (l1mfn==mfn) ? "Self" : "Other",
l1page->u.inuse.type_info, l1mfn);
}
else if ( page_type != PGT_l1_page_table )
{
printk("Audit %d: [L2 mfn=%lx i=%x] "
"Expected L1 t=%x mfn=%lx\n",
- d->id, mfn, i,
+ d->domain_id, mfn, i,
l1page->u.inuse.type_info, l1mfn);
errors++;
}
@@ -238,9 +238,9 @@ int audit_adjust_pgtables(struct domain *d, int dir, int noisy)
{
printk("Audit %d: [hl2mfn=%lx,i=%x] Skip foreign page "
"dom=%p (id=%d) mfn=%lx c=%08x t=%08x\n",
- d->id, hl2mfn, i,
+ d->domain_id, hl2mfn, i,
page_get_owner(gpage),
- page_get_owner(gpage)->id,
+ page_get_owner(gpage)->domain_id,
gmfn,
gpage->count_info,
gpage->u.inuse.type_info);
@@ -289,7 +289,7 @@ int audit_adjust_pgtables(struct domain *d, int dir, int noisy)
{
printk("Audit %d: [l1mfn=%lx, i=%x] Illegal RW "
"t=%x mfn=%lx\n",
- d->id, l1mfn, i,
+ d->domain_id, l1mfn, i,
gpage->u.inuse.type_info, gmfn);
errors++;
}
@@ -300,7 +300,7 @@ int audit_adjust_pgtables(struct domain *d, int dir, int noisy)
{
printk("Audit %d: [l1mfn=%lx, i=%x] Illegal RW of "
"page table gmfn=%lx\n",
- d->id, l1mfn, i, gmfn);
+ d->domain_id, l1mfn, i, gmfn);
errors++;
}
}
@@ -309,9 +309,9 @@ int audit_adjust_pgtables(struct domain *d, int dir, int noisy)
{
printk("Audit %d: [l1mfn=%lx,i=%x] Skip foreign page "
"dom=%p (id=%d) mfn=%lx c=%08x t=%08x\n",
- d->id, l1mfn, i,
+ d->domain_id, l1mfn, i,
page_get_owner(gpage),
- page_get_owner(gpage)->id,
+ page_get_owner(gpage)->domain_id,
gmfn,
gpage->count_info,
gpage->u.inuse.type_info);
@@ -455,7 +455,7 @@ int audit_adjust_pgtables(struct domain *d, int dir, int noisy)
{
printk("Audit %d: found an L2 guest page "
"mfn=%lx t=%08x c=%08x while in shadow mode\n",
- d->id, mfn, page->u.inuse.type_info,
+ d->domain_id, mfn, page->u.inuse.type_info,
page->count_info);
errors++;
}
@@ -466,14 +466,14 @@ int audit_adjust_pgtables(struct domain *d, int dir, int noisy)
PGT_validated )
{
printk("Audit %d: L2 mfn=%lx not validated %08x\n",
- d->id, mfn, page->u.inuse.type_info);
+ d->domain_id, mfn, page->u.inuse.type_info);
errors++;
}
if ( (page->u.inuse.type_info & PGT_pinned) != PGT_pinned )
{
printk("Audit %d: L2 mfn=%lx not pinned t=%08x\n",
- d->id, mfn, page->u.inuse.type_info);
+ d->domain_id, mfn, page->u.inuse.type_info);
errors++;
}
}
@@ -506,7 +506,7 @@ int audit_adjust_pgtables(struct domain *d, int dir, int noisy)
PGT_validated )
{
printk("Audit %d: L1 not validated mfn=%lx t=%08x\n",
- d->id, mfn, page->u.inuse.type_info);
+ d->domain_id, mfn, page->u.inuse.type_info);
errors++;
}
@@ -515,7 +515,7 @@ int audit_adjust_pgtables(struct domain *d, int dir, int noisy)
if ( !VM_ASSIST(d, VMASST_TYPE_writable_pagetables) )
{
printk("Audit %d: L1 mfn=%lx not pinned t=%08x\n",
- d->id, mfn, page->u.inuse.type_info);
+ d->domain_id, mfn, page->u.inuse.type_info);
}
}
}
@@ -601,7 +601,7 @@ void audit_pagelist(struct domain *d)
if ( xenpages != d->xenheap_pages ||
totpages != d->tot_pages )
{
- printk("ARGH! dom %d: xen=%d %d, pages=%d %d\n", d->id,
+ printk("ARGH! dom %d: xen=%d %d, pages=%d %d\n", d->domain_id,
xenpages, d->xenheap_pages,
totpages, d->tot_pages );
}
@@ -623,7 +623,7 @@ void _audit_domain(struct domain *d, int flags)
if ( (pt[i] & _PAGE_PRESENT) && ((pt[i] >> PAGE_SHIFT) == xmfn) )
printk(" found dom=%d mfn=%lx t=%08x c=%08x "
"pt[i=%x]=%lx\n",
- d->id, mfn, page->u.inuse.type_info,
+ d->domain_id, mfn, page->u.inuse.type_info,
page->count_info, i, pt[i]);
}
@@ -721,7 +721,7 @@ void _audit_domain(struct domain *d, int flags)
{
printk("skipping audit domain of translated domain %d "
"from other context\n",
- d->id);
+ d->domain_id);
return;
}
@@ -832,7 +832,7 @@ void _audit_domain(struct domain *d, int flags)
if ( !(flags & AUDIT_QUIET) &&
((io_mappings > 0) || (lowmem_mappings > 0)) )
printk("Audit %d: Found %d lowmem mappings and %d io mappings\n",
- d->id, lowmem_mappings, io_mappings);
+ d->domain_id, lowmem_mappings, io_mappings);
/* PHASE 2 */
@@ -849,7 +849,7 @@ void _audit_domain(struct domain *d, int flags)
if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
{
printk("Audit %d: type count!=0 t=%x ot=%x c=%x mfn=%lx\n",
- d->id, page->u.inuse.type_info,
+ d->domain_id, page->u.inuse.type_info,
page->tlbflush_timestamp,
page->count_info, mfn);
errors++;
@@ -863,7 +863,7 @@ void _audit_domain(struct domain *d, int flags)
if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
{
printk("Audit %d: type count!=0 t=%x ot=%x c=%x mfn=%lx\n",
- d->id, page->u.inuse.type_info,
+ d->domain_id, page->u.inuse.type_info,
page->tlbflush_timestamp,
page->count_info, mfn);
//errors++;
@@ -876,7 +876,7 @@ void _audit_domain(struct domain *d, int flags)
if ( (page->count_info & PGC_count_mask) != 1 )
{
printk("Audit %d: gen count!=1 (c=%x) t=%x ot=%x mfn=%lx\n",
- d->id,
+ d->domain_id,
page->count_info,
page->u.inuse.type_info,
page->tlbflush_timestamp, mfn );
@@ -912,7 +912,7 @@ void _audit_domain(struct domain *d, int flags)
{
printk("Audit %d: shadow page counts wrong "
"mfn=%lx t=%08x c=%08x\n",
- d->id, page_to_pfn(page),
+ d->domain_id, page_to_pfn(page),
page->u.inuse.type_info,
page->count_info);
printk("a->gpfn_and_flags=%p\n",
@@ -950,7 +950,7 @@ void _audit_domain(struct domain *d, int flags)
if ( !(flags & AUDIT_QUIET) )
printk("Audit dom%d Done. "
"pages=%d oos=%d l1=%d l2=%d ctot=%d ttot=%d\n",
- d->id, page_count, oos_count, l1, l2, ctot, ttot);
+ d->domain_id, page_count, oos_count, l1, l2, ctot, ttot);
if ( !(flags & AUDIT_SHADOW_ALREADY_LOCKED) )
shadow_unlock(d);
diff --git a/xen/arch/x86/dom0_ops.c b/xen/arch/x86/dom0_ops.c
index a3251368ce..171cb3b8a6 100644
--- a/xen/arch/x86/dom0_ops.c
+++ b/xen/arch/x86/dom0_ops.c
@@ -397,7 +397,7 @@ void arch_getdomaininfo_ctxt(
#endif
c->flags = 0;
- if ( test_bit(EDF_DONEFPUINIT, &ed->flags) )
+ if ( test_bit(_VCPUF_fpu_initialised, &ed->vcpu_flags) )
c->flags |= VGCF_I387_VALID;
if ( KERNEL_MODE(ed, &ed->arch.guest_context.user_regs) )
c->flags |= VGCF_IN_KERNEL;
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 298d65d4aa..b2595ceac4 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -100,7 +100,7 @@ void startup_cpu_idle_loop(void)
struct exec_domain *ed = current;
/* Just some sanity to ensure that the scheduler is set up okay. */
- ASSERT(ed->domain->id == IDLE_DOMAIN_ID);
+ ASSERT(ed->domain->domain_id == IDLE_DOMAIN_ID);
percpu_ctxt[smp_processor_id()].curr_ed = ed;
set_bit(smp_processor_id(), &ed->domain->cpuset);
domain_unpause_by_systemcontroller(ed->domain);
@@ -246,13 +246,13 @@ void arch_do_createdomain(struct exec_domain *ed)
ed->arch.flags = TF_kernel_mode;
- if ( d->id != IDLE_DOMAIN_ID )
+ if ( d->domain_id != IDLE_DOMAIN_ID )
{
ed->arch.schedule_tail = continue_nonidle_task;
d->shared_info = (void *)alloc_xenheap_page();
memset(d->shared_info, 0, PAGE_SIZE);
- ed->vcpu_info = &d->shared_info->vcpu_data[ed->id];
+ ed->vcpu_info = &d->shared_info->vcpu_data[ed->vcpu_id];
ed->cpumap = CPUMAP_RUNANYWHERE;
SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
machine_to_phys_mapping[virt_to_phys(d->shared_info) >>
@@ -295,7 +295,7 @@ void arch_do_boot_vcpu(struct exec_domain *ed)
struct domain *d = ed->domain;
ed->arch.schedule_tail = d->exec_domain[0]->arch.schedule_tail;
ed->arch.perdomain_ptes =
- d->arch.mm_perdomain_pt + (ed->id << PDPT_VCPU_SHIFT);
+ d->arch.mm_perdomain_pt + (ed->vcpu_id << PDPT_VCPU_SHIFT);
ed->arch.flags = TF_kernel_mode;
}
@@ -399,9 +399,9 @@ int arch_set_info_guest(
return -EINVAL;
}
- clear_bit(EDF_DONEFPUINIT, &ed->flags);
+ clear_bit(_VCPUF_fpu_initialised, &ed->vcpu_flags);
if ( c->flags & VGCF_I387_VALID )
- set_bit(EDF_DONEFPUINIT, &ed->flags);
+ set_bit(_VCPUF_fpu_initialised, &ed->vcpu_flags);
ed->arch.flags &= ~TF_kernel_mode;
if ( c->flags & VGCF_IN_KERNEL )
@@ -419,7 +419,7 @@ int arch_set_info_guest(
ed->arch.guest_context.user_regs.eflags |= EF_IE;
}
- if ( test_bit(EDF_DONEINIT, &ed->flags) )
+ if ( test_bit(_VCPUF_initialised, &ed->vcpu_flags) )
return 0;
if ( (rc = (int)set_fast_trap(ed, c->fast_trap_idx)) != 0 )
@@ -430,7 +430,7 @@ int arch_set_info_guest(
for ( i = 0; i < 8; i++ )
(void)set_debugreg(ed, i, c->debugreg[i]);
- if ( ed->id == 0 )
+ if ( ed->vcpu_id == 0 )
d->vm_assist = c->vm_assist;
phys_basetab = c->pt_base;
@@ -482,7 +482,7 @@ int arch_set_info_guest(
update_pagetables(ed);
/* Don't redo final setup */
- set_bit(EDF_DONEINIT, &ed->flags);
+ set_bit(_VCPUF_initialised, &ed->vcpu_flags);
return 0;
}
@@ -800,7 +800,7 @@ void context_switch(struct exec_domain *prev, struct exec_domain *next)
* 'prev' (after this point, a dying domain's info structure may be freed
* without warning).
*/
- clear_bit(EDF_RUNNING, &prev->flags);
+ clear_bit(_VCPUF_running, &prev->vcpu_flags);
schedule_tail(next);
BUG();
diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
index 6d0b12f03b..560cea0a99 100644
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -122,9 +122,9 @@ int construct_dom0(struct domain *d,
extern void translate_l2pgtable(struct domain *d, l1_pgentry_t *p2m, unsigned long l2mfn);
/* Sanity! */
- if ( d->id != 0 )
+ if ( d->domain_id != 0 )
BUG();
- if ( test_bit(DF_CONSTRUCTED, &d->flags) )
+ if ( test_bit(_DOMF_constructed, &d->domain_flags) )
BUG();
memset(&dsi, 0, sizeof(struct domain_setup_info));
@@ -550,7 +550,7 @@ int construct_dom0(struct domain *d,
/* DOM0 gets access to everything. */
physdev_init_dom0(d);
- set_bit(DF_CONSTRUCTED, &d->flags);
+ set_bit(_DOMF_constructed, &d->domain_flags);
new_thread(ed, dsi.v_kernentry, vstack_end, vstartinfo_start);
diff --git a/xen/arch/x86/i387.c b/xen/arch/x86/i387.c
index af54d0af8a..b859e74110 100644
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -18,7 +18,7 @@ void init_fpu(void)
__asm__ __volatile__ ( "fninit" );
if ( cpu_has_xmm )
load_mxcsr(0x1f80);
- set_bit(EDF_DONEFPUINIT, &current->flags);
+ set_bit(_VCPUF_fpu_initialised, &current->vcpu_flags);
}
void save_init_fpu(struct exec_domain *tsk)
@@ -28,7 +28,7 @@ void save_init_fpu(struct exec_domain *tsk)
* This causes us to set the real flag, so we'll need
* to temporarily clear it while saving f-p state.
*/
- if ( test_bit(EDF_GUEST_STTS, &tsk->flags) )
+ if ( test_bit(_VCPUF_guest_stts, &tsk->vcpu_flags) )
clts();
if ( cpu_has_fxsr )
@@ -40,7 +40,7 @@ void save_init_fpu(struct exec_domain *tsk)
"fnsave %0 ; fwait"
: "=m" (tsk->arch.guest_context.fpu_ctxt) );
- clear_bit(EDF_USEDFPU, &tsk->flags);
+ clear_bit(_VCPUF_fpu_dirtied, &tsk->vcpu_flags);
stts();
}
diff --git a/xen/arch/x86/idle0_task.c b/xen/arch/x86/idle0_task.c
index 43c0c31c25..7e811b28e8 100644
--- a/xen/arch/x86/idle0_task.c
+++ b/xen/arch/x86/idle0_task.c
@@ -4,8 +4,8 @@
#include <asm/desc.h>
struct domain idle0_domain = {
- id: IDLE_DOMAIN_ID,
- flags: 1<<DF_IDLETASK,
+ domain_id: IDLE_DOMAIN_ID,
+ domain_flags:DOMF_idle_domain,
refcnt: ATOMIC_INIT(1)
};
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index d355b7e9af..ef1aaf379c 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -106,7 +106,7 @@
#ifdef VERBOSE
#define MEM_LOG(_f, _a...) \
printk("DOM%u: (file=mm.c, line=%d) " _f "\n", \
- current->domain->id , __LINE__ , ## _a )
+ current->domain->domain_id , __LINE__ , ## _a )
#else
#define MEM_LOG(_f, _a...) ((void)0)
#endif
@@ -183,7 +183,7 @@ void arch_init_memory(void)
*/
dom_xen = alloc_domain_struct();
atomic_set(&dom_xen->refcnt, 1);
- dom_xen->id = DOMID_XEN;
+ dom_xen->domain_id = DOMID_XEN;
/*
* Initialise our DOMID_IO domain.
@@ -192,7 +192,7 @@ void arch_init_memory(void)
*/
dom_io = alloc_domain_struct();
atomic_set(&dom_io->refcnt, 1);
- dom_io->id = DOMID_IO;
+ dom_io->domain_id = DOMID_IO;
/* First 1MB of RAM is historically marked as I/O. */
for ( i = 0; i < 0x100; i++ )
@@ -1162,7 +1162,7 @@ void put_page_type(struct pfn_info *page)
* See domain.c:relinquish_list().
*/
ASSERT((x & PGT_validated) ||
- test_bit(DF_DYING, &page_get_owner(page)->flags));
+ test_bit(_DOMF_dying, &page_get_owner(page)->domain_flags));
if ( unlikely((nx & PGT_count_mask) == 0) )
{
@@ -1392,7 +1392,7 @@ static int set_foreigndom(unsigned int cpu, domid_t domid)
percpu_info[cpu].foreign = dom_io;
break;
default:
- MEM_LOG("Dom %u cannot set foreign dom\n", d->id);
+ MEM_LOG("Dom %u cannot set foreign dom\n", d->domain_id);
okay = 0;
break;
}
@@ -1645,7 +1645,7 @@ int do_mmuext_op(
if ( shadow_mode_external(d) )
{
MEM_LOG("ignoring SET_LDT hypercall from external "
- "domain %u\n", d->id);
+ "domain %u\n", d->domain_id);
okay = 0;
break;
}
@@ -1676,7 +1676,7 @@ int do_mmuext_op(
case MMUEXT_REASSIGN_PAGE:
if ( unlikely(!IS_PRIV(d)) )
{
- MEM_LOG("Dom %u has no reassignment priv", d->id);
+ MEM_LOG("Dom %u has no reassignment priv", d->domain_id);
okay = 0;
break;
}
@@ -1711,13 +1711,13 @@ int do_mmuext_op(
* it is dying.
*/
ASSERT(e->tot_pages <= e->max_pages);
- if ( unlikely(test_bit(DF_DYING, &e->flags)) ||
+ if ( unlikely(test_bit(_DOMF_dying, &e->domain_flags)) ||
unlikely(e->tot_pages == e->max_pages) ||
unlikely(IS_XEN_HEAP_FRAME(page)) )
{
MEM_LOG("Transferee has no reservation headroom (%d,%d), or "
"page is in Xen heap (%lx), or dom is dying (%ld).\n",
- e->tot_pages, e->max_pages, op.mfn, e->flags);
+ e->tot_pages, e->max_pages, op.mfn, e->domain_flags);
okay = 0;
goto reassign_fail;
}
@@ -1738,7 +1738,7 @@ int do_mmuext_op(
{
MEM_LOG("Bad page values %lx: ed=%p(%u), sd=%p,"
" caf=%08x, taf=%08x\n", page_to_pfn(page),
- d, d->id, unpickle_domptr(_nd), x,
+ d, d->domain_id, unpickle_domptr(_nd), x,
page->u.inuse.type_info);
okay = 0;
goto reassign_fail;
@@ -1990,7 +1990,7 @@ int do_mmu_update(
{
shadow_lock(FOREIGNDOM);
printk("privileged guest dom%d requests pfn=%lx to map mfn=%lx for dom%d\n",
- d->id, gpfn, mfn, FOREIGNDOM->id);
+ d->domain_id, gpfn, mfn, FOREIGNDOM->domain_id);
set_machinetophys(mfn, gpfn);
set_p2m_entry(FOREIGNDOM, gpfn, mfn, &sh_mapcache, &mapcache);
okay = 1;
@@ -2452,7 +2452,7 @@ int revalidate_l1(struct domain *d, l1_pgentry_t *l1page, l1_pgentry_t *snapshot
int modified = 0, i;
#if 0
- if ( d->id )
+ if ( d->domain_id )
printk("%s: l1page mfn=%lx snapshot mfn=%lx\n", __func__,
l1e_get_pfn(linear_pg_table[l1_linear_offset((unsigned long)l1page)]),
l1e_get_pfn(linear_pg_table[l1_linear_offset((unsigned long)snapshot)]));
@@ -2909,7 +2909,7 @@ void ptwr_destroy(struct domain *d)
{
MEM_LOG("Bad page values %p: ed=%p(%u), sd=%p,"
" caf=%08x, taf=%08x\n", page_to_pfn(page),
- d, d->id, unpickle_domptr(_nd), x,
+ d, d->domain_id, unpickle_domptr(_nd), x,
page->u.inuse.type_info);
spin_unlock(&d->page_alloc_lock);
put_domain(e);
@@ -2939,7 +2939,7 @@ void ptwr_destroy(struct domain *d)
* Also, a domain mustn't have PGC_allocated pages when it is dying.
*/
ASSERT(e->tot_pages <= e->max_pages);
- if ( unlikely(test_bit(DF_DYING, &e->flags)) ||
+ if ( unlikely(test_bit(_DOMF_dying, &e->domain_flags)) ||
unlikely(e->tot_pages == e->max_pages) ||
unlikely(!gnttab_prepare_for_transfer(e, d, gntref)) )
{
diff --git a/xen/arch/x86/physdev.c b/xen/arch/x86/physdev.c
index 5de96ec96d..94cf15e2eb 100644
--- a/xen/arch/x86/physdev.c
+++ b/xen/arch/x86/physdev.c
@@ -128,7 +128,7 @@ void physdev_init_dom0(struct domain *d)
BUG_ON(d->arch.iobmp_mask == NULL);
memset(d->arch.iobmp_mask, 0, IOBMP_BYTES);
- set_bit(DF_PHYSDEV, &d->flags);
+ set_bit(_DOMF_physdev_access, &d->domain_flags);
}
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index b28e0c58e1..a43b8e0762 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -587,7 +587,7 @@ void __init __start_xen(multiboot_info_t *mbi)
if ( dom0 == NULL )
panic("Error creating domain 0\n");
- set_bit(DF_PRIVILEGED, &dom0->flags);
+ set_bit(_DOMF_privileged, &dom0->domain_flags);
/* Grab the DOM0 command line. */
cmdline = (char *)(mod[0].string ? __va(mod[0].string) : NULL);
diff --git a/xen/arch/x86/shadow.c b/xen/arch/x86/shadow.c
index 4dbfa6b02d..b94e763f51 100644
--- a/xen/arch/x86/shadow.c
+++ b/xen/arch/x86/shadow.c
@@ -117,7 +117,7 @@ shadow_promote(struct domain *d, unsigned long gpfn, unsigned long gmfn,
{
printk("shadow_promote: get_page_type failed "
"dom%d gpfn=%lx gmfn=%lx t=%08lx\n",
- d->id, gpfn, gmfn, new_type);
+ d->domain_id, gpfn, gmfn, new_type);
okay = 0;
}
@@ -233,7 +233,7 @@ alloc_shadow_page(struct domain *d,
if ( unlikely(page == NULL) )
{
printk("Couldn't alloc shadow page! dom%d count=%d\n",
- d->id, d->arch.shadow_page_count);
+ d->domain_id, d->arch.shadow_page_count);
printk("Shadow table counts: l1=%d l2=%d hl2=%d snapshot=%d\n",
perfc_value(shadow_l1_pages),
perfc_value(shadow_l2_pages),
@@ -1179,7 +1179,8 @@ void __shadow_mode_disable(struct domain *d)
* Currently this does not fix up page ref counts, so it is valid to call
* only when a domain is being destroyed.
*/
- BUG_ON(!test_bit(DF_DYING, &d->flags) && shadow_mode_refcounts(d));
+ BUG_ON(!test_bit(_DOMF_dying, &d->domain_flags) &&
+ shadow_mode_refcounts(d));
d->arch.shadow_tainted_refcnts = shadow_mode_refcounts(d);
free_shadow_pages(d);
@@ -1409,7 +1410,7 @@ gpfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
{
printk("gpfn_to_mfn_foreign(d->id=%d, gpfn=%lx) => 0 l2e=%lx\n",
- d->id, gpfn, l2e_get_value(l2e));
+ d->domain_id, gpfn, l2e_get_value(l2e));
return INVALID_MFN;
}
unsigned long l1tab = l2e_get_phys(l2e);
@@ -1419,13 +1420,13 @@ gpfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
#if 0
printk("gpfn_to_mfn_foreign(d->id=%d, gpfn=%lx) => %lx phystab=%lx l2e=%lx l1tab=%lx, l1e=%lx\n",
- d->id, gpfn, l1_pgentry_val(l1e) >> PAGE_SHIFT, phystab, l2e, l1tab, l1e);
+ d->domain_id, gpfn, l1_pgentry_val(l1e) >> PAGE_SHIFT, phystab, l2e, l1tab, l1e);
#endif
if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) )
{
printk("gpfn_to_mfn_foreign(d->id=%d, gpfn=%lx) => 0 l1e=%lx\n",
- d->id, gpfn, l1e_get_value(l1e));
+ d->domain_id, gpfn, l1e_get_value(l1e));
return INVALID_MFN;
}
@@ -1754,7 +1755,7 @@ shadow_make_snapshot(
{
printk("Couldn't alloc fullshadow snapshot for pfn=%lx mfn=%lx!\n"
"Dom%d snapshot_count_count=%d\n",
- gpfn, gmfn, d->id, d->arch.snapshot_page_count);
+ gpfn, gmfn, d->domain_id, d->arch.snapshot_page_count);
BUG(); /* XXX FIXME: try a shadow flush to free up some memory. */
}
@@ -2645,7 +2646,7 @@ int shadow_fault(unsigned long va, struct cpu_user_regs *regs)
{
printk("%s() failed, crashing domain %d "
"due to a read-only L2 page table (gpde=%lx), va=%lx\n",
- __func__, d->id, l2e_get_value(gpde), va);
+ __func__, d->domain_id, l2e_get_value(gpde), va);
domain_crash_synchronous();
}
@@ -3146,7 +3147,7 @@ int check_l2_table(
FAILPT("bogus owner for snapshot page");
if ( page_get_owner(pfn_to_page(smfn)) != NULL )
FAILPT("shadow page mfn=0x%lx is owned by someone, domid=%d",
- smfn, page_get_owner(pfn_to_page(smfn))->id);
+ smfn, page_get_owner(pfn_to_page(smfn))->domain_id);
#if 0
if ( memcmp(&spl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
@@ -3307,7 +3308,7 @@ int _check_all_pagetables(struct exec_domain *ed, char *s)
shadow_status_noswap = 1;
sh_check_name = s;
- SH_VVLOG("%s-PT Audit domid=%d", s, d->id);
+ SH_VVLOG("%s-PT Audit domid=%d", s, d->domain_id);
sh_l2_present = sh_l1_present = 0;
perfc_incrc(check_all_pagetables);
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 0ab5868273..f3d34e0737 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -661,7 +661,7 @@ static void __init do_boot_cpu (int apicid)
ed = idle->exec_domain[0];
- set_bit(DF_IDLETASK, &idle->flags);
+ set_bit(_DOMF_idle_domain, &idle->domain_flags);
ed->arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index c340169f46..2838e28a35 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -152,7 +152,7 @@ static inline int do_trap(int trapnr, char *str,
#ifndef NDEBUG
if ( (ed->arch.guest_context.trap_ctxt[trapnr].address == 0) &&
- (ed->domain->id == 0) )
+ (ed->domain->domain_id == 0) )
goto xen_fault;
#endif
@@ -326,7 +326,7 @@ asmlinkage int do_page_fault(struct cpu_user_regs *regs)
#ifndef NDEBUG
if ( (ed->arch.guest_context.trap_ctxt[TRAP_page_fault].address == 0) &&
- (d->id == 0) )
+ (d->domain_id == 0) )
goto xen_fault;
#endif
@@ -361,13 +361,13 @@ long do_fpu_taskswitch(int set)
if ( set )
{
- set_bit(EDF_GUEST_STTS, &ed->flags);
+ set_bit(_VCPUF_guest_stts, &ed->vcpu_flags);
stts();
}
else
{
- clear_bit(EDF_GUEST_STTS, &ed->flags);
- if ( test_bit(EDF_USEDFPU, &ed->flags) )
+ clear_bit(_VCPUF_guest_stts, &ed->vcpu_flags);
+ if ( test_bit(_VCPUF_fpu_dirtied, &ed->vcpu_flags) )
clts();
}
@@ -674,7 +674,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
case 0: /* Read CR0 */
*reg =
(read_cr0() & ~X86_CR0_TS) |
- (test_bit(EDF_GUEST_STTS, &ed->flags) ? X86_CR0_TS : 0);
+ (test_bit(_VCPUF_guest_stts, &ed->vcpu_flags) ? X86_CR0_TS:0);
break;
case 2: /* Read CR2 */
@@ -808,7 +808,7 @@ asmlinkage int do_general_protection(struct cpu_user_regs *regs)
#ifndef NDEBUG
if ( (ed->arch.guest_context.trap_ctxt[TRAP_gp_fault].address == 0) &&
- (ed->domain->id == 0) )
+ (ed->domain->domain_id == 0) )
goto gp_in_kernel;
#endif
@@ -922,7 +922,7 @@ asmlinkage int math_state_restore(struct cpu_user_regs *regs)
setup_fpu(current);
- if ( test_and_clear_bit(EDF_GUEST_STTS, &current->flags) )
+ if ( test_and_clear_bit(_VCPUF_guest_stts, &current->vcpu_flags) )
{
struct trap_bounce *tb = &current->arch.trap_bounce;
tb->flags = TBF_EXCEPTION;
diff --git a/xen/arch/x86/vmx.c b/xen/arch/x86/vmx.c
index 75714157a8..52c976f51f 100644
--- a/xen/arch/x86/vmx.c
+++ b/xen/arch/x86/vmx.c
@@ -984,7 +984,7 @@ static void vmx_print_line(const char c, struct exec_domain *d)
print_buf[index++] = c;
}
print_buf[index] = '\0';
- printk("(GUEST: %u) %s\n", d->domain->id, (char *) &print_buf);
+ printk("(GUEST: %u) %s\n", d->domain->domain_id, (char *) &print_buf);
index = 0;
}
else
@@ -1072,7 +1072,7 @@ asmlinkage void vmx_vmexit_handler(struct cpu_user_regs regs)
}
__vmread(GUEST_EIP, &eip);
- TRACE_3D(TRC_VMX_VMEXIT, ed->domain->id, eip, exit_reason);
+ TRACE_3D(TRC_VMX_VMEXIT, ed->domain->domain_id, eip, exit_reason);
switch (exit_reason) {
case EXIT_REASON_EXCEPTION_NMI:
@@ -1093,7 +1093,7 @@ asmlinkage void vmx_vmexit_handler(struct cpu_user_regs regs)
perfc_incra(cause_vector, vector);
- TRACE_3D(TRC_VMX_VECTOR, ed->domain->id, eip, vector);
+ TRACE_3D(TRC_VMX_VECTOR, ed->domain->domain_id, eip, vector);
switch (vector) {
#ifdef XEN_DEBUGGER
case TRAP_debug:
@@ -1145,7 +1145,7 @@ asmlinkage void vmx_vmexit_handler(struct cpu_user_regs regs)
__vmwrite(VM_ENTRY_INTR_INFO_FIELD, intr_fields);
__vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, regs.error_code);
ed->arch.arch_vmx.cpu_cr2 = va;
- TRACE_3D(TRC_VMX_INT, ed->domain->id, TRAP_page_fault, va);
+ TRACE_3D(TRC_VMX_INT, ed->domain->domain_id, TRAP_page_fault, va);
}
break;
}
diff --git a/xen/arch/x86/vmx_vmcs.c b/xen/arch/x86/vmx_vmcs.c
index 375d20da48..9566e60d9d 100644
--- a/xen/arch/x86/vmx_vmcs.c
+++ b/xen/arch/x86/vmx_vmcs.c
@@ -165,7 +165,7 @@ void vmx_do_launch(struct exec_domain *ed)
struct cpu_user_regs *regs = get_cpu_user_regs();
vmx_stts();
- set_bit(EDF_GUEST_STTS, &ed->flags);
+ set_bit(_VCPUF_guest_stts, &ed->vcpu_flags);
cpu = smp_processor_id();
diff --git a/xen/common/dom0_ops.c b/xen/common/dom0_ops.c
index b4fe12ecb9..add8f51d43 100644
--- a/xen/common/dom0_ops.c
+++ b/xen/common/dom0_ops.c
@@ -139,7 +139,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
{
ret = -EINVAL;
if ( (d != current->domain) &&
- test_bit(DF_CONSTRUCTED, &d->flags) )
+ test_bit(_DOMF_constructed, &d->domain_flags) )
{
domain_unpause_by_systemcontroller(d);
ret = 0;
@@ -194,7 +194,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
ret = 0;
- op->u.createdomain.domain = d->id;
+ op->u.createdomain.domain = d->domain_id;
copy_to_user(u_dom0_op, op, sizeof(*op));
}
break;
@@ -265,7 +265,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
ed->cpumap = cpumap;
if ( cpumap == CPUMAP_RUNANYWHERE )
- clear_bit(EDF_CPUPINNED, &ed->flags);
+ clear_bit(_VCPUF_cpu_pinned, &ed->vcpu_flags);
else
{
/* pick a new cpu from the usable map */
@@ -273,8 +273,8 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
exec_domain_pause(ed);
if ( ed->processor != new_cpu )
- set_bit(EDF_MIGRATED, &ed->flags);
- set_bit(EDF_CPUPINNED, &ed->flags);
+ set_bit(_VCPUF_cpu_migrated, &ed->vcpu_flags);
+ set_bit(_VCPUF_cpu_pinned, &ed->vcpu_flags);
ed->processor = new_cpu;
exec_domain_unpause(ed);
}
@@ -309,7 +309,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
for_each_domain ( d )
{
- if ( d->id >= op->u.getdomaininfo.domain )
+ if ( d->domain_id >= op->u.getdomaininfo.domain )
break;
}
@@ -322,7 +322,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
read_unlock(&domlist_lock);
- op->u.getdomaininfo.domain = d->id;
+ op->u.getdomaininfo.domain = d->domain_id;
memset(&op->u.getdomaininfo.vcpu_to_cpu, -1,
sizeof(op->u.getdomaininfo.vcpu_to_cpu));
@@ -335,13 +335,13 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
* - domain is marked as running if any of its vcpus is running
*/
for_each_exec_domain ( d, ed ) {
- op->u.getdomaininfo.vcpu_to_cpu[ed->id] = ed->processor;
- op->u.getdomaininfo.cpumap[ed->id] = ed->cpumap;
- if (!test_bit(EDF_CTRLPAUSE, &ed->flags))
+ op->u.getdomaininfo.vcpu_to_cpu[ed->vcpu_id] = ed->processor;
+ op->u.getdomaininfo.cpumap[ed->vcpu_id] = ed->cpumap;
+ if ( !(ed->vcpu_flags & VCPUF_ctrl_pause) )
flags &= ~DOMFLAGS_PAUSED;
- if (!test_bit(EDF_BLOCKED, &ed->flags))
+ if ( !(ed->vcpu_flags & VCPUF_blocked) )
flags &= ~DOMFLAGS_BLOCKED;
- if (test_bit(EDF_RUNNING, &ed->flags))
+ if ( ed->vcpu_flags & VCPUF_running )
flags |= DOMFLAGS_RUNNING;
if ( ed->cpu_time > cpu_time )
cpu_time += ed->cpu_time;
@@ -352,9 +352,9 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
op->u.getdomaininfo.n_vcpu = vcpu_count;
op->u.getdomaininfo.flags = flags |
- (test_bit( DF_DYING, &d->flags) ? DOMFLAGS_DYING : 0) |
- (test_bit( DF_CRASHED, &d->flags) ? DOMFLAGS_CRASHED : 0) |
- (test_bit( DF_SHUTDOWN, &d->flags) ? DOMFLAGS_SHUTDOWN : 0) |
+ ((d->domain_flags & DOMF_dying) ? DOMFLAGS_DYING : 0) |
+ ((d->domain_flags & DOMF_crashed) ? DOMFLAGS_CRASHED : 0) |
+ ((d->domain_flags & DOMF_shutdown) ? DOMFLAGS_SHUTDOWN : 0) |
d->shutdown_code << DOMFLAGS_SHUTDOWNSHIFT;
op->u.getdomaininfo.tot_pages = d->tot_pages;
diff --git a/xen/common/dom_mem_ops.c b/xen/common/dom_mem_ops.c
index 698f6ee835..40aa522eb0 100644
--- a/xen/common/dom_mem_ops.c
+++ b/xen/common/dom_mem_ops.c
@@ -29,7 +29,7 @@
__HYPERVISOR_dom_mem_op, \
(_op) | (i << START_EXTENT_SHIFT), \
extent_list, nr_extents, extent_order, \
- (d == current->domain) ? DOMID_SELF : d->id);
+ (d == current->domain) ? DOMID_SELF : d->domain_id);
static long
alloc_dom_mem(struct domain *d,
@@ -95,14 +95,14 @@ free_dom_mem(struct domain *d,
if ( unlikely((mpfn + j) >= max_page) )
{
DPRINTK("Domain %u page number out of range (%lx >= %lx)\n",
- d->id, mpfn + j, max_page);
+ d->domain_id, mpfn + j, max_page);
return i;
}
page = &frame_table[mpfn + j];
if ( unlikely(!get_page(page, d)) )
{
- DPRINTK("Bad page free for domain %u\n", d->id);
+ DPRINTK("Bad page free for domain %u\n", d->domain_id);
return i;
}
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 6f729eb2ec..3729489492 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -39,7 +39,7 @@ struct domain *do_createdomain(domid_t dom_id, unsigned int cpu)
atomic_set(&d->refcnt, 1);
atomic_set(&ed->pausecnt, 0);
- d->id = dom_id;
+ d->domain_id = dom_id;
ed->processor = cpu;
spin_lock_init(&d->time_lock);
@@ -50,7 +50,7 @@ struct domain *do_createdomain(domid_t dom_id, unsigned int cpu)
INIT_LIST_HEAD(&d->page_list);
INIT_LIST_HEAD(&d->xenpage_list);
- if ( (d->id != IDLE_DOMAIN_ID) &&
+ if ( (d->domain_id != IDLE_DOMAIN_ID) &&
((init_event_channels(d) != 0) || (grant_table_create(d) != 0)) )
{
destroy_event_channels(d);
@@ -62,12 +62,12 @@ struct domain *do_createdomain(domid_t dom_id, unsigned int cpu)
sched_add_domain(ed);
- if ( d->id != IDLE_DOMAIN_ID )
+ if ( d->domain_id != IDLE_DOMAIN_ID )
{
write_lock(&domlist_lock);
pd = &domain_list; /* NB. domain_list maintained in order of dom_id. */
for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list )
- if ( (*pd)->id > d->id )
+ if ( (*pd)->domain_id > d->domain_id )
break;
d->next_in_list = *pd;
*pd = d;
@@ -88,7 +88,7 @@ struct domain *find_domain_by_id(domid_t dom)
d = domain_hash[DOMAIN_HASH(dom)];
while ( d != NULL )
{
- if ( d->id == dom )
+ if ( d->domain_id == dom )
{
if ( unlikely(!get_domain(d)) )
d = NULL;
@@ -107,7 +107,7 @@ void domain_kill(struct domain *d)
struct exec_domain *ed;
domain_pause(d);
- if ( !test_and_set_bit(DF_DYING, &d->flags) )
+ if ( !test_and_set_bit(_DOMF_dying, &d->domain_flags) )
{
for_each_exec_domain(d, ed)
sched_rem_domain(ed);
@@ -121,10 +121,10 @@ void domain_crash(void)
{
struct domain *d = current->domain;
- if ( d->id == 0 )
+ if ( d->domain_id == 0 )
BUG();
- set_bit(DF_CRASHED, &d->flags);
+ set_bit(_DOMF_crashed, &d->domain_flags);
send_guest_virq(dom0->exec_domain[0], VIRQ_DOM_EXC);
@@ -144,7 +144,7 @@ void domain_shutdown(u8 reason)
{
struct domain *d = current->domain;
- if ( d->id == 0 )
+ if ( d->domain_id == 0 )
{
extern void machine_restart(char *);
extern void machine_halt(void);
@@ -164,9 +164,9 @@ void domain_shutdown(u8 reason)
}
if ( (d->shutdown_code = reason) == SHUTDOWN_crash )
- set_bit(DF_CRASHED, &d->flags);
+ set_bit(_DOMF_crashed, &d->domain_flags);
else
- set_bit(DF_SHUTDOWN, &d->flags);
+ set_bit(_DOMF_shutdown, &d->domain_flags);
send_guest_virq(dom0->exec_domain[0], VIRQ_DOM_EXC);
@@ -180,7 +180,7 @@ void domain_destruct(struct domain *d)
struct domain **pd;
atomic_t old, new;
- if ( !test_bit(DF_DYING, &d->flags) )
+ if ( !test_bit(_DOMF_dying, &d->domain_flags) )
BUG();
/* May be already destructed, or get_domain() can race us. */
@@ -196,7 +196,7 @@ void domain_destruct(struct domain *d)
while ( *pd != d )
pd = &(*pd)->next_in_list;
*pd = d->next_in_list;
- pd = &domain_hash[DOMAIN_HASH(d->id)];
+ pd = &domain_hash[DOMAIN_HASH(d->domain_id)];
while ( *pd != d )
pd = &(*pd)->next_in_hashbucket;
*pd = d->next_in_hashbucket;
@@ -217,18 +217,18 @@ void domain_destruct(struct domain *d)
* of domains other than domain 0. ie. the domains that are being built by
* the userspace dom0 domain builder.
*/
-int set_info_guest(struct domain *p, dom0_setdomaininfo_t *setdomaininfo)
+int set_info_guest(struct domain *d, dom0_setdomaininfo_t *setdomaininfo)
{
int rc = 0;
struct vcpu_guest_context *c = NULL;
unsigned long vcpu = setdomaininfo->exec_domain;
struct exec_domain *ed;
- if ( (vcpu >= MAX_VIRT_CPUS) || ((ed = p->exec_domain[vcpu]) == NULL) )
+ if ( (vcpu >= MAX_VIRT_CPUS) || ((ed = d->exec_domain[vcpu]) == NULL) )
return -EINVAL;
- if (test_bit(DF_CONSTRUCTED, &p->flags) &&
- !test_bit(EDF_CTRLPAUSE, &ed->flags))
+ if (test_bit(_DOMF_constructed, &d->domain_flags) &&
+ !test_bit(_VCPUF_ctrl_pause, &ed->vcpu_flags))
return -EINVAL;
if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
@@ -243,7 +243,7 @@ int set_info_guest(struct domain *p, dom0_setdomaininfo_t *setdomaininfo)
if ( (rc = arch_set_info_guest(ed, c)) != 0 )
goto out;
- set_bit(DF_CONSTRUCTED, &p->flags);
+ set_bit(_DOMF_constructed, &d->domain_flags);
out:
xfree(c);
@@ -295,7 +295,7 @@ long do_boot_vcpu(unsigned long vcpu, struct vcpu_guest_context *ctxt)
sched_add_domain(ed);
/* domain_unpause_by_systemcontroller */
- if ( test_and_clear_bit(EDF_CTRLPAUSE, &ed->flags) )
+ if ( test_and_clear_bit(_VCPUF_ctrl_pause, &ed->vcpu_flags) )
domain_wake(ed);
xfree(c);
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index 22b01ad541..32e519bd1e 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -40,7 +40,7 @@ static int get_free_port(struct exec_domain *ed)
max = d->max_event_channel;
chn = d->event_channel;
- for ( port = ed->id * EVENT_CHANNELS_SPREAD; port < max; port++ )
+ for ( port = ed->vcpu_id * EVENT_CHANNELS_SPREAD; port < max; port++ )
if ( chn[port].state == ECS_FREE )
break;
@@ -114,9 +114,9 @@ static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
return -EINVAL;
if ( dom1 == DOMID_SELF )
- dom1 = current->domain->id;
+ dom1 = current->domain->domain_id;
if ( dom2 == DOMID_SELF )
- dom2 = current->domain->id;
+ dom2 = current->domain->domain_id;
if ( ((d1 = find_domain_by_id(dom1)) == NULL) ||
((d2 = find_domain_by_id(dom2)) == NULL) )
@@ -429,7 +429,7 @@ static long __evtchn_close(struct domain *d1, int port1)
BUG();
chn2[port2].state = ECS_UNBOUND;
- chn2[port2].u.unbound.remote_domid = d1->id;
+ chn2[port2].u.unbound.remote_domid = d1->domain_id;
break;
default:
@@ -459,7 +459,7 @@ static long evtchn_close(evtchn_close_t *close)
domid_t dom = close->dom;
if ( dom == DOMID_SELF )
- dom = current->domain->id;
+ dom = current->domain->domain_id;
else if ( !IS_PRIV(current->domain) )
return -EPERM;
@@ -522,7 +522,7 @@ static long evtchn_status(evtchn_status_t *status)
long rc = 0;
if ( dom == DOMID_SELF )
- dom = current->domain->id;
+ dom = current->domain->domain_id;
else if ( !IS_PRIV(current->domain) )
return -EPERM;
@@ -552,7 +552,7 @@ static long evtchn_status(evtchn_status_t *status)
case ECS_INTERDOMAIN:
status->status = EVTCHNSTAT_interdomain;
status->u.interdomain.dom =
- chn[port].u.interdomain.remote_dom->domain->id;
+ chn[port].u.interdomain.remote_dom->domain->domain_id;
status->u.interdomain.port = chn[port].u.interdomain.remote_port;
break;
case ECS_PIRQ:
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index 7fdc92c9b9..591ec609f3 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -118,10 +118,10 @@ __gnttab_activate_grant_ref(
u32 scombo, prev_scombo, new_scombo;
if ( unlikely((sflags & GTF_type_mask) != GTF_permit_access) ||
- unlikely(sdom != mapping_d->id) )
+ unlikely(sdom != mapping_d->domain_id) )
PIN_FAIL(unlock_out, GNTST_general_error,
"Bad flags (%x) or dom (%d). (NB. expected dom %d)\n",
- sflags, sdom, mapping_d->id);
+ sflags, sdom, mapping_d->domain_id);
/* Merge two 16-bit values into a 32-bit combined update. */
/* NB. Endianness! */
@@ -674,7 +674,7 @@ gnttab_setup_table(
if ( op.dom == DOMID_SELF )
{
- op.dom = current->domain->id;
+ op.dom = current->domain->domain_id;
}
else if ( unlikely(!IS_PRIV(current->domain)) )
{
@@ -725,7 +725,7 @@ gnttab_dump_table(gnttab_dump_table_t *uop)
if ( op.dom == DOMID_SELF )
{
- op.dom = current->domain->id;
+ op.dom = current->domain->domain_id;
}
if ( unlikely((d = find_domain_by_id(op.dom)) == NULL) )
@@ -866,10 +866,10 @@ gnttab_check_unmap(
lgt = ld->grant_table;
#if GRANT_DEBUG_VERBOSE
- if ( ld->id != 0 )
+ if ( ld->domain_id != 0 )
{
DPRINTK("Foreign unref rd(%d) ld(%d) frm(%x) flgs(%x).\n",
- rd->id, ld->id, frame, readonly);
+ rd->domain_id, ld->domain_id, frame, readonly);
}
#endif
@@ -879,7 +879,8 @@ gnttab_check_unmap(
if ( get_domain(rd) == 0 )
{
- DPRINTK("gnttab_check_unmap: couldn't get_domain rd(%d)\n", rd->id);
+ DPRINTK("gnttab_check_unmap: couldn't get_domain rd(%d)\n",
+ rd->domain_id);
return 0;
}
@@ -913,7 +914,7 @@ gnttab_check_unmap(
/* gotcha */
DPRINTK("Grant unref rd(%d) ld(%d) frm(%lx) flgs(%x).\n",
- rd->id, ld->id, frame, readonly);
+ rd->domain_id, ld->domain_id, frame, readonly);
if ( readonly )
act->pin -= GNTPIN_hstr_inc;
@@ -963,12 +964,13 @@ gnttab_prepare_for_transfer(
unsigned long target_pfn;
DPRINTK("gnttab_prepare_for_transfer rd(%hu) ld(%hu) ref(%hu).\n",
- rd->id, ld->id, ref);
+ rd->domain_id, ld->domain_id, ref);
if ( unlikely((rgt = rd->grant_table) == NULL) ||
unlikely(ref >= NR_GRANT_ENTRIES) )
{
- DPRINTK("Dom %d has no g.t., or ref is bad (%d).\n", rd->id, ref);
+ DPRINTK("Dom %d has no g.t., or ref is bad (%d).\n",
+ rd->domain_id, ref);
return 0;
}
@@ -990,10 +992,10 @@ gnttab_prepare_for_transfer(
}
if ( unlikely(sflags != GTF_accept_transfer) ||
- unlikely(sdom != ld->id) )
+ unlikely(sdom != ld->domain_id) )
{
DPRINTK("Bad flags (%x) or dom (%d). (NB. expected dom %d)\n",
- sflags, sdom, ld->id);
+ sflags, sdom, ld->domain_id);
goto fail;
}
@@ -1041,7 +1043,7 @@ gnttab_notify_transfer(
unsigned long pfn;
DPRINTK("gnttab_notify_transfer rd(%hu) ld(%hu) ref(%hu).\n",
- rd->id, ld->id, ref);
+ rd->domain_id, ld->domain_id, ref);
sha = &rd->grant_table->shared[ref];
@@ -1062,7 +1064,7 @@ gnttab_notify_transfer(
__phys_to_machine_mapping[pfn] = frame;
}
sha->frame = __mfn_to_gpfn(rd, frame);
- sha->domid = rd->id;
+ sha->domid = rd->domain_id;
wmb();
sha->flags = ( GTF_accept_transfer | GTF_transfer_completed );
diff --git a/xen/common/keyhandler.c b/xen/common/keyhandler.c
index 3449143458..34f9fd9291 100644
--- a/xen/common/keyhandler.c
+++ b/xen/common/keyhandler.c
@@ -109,7 +109,7 @@ static void do_task_queues(unsigned char key)
for_each_domain ( d )
{
printk("Xen: DOM %u, flags=%lx refcnt=%d nr_pages=%d "
- "xenheap_pages=%d\n", d->id, d->flags,
+ "xenheap_pages=%d\n", d->domain_id, d->domain_flags,
atomic_read(&d->refcnt), d->tot_pages, d->xenheap_pages);
dump_pageframe_info(d);
@@ -118,11 +118,11 @@ static void do_task_queues(unsigned char key)
printk("Guest: %p CPU %d [has=%c] flags=%lx "
"upcall_pend = %02x, upcall_mask = %02x\n", ed,
ed->processor,
- test_bit(EDF_RUNNING, &ed->flags) ? 'T':'F',
- ed->flags,
+ test_bit(_VCPUF_running, &ed->vcpu_flags) ? 'T':'F',
+ ed->vcpu_flags,
ed->vcpu_info->evtchn_upcall_pending,
ed->vcpu_info->evtchn_upcall_mask);
- printk("Notifying guest... %d/%d\n", d->id, ed->id);
+ printk("Notifying guest... %d/%d\n", d->domain_id, ed->vcpu_id);
printk("port %d/%d stat %d %d %d\n",
VIRQ_DEBUG, ed->virq_to_evtchn[VIRQ_DEBUG],
test_bit(ed->virq_to_evtchn[VIRQ_DEBUG],
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 5a4115c82e..8669ac8a27 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -504,13 +504,13 @@ struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order)
spin_lock(&d->page_alloc_lock);
- if ( unlikely(test_bit(DF_DYING, &d->flags)) ||
+ if ( unlikely(test_bit(_DOMF_dying, &d->domain_flags)) ||
unlikely((d->tot_pages + (1 << order)) > d->max_pages) )
{
DPRINTK("Over-allocation for domain %u: %u > %u\n",
- d->id, d->tot_pages + (1 << order), d->max_pages);
+ d->domain_id, d->tot_pages + (1 << order), d->max_pages);
DPRINTK("...or the domain is dying (%d)\n",
- !!test_bit(DF_DYING, &d->flags));
+ !!test_bit(_DOMF_dying, &d->domain_flags));
spin_unlock(&d->page_alloc_lock);
free_heap_pages(MEMZONE_DOM, pg, order);
return NULL;
@@ -575,7 +575,7 @@ void free_domheap_pages(struct pfn_info *pg, unsigned int order)
spin_unlock_recursive(&d->page_alloc_lock);
- if ( likely(!test_bit(DF_DYING, &d->flags)) )
+ if ( likely(!test_bit(_DOMF_dying, &d->domain_flags)) )
{
free_heap_pages(MEMZONE_DOM, pg, order);
}
diff --git a/xen/common/sched_bvt.c b/xen/common/sched_bvt.c
index 276479e490..1ad20578f4 100644
--- a/xen/common/sched_bvt.c
+++ b/xen/common/sched_bvt.c
@@ -174,9 +174,9 @@ static int bvt_alloc_task(struct exec_domain *ed)
return -1;
memset(d->sched_priv, 0, sizeof(struct bvt_dom_info));
}
- ed->sched_priv = &BVT_INFO(d)->ed_inf[ed->id];
- BVT_INFO(d)->ed_inf[ed->id].inf = BVT_INFO(d);
- BVT_INFO(d)->ed_inf[ed->id].exec_domain = ed;
+ ed->sched_priv = &BVT_INFO(d)->ed_inf[ed->vcpu_id];
+ BVT_INFO(d)->ed_inf[ed->vcpu_id].inf = BVT_INFO(d);
+ BVT_INFO(d)->ed_inf[ed->vcpu_id].exec_domain = ed;
return 0;
}
@@ -190,7 +190,8 @@ static void bvt_add_task(struct exec_domain *d)
ASSERT(inf != NULL);
ASSERT(d != NULL);
- if (d->id == 0) {
+ if ( d->vcpu_id == 0 )
+ {
inf->mcu_advance = MCU_ADVANCE;
inf->domain = d->domain;
inf->warpback = 0;
@@ -212,7 +213,7 @@ static void bvt_add_task(struct exec_domain *d)
einf->exec_domain = d;
- if ( d->domain->id == IDLE_DOMAIN_ID )
+ if ( d->domain->domain_id == IDLE_DOMAIN_ID )
{
einf->avt = einf->evt = ~0U;
}
@@ -231,7 +232,7 @@ static int bvt_init_idle_task(struct exec_domain *ed)
bvt_add_task(ed);
- set_bit(EDF_RUNNING, &ed->flags);
+ set_bit(_VCPUF_running, &ed->vcpu_flags);
if ( !__task_on_runqueue(ed) )
__add_to_runqueue_head(ed);
@@ -256,7 +257,7 @@ static void bvt_wake(struct exec_domain *ed)
/* Set the BVT parameters. AVT should always be updated
if CPU migration ocurred.*/
if ( einf->avt < CPU_SVT(cpu) ||
- unlikely(test_bit(EDF_MIGRATED, &ed->flags)) )
+ unlikely(test_bit(_VCPUF_cpu_migrated, &ed->vcpu_flags)) )
einf->avt = CPU_SVT(cpu);
/* Deal with warping here. */
@@ -279,7 +280,7 @@ static void bvt_wake(struct exec_domain *ed)
static void bvt_sleep(struct exec_domain *ed)
{
- if ( test_bit(EDF_RUNNING, &ed->flags) )
+ if ( test_bit(_VCPUF_running, &ed->vcpu_flags) )
cpu_raise_softirq(ed->processor, SCHEDULE_SOFTIRQ);
else if ( __task_on_runqueue(ed) )
__del_from_runqueue(ed);
@@ -538,8 +539,8 @@ static void bvt_dump_cpu_state(int i)
list_for_each_entry ( ed_inf, queue, run_list )
{
ed = ed_inf->exec_domain;
- printk("%3d: %u has=%c ", loop++, ed->domain->id,
- test_bit(EDF_RUNNING, &ed->flags) ? 'T':'F');
+ printk("%3d: %u has=%c ", loop++, ed->domain->domain_id,
+ test_bit(_VCPUF_running, &ed->vcpu_flags) ? 'T':'F');
bvt_dump_runq_el(ed);
printk("c=0x%X%08X\n", (u32)(ed->cpu_time>>32), (u32)ed->cpu_time);
printk(" l: %p n: %p p: %p\n",
diff --git a/xen/common/sched_sedf.c b/xen/common/sched_sedf.c
index 6bd8b5a0be..d4ed67ed5b 100644
--- a/xen/common/sched_sedf.c
+++ b/xen/common/sched_sedf.c
@@ -162,8 +162,8 @@ static inline void extraq_del(struct exec_domain *d, int i)
{
struct list_head *list = EXTRALIST(d,i);
ASSERT(extraq_on(d,i));
- PRINT(3, "Removing domain %i.%i from L%i extraq\n", d->domain->id,
- d->id, i);
+ PRINT(3, "Removing domain %i.%i from L%i extraq\n", d->domain->domain_id,
+ d->vcpu_id, i);
list_del(list);
list->next = NULL;
ASSERT(!extraq_on(d, i));
@@ -182,7 +182,7 @@ static inline void extraq_add_sort_update(struct exec_domain *d, int i, int sub)
ASSERT(!extraq_on(d,i));
PRINT(3, "Adding domain %i.%i (score= %i, short_pen= %"PRIi64")"
" to L%i extraq\n",
- d->domain->id, d->id, EDOM_INFO(d)->score[i],
+ d->domain->domain_id, d->vcpu_id, EDOM_INFO(d)->score[i],
EDOM_INFO(d)->short_block_lost_tot, i);
/*iterate through all elements to find our "hole" and on our way
update all the other scores*/
@@ -193,8 +193,8 @@ static inline void extraq_add_sort_update(struct exec_domain *d, int i, int sub)
break;
else
PRINT(4,"\tbehind domain %i.%i (score= %i)\n",
- curinf->exec_domain->domain->id,
- curinf->exec_domain->id, curinf->score[i]);
+ curinf->exec_domain->domain->domain_id,
+ curinf->exec_domain->vcpu_id, curinf->score[i]);
}
/*cur now contains the element, before which we'll enqueue*/
PRINT(3, "\tlist_add to %p\n", cur->prev);
@@ -208,23 +208,23 @@ static inline void extraq_add_sort_update(struct exec_domain *d, int i, int sub)
extralist[i]);
curinf->score[i] -= sub;
PRINT(4, "\tupdating domain %i.%i (score= %u)\n",
- curinf->exec_domain->domain->id,
- curinf->exec_domain->id, curinf->score[i]);
+ curinf->exec_domain->domain->domain_id,
+ curinf->exec_domain->vcpu_id, curinf->score[i]);
}
ASSERT(extraq_on(d,i));
}
static inline void extraq_check(struct exec_domain *d) {
if (extraq_on(d, EXTRA_UTIL_Q)) {
- PRINT(2,"Dom %i.%i is on L1 extraQ\n",d->domain->id, d->id);
+ PRINT(2,"Dom %i.%i is on L1 extraQ\n",d->domain->domain_id, d->vcpu_id);
if (!(EDOM_INFO(d)->status & EXTRA_AWARE) &&
!extra_runs(EDOM_INFO(d))) {
extraq_del(d, EXTRA_UTIL_Q);
PRINT(2,"Removed dom %i.%i from L1 extraQ\n",
- d->domain->id, d->id);
+ d->domain->domain_id, d->vcpu_id);
}
} else {
- PRINT(2,"Dom %i.%i is NOT on L1 extraQ\n",d->domain->id,
- d->id);
+ PRINT(2,"Dom %i.%i is NOT on L1 extraQ\n",d->domain->domain_id,
+ d->vcpu_id);
if ((EDOM_INFO(d)->status & EXTRA_AWARE) && sedf_runnable(d))
{
#if (EXTRA == EXTRA_ROUNDR)
@@ -235,8 +235,8 @@ static inline void extraq_check(struct exec_domain *d) {
#elif
;
#endif
- PRINT(2,"Added dom %i.%i to L1 extraQ\n",d->domain->id,
- d->id);
+ PRINT(2,"Added dom %i.%i to L1 extraQ\n",d->domain->domain_id,
+ d->vcpu_id);
}
}
}
@@ -268,7 +268,7 @@ static inline void __del_from_queue(struct exec_domain *d)
struct list_head *list = LIST(d);
ASSERT(__task_on_queue(d));
PRINT(3,"Removing domain %i.%i (bop= %"PRIu64") from runq/waitq\n",
- d->domain->id, d->id, PERIOD_BEGIN(EDOM_INFO(d)));
+ d->domain->domain_id, d->vcpu_id, PERIOD_BEGIN(EDOM_INFO(d)));
list_del(list);
list->next = NULL;
ASSERT(!__task_on_queue(d));
@@ -309,7 +309,7 @@ DOMAIN_COMPARER(waitq, list, PERIOD_BEGIN(d1), PERIOD_BEGIN(d2))
static inline void __add_to_waitqueue_sort(struct exec_domain *d) {
ASSERT(!__task_on_queue(d));
PRINT(3,"Adding domain %i.%i (bop= %"PRIu64") to waitq\n",
- d->domain->id, d->id, PERIOD_BEGIN(EDOM_INFO(d)));
+ d->domain->domain_id, d->vcpu_id, PERIOD_BEGIN(EDOM_INFO(d)));
list_insert_sort(WAITQ(d->processor), LIST(d), waitq_comp);
ASSERT(__task_on_queue(d));
}
@@ -322,7 +322,7 @@ static inline void __add_to_waitqueue_sort(struct exec_domain *d) {
DOMAIN_COMPARER(runq, list, d1->deadl_abs, d2->deadl_abs)
static inline void __add_to_runqueue_sort(struct exec_domain *d) {
PRINT(3,"Adding domain %i.%i (deadl= %"PRIu64") to runq\n",
- d->domain->id, d->id, EDOM_INFO(d)->deadl_abs);
+ d->domain->domain_id, d->vcpu_id, EDOM_INFO(d)->deadl_abs);
list_insert_sort(RUNQ(d->processor), LIST(d), runq_comp);
}
@@ -346,8 +346,8 @@ static int sedf_init_scheduler() {
/* Allocates memory for per domain private scheduling data*/
static int sedf_alloc_task(struct exec_domain *d) {
- PRINT(2,"sedf_alloc_task was called, domain-id %i.%i\n",d->domain->id,
- d->id);
+ PRINT(2,"sedf_alloc_task was called, domain-id %i.%i\n",d->domain->domain_id,
+ d->vcpu_id);
if (d->domain->sched_priv == NULL) {
if ((d->domain->sched_priv =
xmalloc(struct sedf_dom_info)) == NULL )
@@ -366,10 +366,10 @@ static void sedf_add_task(struct exec_domain *d)
struct sedf_edom_info *inf = EDOM_INFO(d);
inf->exec_domain = d;
- PRINT(2,"sedf_add_task was called, domain-id %i.%i\n",d->domain->id,
- d->id);
+ PRINT(2,"sedf_add_task was called, domain-id %i.%i\n",d->domain->domain_id,
+ d->vcpu_id);
- if (d->domain->id==0) {
+ if (d->domain->domain_id==0) {
/*set dom0 to something useful to boot the machine*/
inf->period = MILLISECS(20);
inf->slice = MILLISECS(15);
@@ -391,7 +391,7 @@ static void sedf_add_task(struct exec_domain *d)
INIT_LIST_HEAD(&(inf->extralist[EXTRA_PEN_Q]));
INIT_LIST_HEAD(&(inf->extralist[EXTRA_UTIL_Q]));
- if (d->domain->id != IDLE_DOMAIN_ID) {
+ if (d->domain->domain_id != IDLE_DOMAIN_ID) {
extraq_check(d);
}
}
@@ -400,7 +400,7 @@ static void sedf_add_task(struct exec_domain *d)
static void sedf_free_task(struct domain *d)
{
int i;
- PRINT(2,"sedf_free_task was called, domain-id %i\n",d->id);
+ PRINT(2,"sedf_free_task was called, domain-id %i\n",d->domain_id);
ASSERT(d->sched_priv != NULL);
xfree(d->sched_priv);
@@ -414,14 +414,14 @@ static void sedf_free_task(struct domain *d)
/* Initialises idle task */
static int sedf_init_idle_task(struct exec_domain *d) {
PRINT(2,"sedf_init_idle_task was called, domain-id %i.%i\n",
- d->domain->id, d->id);
+ d->domain->domain_id, d->vcpu_id);
if ( sedf_alloc_task(d) < 0 )
return -1;
sedf_add_task(d);
EDOM_INFO(d)->deadl_abs = 0;
EDOM_INFO(d)->status &= ~SEDF_ASLEEP;
- set_bit(EDF_RUNNING, &d->flags);
+ set_bit(_VCPUF_running, &d->vcpu_flags);
/*the idle task doesn't have to turn up on any list...*/
return 0;
}
@@ -497,7 +497,7 @@ struct list_head* waitq) {
list_for_each_safe(cur, tmp, waitq) {
curinf = list_entry(cur, struct sedf_edom_info, list);
PRINT(4,"\tLooking @ dom %i.%i\n",
- curinf->exec_domain->domain->id, curinf->exec_domain->id);
+ curinf->exec_domain->domain->domain_id, curinf->exec_domain->vcpu_id);
if (PERIOD_BEGIN(curinf) <= now) {
__del_from_queue(curinf->exec_domain);
__add_to_runqueue_sort(curinf->exec_domain);
@@ -512,12 +512,12 @@ struct list_head* waitq) {
list_for_each_safe(cur, tmp, runq) {
curinf = list_entry(cur,struct sedf_edom_info,list);
PRINT(4,"\tLooking @ dom %i.%i\n",
- curinf->exec_domain->domain->id, curinf->exec_domain->id);
+ curinf->exec_domain->domain->domain_id, curinf->exec_domain->vcpu_id);
if (unlikely(curinf->slice == 0)) {
/*ignore domains with empty slice*/
PRINT(4,"\tUpdating zero-slice domain %i.%i\n",
- curinf->exec_domain->domain->id,
- curinf->exec_domain->id);
+ curinf->exec_domain->domain->domain_id,
+ curinf->exec_domain->vcpu_id);
__del_from_queue(curinf->exec_domain);
/*move them to their next period*/
@@ -534,8 +534,8 @@ struct list_head* waitq) {
PRINT(4,"\tDomain %i.%i exceeded it's deadline/"
"slice (%"PRIu64" / %"PRIu64") now: %"PRIu64
" cputime: %"PRIu64"\n",
- curinf->exec_domain->domain->id,
- curinf->exec_domain->id,
+ curinf->exec_domain->domain->domain_id,
+ curinf->exec_domain->vcpu_id,
curinf->deadl_abs, curinf->slice, now,
curinf->cputime);
__del_from_queue(curinf->exec_domain);
@@ -601,11 +601,11 @@ static inline void desched_extra_dom(s_time_t now, struct exec_domain* d) {
/*inf->short_block_lost_tot -= EXTRA_QUANTUM;*/
inf->short_block_lost_tot -= now - inf->sched_start_abs;
PRINT(3,"Domain %i.%i: Short_block_loss: %"PRIi64"\n",
- inf->exec_domain->domain->id, inf->exec_domain->id,
+ inf->exec_domain->domain->domain_id, inf->exec_domain->vcpu_id,
inf->short_block_lost_tot);
if (inf->short_block_lost_tot <= 0) {
PRINT(4,"Domain %i.%i compensated short block loss!\n",
- inf->exec_domain->domain->id, inf->exec_domain->id);
+ inf->exec_domain->domain->domain_id, inf->exec_domain->vcpu_id);
/*we have (over-)compensated our block penalty*/
inf->short_block_lost_tot = 0;
/*we don't want a place on the penalty queue anymore!*/
@@ -808,14 +808,14 @@ sched_done:
}
static void sedf_sleep(struct exec_domain *d) {
- PRINT(2,"sedf_sleep was called, domain-id %i.%i\n",d->domain->id, d->id);
+ PRINT(2,"sedf_sleep was called, domain-id %i.%i\n",d->domain->domain_id, d->vcpu_id);
if (is_idle_task(d->domain))
return;
EDOM_INFO(d)->status |= SEDF_ASLEEP;
- if ( test_bit(EDF_RUNNING, &d->flags) ) {
+ if ( test_bit(_VCPUF_running, &d->vcpu_flags) ) {
#ifdef ADV_SCHED_HISTO
adv_sched_hist_start(d->processor);
#endif
@@ -1140,14 +1140,14 @@ void sedf_wake(struct exec_domain *d) {
s_time_t now = NOW();
struct sedf_edom_info* inf = EDOM_INFO(d);
- PRINT(3, "sedf_wake was called, domain-id %i.%i\n",d->domain->id, d->id);
+ PRINT(3, "sedf_wake was called, domain-id %i.%i\n",d->domain->domain_id, d->vcpu_id);
if (unlikely(is_idle_task(d->domain)))
return;
if ( unlikely(__task_on_queue(d)) ) {
PRINT(3,"\tdomain %i.%i is already in some queue\n",
- d->domain->id, d->id);
+ d->domain->domain_id, d->vcpu_id);
return;
}
ASSERT(!sedf_runnable(d));
@@ -1160,7 +1160,7 @@ void sedf_wake(struct exec_domain *d) {
inf->deadl_abs = now + inf->slice;
PRINT(3,"waking up domain %i.%i (deadl= %"PRIu64" period= %"PRIu64" "\
- "now= %"PRIu64")\n", d->domain->id, d->id, inf->deadl_abs,
+ "now= %"PRIu64")\n", d->domain->domain_id, d->vcpu_id, inf->deadl_abs,
inf->period, now);
#ifdef SEDF_STATS
inf->block_tot++;
@@ -1222,7 +1222,7 @@ void sedf_wake(struct exec_domain *d) {
}
}
PRINT(3,"woke up domain %i.%i (deadl= %"PRIu64" period= %"PRIu64" "\
- "now= %"PRIu64")\n", d->domain->id, d->id, inf->deadl_abs,
+ "now= %"PRIu64")\n", d->domain->domain_id, d->vcpu_id, inf->deadl_abs,
inf->period, now);
if (PERIOD_BEGIN(inf) > now) {
__add_to_waitqueue_sort(d);
@@ -1257,8 +1257,8 @@ void sedf_wake(struct exec_domain *d) {
/*Print a lot of use-{full, less} information about a domains in the system*/
static void sedf_dump_domain(struct exec_domain *d) {
- printk("%i.%i has=%c ", d->domain->id, d->id,
- test_bit(EDF_RUNNING, &d->flags) ? 'T':'F');
+ printk("%i.%i has=%c ", d->domain->domain_id, d->vcpu_id,
+ test_bit(_VCPUF_running, &d->vcpu_flags) ? 'T':'F');
printk("p=%"PRIu64" sl=%"PRIu64" ddl=%"PRIu64" w=%hu c=%"PRIu64" sc=%i xtr(%s)=%"PRIu64" ew=%hu",
EDOM_INFO(d)->period, EDOM_INFO(d)->slice, EDOM_INFO(d)->deadl_abs,
EDOM_INFO(d)->weight, d->cpu_time, EDOM_INFO(d)->score[EXTRA_UTIL_Q],
@@ -1399,7 +1399,7 @@ static int sedf_adjdom(struct domain *p, struct sched_adjdom_cmd *cmd) {
PRINT(2,"sedf_adjdom was called, domain-id %i new period %"PRIu64" "\
"new slice %"PRIu64"\nlatency %"PRIu64" extra:%s\n",
- p->id, cmd->u.sedf.period, cmd->u.sedf.slice,
+ p->domain_id, cmd->u.sedf.period, cmd->u.sedf.slice,
cmd->u.sedf.latency, (cmd->u.sedf.extratime)?"yes":"no");
if ( cmd->direction == SCHED_INFO_PUT )
{
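
The sched_sedf.c hunks above are almost entirely mechanical: every "domain %i.%i" debug line now reads d->domain->domain_id and d->vcpu_id instead of the old d->domain->id / d->id pair. A minimal stand-alone sketch of that naming pattern, using cut-down hypothetical struct definitions rather than Xen's real sched.h, looks like this:

/* Illustrative only: trimmed stand-ins for Xen's domain/exec_domain
 * structures, showing the "domain.vcpu" form used in the SEDF PRINTs. */
#include <stdio.h>

struct domain {
    int domain_id;                 /* formerly 'id' */
};

struct exec_domain {
    int vcpu_id;                   /* formerly 'id' */
    struct domain *domain;
};

static void print_dom(const struct exec_domain *ed)
{
    /* Matches the "%i.%i" convention used throughout sched_sedf.c. */
    printf("dom %i.%i\n", ed->domain->domain_id, ed->vcpu_id);
}

int main(void)
{
    struct domain d = { .domain_id = 3 };
    struct exec_domain ed = { .vcpu_id = 1, .domain = &d };
    print_dom(&ed);                /* prints "dom 3.1" */
    return 0;
}
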
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 491c2f22ac..bcb6a06e6e 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -107,27 +107,27 @@ struct exec_domain *alloc_exec_domain_struct(struct domain *d,
d->exec_domain[vcpu] = ed;
ed->domain = d;
- ed->id = vcpu;
+ ed->vcpu_id = vcpu;
if ( SCHED_OP(alloc_task, ed) < 0 )
goto out;
if ( vcpu != 0 )
{
- ed->vcpu_info = &d->shared_info->vcpu_data[ed->id];
+ ed->vcpu_info = &d->shared_info->vcpu_data[ed->vcpu_id];
for_each_exec_domain( d, edc )
{
if ( (edc->next_in_list == NULL) ||
- (edc->next_in_list->id > vcpu) )
+ (edc->next_in_list->vcpu_id > vcpu) )
break;
}
ed->next_in_list = edc->next_in_list;
edc->next_in_list = ed;
- if (test_bit(EDF_CPUPINNED, &edc->flags)) {
+ if (test_bit(_VCPUF_cpu_pinned, &edc->vcpu_flags)) {
ed->processor = (edc->processor + 1) % smp_num_cpus;
- set_bit(EDF_CPUPINNED, &ed->flags);
+ set_bit(_VCPUF_cpu_pinned, &ed->vcpu_flags);
} else {
ed->processor = (edc->processor + 1) % smp_num_cpus; /* XXX */
}
@@ -169,9 +169,9 @@ void sched_add_domain(struct exec_domain *ed)
struct domain *d = ed->domain;
/* Must be unpaused by control software to start execution. */
- set_bit(EDF_CTRLPAUSE, &ed->flags);
+ set_bit(_VCPUF_ctrl_pause, &ed->vcpu_flags);
- if ( d->id != IDLE_DOMAIN_ID )
+ if ( d->domain_id != IDLE_DOMAIN_ID )
{
/* Initialise the per-domain timer. */
init_ac_timer(&ed->timer);
@@ -185,14 +185,14 @@ void sched_add_domain(struct exec_domain *ed)
}
SCHED_OP(add_task, ed);
- TRACE_2D(TRC_SCHED_DOM_ADD, d->id, ed->id);
+ TRACE_2D(TRC_SCHED_DOM_ADD, d->domain_id, ed->vcpu_id);
}
void sched_rem_domain(struct exec_domain *ed)
{
rem_ac_timer(&ed->timer);
SCHED_OP(rem_task, ed);
- TRACE_2D(TRC_SCHED_DOM_REM, ed->domain->id, ed->id);
+ TRACE_2D(TRC_SCHED_DOM_REM, ed->domain->domain_id, ed->vcpu_id);
}
void init_idle_task(void)
@@ -210,10 +210,10 @@ void domain_sleep(struct exec_domain *ed)
SCHED_OP(sleep, ed);
spin_unlock_irqrestore(&schedule_data[ed->processor].schedule_lock, flags);
- TRACE_2D(TRC_SCHED_SLEEP, ed->domain->id, ed->id);
+ TRACE_2D(TRC_SCHED_SLEEP, ed->domain->domain_id, ed->vcpu_id);
/* Synchronous. */
- while ( test_bit(EDF_RUNNING, &ed->flags) && !domain_runnable(ed) )
+ while ( test_bit(_VCPUF_running, &ed->vcpu_flags) && !domain_runnable(ed) )
cpu_relax();
}
@@ -229,10 +229,10 @@ void domain_wake(struct exec_domain *ed)
ed->wokenup = NOW();
#endif
}
- clear_bit(EDF_MIGRATED, &ed->flags);
+ clear_bit(_VCPUF_cpu_migrated, &ed->vcpu_flags);
spin_unlock_irqrestore(&schedule_data[ed->processor].schedule_lock, flags);
- TRACE_2D(TRC_SCHED_WAKE, ed->domain->id, ed->id);
+ TRACE_2D(TRC_SCHED_WAKE, ed->domain->domain_id, ed->vcpu_id);
}
/* Block the currently-executing domain until a pertinent event occurs. */
@@ -245,16 +245,16 @@ long do_block(void)
#endif
ed->vcpu_info->evtchn_upcall_mask = 0;
- set_bit(EDF_BLOCKED, &ed->flags);
+ set_bit(_VCPUF_blocked, &ed->vcpu_flags);
/* Check for events /after/ blocking: avoids wakeup waiting race. */
if ( event_pending(ed) )
{
- clear_bit(EDF_BLOCKED, &ed->flags);
+ clear_bit(_VCPUF_blocked, &ed->vcpu_flags);
}
else
{
- TRACE_2D(TRC_SCHED_BLOCK, ed->domain->id, ed->id);
+ TRACE_2D(TRC_SCHED_BLOCK, ed->domain->domain_id, ed->vcpu_id);
__enter_scheduler();
}
@@ -268,7 +268,7 @@ static long do_yield(void)
adv_sched_hist_start(current->processor);
#endif
- TRACE_2D(TRC_SCHED_YIELD, current->domain->id, current->id);
+ TRACE_2D(TRC_SCHED_YIELD, current->domain->domain_id, current->vcpu_id);
__enter_scheduler();
return 0;
}
@@ -297,7 +297,7 @@ long do_sched_op(unsigned long op)
case SCHEDOP_shutdown:
{
- TRACE_3D(TRC_SCHED_SHUTDOWN, current->domain->id, current->id,
+ TRACE_3D(TRC_SCHED_SHUTDOWN, current->domain->domain_id, current->vcpu_id,
(op >> SCHEDOP_reasonshift));
domain_shutdown((u8)(op >> SCHEDOP_reasonshift));
break;
@@ -400,7 +400,7 @@ long sched_adjdom(struct sched_adjdom_cmd *cmd)
spin_unlock(&schedule_data[cpu].schedule_lock);
__clear_cpu_bits(have_lock);
- TRACE_1D(TRC_SCHED_ADJDOM, d->id);
+ TRACE_1D(TRC_SCHED_ADJDOM, d->domain_id);
put_domain(d);
return 0;
}
@@ -451,7 +451,7 @@ static void __enter_scheduler(void)
add_ac_timer(&schedule_data[cpu].s_timer);
/* Must be protected by the schedule_lock! */
- set_bit(EDF_RUNNING, &next->flags);
+ set_bit(_VCPUF_running, &next->vcpu_flags);
spin_unlock_irq(&schedule_data[cpu].schedule_lock);
@@ -492,8 +492,8 @@ static void __enter_scheduler(void)
}
TRACE_4D(TRC_SCHED_SWITCH,
- prev->domain->id, prev->id,
- next->domain->id, next->id);
+ prev->domain->domain_id, prev->vcpu_id,
+ next->domain->domain_id, next->vcpu_id);
#ifdef ADV_SCHED_HISTO
adv_sched_hist_to_stop(cpu);
diff --git a/xen/drivers/char/console.c b/xen/drivers/char/console.c
index d9eb0c474a..73394c1973 100644
--- a/xen/drivers/char/console.c
+++ b/xen/drivers/char/console.c
@@ -303,7 +303,7 @@ long do_console_io(int cmd, int count, char *buffer)
#ifndef VERBOSE
/* Only domain-0 may access the emergency console. */
- if ( current->domain->id != 0 )
+ if ( current->domain->domain_id != 0 )
return -EPERM;
#endif
diff --git a/xen/include/asm-x86/config.h b/xen/include/asm-x86/config.h
index 1ddc8228db..d98a57e5a2 100644
--- a/xen/include/asm-x86/config.h
+++ b/xen/include/asm-x86/config.h
@@ -275,9 +275,9 @@ extern unsigned long _end; /* standard ELF symbol */
extern unsigned long xenheap_phys_end; /* user-configurable */
#endif
-#define GDT_VIRT_START(ed) (PERDOMAIN_VIRT_START + ((ed)->id << PDPT_VCPU_VA_SHIFT))
+#define GDT_VIRT_START(ed) (PERDOMAIN_VIRT_START + ((ed)->vcpu_id << PDPT_VCPU_VA_SHIFT))
#define GDT_VIRT_END(ed) (GDT_VIRT_START(ed) + (64*1024))
-#define LDT_VIRT_START(ed) (PERDOMAIN_VIRT_START + (64*1024) + ((ed)->id << PDPT_VCPU_VA_SHIFT))
+#define LDT_VIRT_START(ed) (PERDOMAIN_VIRT_START + (64*1024) + ((ed)->vcpu_id << PDPT_VCPU_VA_SHIFT))
#define LDT_VIRT_END(ed) (LDT_VIRT_START(ed) + (64*1024))
#define PDPT_VCPU_SHIFT 5
diff --git a/xen/include/asm-x86/debugger.h b/xen/include/asm-x86/debugger.h
index 6e9cf223d5..cff37a83a8 100644
--- a/xen/include/asm-x86/debugger.h
+++ b/xen/include/asm-x86/debugger.h
@@ -62,14 +62,14 @@ static inline int debugger_trap_entry(
{
struct exec_domain *ed = current;
- if ( !KERNEL_MODE(ed, regs) || (ed->domain->id == 0) )
+ if ( !KERNEL_MODE(ed, regs) || (ed->domain->domain_id == 0) )
return 0;
switch ( vector )
{
case TRAP_int3:
case TRAP_debug:
- set_bit(EDF_CTRLPAUSE, &ed->flags);
+ set_bit(_VCPUF_ctrl_pause, &ed->vcpu_flags);
raise_softirq(SCHEDULE_SOFTIRQ);
return 1;
}
diff --git a/xen/include/asm-x86/i387.h b/xen/include/asm-x86/i387.h
index 01039ab648..a1f0feaec0 100644
--- a/xen/include/asm-x86/i387.h
+++ b/xen/include/asm-x86/i387.h
@@ -19,7 +19,7 @@ extern void save_init_fpu(struct exec_domain *tsk);
extern void restore_fpu(struct exec_domain *tsk);
#define unlazy_fpu(_tsk) do { \
- if ( test_bit(EDF_USEDFPU, &(_tsk)->flags) ) \
+ if ( test_bit(_VCPUF_fpu_dirtied, &(_tsk)->vcpu_flags) ) \
save_init_fpu(_tsk); \
} while ( 0 )
@@ -31,9 +31,9 @@ extern void restore_fpu(struct exec_domain *tsk);
/* Make domain the FPU owner */
static inline void setup_fpu(struct exec_domain *ed)
{
- if ( !test_and_set_bit(EDF_USEDFPU, &ed->flags) )
+ if ( !test_and_set_bit(_VCPUF_fpu_dirtied, &ed->vcpu_flags) )
{
- if ( test_bit(EDF_DONEFPUINIT, &ed->flags) )
+ if ( test_bit(_VCPUF_fpu_initialised, &ed->vcpu_flags) )
restore_fpu(ed);
else
init_fpu();
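
The i387.h change renames the lazy-FPU bookkeeping bits: EDF_DONEFPUINIT becomes _VCPUF_fpu_initialised ("has FPU state ever been set up for this VCPU?") and EDF_USEDFPU becomes _VCPUF_fpu_dirtied ("has it touched the FPU since the last save?"). The stand-alone sketch below models that two-flag state machine; the bit helpers and printf-stub FPU routines are placeholders for Xen's asm primitives and real FPU code, not the actual implementation.

#include <stdio.h>

#define _VCPUF_fpu_initialised 0
#define _VCPUF_fpu_dirtied     1

static int  test(int nr, const unsigned long *w) { return (*w >> nr) & 1UL; }
static void clear(int nr, unsigned long *w)      { *w &= ~(1UL << nr); }
static int  test_and_set(int nr, unsigned long *w)
{
    int old = test(nr, w);
    *w |= 1UL << nr;
    return old;
}

/* Stubs standing in for the real FPU init/save/restore routines. */
static void init_fpu(unsigned long *f)
{
    printf("init_fpu\n");
    *f |= 1UL << _VCPUF_fpu_initialised;   /* initialised on first use */
}
static void restore_fpu(void) { printf("restore_fpu\n"); }
static void save_init_fpu(unsigned long *f)
{
    printf("save_init_fpu\n");
    clear(_VCPUF_fpu_dirtied, f);          /* state saved, no longer dirty */
}

/* Mirrors setup_fpu(): first FPU touch since the last save. */
static void setup_fpu(unsigned long *vcpu_flags)
{
    if ( !test_and_set(_VCPUF_fpu_dirtied, vcpu_flags) )
    {
        if ( test(_VCPUF_fpu_initialised, vcpu_flags) )
            restore_fpu();
        else
            init_fpu(vcpu_flags);
    }
}

/* Mirrors unlazy_fpu(): save state only if it was actually dirtied. */
static void unlazy_fpu(unsigned long *vcpu_flags)
{
    if ( test(_VCPUF_fpu_dirtied, vcpu_flags) )
        save_init_fpu(vcpu_flags);
}

int main(void)
{
    unsigned long flags = 0;
    setup_fpu(&flags);    /* first ever use: init_fpu           */
    unlazy_fpu(&flags);   /* dirtied, so save_init_fpu          */
    setup_fpu(&flags);    /* used again: restore_fpu this time  */
    unlazy_fpu(&flags);
    unlazy_fpu(&flags);   /* no longer dirtied: nothing to save */
    return 0;
}
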
diff --git a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
index 5ba7d72bbb..f176427991 100644
--- a/xen/include/asm-x86/shadow.h
+++ b/xen/include/asm-x86/shadow.h
@@ -317,7 +317,7 @@ extern int shadow_status_noswap;
#ifdef VERBOSE
#define SH_LOG(_f, _a...) \
printk("DOM%uP%u: SH_LOG(%d): " _f "\n", \
- current->domain->id , current->processor, __LINE__ , ## _a )
+ current->domain->domain_id , current->processor, __LINE__ , ## _a )
#else
#define SH_LOG(_f, _a...) ((void)0)
#endif
@@ -325,7 +325,7 @@ extern int shadow_status_noswap;
#if SHADOW_VERBOSE_DEBUG
#define SH_VLOG(_f, _a...) \
printk("DOM%uP%u: SH_VLOG(%d): " _f "\n", \
- current->domain->id, current->processor, __LINE__ , ## _a )
+ current->domain->domain_id, current->processor, __LINE__ , ## _a )
#else
#define SH_VLOG(_f, _a...) ((void)0)
#endif
@@ -333,7 +333,7 @@ extern int shadow_status_noswap;
#if SHADOW_VVERBOSE_DEBUG
#define SH_VVLOG(_f, _a...) \
printk("DOM%uP%u: SH_VVLOG(%d): " _f "\n", \
- current->domain->id, current->processor, __LINE__ , ## _a )
+ current->domain->domain_id, current->processor, __LINE__ , ## _a )
#else
#define SH_VVLOG(_f, _a...) ((void)0)
#endif
@@ -341,7 +341,7 @@ extern int shadow_status_noswap;
#if SHADOW_VVVERBOSE_DEBUG
#define SH_VVVLOG(_f, _a...) \
printk("DOM%uP%u: SH_VVVLOG(%d): " _f "\n", \
- current->domain->id, current->processor, __LINE__ , ## _a )
+ current->domain->domain_id, current->processor, __LINE__ , ## _a )
#else
#define SH_VVVLOG(_f, _a...) ((void)0)
#endif
@@ -349,7 +349,7 @@ extern int shadow_status_noswap;
#if FULLSHADOW_DEBUG
#define FSH_LOG(_f, _a...) \
printk("DOM%uP%u: FSH_LOG(%d): " _f "\n", \
- current->domain->id, current->processor, __LINE__ , ## _a )
+ current->domain->domain_id, current->processor, __LINE__ , ## _a )
#else
#define FSH_LOG(_f, _a...) ((void)0)
#endif
@@ -384,7 +384,8 @@ shadow_get_page_from_l1e(l1_pgentry_t l1e, struct domain *d)
res = get_page_from_l1e(nl1e, owner);
printk("tried to map mfn %lx from domain %d into shadow page tables "
"of domain %d; %s\n",
- mfn, owner->id, d->id, res ? "success" : "failed");
+ mfn, owner->domain_id, d->domain_id,
+ res ? "success" : "failed");
}
if ( unlikely(!res) )
@@ -1189,7 +1190,7 @@ static inline unsigned long __shadow_status(
{
printk("d->id=%d gpfn=%lx gmfn=%lx stype=%lx c=%x t=%x "
"mfn_out_of_sync(gmfn)=%d mfn_is_page_table(gmfn)=%d\n",
- d->id, gpfn, gmfn, stype,
+ d->domain_id, gpfn, gmfn, stype,
frame_table[gmfn].count_info,
frame_table[gmfn].u.inuse.type_info,
mfn_out_of_sync(gmfn), mfn_is_page_table(gmfn));
diff --git a/xen/include/xen/event.h b/xen/include/xen/event.h
index ac7ce6b1a0..306883c594 100644
--- a/xen/include/xen/event.h
+++ b/xen/include/xen/event.h
@@ -35,15 +35,15 @@ static inline void evtchn_set_pending(struct exec_domain *ed, int port)
set_bit(0, &ed->vcpu_info->evtchn_upcall_pending);
/*
- * NB1. 'flags' and 'processor' must be checked /after/ update of
+ * NB1. 'vcpu_flags' and 'processor' must be checked /after/ update of
* pending flag. These values may fluctuate (after all, we hold no
* locks) but the key insight is that each change will cause
* evtchn_upcall_pending to be polled.
*
- * NB2. We save DF_RUNNING across the unblock to avoid a needless
+ * NB2. We save VCPUF_running across the unblock to avoid a needless
* IPI for domains that we IPI'd to unblock.
*/
- running = test_bit(EDF_RUNNING, &ed->flags);
+ running = test_bit(_VCPUF_running, &ed->vcpu_flags);
exec_domain_unblock(ed);
if ( running )
smp_send_event_check_cpu(ed->processor);
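
The event.h hunk only rewords the NB1/NB2 comments and the flag test, but the logic they describe is worth spelling out: the notifier samples VCPUF_running before unblocking the target VCPU, and only sends the event-check IPI if that VCPU was already running somewhere, since a blocked VCPU will pick the event up on its wakeup path anyway. A rough, single-threaded sketch of that decision follows; the wake() and send_ipi() stand-ins are trivial placeholders for domain_wake() and smp_send_event_check_cpu().

#include <stdio.h>
#include <stdbool.h>

#define _VCPUF_blocked 3
#define _VCPUF_running 5

static int test_bit_(int nr, const unsigned long *w) { return (*w >> nr) & 1UL; }
static int test_and_clear_(int nr, unsigned long *w)
{
    int old = test_bit_(nr, w);
    *w &= ~(1UL << nr);
    return old;
}

static void wake(void)     { printf("wake: reschedule the VCPU\n"); }
static void send_ipi(void) { printf("IPI: poke the CPU it is running on\n"); }

static void notify(unsigned long *vcpu_flags, bool *evtchn_pending)
{
    *evtchn_pending = true;                   /* 1. mark the event pending   */

    /* 2. Sample 'running' BEFORE unblocking, as the NB2 comment says,
     *    so a VCPU we just woke up does not also get a needless IPI.  */
    int running = test_bit_(_VCPUF_running, vcpu_flags);

    if ( test_and_clear_(_VCPUF_blocked, vcpu_flags) )
        wake();                               /* 3. unblock if it was blocked */

    if ( running )
        send_ipi();                           /* 4. IPI only if it was running */
}

int main(void)
{
    bool pending = false;

    unsigned long blocked_vcpu = 1UL << _VCPUF_blocked;
    notify(&blocked_vcpu, &pending);          /* wake, no IPI */

    unsigned long running_vcpu = 1UL << _VCPUF_running;
    notify(&running_vcpu, &pending);          /* IPI, no wake */
    return 0;
}
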
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index dd4b95b1dc..96a3be971c 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -62,7 +62,7 @@ int init_exec_domain_event_channels(struct exec_domain *ed);
struct exec_domain
{
- int id;
+ int vcpu_id;
int processor;
@@ -80,7 +80,7 @@ struct exec_domain
s_time_t wokenup; /* time domain got woken up */
void *sched_priv; /* scheduler-specific data */
- unsigned long flags;
+ unsigned long vcpu_flags;
u16 virq_to_evtchn[NR_VIRQS];
@@ -97,7 +97,7 @@ struct exec_domain
struct domain
{
- domid_t id;
+ domid_t domain_id;
shared_info_t *shared_info; /* shared data area */
spinlock_t time_lock;
@@ -113,7 +113,7 @@ struct domain
unsigned int xenheap_pages; /* # pages allocated from Xen heap */
/* Scheduling. */
- int shutdown_code; /* code value from OS (if DF_SHUTDOWN). */
+ int shutdown_code; /* code value from OS (if DOMF_shutdown) */
void *sched_priv; /* scheduler-specific data */
struct domain *next_in_list;
@@ -135,7 +135,7 @@ struct domain
u16 pirq_to_evtchn[NR_PIRQS];
u32 pirq_mask[NR_PIRQS/32];
- unsigned long flags;
+ unsigned long domain_flags;
unsigned long vm_assist;
atomic_t refcnt;
@@ -172,7 +172,7 @@ extern struct exec_domain idle0_exec_domain;
extern struct exec_domain *idle_task[NR_CPUS];
#define IDLE_DOMAIN_ID (0x7FFFU)
-#define is_idle_task(_p) (test_bit(DF_IDLETASK, &(_p)->flags))
+#define is_idle_task(_d) (test_bit(_DOMF_idle_domain, &(_d)->domain_flags))
struct exec_domain *alloc_exec_domain_struct(struct domain *d,
unsigned long vcpu);
@@ -328,29 +328,67 @@ extern struct domain *domain_list;
(_ed) != NULL; \
(_ed) = (_ed)->next_in_list )
-#define EDF_DONEFPUINIT 0 /* Has the FPU been initialised for this task? */
-#define EDF_USEDFPU 1 /* Has this task used the FPU since last save? */
-#define EDF_GUEST_STTS 2 /* Has the guest OS requested 'stts'? */
-#define EDF_BLOCKED 3 /* Domain is blocked waiting for an event. */
-#define EDF_CTRLPAUSE 4 /* Domain is paused by controller software. */
-#define EDF_RUNNING 5 /* Currently running on a CPU. */
-#define EDF_CPUPINNED 6 /* Disables auto-migration. */
-#define EDF_MIGRATED 7 /* Domain migrated between CPUs. */
-#define EDF_DONEINIT 8 /* Initialization completed . */
-
-#define DF_CONSTRUCTED 0 /* Has the guest OS been fully built yet? */
-#define DF_IDLETASK 1 /* Is this one of the per-CPU idle domains? */
-#define DF_PRIVILEGED 2 /* Is this domain privileged? */
-#define DF_PHYSDEV 3 /* May this domain do IO to physical devices? */
-#define DF_SHUTDOWN 4 /* Guest shut itself down for some reason. */
-#define DF_CRASHED 5 /* Domain crashed inside Xen, cannot continue. */
-#define DF_DYING 6 /* Death rattle. */
+/*
+ * Per-VCPU flags (vcpu_flags).
+ */
+ /* Has the FPU been initialised? */
+#define _VCPUF_fpu_initialised 0
+#define VCPUF_fpu_initialised (1UL<<_VCPUF_fpu_initialised)
+ /* Has the FPU been used since it was last saved? */
+#define _VCPUF_fpu_dirtied 1
+#define VCPUF_fpu_dirtied (1UL<<_VCPUF_fpu_dirtied)
+ /* Has the guest OS requested 'stts'? */
+#define _VCPUF_guest_stts 2
+#define VCPUF_guest_stts (1UL<<_VCPUF_guest_stts)
+ /* Domain is blocked waiting for an event. */
+#define _VCPUF_blocked 3
+#define VCPUF_blocked (1UL<<_VCPUF_blocked)
+ /* Domain is paused by controller software. */
+#define _VCPUF_ctrl_pause 4
+#define VCPUF_ctrl_pause (1UL<<_VCPUF_ctrl_pause)
+ /* Currently running on a CPU? */
+#define _VCPUF_running 5
+#define VCPUF_running (1UL<<_VCPUF_running)
+ /* Disables auto-migration between CPUs. */
+#define _VCPUF_cpu_pinned 6
+#define VCPUF_cpu_pinned (1UL<<_VCPUF_cpu_pinned)
+ /* Domain migrated between CPUs. */
+#define _VCPUF_cpu_migrated 7
+#define VCPUF_cpu_migrated (1UL<<_VCPUF_cpu_migrated)
+ /* Initialization completed. */
+#define _VCPUF_initialised 8
+#define VCPUF_initialised (1UL<<_VCPUF_initialised)
+
+/*
+ * Per-domain flags (domain_flags).
+ */
+ /* Has the guest OS been fully built yet? */
+#define _DOMF_constructed 0
+#define DOMF_constructed (1UL<<_DOMF_constructed)
+ /* Is this one of the per-CPU idle domains? */
+#define _DOMF_idle_domain 1
+#define DOMF_idle_domain (1UL<<_DOMF_idle_domain)
+ /* Is this domain privileged? */
+#define _DOMF_privileged 2
+#define DOMF_privileged (1UL<<_DOMF_privileged)
+ /* May this domain do IO to physical devices? */
+#define _DOMF_physdev_access 3
+#define DOMF_physdev_access (1UL<<_DOMF_physdev_access)
+ /* Guest shut itself down for some reason. */
+#define _DOMF_shutdown 4
+#define DOMF_shutdown (1UL<<_DOMF_shutdown)
+ /* Domain has crashed and cannot continue to execute. */
+#define _DOMF_crashed 5
+#define DOMF_crashed (1UL<<_DOMF_crashed)
+ /* Death rattle. */
+#define _DOMF_dying 6
+#define DOMF_dying (1UL<<_DOMF_dying)
static inline int domain_runnable(struct exec_domain *ed)
{
return ( (atomic_read(&ed->pausecnt) == 0) &&
- !(ed->flags & ((1<<EDF_BLOCKED)|(1<<EDF_CTRLPAUSE))) &&
- !(ed->domain->flags & ((1<<DF_SHUTDOWN)|(1<<DF_CRASHED))) );
+ !(ed->vcpu_flags & (VCPUF_blocked|VCPUF_ctrl_pause)) &&
+ !(ed->domain->domain_flags & (DOMF_shutdown|DOMF_crashed)) );
}
static inline void exec_domain_pause(struct exec_domain *ed)
@@ -392,7 +430,7 @@ static inline void domain_unpause(struct domain *d)
static inline void exec_domain_unblock(struct exec_domain *ed)
{
- if ( test_and_clear_bit(EDF_BLOCKED, &ed->flags) )
+ if ( test_and_clear_bit(_VCPUF_blocked, &ed->vcpu_flags) )
domain_wake(ed);
}
@@ -403,7 +441,7 @@ static inline void domain_pause_by_systemcontroller(struct domain *d)
for_each_exec_domain ( d, ed )
{
ASSERT(ed != current);
- if ( !test_and_set_bit(EDF_CTRLPAUSE, &ed->flags) )
+ if ( !test_and_set_bit(_VCPUF_ctrl_pause, &ed->vcpu_flags) )
domain_sleep(ed);
}
@@ -416,14 +454,15 @@ static inline void domain_unpause_by_systemcontroller(struct domain *d)
for_each_exec_domain ( d, ed )
{
- if ( test_and_clear_bit(EDF_CTRLPAUSE, &ed->flags) )
+ if ( test_and_clear_bit(_VCPUF_ctrl_pause, &ed->vcpu_flags) )
domain_wake(ed);
}
}
-
-#define IS_PRIV(_d) (test_bit(DF_PRIVILEGED, &(_d)->flags))
-#define IS_CAPABLE_PHYSDEV(_d) (test_bit(DF_PHYSDEV, &(_d)->flags))
+#define IS_PRIV(_d) \
+ (test_bit(_DOMF_privileged, &(_d)->domain_flags))
+#define IS_CAPABLE_PHYSDEV(_d) \
+ (test_bit(_DOMF_physdev_access, &(_d)->domain_flags))
#define VM_ASSIST(_d,_t) (test_bit((_t), &(_d)->vm_assist))
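
A final note on the sched.h hunk: each renamed flag now comes in two forms, an underscore-prefixed bit index (_VCPUF_running, _DOMF_shutdown, ...) for the test_bit/set_bit family, and a mask (VCPUF_running, DOMF_shutdown, ...) for combined word tests such as the rewritten domain_runnable(). The small stand-alone program below illustrates how the two forms line up; the my_* helpers merely imitate Xen's asm/bitops.h so the example compiles on its own.

#include <stdio.h>

/* Same dual definitions as in the patch: index for bitops, mask for tests. */
#define _VCPUF_blocked     3
#define VCPUF_blocked      (1UL << _VCPUF_blocked)
#define _VCPUF_ctrl_pause  4
#define VCPUF_ctrl_pause   (1UL << _VCPUF_ctrl_pause)

/* Non-atomic stand-ins for Xen's set_bit()/test_bit(). */
static void my_set_bit(int nr, unsigned long *word)  { *word |= 1UL << nr; }
static int  my_test_bit(int nr, const unsigned long *word)
{
    return (*word >> nr) & 1UL;
}

int main(void)
{
    unsigned long vcpu_flags = 0;

    my_set_bit(_VCPUF_blocked, &vcpu_flags);          /* index form */

    /* Mask form: one AND checks several conditions at once, exactly the
     * shape used by the new domain_runnable(). */
    int runnable = !(vcpu_flags & (VCPUF_blocked | VCPUF_ctrl_pause));

    printf("blocked=%d runnable=%d\n",
           my_test_bit(_VCPUF_blocked, &vcpu_flags), runnable);
    return 0;
}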