about summary refs log tree commit diff stats
path: root/xen
diff options
context:
space:
mode:
author kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk> 2004-06-03 11:45:16 +0000
committer kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk> 2004-06-03 11:45:16 +0000
commit 1f83b68652a9a1b532d2204c07846a75c14d8937 (patch)
tree 8b0b278f1a404e087d40197e24226dfacc876548 /xen
parent 96278e6472cd5a17d8cbd03fa72565026a982e9c (diff)
download xen-1f83b68652a9a1b532d2204c07846a75c14d8937.tar.gz
download xen-1f83b68652a9a1b532d2204c07846a75c14d8937.tar.bz2
download xen-1f83b68652a9a1b532d2204c07846a75c14d8937.zip
bitkeeper revision 1.930 (40bf0f4cD6X2TCNUTUzQPA_qDsagoQ)
Reduce domid to 32 bits only -- more than enough.
Diffstat (limited to 'xen')
-rw-r--r--xen/common/dom0_ops.c337
-rw-r--r--xen/common/dom_mem_ops.c4
-rw-r--r--xen/common/domain.c78
-rw-r--r--xen/common/keyhandler.c26
-rw-r--r--xen/common/memory.c48
-rw-r--r--xen/common/network.c7
-rw-r--r--xen/common/physdev.c14
-rw-r--r--xen/common/sched_atropos.c2
-rw-r--r--xen/common/sched_bvt.c15
-rw-r--r--xen/common/schedule.c6
-rw-r--r--xen/drivers/block/xen_block.c2
-rw-r--r--xen/drivers/block/xen_vbd.c16
-rw-r--r--xen/drivers/char/console.c10
-rw-r--r--xen/include/hypervisor-ifs/dom0_ops.h101
-rw-r--r--xen/include/hypervisor-ifs/event_channel.h30
-rw-r--r--xen/include/hypervisor-ifs/hypervisor-if.h26
-rw-r--r--xen/include/hypervisor-ifs/network.h2
-rw-r--r--xen/include/hypervisor-ifs/sched_ctl.h5
-rw-r--r--xen/include/hypervisor-ifs/vbd.h2
-rw-r--r--xen/include/xen/mm.h7
-rw-r--r--xen/include/xen/sched.h2
-rw-r--r--xen/include/xen/shadow.h427
-rw-r--r--xen/net/dev.c27
23 files changed, 592 insertions, 602 deletions
diff --git a/xen/common/dom0_ops.c b/xen/common/dom0_ops.c
index b4baf6c141..76d8afd437 100644
--- a/xen/common/dom0_ops.c
+++ b/xen/common/dom0_ops.c
@@ -42,7 +42,7 @@ static void write_msr_for(void *unused)
static void read_msr_for(void *unused)
{
if (((1 << current->processor) & msr_cpu_mask))
- rdmsr(msr_addr, msr_lo, msr_hi);
+ rdmsr(msr_addr, msr_lo, msr_hi);
}
@@ -64,8 +64,9 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
return -EACCES;
}
- TRACE_5D( TRC_DOM0OP_ENTER_BASE + op->cmd,
- 0, op->u.dummy[0], op->u.dummy[1], op->u.dummy[2], op->u.dummy[3] );
+ TRACE_5D(TRC_DOM0OP_ENTER_BASE + op->cmd,
+ 0, op->u.dummy[0], op->u.dummy[1],
+ op->u.dummy[2], op->u.dummy[3] );
switch ( op->cmd )
{
@@ -102,19 +103,20 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
case DOM0_STOPDOMAIN:
{
ret = stop_other_domain(op->u.stopdomain.domain);
-
- /* This is grim, but helps for live migrate. It's also unsafe
- in the strict sense as we're not explicitly setting a
- timeout, but dom0 is bound to have other timers going off to
- wake us back up.
- We go to sleep so that the other domain can stop quicker, hence
- we have less total down time in a migrate.
- */
- if( ret == 0 && op->u.stopdomain.sync == 1 )
- {
- extern long do_block( void );
- do_block(); // Yuk...
- }
+
+ /*
+ * This is grim, but helps for live migrate. It's also unsafe
+ * in the strict sense as we're not explicitly setting a
+ * timeout, but dom0 is bound to have other timers going off to
+ * wake us back up.
+ * We go to sleep so that the other domain can stop quicker, hence
+ * we have less total down time in a migrate.
+ */
+ if( ret == 0 && op->u.stopdomain.sync == 1 )
+ {
+ extern long do_block( void );
+ do_block(); /* Yuk... */
+ }
}
break;
@@ -127,24 +129,34 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
domid_t dom;
ret = -ENOMEM;
- spin_lock(&domnr_lock);
- dom = ++domnr;
- spin_unlock(&domnr_lock);
+ /* Search for an unused domain identifier. */
+ for ( ; ; )
+ {
+ spin_lock(&domnr_lock);
+ /* Wrap the roving counter when we reach first special value. */
+ if ( (dom = ++domnr) == DOMID_SELF )
+ dom = domnr = 1;
+ spin_unlock(&domnr_lock);
+
+ if ( (p = find_domain_by_id(dom)) == NULL )
+ break;
+ put_task_struct(p);
+ }
- if (op->u.createdomain.cpu == -1 )
- pro = (unsigned int)dom % smp_num_cpus;
- else
- pro = op->u.createdomain.cpu % smp_num_cpus;
+ if (op->u.createdomain.cpu == -1 )
+ pro = (unsigned int)dom % smp_num_cpus;
+ else
+ pro = op->u.createdomain.cpu % smp_num_cpus;
p = do_createdomain(dom, pro);
if ( p == NULL )
break;
- if ( op->u.createdomain.name[0] )
+ if ( op->u.createdomain.name[0] )
{
strncpy(p->name, op->u.createdomain.name, MAX_DOMAIN_NAME);
p->name[MAX_DOMAIN_NAME - 1] = '\0';
- }
+ }
ret = alloc_new_dom_mem(p, op->u.createdomain.memory_kb);
if ( ret != 0 )
@@ -192,7 +204,8 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
else
{
/* Pause domain if necessary. */
- if( !(p->state & TASK_STOPPED) && !(p->state & TASK_PAUSED) )
+ if( !(p->state & TASK_STOPPED) &&
+ !(p->state & TASK_PAUSED) )
{
sched_pause_sync(p);
we_paused = 1;
@@ -210,7 +223,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
ret = 0;
}
put_task_struct(p);
- }
+ }
}
}
break;
@@ -278,13 +291,13 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
for_each_domain ( p )
{
if ( p->domain >= op->u.getdomaininfo.domain )
- break;
+ break;
}
if ( p == NULL )
{
ret = -ESRCH;
- goto gdi_out;
+ goto gdi_out;
}
else
{
@@ -304,13 +317,13 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
if ( p->state == TASK_STOPPED && op->u.getdomaininfo.ctxt )
{
- full_execution_context_t *c=NULL;
+ full_execution_context_t *c=NULL;
- if ( (c = kmalloc(sizeof(*c), GFP_KERNEL)) == NULL )
- {
- ret= -ENOMEM;
- goto gdi_out;
- }
+ if ( (c = kmalloc(sizeof(*c), GFP_KERNEL)) == NULL )
+ {
+ ret= -ENOMEM;
+ goto gdi_out;
+ }
rmb(); /* Ensure that we see saved register state. */
c->flags = 0;
@@ -360,17 +373,17 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
c->failsafe_callback_eip =
p->failsafe_address;
- if( copy_to_user(op->u.getdomaininfo.ctxt, c, sizeof(*c)) )
- {
- ret = -EINVAL;
- }
+ if( copy_to_user(op->u.getdomaininfo.ctxt, c, sizeof(*c)) )
+ {
+ ret = -EINVAL;
+ }
- if (c) kfree(c);
+ if (c) kfree(c);
}
}
- if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
- ret = -EINVAL;
+ if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
+ ret = -EINVAL;
gdi_out:
read_unlock_irqrestore(&tasklist_lock, flags);
@@ -437,16 +450,16 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
case DOM0_MSR:
{
if ( op->u.msr.write )
- {
+ {
msr_cpu_mask = op->u.msr.cpu_mask;
msr_addr = op->u.msr.msr;
msr_lo = op->u.msr.in1;
msr_hi = op->u.msr.in2;
smp_call_function(write_msr_for, NULL, 1, 1);
write_msr_for(NULL);
- }
+ }
else
- {
+ {
msr_cpu_mask = op->u.msr.cpu_mask;
msr_addr = op->u.msr.msr;
smp_call_function(read_msr_for, NULL, 1, 1);
@@ -455,7 +468,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
op->u.msr.out1 = msr_lo;
op->u.msr.out2 = msr_hi;
copy_to_user(u_dom0_op, op, sizeof(*op));
- }
+ }
ret = 0;
}
break;
@@ -481,7 +494,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
case DOM0_GETTBUFS:
{
ret = get_tb_info(&op->u.gettbufs);
- copy_to_user(u_dom0_op, op, sizeof(*op));
+ copy_to_user(u_dom0_op, op, sizeof(*op));
}
break;
#endif
@@ -489,8 +502,8 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
case DOM0_READCONSOLE:
{
ret = read_console_ring(op->u.readconsole.str,
- op->u.readconsole.count,
- op->u.readconsole.cmd);
+ op->u.readconsole.count,
+ op->u.readconsole.cmd);
}
break;
@@ -530,15 +543,15 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
case DOM0_SHADOW_CONTROL:
{
- struct task_struct *p;
- ret = -ESRCH;
- p = find_domain_by_id( op->u.shadow_control.domain );
- if ( p )
- {
+ struct task_struct *p;
+ ret = -ESRCH;
+ p = find_domain_by_id( op->u.shadow_control.domain );
+ if ( p )
+ {
ret = shadow_mode_control(p, &op->u.shadow_control );
- put_task_struct(p);
- copy_to_user(u_dom0_op, op, sizeof(*op));
- }
+ put_task_struct(p);
+ copy_to_user(u_dom0_op, op, sizeof(*op));
+ }
}
break;
@@ -553,131 +566,131 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
case DOM0_SETDOMAINNAME:
{
- struct task_struct *p;
- p = find_domain_by_id( op->u.setdomainname.domain );
- if ( p )
- {
- strncpy(p->name, op->u.setdomainname.name, MAX_DOMAIN_NAME);
- put_task_struct(p);
- }
- else
- ret = -ESRCH;
+ struct task_struct *p;
+ p = find_domain_by_id( op->u.setdomainname.domain );
+ if ( p )
+ {
+ strncpy(p->name, op->u.setdomainname.name, MAX_DOMAIN_NAME);
+ put_task_struct(p);
+ }
+ else
+ ret = -ESRCH;
}
break;
case DOM0_SETDOMAININITIALMEM:
{
- struct task_struct *p;
- ret = -ESRCH;
- p = find_domain_by_id( op->u.setdomaininitialmem.domain );
- if ( p )
- {
- /* should only be used *before* domain is built. */
+ struct task_struct *p;
+ ret = -ESRCH;
+ p = find_domain_by_id( op->u.setdomaininitialmem.domain );
+ if ( p )
+ {
+ /* should only be used *before* domain is built. */
if ( ! test_bit(PF_CONSTRUCTED, &p->flags) )
- ret = alloc_new_dom_mem(
- p, op->u.setdomaininitialmem.initial_memkb );
- else
- ret = -EINVAL;
- put_task_struct(p);
- }
+ ret = alloc_new_dom_mem(
+ p, op->u.setdomaininitialmem.initial_memkb );
+ else
+ ret = -EINVAL;
+ put_task_struct(p);
+ }
}
break;
case DOM0_SETDOMAINMAXMEM:
{
- struct task_struct *p;
- p = find_domain_by_id( op->u.setdomainmaxmem.domain );
- if ( p )
- {
- p->max_pages =
- (op->u.setdomainmaxmem.max_memkb+PAGE_SIZE-1)>> PAGE_SHIFT;
- put_task_struct(p);
- }
- else
- ret = -ESRCH;
+ struct task_struct *p;
+ p = find_domain_by_id( op->u.setdomainmaxmem.domain );
+ if ( p )
+ {
+ p->max_pages =
+ (op->u.setdomainmaxmem.max_memkb+PAGE_SIZE-1)>> PAGE_SHIFT;
+ put_task_struct(p);
+ }
+ else
+ ret = -ESRCH;
}
break;
case DOM0_GETPAGEFRAMEINFO2:
{
#define GPF2_BATCH 128
- int n,j;
+ int n,j;
int num = op->u.getpageframeinfo2.num;
domid_t dom = op->u.getpageframeinfo2.domain;
- unsigned long *s_ptr = (unsigned long*) op->u.getpageframeinfo2.array;
+ unsigned long *s_ptr = (unsigned long*) op->u.getpageframeinfo2.array;
struct task_struct *p;
- unsigned long l_arr[GPF2_BATCH];
+ unsigned long l_arr[GPF2_BATCH];
ret = -ESRCH;
- if ( unlikely((p = find_domain_by_id(dom)) == NULL) )
- break;
-
- if ( unlikely(num>1024) )
- {
- ret = -E2BIG;
- break;
- }
-
- ret = 0;
- for(n=0;n<num;)
- {
- int k = ((num-n)>GPF2_BATCH)?GPF2_BATCH:(num-n);
-
- if( copy_from_user( l_arr, &s_ptr[n], k*sizeof(unsigned long) ) )
- {
- ret = -EINVAL;
- break;
- }
-
- for(j=0;j<k;j++)
- {
- struct pfn_info *page;
- unsigned long mfn = l_arr[j];
-
- if ( unlikely(mfn >= max_page) )
- goto e2_err;
-
- page = &frame_table[mfn];
-
- if ( likely(get_page(page, p)) )
- {
- unsigned long type = 0;
- switch( page->type_and_flags & PGT_type_mask )
- {
- case PGT_l1_page_table:
- type = L1TAB;
- break;
- case PGT_l2_page_table:
- type = L2TAB;
- break;
- case PGT_l3_page_table:
- type = L3TAB;
- break;
- case PGT_l4_page_table:
- type = L4TAB;
- break;
- }
- l_arr[j] |= type;
- put_page(page);
- }
- else
- {
- e2_err:
- l_arr[j] |= XTAB;
- }
-
- }
-
- if( copy_to_user( &s_ptr[n], l_arr, k*sizeof(unsigned long) ) )
- {
- ret = -EINVAL;
- break;
- }
-
- n+=j;
- }
-
- put_task_struct(p);
+ if ( unlikely((p = find_domain_by_id(dom)) == NULL) )
+ break;
+
+ if ( unlikely(num>1024) )
+ {
+ ret = -E2BIG;
+ break;
+ }
+
+ ret = 0;
+ for(n=0;n<num;)
+ {
+ int k = ((num-n)>GPF2_BATCH)?GPF2_BATCH:(num-n);
+
+ if( copy_from_user( l_arr, &s_ptr[n], k*sizeof(unsigned long) ) )
+ {
+ ret = -EINVAL;
+ break;
+ }
+
+ for(j=0;j<k;j++)
+ {
+ struct pfn_info *page;
+ unsigned long mfn = l_arr[j];
+
+ if ( unlikely(mfn >= max_page) )
+ goto e2_err;
+
+ page = &frame_table[mfn];
+
+ if ( likely(get_page(page, p)) )
+ {
+ unsigned long type = 0;
+ switch( page->type_and_flags & PGT_type_mask )
+ {
+ case PGT_l1_page_table:
+ type = L1TAB;
+ break;
+ case PGT_l2_page_table:
+ type = L2TAB;
+ break;
+ case PGT_l3_page_table:
+ type = L3TAB;
+ break;
+ case PGT_l4_page_table:
+ type = L4TAB;
+ break;
+ }
+ l_arr[j] |= type;
+ put_page(page);
+ }
+ else
+ {
+ e2_err:
+ l_arr[j] |= XTAB;
+ }
+
+ }
+
+ if( copy_to_user( &s_ptr[n], l_arr, k*sizeof(unsigned long) ) )
+ {
+ ret = -EINVAL;
+ break;
+ }
+
+ n+=j;
+ }
+
+ put_task_struct(p);
}
break;
@@ -687,8 +700,8 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
}
- TRACE_5D( TRC_DOM0OP_LEAVE_BASE + op->cmd, ret,
- op->u.dummy[0], op->u.dummy[1], op->u.dummy[2], op->u.dummy[3] );
+ TRACE_5D(TRC_DOM0OP_LEAVE_BASE + op->cmd, ret,
+ op->u.dummy[0], op->u.dummy[1], op->u.dummy[2], op->u.dummy[3]);
return ret;
diff --git a/xen/common/dom_mem_ops.c b/xen/common/dom_mem_ops.c
index 97b2ffc0f1..ef8aaeb4d5 100644
--- a/xen/common/dom_mem_ops.c
+++ b/xen/common/dom_mem_ops.c
@@ -64,7 +64,7 @@ static long free_dom_mem(struct task_struct *p,
if ( unlikely(mpfn >= max_page) )
{
- DPRINTK("Domain %llu page number out of range (%08lx>=%08lx)\n",
+ DPRINTK("Domain %u page number out of range (%08lx>=%08lx)\n",
p->domain, mpfn, max_page);
rc = -EINVAL;
break;
@@ -73,7 +73,7 @@ static long free_dom_mem(struct task_struct *p,
page = &frame_table[mpfn];
if ( unlikely(!get_page(page, p)) )
{
- DPRINTK("Bad page free for domain %llu\n", p->domain);
+ DPRINTK("Bad page free for domain %u\n", p->domain);
rc = -EINVAL;
break;
}
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 894bb7ba05..c152d6532b 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -75,7 +75,7 @@ struct task_struct *do_createdomain(domid_t dom_id, unsigned int cpu)
}
/* We use a large intermediate to avoid overflow in sprintf. */
- sprintf(buf, "Domain-%llu", dom_id);
+ sprintf(buf, "Domain-%u", dom_id);
strncpy(p->name, buf, MAX_DOMAIN_NAME);
p->name[MAX_DOMAIN_NAME-1] = '\0';
@@ -90,15 +90,13 @@ struct task_struct *do_createdomain(domid_t dom_id, unsigned int cpu)
p->shared_info = (void *)get_free_page(GFP_KERNEL);
memset(p->shared_info, 0, PAGE_SIZE);
SHARE_PFN_WITH_DOMAIN(virt_to_page(p->shared_info), p);
-
- machine_to_phys_mapping[virt_to_phys(p->shared_info) >> PAGE_SHIFT] =
- 0x80000000UL; // set m2p table to magic marker (helps debug)
+ machine_to_phys_mapping[virt_to_phys(p->shared_info) >>
+ PAGE_SHIFT] = 0x80000000UL; /* debug */
p->mm.perdomain_pt = (l1_pgentry_t *)get_free_page(GFP_KERNEL);
memset(p->mm.perdomain_pt, 0, PAGE_SIZE);
-
- machine_to_phys_mapping[virt_to_phys(p->mm.perdomain_pt) >> PAGE_SHIFT] =
- 0x0fffdeadUL; // set m2p table to magic marker (helps debug)
+ machine_to_phys_mapping[virt_to_phys(p->mm.perdomain_pt) >>
+ PAGE_SHIFT] = 0x0fffdeadUL; /* debug */
init_blkdev_info(p);
@@ -161,8 +159,8 @@ struct task_struct *find_last_domain(void)
p = plast->next_list;
while ( p != NULL )
{
- if ( p->create_time > plast->create_time )
- plast = p;
+ if ( p->create_time > plast->create_time )
+ plast = p;
p = p->next_list;
}
get_task_struct(plast);
@@ -174,7 +172,7 @@ struct task_struct *find_last_domain(void)
void kill_domain_with_errmsg(const char *err)
{
- printk("DOM%llu FATAL ERROR: %s\n", current->domain, err);
+ printk("DOM%u FATAL ERROR: %s\n", current->domain, err);
kill_domain();
}
@@ -196,7 +194,7 @@ void __kill_domain(struct task_struct *p)
if ( !sched_rem_domain(p) )
return;
- DPRINTK("Killing domain %llu\n", p->domain);
+ DPRINTK("Killing domain %u\n", p->domain);
unlink_blkdev_info(p);
@@ -270,9 +268,9 @@ void stop_domain(void)
/* OK, this is grim, but helps speed up live migrate. When a domain stops,
kick Dom0 */
{
- struct task_struct *p;
- guest_schedule_to_run( p = find_domain_by_id(0ULL) );
- put_task_struct(p);
+ struct task_struct *p;
+ guest_schedule_to_run( p = find_domain_by_id(0ULL) );
+ put_task_struct(p);
}
__enter_scheduler();
@@ -355,7 +353,7 @@ struct pfn_info *alloc_domain_page(struct task_struct *p)
spin_lock(&p->page_list_lock);
if ( unlikely(p->tot_pages >= p->max_pages) )
{
- DPRINTK("Over-allocation for domain %llu: %u >= %u\n",
+ DPRINTK("Over-allocation for domain %u: %u >= %u\n",
p->domain, p->tot_pages, p->max_pages);
spin_unlock(&p->page_list_lock);
goto free_and_exit;
@@ -393,14 +391,14 @@ void free_domain_page(struct pfn_info *page)
if ( !(page->count_and_flags & PGC_zombie) )
{
page->tlbflush_timestamp = tlbflush_clock;
- if ( likely(p != NULL) )
- {
+ if ( likely(p != NULL) )
+ {
page->u.cpu_mask = 1 << p->processor;
spin_lock(&p->page_list_lock);
- list_del(&page->list);
- p->tot_pages--;
- spin_unlock(&p->page_list_lock);
- }
+ list_del(&page->list);
+ p->tot_pages--;
+ spin_unlock(&p->page_list_lock);
+ }
}
page->count_and_flags = 0;
@@ -533,16 +531,16 @@ unsigned int alloc_new_dom_mem(struct task_struct *p, unsigned int kbytes)
return -ENOMEM;
}
- /* initialise to machine_to_phys_mapping table to likely pfn */
- machine_to_phys_mapping[page-frame_table] = alloc_pfns;
+ /* initialise to machine_to_phys_mapping table to likely pfn */
+ machine_to_phys_mapping[page-frame_table] = alloc_pfns;
#ifndef NDEBUG
- {
- // initialise with magic marker if in DEBUG mode
- void * a = map_domain_mem( (page-frame_table)<<PAGE_SHIFT );
- memset( a, 0x80 | (char) p->domain, PAGE_SIZE );
- unmap_domain_mem( a );
- }
+ {
+ /* Initialise with magic marker if in DEBUG mode. */
+ void * a = map_domain_mem( (page-frame_table)<<PAGE_SHIFT );
+ memset( a, 0x80 | (char) p->domain, PAGE_SIZE );
+ unmap_domain_mem( a );
+ }
#endif
}
@@ -559,7 +557,7 @@ void release_task(struct task_struct *p)
ASSERT(p->state == TASK_DYING);
ASSERT(!p->has_cpu);
- DPRINTK("Releasing task %llu\n", p->domain);
+ DPRINTK("Releasing task %u\n", p->domain);
/*
* This frees up blkdev rings and vbd-access lists. Totally safe since
@@ -588,12 +586,12 @@ int final_setup_guestos(struct task_struct *p, dom0_builddomain_t *builddomain)
full_execution_context_t *c;
if ( (c = kmalloc(sizeof(*c), GFP_KERNEL)) == NULL )
- return -ENOMEM;
+ return -ENOMEM;
if ( test_bit(PF_CONSTRUCTED, &p->flags) )
{
rc = -EINVAL;
- goto out;
+ goto out;
}
if ( copy_from_user(c, builddomain->ctxt, sizeof(*c)) )
@@ -649,7 +647,7 @@ int final_setup_guestos(struct task_struct *p, dom0_builddomain_t *builddomain)
set_bit(PF_CONSTRUCTED, &p->flags);
-out:
+ out:
if (c) kfree(c);
return rc;
@@ -682,13 +680,13 @@ static int readelfimage_base_and_size(char *elfbase,
if ( (ehdr->e_phoff + (ehdr->e_phnum * ehdr->e_phentsize)) > elfsize )
{
- printk("ELF program headers extend beyond end of image.\n");
+ printk("ELF program headers extend beyond end of image.\n");
return -EINVAL;
}
if ( (ehdr->e_shoff + (ehdr->e_shnum * ehdr->e_shentsize)) > elfsize )
{
- printk("ELF section headers extend beyond end of image.\n");
+ printk("ELF section headers extend beyond end of image.\n");
return -EINVAL;
}
@@ -760,7 +758,7 @@ static int loadelfimage(char *elfbase)
{
phdr = (Elf_Phdr *)(elfbase + ehdr->e_phoff + (h*ehdr->e_phentsize));
if ( !is_loadable_phdr(phdr) )
- continue;
+ continue;
if ( phdr->p_filesz != 0 )
memcpy((char *)phdr->p_vaddr, elfbase + phdr->p_offset,
phdr->p_filesz);
@@ -1045,10 +1043,10 @@ int construct_dom0(struct task_struct *p,
if ( initrd_len != 0 )
{
- si->mod_start = vinitrd_start;
- si->mod_len = initrd_len;
- printk("Initrd len 0x%lx, start at 0x%08lx\n",
- si->mod_len, si->mod_start);
+ si->mod_start = vinitrd_start;
+ si->mod_len = initrd_len;
+ printk("Initrd len 0x%lx, start at 0x%08lx\n",
+ si->mod_len, si->mod_start);
}
dst = si->cmd_line;
diff --git a/xen/common/keyhandler.c b/xen/common/keyhandler.c
index 92248bcca6..7cd60e439f 100644
--- a/xen/common/keyhandler.c
+++ b/xen/common/keyhandler.c
@@ -22,20 +22,20 @@ void add_key_handler(u_char key, key_handler *handler, char *desc)
char *str;
if ( key_table[key].handler != NULL )
- printk("Warning: overwriting handler for key 0x%x\n", key);
+ printk("Warning: overwriting handler for key 0x%x\n", key);
key_table[key].handler = handler;
str = key_table[key].desc;
for ( i = 0; i < STR_MAX; i++ )
{
- if ( *desc != '\0' )
- *str++ = *desc++;
- else
+ if ( *desc != '\0' )
+ *str++ = *desc++;
+ else
break;
}
if ( i == STR_MAX )
- key_table[key].desc[STR_MAX-1] = '\0';
+ key_table[key].desc[STR_MAX-1] = '\0';
}
key_handler *get_key_handler(u_char key)
@@ -49,10 +49,10 @@ static void show_handlers(u_char key, void *dev_id, struct pt_regs *regs)
printk("'%c' pressed -> showing installed handlers\n", key);
for ( i = 0; i < KEY_MAX; i++ )
- if ( key_table[i].handler != NULL )
- printk(" key '%c' (ascii '%02x') => %s\n",
- (i<33 || i>126)?(' '):(i),i,
- key_table[i].desc);
+ if ( key_table[i].handler != NULL )
+ printk(" key '%c' (ascii '%02x') => %s\n",
+ (i<33 || i>126)?(' '):(i),i,
+ key_table[i].desc);
}
@@ -89,10 +89,10 @@ void do_task_queues(u_char key, void *dev_id, struct pt_regs *regs)
for_each_domain ( p )
{
- printk("Xen: DOM %llu, CPU %d [has=%c], state = ",
+ printk("Xen: DOM %u, CPU %d [has=%c], state = ",
p->domain, p->processor, p->has_cpu ? 'T':'F');
sched_prn_state(p ->state);
- printk(", hyp_events = %08x\n", p->hyp_events);
+ printk(", hyp_events = %08x\n", p->hyp_events);
s = p->shared_info;
printk("Guest: upcall_pend = %02x, upcall_mask = %02x\n",
s->vcpu_data[0].evtchn_upcall_pending,
@@ -123,8 +123,8 @@ void initialize_keytable(void)
/* first initialize key handler table */
for ( i = 0; i < KEY_MAX; i++ )
- key_table[i].handler = (key_handler *)NULL;
-
+ key_table[i].handler = (key_handler *)NULL;
+
/* setup own handlers */
add_key_handler('d', dump_registers, "dump registers");
add_key_handler('h', show_handlers, "show this message");
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 5b03588b2c..e1ca785968 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -142,8 +142,8 @@
#include <asm/ldt.h>
#ifndef NDEBUG
-#define MEM_LOG(_f, _a...) \
- printk("DOM%llu: (file=memory.c, line=%d) " _f "\n", \
+#define MEM_LOG(_f, _a...) \
+ printk("DOM%u: (file=memory.c, line=%d) " _f "\n", \
current->domain , __LINE__ , ## _a )
#else
#define MEM_LOG(_f, _a...) ((void)0)
@@ -177,7 +177,6 @@ static struct {
#define DOP_RELOAD_LDT (1<<1) /* Reload the LDT shadow mapping. */
unsigned long deferred_ops;
unsigned long cr0;
- domid_t subject_id;
/* General-Purpose Subject, Page-Table Subject */
struct task_struct *gps, *pts;
} percpu_info[NR_CPUS] __cacheline_aligned;
@@ -219,9 +218,9 @@ void __init init_frametable(unsigned long nr_pages)
mfn < virt_to_phys((void *)RDWR_MPT_VIRT_END)>>PAGE_SHIFT;
mfn++ )
{
- frame_table[mfn].count_and_flags = 1 | PGC_allocated;
- frame_table[mfn].type_and_flags = 1 | PGT_gdt_page; /* non-RW type */
- frame_table[mfn].u.domain = &idle0_task;
+ frame_table[mfn].count_and_flags = 1 | PGC_allocated;
+ frame_table[mfn].type_and_flags = 1 | PGT_gdt_page; /* non-RW type */
+ frame_table[mfn].u.domain = &idle0_task;
}
}
@@ -427,9 +426,9 @@ static int get_page_from_l1e(l1_pgentry_t l1e)
if ( unlikely(!pfn_is_ram(pfn)) )
{
if ( IS_PRIV(current) )
- return 1;
+ return 1;
- if ( IS_CAPABLE_PHYSDEV(current) )
+ if ( IS_CAPABLE_PHYSDEV(current) )
return domain_iomem_in_pfn(current, pfn);
MEM_LOG("Non-privileged attempt to map I/O space %08lx", pfn);
@@ -805,6 +804,7 @@ static int do_extended_command(unsigned long ptr, unsigned long val)
unsigned long old_base_pfn;
struct pfn_info *page = &frame_table[pfn];
struct task_struct *p = current, *q;
+ domid_t domid;
switch ( cmd )
{
@@ -914,17 +914,12 @@ static int do_extended_command(unsigned long ptr, unsigned long val)
break;
}
- case MMUEXT_SET_SUBJECTDOM_L:
- percpu_info[cpu].subject_id = (domid_t)((ptr&~0xFFFF)|(val>>16));
- break;
-
- case MMUEXT_SET_SUBJECTDOM_H:
- percpu_info[cpu].subject_id |=
- ((domid_t)((ptr&~0xFFFF)|(val>>16)))<<32;
+ case MMUEXT_SET_SUBJECTDOM:
+ domid = ((domid_t)((ptr&~0xFFFF)|(val>>16)));
if ( !IS_PRIV(p) )
{
- MEM_LOG("Dom %llu has no privilege to set subject domain",
+ MEM_LOG("Dom %u has no privilege to set subject domain",
p->domain);
okay = 0;
}
@@ -932,13 +927,12 @@ static int do_extended_command(unsigned long ptr, unsigned long val)
{
if ( percpu_info[cpu].gps != NULL )
put_task_struct(percpu_info[cpu].gps);
- percpu_info[cpu].gps = find_domain_by_id(
- percpu_info[cpu].subject_id);
+ percpu_info[cpu].gps = find_domain_by_id(domid);
percpu_info[cpu].pts = (val & SET_PAGETABLE_SUBJECTDOM) ?
percpu_info[cpu].gps : NULL;
if ( percpu_info[cpu].gps == NULL )
{
- MEM_LOG("Unknown domain '%llu'", percpu_info[cpu].subject_id);
+ MEM_LOG("Unknown domain '%u'", domid);
okay = 0;
}
}
@@ -947,7 +941,7 @@ static int do_extended_command(unsigned long ptr, unsigned long val)
case MMUEXT_REASSIGN_PAGE:
if ( unlikely(!IS_PRIV(p)) )
{
- MEM_LOG("Dom %llu has no privilege to reassign page ownership",
+ MEM_LOG("Dom %u has no privilege to reassign page ownership",
p->domain);
okay = 0;
}
@@ -1102,10 +1096,12 @@ int do_mmu_update(mmu_update_t *ureqs, int count, int *success_count)
machine_to_phys_mapping[pfn] = req.val;
okay = 1;
- /* if in log dirty shadow mode, mark the corresponding
- psuedo-physical page as dirty */
- if( unlikely(current->mm.shadow_mode == SHM_logdirty) )
- mark_dirty( &current->mm, pfn );
+ /*
+ * If in log-dirty mode, mark the corresponding pseudo-physical
+ * page as dirty.
+ */
+ if( unlikely(current->mm.shadow_mode == SHM_logdirty) )
+ mark_dirty( &current->mm, pfn );
put_page(&frame_table[pfn]);
break;
@@ -1155,7 +1151,7 @@ int do_mmu_update(mmu_update_t *ureqs, int count, int *success_count)
}
if ( unlikely(success_count != NULL) )
- put_user(count, success_count);
+ put_user(count, success_count);
return rc;
}
@@ -1241,7 +1237,7 @@ int do_update_va_mapping_otherdomain(unsigned long page_nr,
percpu_info[cpu].gps = p = find_domain_by_id(domid);
if ( unlikely(p == NULL) )
{
- MEM_LOG("Unknown domain '%llu'", domid);
+ MEM_LOG("Unknown domain '%u'", domid);
return -ESRCH;
}
diff --git a/xen/common/network.c b/xen/common/network.c
index befc929474..255feae2af 100644
--- a/xen/common/network.c
+++ b/xen/common/network.c
@@ -370,12 +370,12 @@ void print_net_rule(net_rule_t *r)
if ( r->src_dom == VIF_SPECIAL )
printk("=] src_dom/idx : %s\n", idx_to_name(r->src_idx));
else
- printk("=] src_dom/idx : %llu/%u\n", r->src_dom, r->src_idx);
+ printk("=] src_dom/idx : %u/%u\n", r->src_dom, r->src_idx);
if ( r->dst_dom == VIF_SPECIAL )
printk("=] dst_dom/idx : %s\n", idx_to_name(r->dst_idx));
else
- printk("=] dst_dom/idx : %llu/%u\n", r->dst_dom, r->dst_idx);
+ printk("=] dst_dom/idx : %u/%u\n", r->dst_dom, r->dst_idx);
printk("=] action : %u\n", r->action);
}
@@ -530,8 +530,7 @@ net_vif_t *net_get_target_vif(u8 *data, unsigned int len, net_vif_t *src_vif)
return target;
drop:
- printk("VIF%llu/%u: pkt to drop!\n",
- src_dom, src_idx);
+ DPRINTK("VIF%u/%u: pkt to drop!\n", src_dom, src_idx);
return VIF_DROP;
}
diff --git a/xen/common/physdev.c b/xen/common/physdev.c
index 61b7b22cb2..d7e8c7cb93 100644
--- a/xen/common/physdev.c
+++ b/xen/common/physdev.c
@@ -189,7 +189,7 @@ int physdev_pci_access_modify(
* this will allow all processes in that domain access to those
* ports as well. This will do for now, since driver domains don't
* run untrusted processes! */
- INFO("Giving domain %llu IO resources (%lx - %lx) "
+ INFO("Giving domain %u IO resources (%lx - %lx) "
"for device %s\n", dom, r->start, r->end, pdev->slot_name);
for ( j = r->start; j < r->end + 1; j++ )
{
@@ -204,7 +204,7 @@ int physdev_pci_access_modify(
}
/* rights to IO memory regions are checked when the domain maps them */
- }
+ }
out:
put_task_struct(p);
return rc;
@@ -217,7 +217,7 @@ int domain_iomem_in_pfn(struct task_struct *p, unsigned long pfn)
int ret = 0;
struct list_head *l;
- VERBOSE_INFO("Checking if physdev-capable domain %llu needs access to "
+ VERBOSE_INFO("Checking if physdev-capable domain %u needs access to "
"pfn %08lx\n", p->domain, pfn);
spin_lock(&p->pcidev_lock);
@@ -245,7 +245,7 @@ int domain_iomem_in_pfn(struct task_struct *p, unsigned long pfn)
spin_unlock(&p->pcidev_lock);
- VERBOSE_INFO("Domain %llu %s mapping of pfn %08lx\n",
+ VERBOSE_INFO("Domain %u %s mapping of pfn %08lx\n",
p->domain, ret ? "allowed" : "disallowed", pfn);
return ret;
@@ -300,8 +300,8 @@ inline static int check_dev_acc (struct task_struct *p,
* to work out the length of the io region a device probe typically does:
* 1) a = read_base_addr_reg()
* 2) write_base_addr_reg(0xffffffff)
- * 3) b = read_base_addr_reg() // device zeros lower bits
- * 4) write_base_addr_reg(a) // restore original value
+ * 3) b = read_base_addr_reg() [device zeros lower bits]
+ * 4) write_base_addr_reg(a) [restore original value]
* this function fakes out step 2-4. *no* writes are made to the device.
*
* phys_dev_t contains a bit field (a bit for each base address register).
@@ -328,7 +328,7 @@ static int do_base_address_access(phys_dev_t *pdev, int acc, int idx,
/* We could set *val to some value but the guest may well be in trouble
* anyway if this write fails. Hopefully the printk will give us a
* clue what went wrong. */
- printk("Guest %llu attempting sub-dword %s to BASE_ADDRESS %d\n",
+ printk("Guest %u attempting sub-dword %s to BASE_ADDRESS %d\n",
pdev->owner->domain, (acc == ACC_READ) ? "read" : "write", idx);
return -EPERM;
diff --git a/xen/common/sched_atropos.c b/xen/common/sched_atropos.c
index d01e4ecd4b..dfa923da3b 100644
--- a/xen/common/sched_atropos.c
+++ b/xen/common/sched_atropos.c
@@ -513,7 +513,7 @@ task_slice_t ksched_scheduler(s_time_t time)
cur_sdom->min_slice = newtime - time;
DOM_INFO(cur_sdom)->reason = reason;
- TRACE_2D(0, (cur_sdom->domain >> 32), ((u32)cur_sdom->domain));
+ TRACE_1D(0, cur_sdom->domain);
return ret;
}
diff --git a/xen/common/sched_bvt.c b/xen/common/sched_bvt.c
index a4b76b42f5..dd8e70db59 100644
--- a/xen/common/sched_bvt.c
+++ b/xen/common/sched_bvt.c
@@ -205,10 +205,10 @@ int bvt_adjdom(struct task_struct *p,
struct bvt_dom_info *inf = BVT_INFO(p);
-
- printk("Get domain %lld bvt mcu_adv=%ld, warp=%ld, warpl=%ld, warpu=%ld\n",
- p->domain, inf->mcu_advance, inf->warp,
- inf->warpl, inf->warpu );
+ DPRINTK("Get domain %u bvt mcu_adv=%ld, warp=%ld, "
+ "warpl=%ld, warpu=%ld\n",
+ p->domain, inf->mcu_advance, inf->warp,
+ inf->warpl, inf->warpu );
/* Sanity -- this can avoid divide-by-zero. */
if ( mcu_adv == 0 )
@@ -220,9 +220,10 @@ int bvt_adjdom(struct task_struct *p,
inf->warpl = warpl;
inf->warpu = warpu;
- printk("Set domain %lld bvt mcu_adv=%ld, warp=%ld, warpl=%ld, warpu=%ld\n",
- p->domain, inf->mcu_advance, inf->warp,
- inf->warpl, inf->warpu );
+ DPRINTK("Set domain %u bvt mcu_adv=%ld, warp=%ld, "
+ "warpl=%ld, warpu=%ld\n",
+ p->domain, inf->mcu_advance, inf->warp,
+ inf->warpl, inf->warpu );
spin_unlock_irqrestore(&schedule_lock[p->processor], flags);
}
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index b5e4219d6c..f3f6ad89e1 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -264,7 +264,7 @@ long do_sched_op(unsigned long op)
case SCHEDOP_exit:
{
- DPRINTK("DOM%llu killed itself!\n", current->domain);
+ DPRINTK("DOM%u killed itself!\n", current->domain);
DPRINTK(" EIP == %08lx\n", get_execution_context()->eip);
kill_domain();
break;
@@ -272,7 +272,7 @@ long do_sched_op(unsigned long op)
case SCHEDOP_stop:
{
- DPRINTK("DOM%llu stopped itself!\n", current->domain);
+ DPRINTK("DOM%u stopped itself!\n", current->domain);
DPRINTK(" EIP == %08lx\n", get_execution_context()->eip);
stop_domain();
break;
@@ -673,7 +673,7 @@ static void dump_rqueue(struct list_head *queue, char *name)
(unsigned long) queue->next, (unsigned long) queue->prev);
list_for_each (list, queue) {
p = list_entry(list, struct task_struct, run_list);
- printk("%3d: %llu has=%c ", loop++, p->domain, p->has_cpu ? 'T':'F');
+ printk("%3d: %u has=%c ", loop++, p->domain, p->has_cpu ? 'T':'F');
SCHED_OP(dump_runq_el, p);
printk("c=0x%X%08X\n", (u32)(p->cpu_time>>32), (u32)p->cpu_time);
printk(" l: %lx n: %lx p: %lx\n",
diff --git a/xen/drivers/block/xen_block.c b/xen/drivers/block/xen_block.c
index 6901262cb8..781bca177b 100644
--- a/xen/drivers/block/xen_block.c
+++ b/xen/drivers/block/xen_block.c
@@ -579,7 +579,7 @@ static void dump_blockq(u_char key, void *dev_id, struct pt_regs *regs)
read_lock_irqsave(&tasklist_lock, flags);
for_each_domain ( p )
{
- printk("Domain: %llu\n", p->domain);
+ printk("Domain: %u\n", p->domain);
blk_ring = p->blk_ring_base;
printk(" req_prod:0x%08x, req_cons:0x%08x resp_prod:0x%08x/"
"0x%08x on_list=%d\n",
diff --git a/xen/drivers/block/xen_vbd.c b/xen/drivers/block/xen_vbd.c
index 8d150f5872..dc1154a2f9 100644
--- a/xen/drivers/block/xen_vbd.c
+++ b/xen/drivers/block/xen_vbd.c
@@ -86,7 +86,7 @@ long vbd_create(vbd_create_t *create)
if ( unlikely((p = find_domain_by_id(create->domain)) == NULL) )
{
- DPRINTK("vbd_create attempted for non-existent domain %llu\n",
+ DPRINTK("vbd_create attempted for non-existent domain %u\n",
create->domain);
return -EINVAL;
}
@@ -166,7 +166,7 @@ long vbd_grow(vbd_grow_t *grow)
if ( unlikely((p = find_domain_by_id(grow->domain)) == NULL) )
{
- DPRINTK("vbd_grow: attempted for non-existent domain %llu\n",
+ DPRINTK("vbd_grow: attempted for non-existent domain %u\n",
grow->domain);
return -EINVAL;
}
@@ -192,7 +192,7 @@ long vbd_shrink(vbd_shrink_t *shrink)
if ( (p = find_domain_by_id(shrink->domain)) == NULL )
{
- DPRINTK("vbd_shrink attempted for non-existent domain %llu\n",
+ DPRINTK("vbd_shrink attempted for non-existent domain %u\n",
shrink->domain);
return -EINVAL;
}
@@ -252,7 +252,7 @@ long vbd_setextents(vbd_setextents_t *setextents)
if ( (p = find_domain_by_id(setextents->domain)) == NULL )
{
- DPRINTK("vbd_setextents attempted for non-existent domain %llu\n",
+ DPRINTK("vbd_setextents attempted for non-existent domain %u\n",
setextents->domain);
return -EINVAL;
}
@@ -346,7 +346,7 @@ long vbd_delete(vbd_delete_t *delete)
if ( (p = find_domain_by_id(delete->domain)) == NULL )
{
- DPRINTK("vbd_delete attempted for non-existent domain %llu\n",
+ DPRINTK("vbd_delete attempted for non-existent domain %u\n",
delete->domain);
return -EINVAL;
}
@@ -530,7 +530,7 @@ long vbd_probe(vbd_probe_t *probe)
if ( (probe->domain != VBD_PROBE_ALL) &&
((p = find_domain_by_id(probe->domain)) == NULL) )
{
- DPRINTK("vbd_probe attempted for non-existent domain %llu\n",
+ DPRINTK("vbd_probe attempted for non-existent domain %u\n",
probe->domain);
return -EINVAL;
}
@@ -581,7 +581,7 @@ long vbd_info(vbd_info_t *info)
if ( (p = find_domain_by_id(info->domain)) == NULL )
{
- DPRINTK("vbd_info attempted for non-existent domain %llu\n",
+ DPRINTK("vbd_info attempted for non-existent domain %u\n",
info->domain);
return -EINVAL;
}
@@ -654,7 +654,7 @@ int vbd_translate(phys_seg_t *pseg, struct task_struct *p, int operation)
goto found;
}
- DPRINTK("vbd_translate; domain %llu attempted to access "
+ DPRINTK("vbd_translate; domain %u attempted to access "
"non-existent VBD.\n", p->domain);
spin_unlock(&p->vbd_lock);
diff --git a/xen/drivers/char/console.c b/xen/drivers/char/console.c
index 2744ed0c75..4844b626a5 100644
--- a/xen/drivers/char/console.c
+++ b/xen/drivers/char/console.c
@@ -43,8 +43,8 @@ spinlock_t console_lock = SPIN_LOCK_UNLOCKED;
*/
/* VGA text (mode 3) definitions. */
-#define COLUMNS 80
-#define LINES 25
+#define COLUMNS 80
+#define LINES 25
#define ATTRIBUTE 7
/* Clear the screen and initialize VIDEO, XPOS and YPOS. */
@@ -209,7 +209,7 @@ long read_console_ring(unsigned long str, unsigned int count, unsigned cmd)
return -EFAULT;
if ( cmd & CONSOLE_RING_CLEAR )
- console_ring.len = 0;
+ console_ring.len = 0;
return len;
}
@@ -418,7 +418,7 @@ long do_console_write(char *str, unsigned int count)
return -EFAULT;
safe_str[count] = '\0';
- sprintf(line_header, "DOM%llu: ", current->domain);
+ sprintf(line_header, "DOM%u: ", current->domain);
p = safe_str;
while ( *p != '\0' )
@@ -447,7 +447,7 @@ long do_console_write(char *str, unsigned int count)
#else
if ( !test_and_set_bit(PF_CONSOLEWRITEBUG, &current->flags) )
{
- printk("DOM%llu is attempting to use the deprecated "
+ printk("DOM%u is attempting to use the deprecated "
"HYPERVISOR_console_write() interface.\n", current->domain);
printk(" - For testing, create a debug build of Xen\n");
printk(" - For production, your OS must use the new console model\n");
diff --git a/xen/include/hypervisor-ifs/dom0_ops.h b/xen/include/hypervisor-ifs/dom0_ops.h
index d5de8a8a85..cb16d58afa 100644
--- a/xen/include/hypervisor-ifs/dom0_ops.h
+++ b/xen/include/hypervisor-ifs/dom0_ops.h
@@ -19,7 +19,7 @@
* This makes sure that old versions of dom0 tools will stop working in a
* well-defined way (rather than crashing the machine, for instance).
*/
-#define DOM0_INTERFACE_VERSION 0xAAAA000D
+#define DOM0_INTERFACE_VERSION 0xAAAA000E
#define MAX_DOMAIN_NAME 16
@@ -29,6 +29,7 @@
typedef struct {
/* IN variables. */
domid_t domain; /* 0 */
+ u32 __pad;
memory_t max_pfns; /* 8 */
MEMORY_PADDING;
void *buffer; /* 16 */
@@ -56,20 +57,20 @@ typedef struct {
u32 __pad; /* 28 */
/* OUT parameters. */
domid_t domain; /* 32 */
-} PACKED dom0_createdomain_t; /* 40 bytes */
+} PACKED dom0_createdomain_t; /* 36 bytes */
#define DOM0_DESTROYDOMAIN 9
typedef struct {
/* IN variables. */
domid_t domain; /* 0 */
- u32 force; /* 8 */
-} PACKED dom0_destroydomain_t; /* 12 bytes */
+ u32 force; /* 4 */
+} PACKED dom0_destroydomain_t; /* 8 bytes */
#define DOM0_STARTDOMAIN 10
typedef struct {
/* IN parameters. */
domid_t domain; /* 0 */
-} PACKED dom0_startdomain_t; /* 8 bytes */
+} PACKED dom0_startdomain_t; /* 4 bytes */
#define DOM0_STOPDOMAIN 11
typedef struct {
@@ -77,26 +78,27 @@ typedef struct {
domid_t domain; /* 0 */
/* hack to indicate that you want to wait for other domain -- replace
with proper synchronous stop soon! */
- u32 sync; /* 8 */
-} PACKED dom0_stopdomain_t; /* 12 bytes */
+ u32 sync; /* 4 */
+} PACKED dom0_stopdomain_t; /* 8 bytes */
#define DOM0_GETDOMAININFO 12
typedef struct {
/* IN variables. */
domid_t domain; /* 0 */
+ u32 __pad;
full_execution_context_t *ctxt; /* 8 */
MEMORY_PADDING;
/* OUT variables. */
- char name[MAX_DOMAIN_NAME]; /* 16 */
- u32 processor; /* 32 */
- u32 has_cpu; /* 36 */
+ u8 name[MAX_DOMAIN_NAME]; /* 16 */
+ u32 processor; /* 32 */
+ u32 has_cpu; /* 36 */
#define DOMSTATE_ACTIVE 0
#define DOMSTATE_STOPPED 1
- u32 state; /* 40 */
- u32 hyp_events; /* 44 */
- u32 tot_pages; /* 48 */
- u32 max_pages; /* 52 */
- u64 cpu_time; /* 56 */
+ u32 state; /* 40 */
+ u32 hyp_events; /* 44 */
+ u32 tot_pages; /* 48 */
+ u32 max_pages; /* 52 */
+ u64 cpu_time; /* 56 */
memory_t shared_info_frame; /* 64: MFN of shared_info struct */
MEMORY_PADDING;
} PACKED dom0_getdomaininfo_t; /* 72 bytes */
@@ -104,19 +106,18 @@ typedef struct {
#define DOM0_BUILDDOMAIN 13
typedef struct {
/* IN variables. */
- domid_t domain; /* 0 */
- u32 num_vifs;/* 8 */
- u32 __pad; /* 12 */
+ domid_t domain; /* 0 */
+ u32 num_vifs; /* 4 */
/* IN/OUT parameters */
- full_execution_context_t *ctxt; /* 16 */
+ full_execution_context_t *ctxt; /* 8 */
MEMORY_PADDING;
-} PACKED dom0_builddomain_t; /* 24 bytes */
+} PACKED dom0_builddomain_t; /* 16 bytes */
#define DOM0_IOPL 14
typedef struct {
domid_t domain; /* 0 */
- u32 iopl; /* 8 */
-} PACKED dom0_iopl_t; /* 12 bytes */
+ u32 iopl; /* 4 */
+} PACKED dom0_iopl_t; /* 8 bytes */
#define DOM0_MSR 15
typedef struct {
@@ -135,17 +136,17 @@ typedef struct {
typedef struct {
/* IN variables. */
domid_t domain; /* 0 */
- u8 opcode; /* 8 */
+ u8 opcode; /* 4 */
u8 __pad0, __pad1, __pad2;
- u32 in1; /* 12 */
- u32 in2; /* 16 */
- u32 in3; /* 20 */
- u32 in4; /* 24 */
+ u32 in1; /* 8 */
+ u32 in2; /* 12 */
+ u32 in3; /* 16 */
+ u32 in4; /* 20 */
/* OUT variables. */
- u32 status; /* 28 */
- u32 out1; /* 32 */
- u32 out2; /* 36 */
-} PACKED dom0_debug_t; /* 40 bytes */
+ u32 status; /* 24 */
+ u32 out1; /* 28 */
+ u32 out2; /* 32 */
+} PACKED dom0_debug_t; /* 36 bytes */
/*
* Set clock such that it would read <secs,usecs> after 00:00:00 UTC,
@@ -174,8 +175,8 @@ typedef struct {
domid_t domain; /* 8: To which domain does the frame belong? */
/* OUT variables. */
/* Is the page PINNED to a type? */
- u32 type; /* 16: see above type defs */
-} PACKED dom0_getpageframeinfo_t; /* 20 bytes */
+ u32 type; /* 12: see above type defs */
+} PACKED dom0_getpageframeinfo_t; /* 16 bytes */
/*
* Read console content from Xen buffer ring.
@@ -195,8 +196,8 @@ typedef struct {
typedef struct {
/* IN variables. */
domid_t domain; /* 0 */
- s32 cpu; /* 8: -1 implies unpin */
-} PACKED dom0_pincpudomain_t; /* 12 bytes */
+ s32 cpu; /* 4: -1 implies unpin */
+} PACKED dom0_pincpudomain_t; /* 8 bytes */
/* Get trace buffers physical base pointer */
#define DOM0_GETTBUFS 21
@@ -229,11 +230,11 @@ typedef struct {
typedef struct {
/* IN variables. */
domid_t domain; /* 0 */
- u32 bus; /* 8 */
- u32 dev; /* 12 */
- u32 func; /* 16 */
- u32 enable; /* 20 */
-} PACKED dom0_pcidev_access_t; /* 24 bytes */
+ u32 bus; /* 4 */
+ u32 dev; /* 8 */
+ u32 func; /* 12 */
+ u32 enable; /* 16 */
+} PACKED dom0_pcidev_access_t; /* 20 bytes */
/*
* Get the ID of the current scheduler.
@@ -259,31 +260,31 @@ typedef struct {
typedef struct {
/* IN variables. */
domid_t domain; /* 0 */
- u32 op; /* 8 */
- u32 __pad; /* 12 */
- unsigned long *dirty_bitmap; /* 16: pointer to locked buffer */
+ u32 op; /* 4 */
+ unsigned long *dirty_bitmap; /* 8: pointer to locked buffer */
MEMORY_PADDING;
/* IN/OUT variables. */
- memory_t pages; /* 24: size of buffer, updated with actual size */
+ memory_t pages; /* 16: size of buffer, updated with actual size */
MEMORY_PADDING;
/* OUT variables. */
- memory_t fault_count; /* 32 */
+ memory_t fault_count; /* 24 */
MEMORY_PADDING;
- memory_t dirty_count; /* 40 */
+ memory_t dirty_count; /* 32 */
MEMORY_PADDING;
-} PACKED dom0_shadow_control_t; /* 48 bytes */
+} PACKED dom0_shadow_control_t; /* 40 bytes */
#define DOM0_SETDOMAINNAME 26
typedef struct {
/* IN variables. */
domid_t domain; /* 0 */
- char name[MAX_DOMAIN_NAME]; /* 8 */
-} PACKED dom0_setdomainname_t; /* 24 bytes */
+ u8 name[MAX_DOMAIN_NAME]; /* 4 */
+} PACKED dom0_setdomainname_t; /* 20 bytes */
#define DOM0_SETDOMAININITIALMEM 27
typedef struct {
/* IN variables. */
domid_t domain; /* 0 */
+ u32 __pad;
memory_t initial_memkb; /* 8 */
MEMORY_PADDING;
} PACKED dom0_setdomaininitialmem_t; /* 16 bytes */
@@ -292,6 +293,7 @@ typedef struct {
typedef struct {
/* IN variables. */
domid_t domain; /* 0 */
+ u32 __pad;
memory_t max_memkb; /* 8 */
MEMORY_PADDING;
} PACKED dom0_setdomainmaxmem_t; /* 16 bytes */
@@ -300,6 +302,7 @@ typedef struct {
typedef struct {
/* IN variables. */
domid_t domain; /* 0 */
+ u32 __pad;
memory_t num; /* 8 */
MEMORY_PADDING;
/* IN/OUT variables. */
diff --git a/xen/include/hypervisor-ifs/event_channel.h b/xen/include/hypervisor-ifs/event_channel.h
index 1c84fb3c22..5868cab6fd 100644
--- a/xen/include/hypervisor-ifs/event_channel.h
+++ b/xen/include/hypervisor-ifs/event_channel.h
@@ -19,10 +19,10 @@
#define EVTCHNOP_bind_interdomain 0
typedef struct {
/* IN parameters. */
- domid_t dom1, dom2; /* 0, 8 */
+ domid_t dom1, dom2; /* 0, 4 */
/* OUT parameters. */
- u32 port1, port2; /* 16, 20 */
-} PACKED evtchn_bind_interdomain_t; /* 24 bytes */
+ u32 port1, port2; /* 8, 12 */
+} PACKED evtchn_bind_interdomain_t; /* 16 bytes */
/*
* EVTCHNOP_bind_virq: Bind a local event channel to IRQ <irq>.
@@ -65,9 +65,9 @@ typedef struct {
typedef struct {
/* IN parameters. */
domid_t dom; /* 0 */
- u32 port; /* 8 */
+ u32 port; /* 4 */
/* No OUT parameters. */
-} PACKED evtchn_close_t; /* 12 bytes */
+} PACKED evtchn_close_t; /* 8 bytes */
/*
* EVTCHNOP_send: Send an event to the remote end of the channel whose local
@@ -92,23 +92,23 @@ typedef struct {
typedef struct {
/* IN parameters */
domid_t dom; /* 0 */
- u32 port; /* 8 */
+ u32 port; /* 4 */
/* OUT parameters */
#define EVTCHNSTAT_closed 0 /* Channel is not in use. */
#define EVTCHNSTAT_unbound 1 /* Channel is not bound to a source. */
#define EVTCHNSTAT_interdomain 2 /* Channel is connected to remote domain. */
#define EVTCHNSTAT_pirq 3 /* Channel is bound to a phys IRQ line. */
#define EVTCHNSTAT_virq 4 /* Channel is bound to a virtual IRQ line */
- u32 status; /* 12 */
- union {
+ u32 status; /* 8 */
+ union { /* 12 */
struct {
- domid_t dom; /* 16 */
- u32 port; /* 24 */
+ domid_t dom; /* 12 */
+ u32 port; /* 16 */
} PACKED interdomain; /* EVTCHNSTAT_interdomain */
- u32 pirq; /* EVTCHNSTAT_pirq */ /* 16 */
- u32 virq; /* EVTCHNSTAT_virq */ /* 16 */
+ u32 pirq; /* EVTCHNSTAT_pirq */ /* 12 */
+ u32 virq; /* EVTCHNSTAT_virq */ /* 12 */
} PACKED u;
-} PACKED evtchn_status_t; /* 28 bytes */
+} PACKED evtchn_status_t; /* 20 bytes */
typedef struct {
u32 cmd; /* EVTCHNOP_* */ /* 0 */
@@ -120,8 +120,8 @@ typedef struct {
evtchn_close_t close;
evtchn_send_t send;
evtchn_status_t status;
- u8 __dummy[32];
+ u8 __dummy[24];
} PACKED u;
-} PACKED evtchn_op_t; /* 40 bytes */
+} PACKED evtchn_op_t; /* 32 bytes */
#endif /* __HYPERVISOR_IFS__EVENT_CHANNEL_H__ */
diff --git a/xen/include/hypervisor-ifs/hypervisor-if.h b/xen/include/hypervisor-ifs/hypervisor-if.h
index 66b06ae1d6..010b269c81 100644
--- a/xen/include/hypervisor-ifs/hypervisor-if.h
+++ b/xen/include/hypervisor-ifs/hypervisor-if.h
@@ -84,14 +84,14 @@
* This domain that must own all non-page-table pages that are involved in
* MMU updates. By default it is the domain that executes mmu_update(). If the
* caller has sufficient privilege then it can be changed by executing
- * MMUEXT_SET_SUBJECTDOM_{L,H}.
+ * MMUEXT_SET_SUBJECTDOM.
*
* PTS (Page-Table Subject)
* ------------------------
* This domain must own all the page-table pages that are subject to MMU
* updates. By default it is the domain that executes mmu_update(). If the
* caller has sufficient privilege then it can be changed by executing
- * MMUEXT_SET_SUBJECTDOM_H with val[14] (SET_PAGETABLE_SUBJECTDOM) set.
+ * MMUEXT_SET_SUBJECTDOM with val[14] (SET_PAGETABLE_SUBJECTDOM) set.
*
* ptr[1:0] == MMU_NORMAL_PT_UPDATE:
* Updates an entry in a page table.
@@ -122,13 +122,9 @@
* ptr[:2] -- linear address of LDT base (NB. must be page-aligned)
* val[:8] -- number of entries in LDT
*
- * val[7:0] == MMUEXT_SET_SUBJECTDOM_L:
- * (ptr[31:15],val[31:15]) -- dom[31:0]
- *
- * val[7:0] == MMUEXT_SET_SUBJECTDOM_H:
+ * val[7:0] == MMUEXT_SET_SUBJECTDOM:
* val[14] -- if TRUE then sets the PTS in addition to the GPS.
- * (ptr[31:15],val[31:15]) -- dom[63:32]
- * NB. This command must be immediately preceded by SET_SUBJECTDOM_L.
+ * (ptr[31:15],val[31:15]) -- dom[31:0]
*
* val[7:0] == MMUEXT_REASSIGN_PAGE:
* ptr[:2] -- machine address within page to be reassigned to the GPS.
@@ -156,12 +152,10 @@
#define MMUEXT_TLB_FLUSH 6 /* ptr = NULL */
#define MMUEXT_INVLPG 7 /* ptr = VA to invalidate */
#define MMUEXT_SET_LDT 8 /* ptr = VA of table; val = # entries */
-/* NB. MMUEXT_SET_SUBJECTDOM must consist of *_L followed immediately by *_H */
-#define MMUEXT_SET_SUBJECTDOM_L 9 /* (ptr[31:15],val[31:15]) = dom[31:0] */
-#define MMUEXT_SET_SUBJECTDOM_H 10 /* (ptr[31:15],val[31:15]) = dom[63:32] */
-#define SET_PAGETABLE_SUBJECTDOM (1<<14) /* OR into 'val' arg of SUBJECTDOM_H*/
-#define MMUEXT_REASSIGN_PAGE 11
-#define MMUEXT_RESET_SUBJECTDOM 12
+#define MMUEXT_SET_SUBJECTDOM 9 /* (ptr[31:15],val[31:15]) = dom[31:0] */
+#define SET_PAGETABLE_SUBJECTDOM (1<<14) /* OR into 'val' arg of SUBJECTDOM */
+#define MMUEXT_REASSIGN_PAGE 10
+#define MMUEXT_RESET_SUBJECTDOM 11
#define MMUEXT_CMD_MASK 255
#define MMUEXT_CMD_SHIFT 8
@@ -192,9 +186,9 @@
#ifndef __ASSEMBLY__
-typedef u64 domid_t;
+typedef u32 domid_t;
/* DOMID_SELF is used in certain contexts to refer to oneself. */
-#define DOMID_SELF (~1ULL)
+#define DOMID_SELF (0x7FFFFFFEU)
#include "network.h"
#include "block.h"
diff --git a/xen/include/hypervisor-ifs/network.h b/xen/include/hypervisor-ifs/network.h
index 7b00a5369a..eb3b42a4c0 100644
--- a/xen/include/hypervisor-ifs/network.h
+++ b/xen/include/hypervisor-ifs/network.h
@@ -141,7 +141,7 @@ typedef struct net_rule_st
} net_rule_t;
/* These are specified in the 'idx' if the 'dom' is SPECIAL. */
-#define VIF_SPECIAL (~0ULL)
+#define VIF_SPECIAL (0x7FFFFFFFU)
#define VIF_UNKNOWN_INTERFACE 0
#define VIF_PHYSICAL_INTERFACE 1
#define VIF_ANY_INTERFACE 2
diff --git a/xen/include/hypervisor-ifs/sched_ctl.h b/xen/include/hypervisor-ifs/sched_ctl.h
index 641ad2c192..34e1d3866e 100644
--- a/xen/include/hypervisor-ifs/sched_ctl.h
+++ b/xen/include/hypervisor-ifs/sched_ctl.h
@@ -42,9 +42,10 @@ struct sched_ctl_cmd
struct sched_adjdom_cmd
{
- u32 sched_id; /* 0 */
- u32 direction; /* 4 */
+ u32 sched_id; /* 0 */
+ u32 direction; /* 4 */
domid_t domain; /* 8 */
+ u32 __pad;
union { /* 16 */
struct bvt_adjdom
{
diff --git a/xen/include/hypervisor-ifs/vbd.h b/xen/include/hypervisor-ifs/vbd.h
index adf24e9682..9d68da62f4 100644
--- a/xen/include/hypervisor-ifs/vbd.h
+++ b/xen/include/hypervisor-ifs/vbd.h
@@ -60,7 +60,7 @@ typedef struct _vbd_delete {
u16 vdevice; /* 16 bit id domain refers to VBD as */
} vbd_delete_t;
-#define VBD_PROBE_ALL (~0ULL)
+#define VBD_PROBE_ALL (0x7FFFFFFFU)
typedef struct _vbd_probe {
domid_t domain; /* domain in question or VBD_PROBE_ALL */
xen_disk_info_t xdi; /* where's our space for VBD/disk info */
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index ecb73627a3..5202c60d4c 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -164,8 +164,11 @@ static inline int get_page(struct pfn_info *page,
unlikely(x & PGC_zombie) || /* Zombie? */
unlikely(p != domain) ) /* Wrong owner? */
{
- DPRINTK("Error pfn %08lx: ed=%p(%lld), sd=%p(%lld), caf=%08x, taf=%08x\n",
- page_to_pfn(page), domain, (domain)?domain->domain:999, p, (p && !((x & PGC_count_mask) == 0))?p->domain:999, x, page->type_and_flags);
+ DPRINTK("Error pfn %08lx: ed=%p(%u), sd=%p(%u),"
+ " caf=%08x, taf=%08x\n",
+ page_to_pfn(page), domain, (domain)?domain->domain:999,
+ p, (p && !((x & PGC_count_mask) == 0))?p->domain:999,
+ x, page->type_and_flags);
return 0;
}
__asm__ __volatile__(
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 8ced1a51c3..a9464415fc 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -226,7 +226,7 @@ struct task_struct
extern struct task_struct idle0_task;
extern struct task_struct *idle_task[NR_CPUS];
-#define IDLE_DOMAIN_ID (~0ULL)
+#define IDLE_DOMAIN_ID (0x7FFFFFFFU)
#define is_idle_task(_p) (test_bit(PF_IDLETASK, &(_p)->flags))
#include <xen/slab.h>
diff --git a/xen/include/xen/shadow.h b/xen/include/xen/shadow.h
index 1597e1feb2..cf1a6412da 100644
--- a/xen/include/xen/shadow.h
+++ b/xen/include/xen/shadow.h
@@ -10,9 +10,9 @@
/* Shadow PT flag bits in pfn_info */
-#define PSH_shadowed (1<<31) /* page has a shadow. PFN points to shadow */
-#define PSH_pending (1<<29) /* page is in the process of being shadowed */
-#define PSH_pfn_mask ((1<<21)-1)
+#define PSH_shadowed (1<<31) /* page has a shadow. PFN points to shadow */
+#define PSH_pending (1<<29) /* page is in the process of being shadowed */
+#define PSH_pfn_mask ((1<<21)-1)
/* Shadow PT operation mode : shadowmode variable in mm_struct */
#define SHM_test (1) /* just run domain on shadow PTs */
@@ -27,8 +27,8 @@ extern void shadow_mode_init(void);
extern int shadow_mode_control( struct task_struct *p, dom0_shadow_control_t *sc );
extern int shadow_fault( unsigned long va, long error_code );
extern void shadow_l1_normal_pt_update( unsigned long pa, unsigned long gpte,
- unsigned long *prev_spfn_ptr,
- l1_pgentry_t **prev_spl1e_ptr );
+ unsigned long *prev_spfn_ptr,
+ l1_pgentry_t **prev_spl1e_ptr );
extern void shadow_l2_normal_pt_update( unsigned long pa, unsigned long gpte );
extern void unshadow_table( unsigned long gpfn, unsigned int type );
extern int shadow_mode_enable( struct task_struct *p, unsigned int mode );
@@ -51,7 +51,7 @@ struct shadow_status {
#ifndef NDEBUG
#define SH_LOG(_f, _a...) \
-printk("DOM%lld: (file=shadow.c, line=%d) " _f "\n", \
+printk("DOM%u: (file=shadow.c, line=%d) " _f "\n", \
current->domain , __LINE__ , ## _a )
#else
#define SH_LOG(_f, _a...)
@@ -59,16 +59,16 @@ printk("DOM%lld: (file=shadow.c, line=%d) " _f "\n", \
#if SHADOW_DEBUG
#define SH_VLOG(_f, _a...) \
- printk("DOM%lld: (file=shadow.c, line=%d) " _f "\n", \
- current->domain , __LINE__ , ## _a )
+ printk("DOM%u: (file=shadow.c, line=%d) " _f "\n", \
+ current->domain , __LINE__ , ## _a )
#else
#define SH_VLOG(_f, _a...)
#endif
#if 0
#define SH_VVLOG(_f, _a...) \
- printk("DOM%lld: (file=shadow.c, line=%d) " _f "\n", \
- current->domain , __LINE__ , ## _a )
+ printk("DOM%u: (file=shadow.c, line=%d) " _f "\n", \
+ current->domain , __LINE__ , ## _a )
#else
#define SH_VVLOG(_f, _a...)
#endif
@@ -76,49 +76,44 @@ printk("DOM%lld: (file=shadow.c, line=%d) " _f "\n", \
/************************************************************************/
- static inline void __mark_dirty( struct mm_struct *m, unsigned int mfn )
+static inline void __mark_dirty( struct mm_struct *m, unsigned int mfn )
{
unsigned int pfn;
ASSERT(spin_is_locked(&m->shadow_lock));
-
- //printk("%08x %08lx\n", mfn, machine_to_phys_mapping[mfn] );
pfn = machine_to_phys_mapping[mfn];
/* We use values with the top bit set to mark MFNs that aren't
really part of the domain's pseudo-physical memory map e.g.
the shared info frame. Nothing to do here...
- */
+ */
if ( unlikely(pfn & 0x80000000U) ) return;
ASSERT(m->shadow_dirty_bitmap);
if( likely(pfn<m->shadow_dirty_bitmap_size) )
{
- /* These updates occur with mm.shadow_lock held, so use
- (__) version of test_and_set */
- if( ! __test_and_set_bit( pfn, m->shadow_dirty_bitmap ) )
- {
- m->shadow_dirty_count++;
- }
+ /* These updates occur with mm.shadow_lock held, so use
+ (__) version of test_and_set */
+ if( !__test_and_set_bit( pfn, m->shadow_dirty_bitmap) )
+ m->shadow_dirty_count++;
}
else
{
- extern void show_traceX(void);
- SH_LOG("mark_dirty OOR! mfn=%x pfn=%x max=%x (mm %p)",
- mfn, pfn, m->shadow_dirty_bitmap_size, m );
- SH_LOG("dom=%lld caf=%08x taf=%08x\n",
- frame_table[mfn].u.domain->domain,
- frame_table[mfn].count_and_flags,
- frame_table[mfn].type_and_flags );
- //show_traceX();
+ extern void show_traceX(void);
+ SH_LOG("mark_dirty OOR! mfn=%x pfn=%x max=%x (mm %p)",
+ mfn, pfn, m->shadow_dirty_bitmap_size, m );
+ SH_LOG("dom=%u caf=%08x taf=%08x\n",
+ frame_table[mfn].u.domain->domain,
+ frame_table[mfn].count_and_flags,
+ frame_table[mfn].type_and_flags );
}
}
static inline void mark_dirty( struct mm_struct *m, unsigned int mfn )
-{
+{
ASSERT(local_irq_is_enabled());
//if(spin_is_locked(&m->shadow_lock)) printk("+");
spin_lock(&m->shadow_lock);
@@ -130,7 +125,7 @@ static inline void mark_dirty( struct mm_struct *m, unsigned int mfn )
/************************************************************************/
static inline void l1pte_write_fault( struct mm_struct *m,
- unsigned long *gpte_p, unsigned long *spte_p )
+ unsigned long *gpte_p, unsigned long *spte_p )
{
unsigned long gpte = *gpte_p;
unsigned long spte = *spte_p;
@@ -138,17 +133,17 @@ static inline void l1pte_write_fault( struct mm_struct *m,
switch( m->shadow_mode )
{
case SHM_test:
- spte = gpte;
- gpte |= _PAGE_DIRTY | _PAGE_ACCESSED;
- spte |= _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED;
- break;
+ spte = gpte;
+ gpte |= _PAGE_DIRTY | _PAGE_ACCESSED;
+ spte |= _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED;
+ break;
case SHM_logdirty:
- spte = gpte;
- gpte |= _PAGE_DIRTY | _PAGE_ACCESSED;
- spte |= _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED;
- __mark_dirty( m, (gpte >> PAGE_SHIFT) );
- break;
+ spte = gpte;
+ gpte |= _PAGE_DIRTY | _PAGE_ACCESSED;
+ spte |= _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED;
+ __mark_dirty( m, (gpte >> PAGE_SHIFT) );
+ break;
}
*gpte_p = gpte;
@@ -156,7 +151,7 @@ static inline void l1pte_write_fault( struct mm_struct *m,
}
static inline void l1pte_read_fault( struct mm_struct *m,
- unsigned long *gpte_p, unsigned long *spte_p )
+ unsigned long *gpte_p, unsigned long *spte_p )
{
unsigned long gpte = *gpte_p;
unsigned long spte = *spte_p;
@@ -164,19 +159,19 @@ static inline void l1pte_read_fault( struct mm_struct *m,
switch( m->shadow_mode )
{
case SHM_test:
- spte = gpte;
- gpte |= _PAGE_ACCESSED;
- spte |= _PAGE_ACCESSED;
- if ( ! (gpte & _PAGE_DIRTY ) )
- spte &= ~ _PAGE_RW;
- break;
+ spte = gpte;
+ gpte |= _PAGE_ACCESSED;
+ spte |= _PAGE_ACCESSED;
+ if ( ! (gpte & _PAGE_DIRTY ) )
+ spte &= ~ _PAGE_RW;
+ break;
case SHM_logdirty:
- spte = gpte;
- gpte |= _PAGE_ACCESSED;
- spte |= _PAGE_ACCESSED;
- spte &= ~ _PAGE_RW;
- break;
+ spte = gpte;
+ gpte |= _PAGE_ACCESSED;
+ spte |= _PAGE_ACCESSED;
+ spte &= ~ _PAGE_RW;
+ break;
}
*gpte_p = gpte;
@@ -184,7 +179,7 @@ static inline void l1pte_read_fault( struct mm_struct *m,
}
static inline void l1pte_no_fault( struct mm_struct *m,
- unsigned long *gpte_p, unsigned long *spte_p )
+ unsigned long *gpte_p, unsigned long *spte_p )
{
unsigned long gpte = *gpte_p;
unsigned long spte = *spte_p;
@@ -192,26 +187,26 @@ static inline void l1pte_no_fault( struct mm_struct *m,
switch( m->shadow_mode )
{
case SHM_test:
- spte = 0;
- if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) ==
- (_PAGE_PRESENT|_PAGE_ACCESSED) )
- {
- spte = gpte;
- if ( ! (gpte & _PAGE_DIRTY ) )
- spte &= ~ _PAGE_RW;
- }
- break;
+ spte = 0;
+ if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) ==
+ (_PAGE_PRESENT|_PAGE_ACCESSED) )
+ {
+ spte = gpte;
+ if ( ! (gpte & _PAGE_DIRTY ) )
+ spte &= ~ _PAGE_RW;
+ }
+ break;
case SHM_logdirty:
- spte = 0;
- if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) ==
- (_PAGE_PRESENT|_PAGE_ACCESSED) )
- {
- spte = gpte;
- spte &= ~ _PAGE_RW;
- }
-
- break;
+ spte = 0;
+ if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) ==
+ (_PAGE_PRESENT|_PAGE_ACCESSED) )
+ {
+ spte = gpte;
+ spte &= ~ _PAGE_RW;
+ }
+
+ break;
}
*gpte_p = gpte;
@@ -219,8 +214,8 @@ static inline void l1pte_no_fault( struct mm_struct *m,
}
static inline void l2pde_general( struct mm_struct *m,
- unsigned long *gpde_p, unsigned long *spde_p,
- unsigned long sl1pfn)
+ unsigned long *gpde_p, unsigned long *spde_p,
+ unsigned long sl1pfn)
{
unsigned long gpde = *gpde_p;
unsigned long spde = *spde_p;
@@ -229,16 +224,16 @@ static inline void l2pde_general( struct mm_struct *m,
if ( sl1pfn )
{
- spde = (gpde & ~PAGE_MASK) | (sl1pfn<<PAGE_SHIFT) |
- _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY;
- gpde = gpde | _PAGE_ACCESSED | _PAGE_DIRTY;
-
- if ( unlikely( (sl1pfn<<PAGE_SHIFT) == (gpde & PAGE_MASK) ) )
- {
- // detect linear map, and keep pointing at guest
- SH_VLOG("4c: linear mapping ( %08lx )",sl1pfn);
- spde = gpde & ~_PAGE_RW;
- }
+ spde = (gpde & ~PAGE_MASK) | (sl1pfn<<PAGE_SHIFT) |
+ _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY;
+ gpde = gpde | _PAGE_ACCESSED | _PAGE_DIRTY;
+
+ if ( unlikely( (sl1pfn<<PAGE_SHIFT) == (gpde & PAGE_MASK) ) )
+ {
+ // detect linear map, and keep pointing at guest
+ SH_VLOG("4c: linear mapping ( %08lx )",sl1pfn);
+ spde = gpde & ~_PAGE_RW;
+ }
}
*gpde_p = gpde;
@@ -254,30 +249,30 @@ static void shadow_audit(struct mm_struct *m, int print)
{
int live=0, free=0, j=0, abs;
struct shadow_status *a;
-
- for(j=0;j<shadow_ht_buckets;j++)
+
+ for( j = 0; j < shadow_ht_buckets; j++ )
{
a = &m->shadow_ht[j];
- if(a->pfn){live++; ASSERT(a->spfn_and_flags&PSH_pfn_mask);}
- ASSERT((a->pfn&0xf0000000)==0);
- ASSERT(a->pfn<0x00100000);
- a=a->next;
+ if(a->pfn){live++; ASSERT(a->spfn_and_flags&PSH_pfn_mask);}
+ ASSERT((a->pfn&0xf0000000)==0);
+ ASSERT(a->pfn<0x00100000);
+ a=a->next;
while(a && live<9999)
- {
- live++;
- if(a->pfn == 0 || a->spfn_and_flags == 0)
- {
- printk("XXX live=%d pfn=%08lx sp=%08lx next=%p\n",
- live, a->pfn, a->spfn_and_flags, a->next);
- BUG();
- }
- ASSERT(a->pfn);
- ASSERT((a->pfn&0xf0000000)==0);
- ASSERT(a->pfn<0x00100000);
- ASSERT(a->spfn_and_flags&PSH_pfn_mask);
- a=a->next;
- }
- ASSERT(live<9999);
+ {
+ live++;
+ if(a->pfn == 0 || a->spfn_and_flags == 0)
+ {
+ printk("XXX live=%d pfn=%08lx sp=%08lx next=%p\n",
+ live, a->pfn, a->spfn_and_flags, a->next);
+ BUG();
+ }
+ ASSERT(a->pfn);
+ ASSERT((a->pfn&0xf0000000)==0);
+ ASSERT(a->pfn<0x00100000);
+ ASSERT(a->spfn_and_flags&PSH_pfn_mask);
+ a=a->next;
+ }
+ ASSERT(live<9999);
}
a = m->shadow_ht_free;
@@ -288,9 +283,9 @@ static void shadow_audit(struct mm_struct *m, int print)
abs=(perfc_value(shadow_l1_pages)+perfc_value(shadow_l2_pages))-live;
if( abs < -1 || abs > 1 )
{
- printk("live=%d free=%d l1=%d l2=%d\n",live,free,
- perfc_value(shadow_l1_pages), perfc_value(shadow_l2_pages) );
- BUG();
+ printk("live=%d free=%d l1=%d l2=%d\n",live,free,
+ perfc_value(shadow_l1_pages), perfc_value(shadow_l2_pages) );
+ BUG();
}
}
@@ -302,14 +297,14 @@ static void shadow_audit(struct mm_struct *m, int print)
static inline struct shadow_status* hash_bucket( struct mm_struct *m,
- unsigned int gpfn )
+ unsigned int gpfn )
{
return &(m->shadow_ht[gpfn % shadow_ht_buckets]);
}
static inline unsigned long __shadow_status( struct mm_struct *m,
- unsigned int gpfn )
+ unsigned int gpfn )
{
struct shadow_status **ob, *b, *B = hash_bucket( m, gpfn );
@@ -321,33 +316,33 @@ static inline unsigned long __shadow_status( struct mm_struct *m,
do
{
- if ( b->pfn == gpfn )
- {
- unsigned long t;
- struct shadow_status *x;
-
- // swap with head
- t=B->pfn; B->pfn=b->pfn; b->pfn=t;
- t=B->spfn_and_flags; B->spfn_and_flags=b->spfn_and_flags;
- b->spfn_and_flags=t;
-
- if(ob)
- { // pull to front
- *ob=b->next;
- x=B->next;
- B->next=b;
- b->next=x;
- }
- return B->spfn_and_flags;
- }
+ if ( b->pfn == gpfn )
+ {
+ unsigned long t;
+ struct shadow_status *x;
+
+ // swap with head
+ t=B->pfn; B->pfn=b->pfn; b->pfn=t;
+ t=B->spfn_and_flags; B->spfn_and_flags=b->spfn_and_flags;
+ b->spfn_and_flags=t;
+
+ if( ob )
+ { // pull to front
+ *ob=b->next;
+ x=B->next;
+ B->next=b;
+ b->next=x;
+ }
+ return B->spfn_and_flags;
+ }
#if SHADOW_HASH_DEBUG
- else
- {
- if(b!=B)ASSERT(b->pfn);
- }
+ else
+ {
+ if(b!=B)ASSERT(b->pfn);
+ }
#endif
- ob=&b->next;
- b=b->next;
+ ob=&b->next;
+ b=b->next;
}
while (b);
@@ -359,7 +354,7 @@ ever becomes a problem, but since we need a spin lock on the hash table
anyway its probably not worth being too clever. */
static inline unsigned long get_shadow_status( struct mm_struct *m,
- unsigned int gpfn )
+ unsigned int gpfn )
{
unsigned long res;
@@ -370,15 +365,15 @@ static inline unsigned long get_shadow_status( struct mm_struct *m,
bit in the dirty bitmap.
NB: the VA update path doesn't use this so needs to be handled
independently.
- */
+ */
ASSERT(local_irq_is_enabled());
//if(spin_is_locked(&m->shadow_lock)) printk("*");
spin_lock(&m->shadow_lock);
if( m->shadow_mode == SHM_logdirty )
- __mark_dirty( m, gpfn );
-
+ __mark_dirty( m, gpfn );
+
res = __shadow_status( m, gpfn );
if (!res) spin_unlock(&m->shadow_lock);
return res;
@@ -392,7 +387,7 @@ static inline void put_shadow_status( struct mm_struct *m )
static inline void delete_shadow_status( struct mm_struct *m,
- unsigned int gpfn )
+ unsigned int gpfn )
{
struct shadow_status *b, *B, **ob;
@@ -406,29 +401,29 @@ static inline void delete_shadow_status( struct mm_struct *m,
if( b->pfn == gpfn )
{
- if (b->next)
- {
- struct shadow_status *D=b->next;
- b->spfn_and_flags = b->next->spfn_and_flags;
- b->pfn = b->next->pfn;
-
- b->next = b->next->next;
- D->next = m->shadow_ht_free;
- D->pfn = 0;
- D->spfn_and_flags = 0;
- m->shadow_ht_free = D;
- }
- else
- {
- b->pfn = 0;
- b->spfn_and_flags = 0;
- }
+ if (b->next)
+ {
+ struct shadow_status *D=b->next;
+ b->spfn_and_flags = b->next->spfn_and_flags;
+ b->pfn = b->next->pfn;
+
+ b->next = b->next->next;
+ D->next = m->shadow_ht_free;
+ D->pfn = 0;
+ D->spfn_and_flags = 0;
+ m->shadow_ht_free = D;
+ }
+ else
+ {
+ b->pfn = 0;
+ b->spfn_and_flags = 0;
+ }
#if SHADOW_HASH_DEBUG
- if( __shadow_status(m,gpfn) ) BUG();
- shadow_audit(m,0);
+ if( __shadow_status(m,gpfn) ) BUG();
+ shadow_audit(m,0);
#endif
- return;
+ return;
}
ob = &b->next;
@@ -436,25 +431,25 @@ static inline void delete_shadow_status( struct mm_struct *m,
do
{
- if ( b->pfn == gpfn )
- {
- b->pfn = 0;
- b->spfn_and_flags = 0;
+ if ( b->pfn == gpfn )
+ {
+ b->pfn = 0;
+ b->spfn_and_flags = 0;
- // b is in the list
- *ob=b->next;
- b->next = m->shadow_ht_free;
- m->shadow_ht_free = b;
+ // b is in the list
+ *ob=b->next;
+ b->next = m->shadow_ht_free;
+ m->shadow_ht_free = b;
#if SHADOW_HASH_DEBUG
- if( __shadow_status(m,gpfn) ) BUG();
+ if( __shadow_status(m,gpfn) ) BUG();
#endif
- shadow_audit(m,0);
- return;
- }
+ shadow_audit(m,0);
+ return;
+ }
- ob = &b->next;
- b=b->next;
+ ob = &b->next;
+ b=b->next;
}
while (b);
@@ -464,7 +459,7 @@ static inline void delete_shadow_status( struct mm_struct *m,
static inline void set_shadow_status( struct mm_struct *m,
- unsigned int gpfn, unsigned long s )
+ unsigned int gpfn, unsigned long s )
{
struct shadow_status *b, *B, *extra, **fptr;
int i;
@@ -474,22 +469,20 @@ static inline void set_shadow_status( struct mm_struct *m,
B = b = hash_bucket( m, gpfn );
ASSERT(gpfn);
- //ASSERT(s);
- //ASSERT(s&PSH_pfn_mask);
SH_VVLOG("set gpfn=%08x s=%08lx bucket=%p(%p)", gpfn, s, b, b->next );
shadow_audit(m,0);
do
{
- if ( b->pfn == gpfn )
- {
- b->spfn_and_flags = s;
- shadow_audit(m,0);
- return;
- }
-
- b=b->next;
+ if ( b->pfn == gpfn )
+ {
+ b->spfn_and_flags = s;
+ shadow_audit(m,0);
+ return;
+ }
+
+ b=b->next;
}
while (b);
@@ -499,41 +492,41 @@ static inline void set_shadow_status( struct mm_struct *m,
if ( B->pfn == 0 )
{
- // we can use this head
- ASSERT( B->next == 0 );
- B->pfn = gpfn;
- B->spfn_and_flags = s;
- shadow_audit(m,0);
- return;
+ // we can use this head
+ ASSERT( B->next == 0 );
+ B->pfn = gpfn;
+ B->spfn_and_flags = s;
+ shadow_audit(m,0);
+ return;
}
if( unlikely(m->shadow_ht_free == NULL) )
{
- SH_LOG("allocate more shadow hashtable blocks");
+ SH_LOG("allocate more shadow hashtable blocks");
- // we need to allocate more space
- extra = kmalloc( sizeof(void*) + (shadow_ht_extra_size *
- sizeof(struct shadow_status)), GFP_KERNEL );
+ // we need to allocate more space
+ extra = kmalloc( sizeof(void*) + (shadow_ht_extra_size *
+ sizeof(struct shadow_status)), GFP_KERNEL );
- if( ! extra ) BUG(); // should be more graceful here....
+ if( ! extra ) BUG(); // should be more graceful here....
- memset( extra, 0, sizeof(void*) + (shadow_ht_extra_size *
- sizeof(struct shadow_status)) );
+ memset( extra, 0, sizeof(void*) + (shadow_ht_extra_size *
+ sizeof(struct shadow_status)) );
- m->shadow_extras_count++;
-
- // add extras to free list
- fptr = &m->shadow_ht_free;
- for ( i=0; i<shadow_ht_extra_size; i++ )
- {
- *fptr = &extra[i];
- fptr = &(extra[i].next);
- }
- *fptr = NULL;
+ m->shadow_extras_count++;
- *((struct shadow_status ** ) &extra[shadow_ht_extra_size]) =
- m->shadow_ht_extras;
- m->shadow_ht_extras = extra;
+ // add extras to free list
+ fptr = &m->shadow_ht_free;
+ for ( i=0; i<shadow_ht_extra_size; i++ )
+ {
+ *fptr = &extra[i];
+ fptr = &(extra[i].next);
+ }
+ *fptr = NULL;
+
+ *((struct shadow_status ** ) &extra[shadow_ht_extra_size]) =
+ m->shadow_ht_extras;
+ m->shadow_ht_extras = extra;
}
@@ -555,10 +548,10 @@ static inline void __shadow_mk_pagetable( struct mm_struct *mm )
unsigned long gpfn, spfn=0;
gpfn = pagetable_val(mm->pagetable) >> PAGE_SHIFT;
-
+
if ( unlikely((spfn=__shadow_status(mm, gpfn)) == 0 ) )
{
- spfn = shadow_l2_table(mm, gpfn );
+ spfn = shadow_l2_table(mm, gpfn );
}
mm->shadow_table = mk_pagetable(spfn<<PAGE_SHIFT);
}
@@ -566,21 +559,21 @@ static inline void __shadow_mk_pagetable( struct mm_struct *mm )
static inline void shadow_mk_pagetable( struct mm_struct *mm )
{
SH_VVLOG("shadow_mk_pagetable( gptbase=%08lx, mode=%d )",
- pagetable_val(mm->pagetable), mm->shadow_mode );
+ pagetable_val(mm->pagetable), mm->shadow_mode );
if ( unlikely(mm->shadow_mode) )
{
- ASSERT(local_irq_is_enabled());
+ ASSERT(local_irq_is_enabled());
spin_lock(&mm->shadow_lock);
- __shadow_mk_pagetable( mm );
+ __shadow_mk_pagetable( mm );
- spin_unlock(&mm->shadow_lock);
+ spin_unlock(&mm->shadow_lock);
}
SH_VVLOG("leaving shadow_mk_pagetable( gptbase=%08lx, mode=%d ) sh=%08lx",
- pagetable_val(mm->pagetable), mm->shadow_mode,
- pagetable_val(mm->shadow_table) );
+ pagetable_val(mm->pagetable), mm->shadow_mode,
+ pagetable_val(mm->shadow_table) );
}
diff --git a/xen/net/dev.c b/xen/net/dev.c
index 909e586b53..eb7747d6eb 100644
--- a/xen/net/dev.c
+++ b/xen/net/dev.c
@@ -2094,7 +2094,7 @@ static void get_rx_bufs(net_vif_t *vif)
if ( unlikely(pte_pfn >= max_page) ||
unlikely(!get_page_and_type(pte_page, p, PGT_l1_page_table)) )
{
- DPRINTK("Bad page frame for ppte %llu,%08lx,%08lx,%08x\n",
+ DPRINTK("Bad page frame for ppte %u,%08lx,%08lx,%08x\n",
p->domain, pte_pfn, max_page, pte_page->type_and_flags);
make_rx_response(vif, rx.id, 0, RING_STATUS_BAD_PAGE, 0);
continue;
@@ -2265,24 +2265,13 @@ long flush_bufs_for_vif(net_vif_t *vif)
put_page_and_type(&frame_table[rx->pte_ptr >> PAGE_SHIFT]);
- /* if in shadow mode, mark the PTE as dirty */
- if( p->mm.shadow_mode == SHM_logdirty )
- {
- mark_dirty( &p->mm, rx->pte_ptr>>PAGE_SHIFT );
-#if 0
- mark_dirty( &p->mm, rx->buf_pfn ); // XXXXXXX debug
-
- {
- unsigned long * p = map_domain_mem( rx->buf_pfn<<PAGE_SHIFT );
- p[2] = 0xdeadc001;
- unmap_domain_mem(p);
- }
-#endif
-
- }
- /* assume the shadow page table is about to be blown away,
- and that its not worth marking the buffer as dirty */
-
+ /*
+ * If in shadow mode, mark the PTE as dirty.
+ * (We assume the shadow page table is about to be blown away,
+ * and so it's not worth marking the buffer as dirty.)
+ */
+ if ( p->mm.shadow_mode == SHM_logdirty )
+ mark_dirty(&p->mm, rx->pte_ptr>>PAGE_SHIFT);
make_rx_response(vif, rx->id, 0, RING_STATUS_DROPPED, 0);
}