author     iap10@labyrinth.cl.cam.ac.uk <iap10@labyrinth.cl.cam.ac.uk>   2004-05-13 11:20:02 +0000
committer  iap10@labyrinth.cl.cam.ac.uk <iap10@labyrinth.cl.cam.ac.uk>   2004-05-13 11:20:02 +0000
commit     11c23eec7386c4c81ba99214006a0676d6b3372f (patch)
tree       4310919fdb301f844d34acc0a133fb5d4faafa5f /xen
parent     34907c0472b945d27b9763363892248f72446d44 (diff)
download   xen-11c23eec7386c4c81ba99214006a0676d6b3372f.tar.gz
           xen-11c23eec7386c4c81ba99214006a0676d6b3372f.tar.bz2
           xen-11c23eec7386c4c81ba99214006a0676d6b3372f.zip
bitkeeper revision 1.904 (40a359e2w2OMbG-v-Q3bBA8dXOcCyg)
aborted attempt at using smp_call_function for implementing scheduler pause in shadow clean. Code might be useful to someone else, though.
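For reference, the pattern the patch experiments with (and then backs away from) is sketched below, assembled from the shadow.c hunks that follow. The include list, the spinlock declaration and the placeholder comment standing in for the shadow-table work are illustrative assumptions; smp_call_function(), spin_lock()/spin_unlock() and current->processor are used exactly as they appear in the diff.

    /*
     * Sketch only: stall the CPU that domain p last ran on while the
     * caller operates on p's shadow tables. Header names and the lock
     * declaration are assumptions, not taken from the patch.
     */
    #include <xen/sched.h>
    #include <xen/smp.h>
    #include <xen/spinlock.h>

    static spinlock_t cpu_stall_lock;        /* assume spin_lock_init() ran at boot */

    /* Delivered to the other CPUs by IPI; only the target CPU actually spins. */
    static void cpu_stall(void *data)
    {
        if ( current->processor == (int)data )
        {
            spin_lock(&cpu_stall_lock);      /* blocks until the caller is finished */
            spin_unlock(&cpu_stall_lock);
        }
    }

    static int pause_and_operate(struct task_struct *p)
    {
        int cpu, rc = 0;

        spin_lock(&cpu_stall_lock);          /* held for the duration of the op */
        cpu = p->processor;
        if ( cpu != current->processor )
            smp_call_function(cpu_stall, (void *)cpu, 1, 0);   /* don't wait */

        /* ... clean or flush p's shadow tables while p cannot be rescheduled ... */

        spin_unlock(&cpu_stall_lock);        /* lets cpu_stall() return on the target */
        return rc;
    }

The commit message records this as an aborted attempt, so treat the sketch as a starting point rather than a known-good recipe; the extra printk()s in the hunks below are the debugging output left over from that attempt.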
Diffstat (limited to 'xen')
-rw-r--r--   xen/common/shadow.c        50
-rw-r--r--   xen/include/xen/shadow.h   24
2 files changed, 41 insertions, 33 deletions
diff --git a/xen/common/shadow.c b/xen/common/shadow.c
index ec5ce29d5a..44945556e1 100644
--- a/xen/common/shadow.c
+++ b/xen/common/shadow.c
@@ -170,9 +170,6 @@ int shadow_mode_enable( struct task_struct *p, unsigned int mode )
struct shadow_status **fptr;
int i;
- spin_lock_init(&m->shadow_lock);
- spin_lock(&m->shadow_lock);
-
m->shadow_mode = mode;
// allocate hashtable
@@ -186,8 +183,10 @@ int shadow_mode_enable( struct task_struct *p, unsigned int mode )
// allocate space for first lot of extra nodes
- m->shadow_ht_extras = kmalloc( sizeof(void*) + (shadow_ht_extra_size *
- sizeof(struct shadow_status)), GFP_KERNEL );
+ m->shadow_ht_extras = kmalloc( sizeof(void*) +
+ (shadow_ht_extra_size *
+ sizeof(struct shadow_status)),
+ GFP_KERNEL );
if( ! m->shadow_ht_extras )
goto nomem;
@@ -222,14 +221,11 @@ int shadow_mode_enable( struct task_struct *p, unsigned int mode )
memset(m->shadow_dirty_bitmap,0,m->shadow_dirty_bitmap_size/8);
}
- spin_unlock(&m->shadow_lock);
-
// call shadow_mk_pagetable
- shadow_mk_pagetable( m );
+ __shadow_mk_pagetable( m );
return 0;
nomem:
- spin_unlock(&m->shadow_lock);
return -ENOMEM;
}
@@ -238,10 +234,8 @@ void shadow_mode_disable( struct task_struct *p )
struct mm_struct *m = &p->mm;
struct shadow_status *next;
- spin_lock(&m->shadow_lock);
__free_shadow_table( m );
m->shadow_mode = 0;
- spin_unlock(&m->shadow_lock);
SH_LOG("freed tables count=%d l1=%d l2=%d",
m->shadow_page_count, perfc_value(shadow_l1_pages), perfc_value(shadow_l2_pages));
@@ -285,9 +279,6 @@ static int shadow_mode_table_op( struct task_struct *p,
return -EINVAL;
}
-
- spin_lock(&m->shadow_lock);
-
SH_VLOG("shadow mode table op %08lx %08lx count %d",pagetable_val( m->pagetable),pagetable_val(m->shadow_table), m->shadow_page_count);
shadow_audit(m,1);
@@ -348,16 +339,14 @@ static int shadow_mode_table_op( struct task_struct *p,
out:
- spin_unlock(&m->shadow_lock);
-
SH_VLOG("shadow mode table op : page count %d", m->shadow_page_count);
shadow_audit(m,1);
// call shadow_mk_pagetable
- shadow_mk_pagetable( m );
+ __shadow_mk_pagetable( m );
- return rc;
+ return rc;
}
int shadow_mode_control( struct task_struct *p, dom0_shadow_control_t *sc )
@@ -380,25 +369,31 @@ int shadow_mode_control( struct task_struct *p, dom0_shadow_control_t *sc )
Oh, and let's hope someone doesn't repin the CPU while we're here.
Also, pray someone else doesn't do this in another domain.
At least there's only one dom0 at the moment...
+
*/
-printk("SMC\n");
+
+printk("XXX\n");
+ spin_lock(&p->mm.shadow_lock);
+
+printk("SMC irq=%d\n",local_irq_is_enabled());
spin_lock( &cpu_stall_lock );
cpu = p->processor;
-printk("got %d %d\n",cpu, current->processor );
+printk("got target cpu=%d this cpu=%d\n",cpu, current->processor );
if ( cpu != current->processor )
{
-printk("CPU %d %d\n",cpu, current->processor );
static void cpu_stall(void * data)
{
if ( current->processor == (int) data )
{
- printk("Stall %d\n",(int)data);
+ printk("Stall cpu=%d is locked %d irq=%d\n",(int)data,spin_is_locked(&cpu_stall_lock),local_irq_is_enabled());
spin_lock( &cpu_stall_lock );
+ printk("release\n");
spin_unlock( &cpu_stall_lock );
}
}
-
+printk("before\n");
smp_call_function(cpu_stall, (void*)cpu, 1, 0); // don't wait!
+printk("after\n");
}
if ( p->mm.shadow_mode && cmd == DOM0_SHADOW_CONTROL_OP_OFF )
@@ -417,7 +412,9 @@ printk("CPU %d %d\n",cpu, current->processor );
}
else if ( p->mm.shadow_mode && cmd >= DOM0_SHADOW_CONTROL_OP_FLUSH && cmd<=DOM0_SHADOW_CONTROL_OP_CLEAN )
{
+printk("+");
rc = shadow_mode_table_op(p, sc);
+printk("=");
}
else
{
@@ -425,7 +422,10 @@ printk("CPU %d %d\n",cpu, current->processor );
}
spin_unlock( &cpu_stall_lock );
-printk("SMC-\n");
+printk("SMC- %d\n",rc);
+
+ spin_unlock(&p->mm.shadow_lock);
+
return rc;
}
@@ -549,6 +549,8 @@ int shadow_fault( unsigned long va, long error_code )
unsigned long gpte, spte;
struct mm_struct *m = &current->mm;
+ // we know interrupts are always on entry to the page fault handler
+
SH_VVLOG("shadow_fault( va=%08lx, code=%ld )", va, error_code );
check_pagetable( current, current->mm.pagetable, "pre-sf" );
diff --git a/xen/include/xen/shadow.h b/xen/include/xen/shadow.h
index bfb2a04256..587f9178bd 100644
--- a/xen/include/xen/shadow.h
+++ b/xen/include/xen/shadow.h
@@ -528,23 +528,30 @@ static inline void set_shadow_status( struct mm_struct *m,
return;
}
-static inline void shadow_mk_pagetable( struct mm_struct *mm )
+static inline void __shadow_mk_pagetable( struct mm_struct *mm )
{
unsigned long gpfn, spfn=0;
+ gpfn = pagetable_val(mm->pagetable) >> PAGE_SHIFT;
+
+ if ( unlikely((spfn=__shadow_status(mm, gpfn)) == 0 ) )
+ {
+ spfn = shadow_l2_table(mm, gpfn );
+ }
+ mm->shadow_table = mk_pagetable(spfn<<PAGE_SHIFT);
+}
+
+static inline void shadow_mk_pagetable( struct mm_struct *mm )
+{
SH_VVLOG("shadow_mk_pagetable( gptbase=%08lx, mode=%d )",
pagetable_val(mm->pagetable), mm->shadow_mode );
if ( unlikely(mm->shadow_mode) )
{
- gpfn = pagetable_val(mm->pagetable) >> PAGE_SHIFT;
-
spin_lock(&mm->shadow_lock);
- if ( unlikely((spfn=__shadow_status(mm, gpfn)) == 0 ) )
- {
- spfn = shadow_l2_table(mm, gpfn );
- }
- mm->shadow_table = mk_pagetable(spfn<<PAGE_SHIFT);
+
+ __shadow_mk_pagetable( mm );
+
spin_unlock(&mm->shadow_lock);
}
@@ -555,7 +562,6 @@ static inline void shadow_mk_pagetable( struct mm_struct *mm )
}
-
#if SHADOW_DEBUG
extern int check_pagetable(struct mm_struct *m, pagetable_t pt, char *s);
#else
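For readers skimming the shadow.h hunk above, the helper split it introduces reads roughly as follows once the patch is applied. This is reconstructed from the diff; the surrounding shadow.h definitions (struct mm_struct, __shadow_status(), shadow_l2_table(), mk_pagetable(), SH_VVLOG, PAGE_SHIFT) are assumed to be in scope as before, and any trailing logging in shadow_mk_pagetable() outside the hunk is omitted.

    /* Does the work; the caller must already hold mm->shadow_lock. */
    static inline void __shadow_mk_pagetable( struct mm_struct *mm )
    {
        unsigned long gpfn, spfn = 0;

        gpfn = pagetable_val(mm->pagetable) >> PAGE_SHIFT;

        if ( unlikely((spfn = __shadow_status(mm, gpfn)) == 0) )
            spfn = shadow_l2_table(mm, gpfn);

        mm->shadow_table = mk_pagetable(spfn << PAGE_SHIFT);
    }

    /* Locking wrapper for the normal paths. */
    static inline void shadow_mk_pagetable( struct mm_struct *mm )
    {
        SH_VVLOG("shadow_mk_pagetable( gptbase=%08lx, mode=%d )",
                 pagetable_val(mm->pagetable), mm->shadow_mode );

        if ( unlikely(mm->shadow_mode) )
        {
            spin_lock(&mm->shadow_lock);
            __shadow_mk_pagetable( mm );
            spin_unlock(&mm->shadow_lock);
        }
    }

Splitting the helper this way is what lets shadow_mode_enable() and shadow_mode_table_op(), which now run entirely under the lock taken once in shadow_mode_control(), call __shadow_mk_pagetable() directly instead of re-acquiring shadow_lock through the wrapper.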