about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author    Keir Fraser <keir.fraser@citrix.com>  2008-04-14 16:07:54 +0100
committer Keir Fraser <keir.fraser@citrix.com>  2008-04-14 16:07:54 +0100
commit8a7a02ee55bfd1861ae79d191215c8ccd79c3d78 (patch)
tree2e55f2b5a79b7ad502eb74c114d2c951af29c327
parentf541851c79dc661beff358c02ad92f803571f7f2 (diff)
downloadxen-8a7a02ee55bfd1861ae79d191215c8ccd79c3d78.tar.gz
xen-8a7a02ee55bfd1861ae79d191215c8ccd79c3d78.tar.bz2
xen-8a7a02ee55bfd1861ae79d191215c8ccd79c3d78.zip
Rename 'big lock' to 'domain lock'.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
-rw-r--r--xen/arch/ia64/xen/mm.c4
-rw-r--r--xen/arch/x86/domain.c4
-rw-r--r--xen/arch/x86/hvm/hvm.c8
-rw-r--r--xen/arch/x86/mm.c30
-rw-r--r--xen/arch/x86/traps.c4
-rw-r--r--xen/arch/x86/x86_64/compat/mm.c4
-rw-r--r--xen/common/compat/domain.c4
-rw-r--r--xen/common/domain.c10
-rw-r--r--xen/common/grant_table.c4
-rw-r--r--xen/include/xen/sched.h7
10 files changed, 40 insertions, 39 deletions
diff --git a/xen/arch/ia64/xen/mm.c b/xen/arch/ia64/xen/mm.c
index 15057ba2eb..5c8c6dd8de 100644
--- a/xen/arch/ia64/xen/mm.c
+++ b/xen/arch/ia64/xen/mm.c
@@ -2827,7 +2827,7 @@ arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
return -EINVAL;
}
- LOCK_BIGLOCK(d);
+ domain_lock(d);
/* Check remapping necessity */
prev_mfn = gmfn_to_mfn(d, xatp.gpfn);
@@ -2853,7 +2853,7 @@ arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
guest_physmap_add_page(d, xatp.gpfn, mfn);
out:
- UNLOCK_BIGLOCK(d);
+ domain_unlock(d);
rcu_unlock_domain(d);
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index ae1097416c..4418c51ff9 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -951,9 +951,9 @@ arch_do_vcpu_op(
if ( copy_from_guest(&info, arg, 1) )
break;
- LOCK_BIGLOCK(d);
+ domain_lock(d);
rc = map_vcpu_info(v, info.mfn, info.offset);
- UNLOCK_BIGLOCK(d);
+ domain_unlock(d);
break;
}
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 10f9f9e71e..3730710346 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -479,11 +479,11 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
vc = &v->arch.guest_context;
/* Need to init this vcpu before loading its contents */
- LOCK_BIGLOCK(d);
+ domain_lock(d);
if ( !v->is_initialised )
if ( (rc = boot_vcpu(d, vcpuid, vc)) != 0 )
return rc;
- UNLOCK_BIGLOCK(d);
+ domain_unlock(d);
if ( hvm_load_entry(CPU, h, &ctxt) != 0 )
return -EINVAL;
@@ -719,11 +719,11 @@ static void hvm_vcpu_down(void)
vcpu_sleep_nosync(v);
/* Any other VCPUs online? ... */
- LOCK_BIGLOCK(d);
+ domain_lock(d);
for_each_vcpu ( d, v )
if ( !test_bit(_VPF_down, &v->pause_flags) )
online_count++;
- UNLOCK_BIGLOCK(d);
+ domain_unlock(d);
/* ... Shut down the domain if not. */
if ( online_count == 0 )
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 2376e941de..15f2cf57eb 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2188,7 +2188,7 @@ int do_mmuext_op(
goto out;
}
- LOCK_BIGLOCK(d);
+ domain_lock(d);
for ( i = 0; i < count; i++ )
{
@@ -2437,7 +2437,7 @@ int do_mmuext_op(
process_deferred_ops();
- UNLOCK_BIGLOCK(d);
+ domain_unlock(d);
perfc_add(num_mmuext_ops, i);
@@ -2492,7 +2492,7 @@ int do_mmu_update(
domain_mmap_cache_init(&mapcache);
- LOCK_BIGLOCK(d);
+ domain_lock(d);
for ( i = 0; i < count; i++ )
{
@@ -2664,7 +2664,7 @@ int do_mmu_update(
process_deferred_ops();
- UNLOCK_BIGLOCK(d);
+ domain_unlock(d);
domain_mmap_cache_destroy(&mapcache);
@@ -2693,7 +2693,7 @@ static int create_grant_pte_mapping(
l1_pgentry_t ol1e;
struct domain *d = v->domain;
- ASSERT(spin_is_locked(&d->big_lock));
+ ASSERT(domain_is_locked(d));
adjust_guest_l1e(nl1e, d);
@@ -2816,7 +2816,7 @@ static int create_grant_va_mapping(
unsigned long gl1mfn;
int okay;
- ASSERT(spin_is_locked(&d->big_lock));
+ ASSERT(domain_is_locked(d));
adjust_guest_l1e(nl1e, d);
@@ -3014,7 +3014,7 @@ int do_update_va_mapping(unsigned long va, u64 val64,
if ( rc )
return rc;
- LOCK_BIGLOCK(d);
+ domain_lock(d);
pl1e = guest_map_l1e(v, va, &gl1mfn);
@@ -3027,7 +3027,7 @@ int do_update_va_mapping(unsigned long va, u64 val64,
process_deferred_ops();
- UNLOCK_BIGLOCK(d);
+ domain_unlock(d);
switch ( flags & UVMF_FLUSHTYPE_MASK )
{
@@ -3172,12 +3172,12 @@ long do_set_gdt(XEN_GUEST_HANDLE(ulong) frame_list, unsigned int entries)
if ( copy_from_guest(frames, frame_list, nr_pages) )
return -EFAULT;
- LOCK_BIGLOCK(curr->domain);
+ domain_lock(curr->domain);
if ( (ret = set_gdt(curr, frames, entries)) == 0 )
flush_tlb_local();
- UNLOCK_BIGLOCK(curr->domain);
+ domain_unlock(curr->domain);
return ret;
}
@@ -3311,7 +3311,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
return -EINVAL;
}
- LOCK_BIGLOCK(d);
+ domain_lock(d);
/* Remove previously mapped page if it was present. */
prev_mfn = gmfn_to_mfn(d, xatp.gpfn);
@@ -3333,7 +3333,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
/* Map at new location. */
guest_physmap_add_page(d, xatp.gpfn, mfn);
- UNLOCK_BIGLOCK(d);
+ domain_unlock(d);
rcu_unlock_domain(d);
@@ -3669,7 +3669,7 @@ int ptwr_do_page_fault(struct vcpu *v, unsigned long addr,
struct ptwr_emulate_ctxt ptwr_ctxt;
int rc;
- LOCK_BIGLOCK(d);
+ domain_lock(d);
/* Attempt to read the PTE that maps the VA being accessed. */
guest_get_eff_l1e(v, addr, &pte);
@@ -3694,12 +3694,12 @@ int ptwr_do_page_fault(struct vcpu *v, unsigned long addr,
if ( rc == X86EMUL_UNHANDLEABLE )
goto bail;
- UNLOCK_BIGLOCK(d);
+ domain_unlock(d);
perfc_incr(ptwr_emulations);
return EXCRET_fault_fixed;
bail:
- UNLOCK_BIGLOCK(d);
+ domain_unlock(d);
return 0;
}
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index c9f879b351..5e39c9b417 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1979,14 +1979,14 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
break;
case 3: /* Write CR3 */
- LOCK_BIGLOCK(v->domain);
+ domain_lock(v->domain);
if ( !is_pv_32on64_vcpu(v) )
rc = new_guest_cr3(gmfn_to_mfn(v->domain, xen_cr3_to_pfn(*reg)));
#ifdef CONFIG_COMPAT
else
rc = new_guest_cr3(gmfn_to_mfn(v->domain, compat_cr3_to_pfn(*reg)));
#endif
- UNLOCK_BIGLOCK(v->domain);
+ domain_unlock(v->domain);
if ( rc == 0 ) /* not okay */
goto fail;
break;
diff --git a/xen/arch/x86/x86_64/compat/mm.c b/xen/arch/x86/x86_64/compat/mm.c
index 256f7a5ac8..a1de1bab27 100644
--- a/xen/arch/x86/x86_64/compat/mm.c
+++ b/xen/arch/x86/x86_64/compat/mm.c
@@ -28,12 +28,12 @@ int compat_set_gdt(XEN_GUEST_HANDLE(uint) frame_list, unsigned int entries)
guest_handle_add_offset(frame_list, 1);
}
- LOCK_BIGLOCK(current->domain);
+ domain_lock(current->domain);
if ( (ret = set_gdt(current, frames, entries)) == 0 )
flush_tlb_local();
- UNLOCK_BIGLOCK(current->domain);
+ domain_unlock(current->domain);
return ret;
}
diff --git a/xen/common/compat/domain.c b/xen/common/compat/domain.c
index 002a8a8362..9e58cb145c 100644
--- a/xen/common/compat/domain.c
+++ b/xen/common/compat/domain.c
@@ -42,11 +42,11 @@ int compat_vcpu_op(int cmd, int vcpuid, XEN_GUEST_HANDLE(void) arg)
break;
}
- LOCK_BIGLOCK(d);
+ domain_lock(d);
rc = -EEXIST;
if ( !v->is_initialised )
rc = boot_vcpu(d, vcpuid, cmp_ctxt);
- UNLOCK_BIGLOCK(d);
+ domain_unlock(d);
xfree(cmp_ctxt);
break;
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 76b48f4296..c74fb07c0e 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -80,7 +80,7 @@ struct domain *alloc_domain(domid_t domid)
}
atomic_set(&d->refcnt, 1);
- spin_lock_init(&d->big_lock);
+ spin_lock_init(&d->domain_lock);
spin_lock_init(&d->page_alloc_lock);
spin_lock_init(&d->shutdown_lock);
spin_lock_init(&d->hypercall_deadlock_mutex);
@@ -629,7 +629,7 @@ int vcpu_reset(struct vcpu *v)
int rc;
domain_pause(d);
- LOCK_BIGLOCK(d);
+ domain_lock(d);
rc = arch_vcpu_reset(v);
if ( rc != 0 )
@@ -646,7 +646,7 @@ int vcpu_reset(struct vcpu *v)
clear_bit(_VPF_blocked, &v->pause_flags);
out:
- UNLOCK_BIGLOCK(v->domain);
+ domain_unlock(v->domain);
domain_unpause(d);
return rc;
@@ -678,11 +678,11 @@ long do_vcpu_op(int cmd, int vcpuid, XEN_GUEST_HANDLE(void) arg)
return -EFAULT;
}
- LOCK_BIGLOCK(d);
+ domain_lock(d);
rc = -EEXIST;
if ( !v->is_initialised )
rc = boot_vcpu(d, vcpuid, ctxt);
- UNLOCK_BIGLOCK(d);
+ domain_unlock(d);
xfree(ctxt);
break;
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index d582713c7e..53662df865 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -1445,7 +1445,7 @@ do_grant_table_op(
if ( count > 512 )
return -EINVAL;
- LOCK_BIGLOCK(d);
+ domain_lock(d);
rc = -EFAULT;
switch ( cmd )
@@ -1516,7 +1516,7 @@ do_grant_table_op(
}
out:
- UNLOCK_BIGLOCK(d);
+ domain_unlock(d);
return rc;
}
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index de036ceaf8..ff5241a532 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -138,8 +138,9 @@ struct vcpu
};
/* Per-domain lock can be recursively acquired in fault handlers. */
-#define LOCK_BIGLOCK(_d) spin_lock_recursive(&(_d)->big_lock)
-#define UNLOCK_BIGLOCK(_d) spin_unlock_recursive(&(_d)->big_lock)
+#define domain_lock(d) spin_lock_recursive(&(d)->domain_lock)
+#define domain_unlock(d) spin_unlock_recursive(&(d)->domain_lock)
+#define domain_is_locked(d) spin_is_locked(&(d)->domain_lock)
struct domain
{
@@ -147,7 +148,7 @@ struct domain
shared_info_t *shared_info; /* shared data area */
- spinlock_t big_lock;
+ spinlock_t domain_lock;
spinlock_t page_alloc_lock; /* protects all the following fields */
struct list_head page_list; /* linked list, of size tot_pages */