about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--xen/arch/x86/hvm/vmsi.c14
-rw-r--r--xen/include/xen/rcupdate.h4
-rw-r--r--xen/include/xen/sched.h8
-rw-r--r--xen/xsm/flask/avc.c24
4 files changed, 27 insertions, 23 deletions
diff --git a/xen/arch/x86/hvm/vmsi.c b/xen/arch/x86/hvm/vmsi.c
index d9ec9fb0a4..31ee9b7cfe 100644
--- a/xen/arch/x86/hvm/vmsi.c
+++ b/xen/arch/x86/hvm/vmsi.c
@@ -162,6 +162,8 @@ struct msixtbl_entry
struct rcu_head rcu;
};
+static DEFINE_RCU_READ_LOCK(msixtbl_rcu_lock);
+
static struct msixtbl_entry *msixtbl_find_entry(
struct vcpu *v, unsigned long addr)
{
@@ -207,7 +209,7 @@ static int msixtbl_read(
void *virt;
int r = X86EMUL_UNHANDLEABLE;
- rcu_read_lock();
+ rcu_read_lock(&msixtbl_rcu_lock);
if ( len != 4 )
goto out;
@@ -225,7 +227,7 @@ static int msixtbl_read(
r = X86EMUL_OKAY;
out:
- rcu_read_unlock();
+ rcu_read_unlock(&msixtbl_rcu_lock);
return r;
}
@@ -238,7 +240,7 @@ static int msixtbl_write(struct vcpu *v, unsigned long address,
int nr_entry;
int r = X86EMUL_UNHANDLEABLE;
- rcu_read_lock();
+ rcu_read_lock(&msixtbl_rcu_lock);
if ( len != 4 )
goto out;
@@ -265,7 +267,7 @@ static int msixtbl_write(struct vcpu *v, unsigned long address,
r = X86EMUL_OKAY;
out:
- rcu_read_unlock();
+ rcu_read_unlock(&msixtbl_rcu_lock);
return r;
}
@@ -274,12 +276,12 @@ static int msixtbl_range(struct vcpu *v, unsigned long addr)
struct msixtbl_entry *entry;
void *virt;
- rcu_read_lock();
+ rcu_read_lock(&msixtbl_rcu_lock);
entry = msixtbl_find_entry(v, addr);
virt = msixtbl_addr_to_virt(entry, addr);
- rcu_read_unlock();
+ rcu_read_unlock(&msixtbl_rcu_lock);
return !!virt;
}
diff --git a/xen/include/xen/rcupdate.h b/xen/include/xen/rcupdate.h
index 285e574e28..68f98acfda 100644
--- a/xen/include/xen/rcupdate.h
+++ b/xen/include/xen/rcupdate.h
@@ -145,14 +145,14 @@ typedef struct _rcu_read_lock rcu_read_lock_t;
*
* It is illegal to block while in an RCU read-side critical section.
*/
-#define rcu_read_lock(x) do { } while (0)
+#define rcu_read_lock(x) ((void)(x))
/**
* rcu_read_unlock - marks the end of an RCU read-side critical section.
*
* See rcu_read_lock() for more information.
*/
-#define rcu_read_unlock(x) do { } while (0)
+#define rcu_read_unlock(x) ((void)(x))
/*
* So where is rcu_write_lock()? It does not exist, as there is no
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 6e4a89854a..ab92088fc1 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -344,6 +344,10 @@ struct domain_setup_info
unsigned long symtab_len;
};
+/* Protect updates/reads (resp.) of domain_list and domain_hash. */
+extern spinlock_t domlist_update_lock;
+extern rcu_read_lock_t domlist_read_lock;
+
extern struct vcpu *idle_vcpu[NR_CPUS];
#define IDLE_DOMAIN_ID (0x7FFFU)
#define is_idle_domain(d) ((d)->domain_id == IDLE_DOMAIN_ID)
@@ -533,10 +537,6 @@ unsigned long hypercall_create_continuation(
local_events_need_delivery() \
))
-/* Protect updates/reads (resp.) of domain_list and domain_hash. */
-extern spinlock_t domlist_update_lock;
-extern rcu_read_lock_t domlist_read_lock;
-
extern struct domain *domain_list;
/* Caller must hold the domlist_read_lock or domlist_update_lock. */
diff --git a/xen/xsm/flask/avc.c b/xen/xsm/flask/avc.c
index 57b1657d61..60efc62458 100644
--- a/xen/xsm/flask/avc.c
+++ b/xen/xsm/flask/avc.c
@@ -124,6 +124,8 @@ DEFINE_PER_CPU(struct avc_cache_stats, avc_cache_stats);
static struct avc_cache avc_cache;
static struct avc_callback_node *avc_callbacks;
+static DEFINE_RCU_READ_LOCK(avc_rcu_lock);
+
static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass)
{
return (ssid ^ (tsid<<2) ^ (tclass<<4)) & (AVC_CACHE_SLOTS - 1);
@@ -255,7 +257,7 @@ int avc_get_hash_stats(char *buf, uint32_t size)
struct avc_node *node;
struct hlist_head *head;
- rcu_read_lock();
+ rcu_read_lock(&avc_rcu_lock);
slots_used = 0;
max_chain_len = 0;
@@ -275,7 +277,7 @@ int avc_get_hash_stats(char *buf, uint32_t size)
}
}
- rcu_read_unlock();
+ rcu_read_unlock(&avc_rcu_lock);
return snprintf(buf, size, "entries: %d\nbuckets used: %d/%d\n"
"longest chain: %d\n",
@@ -328,7 +330,7 @@ static inline int avc_reclaim_node(void)
lock = &avc_cache.slots_lock[hvalue];
spin_lock_irqsave(&avc_cache.slots_lock[hvalue], flags);
- rcu_read_lock();
+ rcu_read_lock(&avc_rcu_lock);
hlist_for_each_entry(node, next, head, list)
{
avc_node_delete(node);
@@ -336,12 +338,12 @@ static inline int avc_reclaim_node(void)
ecx++;
if ( ecx >= AVC_CACHE_RECLAIM )
{
- rcu_read_unlock();
+ rcu_read_unlock(&avc_rcu_lock);
spin_unlock_irqrestore(lock, flags);
goto out;
}
}
- rcu_read_unlock();
+ rcu_read_unlock(&avc_rcu_lock);
spin_unlock_irqrestore(lock, flags);
}
out:
@@ -727,10 +729,10 @@ int avc_ss_reset(u32 seqno)
lock = &avc_cache.slots_lock[i];
spin_lock_irqsave(lock, flag);
- rcu_read_lock();
+ rcu_read_lock(&avc_rcu_lock);
hlist_for_each_entry(node, next, head, list)
avc_node_delete(node);
- rcu_read_unlock();
+ rcu_read_unlock(&avc_rcu_lock);
spin_unlock_irqrestore(lock, flag);
}
@@ -780,12 +782,12 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid, u16 tclass, u32 requested,
BUG_ON(!requested);
- rcu_read_lock();
+ rcu_read_lock(&avc_rcu_lock);
node = avc_lookup(ssid, tsid, tclass);
if ( !node )
{
- rcu_read_unlock();
+ rcu_read_unlock(&avc_rcu_lock);
if ( in_avd )
avd = in_avd;
@@ -795,7 +797,7 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid, u16 tclass, u32 requested,
rc = security_compute_av(ssid,tsid,tclass,requested,avd);
if ( rc )
goto out;
- rcu_read_lock();
+ rcu_read_lock(&avc_rcu_lock);
node = avc_insert(ssid,tsid,tclass,avd);
} else {
if ( in_avd )
@@ -814,7 +816,7 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid, u16 tclass, u32 requested,
rc = -EACCES;
}
- rcu_read_unlock();
+ rcu_read_unlock(&avc_rcu_lock);
out:
return rc;
}