author     Jan Beulich <JBeulich@suse.com>    2012-05-30 09:22:17 +0100
committer  Jan Beulich <JBeulich@suse.com>    2012-05-30 09:22:17 +0100
commit     e26560a4b056dad6d85ffd9ebfad9565f210a9cc (patch)
tree       f5577e0d7730bb6cf4a2ff0d908504fab848127c
parent     346e974dcabe907927d6169c5db936ffec19358b (diff)
gnttab: don't use domain lock for serialization
Instead use the affected domain's grant table lock, at once reducing the
scopes during which locks are being held and hence allowing significantly
better parallelism.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Tested-by: Andrew Thomas <andrew.thomas@oracle.com>
Committed-by: Keir Fraser <keir@xen.org>
-rw-r--r--  xen/arch/x86/mm.c          4
-rw-r--r--  xen/common/grant_table.c   131
2 files changed, 79 insertions(+), 56 deletions(-)
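The key addition below is the pair double_gt_lock()/double_gt_unlock(), which always acquires the two grant table locks in ascending address order, so two CPUs locking the same pair of tables from opposite ends cannot deadlock. Here is a minimal standalone sketch of that ordering pattern, using POSIX threads rather than Xen spinlocks; struct table, double_lock() and double_unlock() are illustrative names, not from the patch:

#include <pthread.h>
#include <stdio.h>

struct table {
    pthread_mutex_t lock;
    int entries;
};

/* Acquire both locks in a fixed global order (ascending address),
 * mirroring the patch's double_gt_lock(): comparing the pointers
 * gives every CPU the same notion of which lock comes "first". */
static void double_lock(struct table *l, struct table *r)
{
    if ( l < r )
    {
        pthread_mutex_lock(&l->lock);
        pthread_mutex_lock(&r->lock);
    }
    else
    {
        if ( l != r )  /* both arguments may name the same table */
            pthread_mutex_lock(&r->lock);
        pthread_mutex_lock(&l->lock);
    }
}

static void double_unlock(struct table *l, struct table *r)
{
    pthread_mutex_unlock(&l->lock);
    if ( l != r )
        pthread_mutex_unlock(&r->lock);
}

int main(void)
{
    struct table a = { PTHREAD_MUTEX_INITIALIZER, 0 };
    struct table b = { PTHREAD_MUTEX_INITIALIZER, 0 };

    /* Both argument orders acquire the underlying mutexes identically. */
    double_lock(&a, &b);
    a.entries++;
    b.entries++;
    double_unlock(&a, &b);

    double_lock(&b, &a);
    printf("entries: %d %d\n", a.entries, b.entries);
    double_unlock(&b, &a);
    return 0;
}

Whichever argument order a caller uses, the mutexes are taken in the same global order, so an A-B/B-A interleaving is impossible; the l != r checks also keep the helpers correct when both arguments refer to the same table, as happens when a domain grants to itself.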
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index ac414d1b7c..876e1ef2c7 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -3727,8 +3727,6 @@ static int create_grant_pte_mapping(
    l1_pgentry_t ol1e;
    struct domain *d = v->domain;

-    ASSERT(domain_is_locked(d));
-
    adjust_guest_l1e(nl1e, d);

    gmfn = pte_addr >> PAGE_SHIFT;
@@ -3855,8 +3853,6 @@ static int create_grant_va_mapping(
    struct page_info *l1pg;
    int okay;

-    ASSERT(domain_is_locked(d));
-
    adjust_guest_l1e(nl1e, d);

    pl1e = guest_map_l1e(v, va, &gl1mfn);
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index 692828464c..974d4f482c 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -169,6 +169,30 @@ static int __get_paged_frame(unsigned long gfn, unsigned long *frame, struct pag
    return rc;
}

+static inline void
+double_gt_lock(struct grant_table *lgt, struct grant_table *rgt)
+{
+    if ( lgt < rgt )
+    {
+        spin_lock(&lgt->lock);
+        spin_lock(&rgt->lock);
+    }
+    else
+    {
+        if ( lgt != rgt )
+            spin_lock(&rgt->lock);
+        spin_lock(&lgt->lock);
+    }
+}
+
+static inline void
+double_gt_unlock(struct grant_table *lgt, struct grant_table *rgt)
+{
+    spin_unlock(&lgt->lock);
+    if ( lgt != rgt )
+        spin_unlock(&rgt->lock);
+}
+
static inline int
__get_maptrack_handle(
    struct grant_table *t)
@@ -184,8 +208,10 @@ static inline void
put_maptrack_handle(
    struct grant_table *t, int handle)
{
+    spin_lock(&t->lock);
    maptrack_entry(t, handle).ref = t->maptrack_head;
    t->maptrack_head = handle;
+    spin_unlock(&t->lock);
}

static inline int
@@ -197,46 +223,35 @@ get_maptrack_handle(
    struct grant_mapping *new_mt;
    unsigned int new_mt_limit, nr_frames;

-    if ( unlikely((handle = __get_maptrack_handle(lgt)) == -1) )
+    spin_lock(&lgt->lock);
+
+    while ( unlikely((handle = __get_maptrack_handle(lgt)) == -1) )
    {
-        spin_lock(&lgt->lock);
+        nr_frames = nr_maptrack_frames(lgt);
+        if ( nr_frames >= max_nr_maptrack_frames() )
+            break;

-        if ( unlikely((handle = __get_maptrack_handle(lgt)) == -1) )
-        {
-            nr_frames = nr_maptrack_frames(lgt);
-            if ( nr_frames >= max_nr_maptrack_frames() )
-            {
-                spin_unlock(&lgt->lock);
-                return -1;
-            }
+        new_mt = alloc_xenheap_page();
+        if ( !new_mt )
+            break;

-            new_mt = alloc_xenheap_page();
-            if ( new_mt == NULL )
-            {
-                spin_unlock(&lgt->lock);
-                return -1;
-            }
+        clear_page(new_mt);

-            clear_page(new_mt);
+        new_mt_limit = lgt->maptrack_limit + MAPTRACK_PER_PAGE;

-            new_mt_limit = lgt->maptrack_limit + MAPTRACK_PER_PAGE;
+        for ( i = lgt->maptrack_limit; i < new_mt_limit; i++ )
+            new_mt[i % MAPTRACK_PER_PAGE].ref = i + 1;

-            for ( i = lgt->maptrack_limit; i < new_mt_limit; i++ )
-            {
-                new_mt[i % MAPTRACK_PER_PAGE].ref = i+1;
-                new_mt[i % MAPTRACK_PER_PAGE].flags = 0;
-            }
+        lgt->maptrack[nr_frames] = new_mt;
+        smp_wmb();
+        lgt->maptrack_limit = new_mt_limit;

-            lgt->maptrack[nr_frames] = new_mt;
-            lgt->maptrack_limit = new_mt_limit;
+        gdprintk(XENLOG_INFO, "Increased maptrack size to %u frames\n",
+                 nr_frames + 1);
+    }

-            gdprintk(XENLOG_INFO,
-                     "Increased maptrack size to %u frames.\n", nr_frames + 1);
-            handle = __get_maptrack_handle(lgt);
-        }
+    spin_unlock(&lgt->lock);

-        spin_unlock(&lgt->lock);
-    }
    return handle;
}
@@ -425,25 +440,23 @@ static int _set_status(unsigned gt_version,
}

static void mapcount(
-    struct domain *ld, unsigned long mfn,
+    struct domain *ld, struct domain *rd, unsigned long mfn,
    unsigned int *wrc, unsigned int *rdc)
{
    struct grant_table *gt = ld->grant_table;
    struct grant_mapping *map;
    grant_handle_t handle;
-    struct domain *rd;

    *wrc = *rdc = 0;

    for ( handle = 0; handle < gt->maptrack_limit; handle++ )
    {
        map = &maptrack_entry(gt, handle);
-        if ( !(map->flags & (GNTMAP_device_map|GNTMAP_host_map)) )
+        if ( !(map->flags & (GNTMAP_device_map|GNTMAP_host_map)) ||
+             map->domid != rd->domain_id )
            continue;
-        rd = rcu_lock_domain_by_id(map->domid);
        if ( active_entry(rd->grant_table, map->ref).frame == mfn )
            (map->flags & GNTMAP_readonly) ? (*rdc)++ : (*wrc)++;
-        rcu_unlock_domain(rd);
    }
}
@@ -662,6 +675,8 @@ __gnttab_map_grant_ref(
        goto undo_out;
    }

+    double_gt_lock(ld->grant_table, rd->grant_table);
+
    if ( !is_hvm_domain(ld) && need_iommu(ld) )
    {
        unsigned int wrc, rdc;
@@ -670,7 +685,7 @@ __gnttab_map_grant_ref(
        BUG_ON(paging_mode_translate(ld));
        /* We're not translated, so we know that gmfns and mfns are
           the same things, so the IOMMU entry is always 1-to-1. */
-        mapcount(ld, frame, &wrc, &rdc);
+        mapcount(ld, rd, frame, &wrc, &rdc);
        if ( (act_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) &&
             !(old_pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) )
        {
@@ -685,6 +700,7 @@ __gnttab_map_grant_ref(
        }
        if ( err )
        {
+            double_gt_unlock(ld->grant_table, rd->grant_table);
            rc = GNTST_general_error;
            goto undo_out;
        }
@@ -697,6 +713,8 @@ __gnttab_map_grant_ref(
    mt->ref = op->ref;
    mt->flags = op->flags;

+    double_gt_unlock(ld->grant_table, rd->grant_table);
+
    op->dev_bus_addr = (u64)frame << PAGE_SHIFT;
    op->handle = handle;
    op->status = GNTST_okay;
@@ -787,18 +805,20 @@ __gnttab_unmap_common(
    }

    op->map = &maptrack_entry(ld->grant_table, op->handle);
+    spin_lock(&ld->grant_table->lock);

    if ( unlikely(!op->map->flags) )
    {
+        spin_unlock(&ld->grant_table->lock);
        gdprintk(XENLOG_INFO, "Zero flags for handle (%d).\n", op->handle);
        op->status = GNTST_bad_handle;
        return;
    }

-    dom = op->map->domid;
-    op->flags = op->map->flags;
+    dom = op->map->domid;
+    spin_unlock(&ld->grant_table->lock);

-    if ( unlikely((op->rd = rd = rcu_lock_domain_by_id(dom)) == NULL) )
+    if ( unlikely((rd = rcu_lock_domain_by_id(dom)) == NULL) )
    {
        /* This can happen when a grant is implicitly unmapped. */
        gdprintk(XENLOG_INFO, "Could not find domain %d\n", dom);
@@ -816,8 +836,17 @@ __gnttab_unmap_common(
    TRACE_1D(TRC_MEM_PAGE_GRANT_UNMAP, dom);

-    spin_lock(&rd->grant_table->lock);
+    double_gt_lock(ld->grant_table, rd->grant_table);
+    op->flags = op->map->flags;
+    if ( unlikely(!op->flags) || unlikely(op->map->domid != dom) )
+    {
+        gdprintk(XENLOG_WARNING, "Unstable handle %u\n", op->handle);
+        rc = GNTST_bad_handle;
+        goto unmap_out;
+    }
+
+    op->rd = rd;

    act = &active_entry(rd->grant_table, op->map->ref);

    if ( op->frame == 0 )
@@ -861,7 +890,7 @@ __gnttab_unmap_common(
        unsigned int wrc, rdc;
        int err = 0;
        BUG_ON(paging_mode_translate(ld));
-        mapcount(ld, op->frame, &wrc, &rdc);
+        mapcount(ld, rd, op->frame, &wrc, &rdc);
        if ( (wrc + rdc) == 0 )
            err = iommu_unmap_page(ld, op->frame);
        else if ( wrc == 0 )
@@ -878,8 +907,8 @@ __gnttab_unmap_common(
    gnttab_mark_dirty(rd, op->frame);

 unmap_out:
+    double_gt_unlock(ld->grant_table, rd->grant_table);
    op->status = rc;
-    spin_unlock(&rd->grant_table->lock);
    rcu_unlock_domain(rd);
}
@@ -891,6 +920,7 @@ __gnttab_unmap_common_complete(struct gnttab_unmap_common *op)
    grant_entry_header_t *sha;
    struct page_info *pg;
    uint16_t *status;
+    bool_t put_handle = 0;

    rd = op->rd;
@@ -962,10 +992,7 @@ __gnttab_unmap_common_complete(struct gnttab_unmap_common *op)
    }

    if ( (op->map->flags & (GNTMAP_device_map|GNTMAP_host_map)) == 0 )
-    {
-        op->map->flags = 0;
-        put_maptrack_handle(ld->grant_table, op->handle);
-    }
+        put_handle = 1;

    if ( ((act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) == 0) &&
         !(op->flags & GNTMAP_readonly) )
@@ -976,6 +1003,11 @@ __gnttab_unmap_common_complete(struct gnttab_unmap_common *op)
 unmap_out:
    spin_unlock(&rd->grant_table->lock);
+    if ( put_handle )
+    {
+        op->map->flags = 0;
+        put_maptrack_handle(ld->grant_table, op->handle);
+    }
    rcu_unlock_domain(rd);
}
@@ -2361,13 +2393,10 @@ do_grant_table_op(
    unsigned int cmd, XEN_GUEST_HANDLE(void) uop, unsigned int count)
{
    long rc;
-    struct domain *d = current->domain;

    if ( (int)count < 0 )
        return -EINVAL;

-    domain_lock(d);
-
    rc = -EFAULT;
    switch ( cmd )
    {
@@ -2494,8 +2523,6 @@ do_grant_table_op(
    }

 out:
-    domain_unlock(d);
-
    if ( rc > 0 )
    {
        ASSERT(rc < count);
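A note on the smp_wmb() added in get_maptrack_handle() above: the new maptrack frame pointer is stored into lgt->maptrack[] before lgt->maptrack_limit is raised, presumably so that code checking maptrack_limit without holding the grant table lock never dereferences a frame pointer that is not yet globally visible. Below is a minimal standalone sketch of that publish-then-raise-limit pattern using C11 atomics; frames[], limit, publish_frame() and lookup() are illustrative names, and release/acquire ordering stands in for smp_wmb() plus the dependent read on the consumer side:

#include <stdatomic.h>
#include <stddef.h>

#define PER_PAGE 256                 /* stand-in for MAPTRACK_PER_PAGE */

struct entry { unsigned int ref; };

static struct entry *frames[64];     /* stand-in for lgt->maptrack[] */
static atomic_uint limit;            /* stand-in for lgt->maptrack_limit */

/* Writer side: runs under the table lock.  The release store plays the
 * role of smp_wmb(): the store to frames[] is ordered before the store
 * that makes the new entries reachable. */
void publish_frame(struct entry *new_frame, unsigned int nr_frames)
{
    frames[nr_frames] = new_frame;
    atomic_store_explicit(&limit, (nr_frames + 1) * PER_PAGE,
                          memory_order_release);
}

/* Reader side: may check the limit without holding the lock, and must
 * never dereference a frame pointer that has not yet been published. */
struct entry *lookup(unsigned int handle)
{
    if ( handle >= atomic_load_explicit(&limit, memory_order_acquire) )
        return NULL;
    return &frames[handle / PER_PAGE][handle % PER_PAGE];
}

On x86 of this era, Xen's smp_wmb() is essentially a compiler barrier (stores are not reordered with other stores), but the pairing shown here, write the data, then write the limit that makes it reachable, is the general form of the pattern.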