author     Keir Fraser <keir@xen.org>    2012-10-25 15:23:17 +0200
committer  Keir Fraser <keir@xen.org>    2012-10-25 15:23:17 +0200
commit     b71cf4adf804df4a7c039bbdb7f0c45346add4ce (patch)
tree       39f2dee236c6fba17c12826c290fef8d9923b7cc
parent     d0d01342ab01090edb43f65a4c8e798de1fd30c7 (diff)
More efficient TLB-flush filtering in alloc_heap_pages().
Rather than per-cpu filtering for every page in a super-page allocation, simply remember the most recent TLB timestamp across all allocated pages, and filter on that, just once, at the end of the function.

For large-CPU systems, doing 2MB allocations during domain creation, this cuts down the domain creation time *massively*.

TODO: It may make sense to move the filtering out into some callers, such as memory.c:populate_physmap() and memory.c:increase_reservation(), so that the filtering can be moved outside their loops, too.

Signed-off-by: Keir Fraser <keir@xen.org>
xen-unstable changeset: 26056:177fdda0be56
xen-unstable date: Mon Oct 15 15:38:11 UTC 2012
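To illustrate the idea outside the hypervisor, here is a minimal standalone C sketch, not Xen code: NR_CPUS, cpu_last_flush[] and page_ts[] are invented stand-ins, and the final per-CPU pass is only analogous to tlbflush_filter(). It shows the pattern the patch adopts: during the allocation loop only the newest relevant TLB timestamp is remembered, and the online-CPU set is filtered against that single timestamp once at the end.

/*
 * Minimal sketch (not Xen code) of "remember the newest timestamp,
 * filter once at the end".  All names and values here are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 8

/* Timestamp of the last TLB flush performed on each online CPU (made up). */
static const uint32_t cpu_last_flush[NR_CPUS] = { 5, 9, 3, 7, 9, 2, 8, 6 };

/* Stand-in for flush_tlb_mask(): just report which CPUs would be flushed. */
static void flush_tlb_cpus(const bool *mask)
{
    for ( int cpu = 0; cpu < NR_CPUS; cpu++ )
        if ( mask[cpu] )
            printf("flush TLB on cpu%d\n", cpu);
}

int main(void)
{
    /* Per-page timestamps of when each freed page last needed a flush. */
    const uint32_t page_ts[] = { 4, 8, 6, 8 };
    bool need_tlbflush = false;
    uint32_t tlbflush_timestamp = 0;

    /* Allocation loop: track only the most recent timestamp seen. */
    for ( unsigned int i = 0; i < sizeof(page_ts) / sizeof(page_ts[0]); i++ )
        if ( !need_tlbflush || page_ts[i] > tlbflush_timestamp )
        {
            need_tlbflush = true;
            tlbflush_timestamp = page_ts[i];
        }

    if ( need_tlbflush )
    {
        /*
         * Single filtering pass, analogous to tlbflush_filter(): a CPU
         * needs flushing only if it has not flushed since the newest
         * page timestamp recorded above (wrap-around ignored here).
         */
        bool mask[NR_CPUS];
        for ( int cpu = 0; cpu < NR_CPUS; cpu++ )
            mask[cpu] = (cpu_last_flush[cpu] <= tlbflush_timestamp);
        flush_tlb_cpus(mask);
    }
    return 0;
}

The code being replaced instead recomputed a CPU mask for every page, roughly O(pages x CPUs) work per super-page allocation; the sketch above makes the per-CPU pass happen exactly once.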
-rw-r--r--  xen/common/page_alloc.c | 29
1 file changed, 16 insertions(+), 13 deletions(-)
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 1d7359db29..95bd7949f7 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -414,9 +414,10 @@ static struct page_info *alloc_heap_pages(
     unsigned int first_node, i, j, zone = 0, nodemask_retry = 0;
     unsigned int node = (uint8_t)((memflags >> _MEMF_node) - 1);
     unsigned long request = 1UL << order;
-    cpumask_t mask;
     struct page_info *pg;
     nodemask_t nodemask = (d != NULL ) ? d->node_affinity : node_online_map;
+    bool_t need_tlbflush = 0;
+    uint32_t tlbflush_timestamp = 0;
 
     if ( node == NUMA_NO_NODE )
     {
@@ -530,22 +531,19 @@ static struct page_info *alloc_heap_pages(
     if ( d != NULL )
         d->last_alloc_node = node;
 
-    cpumask_clear(&mask);
-
     for ( i = 0; i < (1 << order); i++ )
     {
         /* Reference count must continuously be zero for free pages. */
         BUG_ON(pg[i].count_info != PGC_state_free);
         pg[i].count_info = PGC_state_inuse;
 
-        if ( pg[i].u.free.need_tlbflush )
+        if ( pg[i].u.free.need_tlbflush &&
+             (pg[i].tlbflush_timestamp <= tlbflush_current_time()) &&
+             (!need_tlbflush ||
+              (pg[i].tlbflush_timestamp > tlbflush_timestamp)) )
         {
-            /* Add in extra CPUs that need flushing because of this page. */
-            static cpumask_t extra_cpus_mask;
-
-            cpumask_andnot(&extra_cpus_mask, &cpu_online_map, &mask);
-            tlbflush_filter(extra_cpus_mask, pg[i].tlbflush_timestamp);
-            cpumask_or(&mask, &mask, &extra_cpus_mask);
+            need_tlbflush = 1;
+            tlbflush_timestamp = pg[i].tlbflush_timestamp;
         }
 
         /* Initialise fields which have other uses for free pages. */
@@ -555,10 +553,15 @@ static struct page_info *alloc_heap_pages(
 
     spin_unlock(&heap_lock);
 
-    if ( unlikely(!cpumask_empty(&mask)) )
+    if ( need_tlbflush )
     {
-        perfc_incr(need_flush_tlb_flush);
-        flush_tlb_mask(&mask);
+        cpumask_t mask = cpu_online_map;
+        tlbflush_filter(mask, tlbflush_timestamp);
+        if ( !cpumask_empty(&mask) )
+        {
+            perfc_incr(need_flush_tlb_flush);
+            flush_tlb_mask(&mask);
+        }
     }
 
     return pg;
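As a rough illustration of the TODO in the commit message, a caller-side variant might look like the sketch below. This is hypothetical, not Xen code: alloc_extent_ts() and filter_and_flush() are invented stand-ins, and a populate_physmap()-style loop is only assumed. The point is that the filter/flush pair moves outside the caller's per-extent loop, just as this patch moved it outside the per-page loop.

/*
 * Hypothetical sketch of hoisting the flush into a caller's loop.
 * All helpers and values below are invented for illustration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Invented stub: "allocate" one extent and report its newest TLB timestamp. */
static bool alloc_extent_ts(unsigned int order, uint32_t *ts)
{
    static uint32_t fake_ts = 40;
    (void)order;
    *ts = fake_ts += 3;          /* pretend each extent saw a newer stamp */
    return true;
}

/* Invented stub for the single filter + flush done after the loop. */
static void filter_and_flush(uint32_t ts)
{
    printf("filter online CPUs against timestamp %u, then flush survivors\n",
           (unsigned int)ts);
}

int main(void)
{
    unsigned int nr_extents = 4, order = 9;   /* e.g. four 2MB extents */
    bool need_flush = false;
    uint32_t newest_ts = 0, ts;

    for ( unsigned int i = 0; i < nr_extents; i++ )
    {
        if ( !alloc_extent_ts(order, &ts) )
            break;
        /* Accumulate instead of filtering/flushing per extent. */
        if ( !need_flush || ts > newest_ts )
        {
            need_flush = true;
            newest_ts = ts;
        }
    }

    if ( need_flush )
        filter_and_flush(newest_ts);          /* hoisted out of the loop */

    return 0;
}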