author		Jan Beulich <jbeulich@suse.com>	2013-02-28 11:08:13 +0100
committer	Jan Beulich <jbeulich@suse.com>	2013-02-28 11:08:13 +0100
commit		703ac3abcfc5f649c038070867ee12c67f730548 (patch)
tree		348bce6c9a99e9b6cf9f1c933f084345e0798d10 /xen/arch/x86/domain_page.c
parent		cf0c29d1a99f17fe9e501e4e09d9d1e3eced6f98 (diff)
x86: introduce create_perdomain_mapping()
... as well as free_perdomain_mappings(), and use them to carry out the
existing per-domain mapping setup/teardown. This at once makes the setup of
the first sub-range PV domain specific (with idle domains also excluded), as
the GDT/LDT mapping area is needed only for those.

Also fix an improperly scaled BUILD_BUG_ON() expression in
mapcache_domain_init().

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Keir Fraser <keir@xen.org>
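For reference, a sketch of the interface this commit introduces, with the
prototypes inferred from the call sites visible in the diff below; the
parameter names and the NULL-vs-NIL() semantics in the comments are
inferences, not taken from the patch itself.

/* Sketch only -- prototypes inferred from the call sites in this diff. */
struct domain;
struct page_info;
typedef struct { unsigned long l1; } l1_pgentry_t;  /* stand-in for Xen's type */

/*
 * Map 'nr' pages of domain 'd's per-domain area starting at virtual
 * address 'va'.  Judging by the call sites below:
 *   - pl1tab, if supplied, receives pointers to the L1 tables used
 *     (mapcache_vcpu_init() passes dcache->l1tab here);
 *   - ppg relates to the backing pages of the mapped range;
 *   - NIL(<type>) appears to request the respective allocation without
 *     handing the resulting pointers back, while plain NULL skips that
 *     part entirely -- an inference, not documented on this page.
 */
int create_perdomain_mapping(struct domain *d, unsigned long va,
                             unsigned int nr, l1_pgentry_t **pl1tab,
                             struct page_info **ppg);

/* Tear down all per-domain mappings of 'd' on the existing teardown path. */
void free_perdomain_mappings(struct domain *d);

A typical call then mirrors the new mapcache_domain_init() in the diff:

    create_perdomain_mapping(d, (unsigned long)dcache->inuse,
                             2 * bitmap_pages + 1,
                             NIL(l1_pgentry_t *), NULL);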
Diffstat (limited to 'xen/arch/x86/domain_page.c')
-rw-r--r--	xen/arch/x86/domain_page.c	129
1 file changed, 24 insertions(+), 105 deletions(-)
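On the BUILD_BUG_ON() fix mentioned in the commit message: the old expression
added page counts (the constant 3 and the two bitmap sizes) directly to a byte
address, so the overflow check was far too lax; the patch scales the counts by
PAGE_SIZE first. A minimal stand-alone illustration of the corrected shape,
using C11 static_assert in place of Xen's BUILD_BUG_ON() and made-up constants
rather than Xen's MAPCACHE_*/PERDOMAIN_* values:

#include <assert.h>   /* C11 static_assert stands in for Xen's BUILD_BUG_ON() */

#define PAGE_SIZE     4096UL
#define SLOT_MBYTES   64UL                         /* hypothetical slot size   */
#define VIRT_START    0xffff820000000000UL         /* hypothetical base        */
#define VIRT_END      (VIRT_START + (32UL << 20))  /* hypothetical cache end   */
#define BITMAP_PAGES  8UL                          /* hypothetical bitmap size */

/*
 * Broken shape: page COUNTS added to a byte address, so the check passes
 * almost regardless of the real bitmap size:
 *
 *   static_assert(VIRT_END + 3 + 2 * BITMAP_PAGES <=
 *                 VIRT_START + (SLOT_MBYTES << 20), "...");
 *
 * Fixed shape (what the patch does): scale the page counts to bytes first.
 */
static_assert(VIRT_END + PAGE_SIZE * (3 + 2 * BITMAP_PAGES) <=
              VIRT_START + (SLOT_MBYTES << 20),
              "mapcache bitmaps don't fit in the per-domain slot");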
diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c
index acc9486981..989ec2926d 100644
--- a/xen/arch/x86/domain_page.c
+++ b/xen/arch/x86/domain_page.c
@@ -243,10 +243,7 @@ void copy_domain_page(unsigned long dmfn, unsigned long smfn)
int mapcache_domain_init(struct domain *d)
{
struct mapcache_domain *dcache = &d->arch.pv_domain.mapcache;
- l3_pgentry_t *l3tab;
- l2_pgentry_t *l2tab;
- unsigned int i, bitmap_pages, memf = MEMF_node(domain_to_node(d));
- unsigned long *end;
+ unsigned int bitmap_pages;
if ( is_hvm_domain(d) || is_idle_domain(d) )
return 0;
@@ -256,48 +253,23 @@ int mapcache_domain_init(struct domain *d)
return 0;
#endif
- dcache->l1tab = xzalloc_array(l1_pgentry_t *, MAPCACHE_L2_ENTRIES + 1);
- d->arch.perdomain_l2_pg[MAPCACHE_SLOT] = alloc_domheap_page(NULL, memf);
- if ( !dcache->l1tab || !d->arch.perdomain_l2_pg[MAPCACHE_SLOT] )
+ dcache->l1tab = xzalloc_array(l1_pgentry_t *, MAPCACHE_L2_ENTRIES);
+ if ( !dcache->l1tab )
return -ENOMEM;
- clear_domain_page(page_to_mfn(d->arch.perdomain_l2_pg[MAPCACHE_SLOT]));
- l3tab = __map_domain_page(d->arch.perdomain_l3_pg);
- l3tab[l3_table_offset(MAPCACHE_VIRT_START)] =
- l3e_from_page(d->arch.perdomain_l2_pg[MAPCACHE_SLOT],
- __PAGE_HYPERVISOR);
- unmap_domain_page(l3tab);
-
- l2tab = __map_domain_page(d->arch.perdomain_l2_pg[MAPCACHE_SLOT]);
-
- BUILD_BUG_ON(MAPCACHE_VIRT_END + 3 +
- 2 * PFN_UP(BITS_TO_LONGS(MAPCACHE_ENTRIES) * sizeof(long)) >
+ BUILD_BUG_ON(MAPCACHE_VIRT_END + PAGE_SIZE * (3 +
+ 2 * PFN_UP(BITS_TO_LONGS(MAPCACHE_ENTRIES) * sizeof(long))) >
MAPCACHE_VIRT_START + (PERDOMAIN_SLOT_MBYTES << 20));
bitmap_pages = PFN_UP(BITS_TO_LONGS(MAPCACHE_ENTRIES) * sizeof(long));
dcache->inuse = (void *)MAPCACHE_VIRT_END + PAGE_SIZE;
dcache->garbage = dcache->inuse +
(bitmap_pages + 1) * PAGE_SIZE / sizeof(long);
- end = dcache->garbage + bitmap_pages * PAGE_SIZE / sizeof(long);
-
- for ( i = l2_table_offset((unsigned long)dcache->inuse);
- i <= l2_table_offset((unsigned long)(end - 1)); ++i )
- {
- ASSERT(i <= MAPCACHE_L2_ENTRIES);
- dcache->l1tab[i] = alloc_xenheap_pages(0, memf);
- if ( !dcache->l1tab[i] )
- {
- unmap_domain_page(l2tab);
- return -ENOMEM;
- }
- clear_page(dcache->l1tab[i]);
- l2tab[i] = l2e_from_paddr(__pa(dcache->l1tab[i]), __PAGE_HYPERVISOR);
- }
-
- unmap_domain_page(l2tab);
spin_lock_init(&dcache->lock);
- return 0;
+ return create_perdomain_mapping(d, (unsigned long)dcache->inuse,
+ 2 * bitmap_pages + 1,
+ NIL(l1_pgentry_t *), NULL);
}
void mapcache_domain_exit(struct domain *d)
@@ -307,94 +279,41 @@ void mapcache_domain_exit(struct domain *d)
if ( is_hvm_domain(d) )
return;
- if ( dcache->l1tab )
- {
- unsigned long i;
-
- for ( i = (unsigned long)dcache->inuse; ; i += PAGE_SIZE )
- {
- l1_pgentry_t *pl1e;
-
- if ( l2_table_offset(i) > MAPCACHE_L2_ENTRIES ||
- !dcache->l1tab[l2_table_offset(i)] )
- break;
-
- pl1e = &dcache->l1tab[l2_table_offset(i)][l1_table_offset(i)];
- if ( l1e_get_flags(*pl1e) )
- free_domheap_page(l1e_get_page(*pl1e));
- }
-
- for ( i = 0; i < MAPCACHE_L2_ENTRIES + 1; ++i )
- free_xenheap_page(dcache->l1tab[i]);
-
- xfree(dcache->l1tab);
- }
+ xfree(dcache->l1tab);
}
int mapcache_vcpu_init(struct vcpu *v)
{
struct domain *d = v->domain;
struct mapcache_domain *dcache = &d->arch.pv_domain.mapcache;
- l2_pgentry_t *l2tab;
unsigned long i;
- unsigned int memf = MEMF_node(vcpu_to_node(v));
+ unsigned int ents = d->max_vcpus * MAPCACHE_VCPU_ENTRIES;
+ unsigned int nr = PFN_UP(BITS_TO_LONGS(ents) * sizeof(long));
if ( is_hvm_vcpu(v) || !dcache->l1tab )
return 0;
- l2tab = __map_domain_page(d->arch.perdomain_l2_pg[MAPCACHE_SLOT]);
-
- while ( dcache->entries < d->max_vcpus * MAPCACHE_VCPU_ENTRIES )
+ if ( ents > dcache->entries )
{
- unsigned int ents = dcache->entries + MAPCACHE_VCPU_ENTRIES;
- l1_pgentry_t *pl1e;
-
/* Populate page tables. */
- if ( !dcache->l1tab[i = mapcache_l2_entry(ents - 1)] )
- {
- dcache->l1tab[i] = alloc_xenheap_pages(0, memf);
- if ( !dcache->l1tab[i] )
- {
- unmap_domain_page(l2tab);
- return -ENOMEM;
- }
- clear_page(dcache->l1tab[i]);
- l2tab[i] = l2e_from_paddr(__pa(dcache->l1tab[i]),
- __PAGE_HYPERVISOR);
- }
+ int rc = create_perdomain_mapping(d, MAPCACHE_VIRT_START,
+ d->max_vcpus * MAPCACHE_VCPU_ENTRIES,
+ dcache->l1tab, NULL);
/* Populate bit maps. */
- i = (unsigned long)(dcache->inuse + BITS_TO_LONGS(ents));
- pl1e = &dcache->l1tab[l2_table_offset(i)][l1_table_offset(i)];
- if ( !l1e_get_flags(*pl1e) )
- {
- struct page_info *pg = alloc_domheap_page(NULL, memf);
-
- if ( pg )
- {
- clear_domain_page(page_to_mfn(pg));
- *pl1e = l1e_from_page(pg, __PAGE_HYPERVISOR);
- pg = alloc_domheap_page(NULL, memf);
- }
- if ( !pg )
- {
- unmap_domain_page(l2tab);
- return -ENOMEM;
- }
-
- i = (unsigned long)(dcache->garbage + BITS_TO_LONGS(ents));
- pl1e = &dcache->l1tab[l2_table_offset(i)][l1_table_offset(i)];
- ASSERT(!l1e_get_flags(*pl1e));
-
- clear_domain_page(page_to_mfn(pg));
- *pl1e = l1e_from_page(pg, __PAGE_HYPERVISOR);
- }
+ if ( !rc )
+ rc = create_perdomain_mapping(d, (unsigned long)dcache->inuse,
+ nr, NULL, NIL(struct page_info *));
+ if ( !rc )
+ rc = create_perdomain_mapping(d, (unsigned long)dcache->garbage,
+ nr, NULL, NIL(struct page_info *));
+
+ if ( rc )
+ return rc;
dcache->entries = ents;
}
- unmap_domain_page(l2tab);
-
/* Mark all maphash entries as not in use. */
BUILD_BUG_ON(MAPHASHENT_NOTINUSE < MAPCACHE_ENTRIES);
for ( i = 0; i < MAPHASH_ENTRIES; i++ )