author     kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>   2005-08-14 17:32:30 +0000
committer  kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>   2005-08-14 17:32:30 +0000
commit     f3df19f657197d1f57b8175b6f858e3b8b468a9e (patch)
tree       3218c63d5f389edb2096e133cfb8b1ffc6a97609
parent     c0586bf3b0fc4c0819c482a6dd460ba93c23146e (diff)
Some arch/x86 gnttab cleanups in Xen.
Signed-off-by: Keir Fraser <keir@xensource.com>
-rw-r--r--  xen/arch/x86/mm.c                 | 286
-rw-r--r--  xen/common/grant_table.c          |  28
-rw-r--r--  xen/include/asm-x86/mm.h          |  19
-rw-r--r--  xen/include/public/grant_table.h  |   2
-rw-r--r--  xen/include/xen/grant_table.h     |  17
5 files changed, 141 insertions, 211 deletions
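
In brief, this cleanup renames the PTE-variant grant helpers (update_grant_va_mapping_pte becomes update_grant_pte_mapping, clear_grant_va_mapping_pte becomes clear_grant_pte_mapping), makes the helpers report failure as GNTST_general_error rather than -EINVAL, replaces the per-call domain_mmap_cache with plain map_domain_page()/unmap_domain_page(), and moves the internal GNTST_flush_* codes out of the public header. A condensed sketch of the resulting interface — signatures copied from the asm-x86/mm.h hunk below, with the caller obligations and return-value notes paraphrased from the patch rather than quoted:

    /*
     * Grant-mapping helpers after this patch (signatures from the
     * xen/include/asm-x86/mm.h hunk below).  Callers must hold the domain's
     * BIGLOCK, already hold a reference on the frame named by _nl1e, and
     * perform whatever TLB flush the GNTST_* return value calls for.
     */

    /* Returns GNTST_okay, GNTST_flush_one or GNTST_general_error. */
    int update_grant_va_mapping(
        unsigned long va, l1_pgentry_t _nl1e,
        struct domain *d, struct vcpu *v);

    /* Returns GNTST_okay, GNTST_flush_all or GNTST_general_error. */
    int update_grant_pte_mapping(
        unsigned long pte_addr, l1_pgentry_t _nl1e,
        struct domain *d, struct vcpu *v);

    /* Both return GNTST_okay (0) or GNTST_general_error. */
    int clear_grant_va_mapping(unsigned long addr, unsigned long frame);
    int clear_grant_pte_mapping(
        unsigned long addr, unsigned long frame, struct domain *d);
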
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 9b52c80e64..ef7b05cc96 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2269,36 +2269,20 @@ int do_mmu_update(
}
-int update_grant_va_mapping_pte(unsigned long pte_addr,
- l1_pgentry_t _nl1e,
- struct domain *d,
- struct vcpu *v)
-{
- /* Caller must:
- * . own d's BIGLOCK
- * . already have 'get_page' correctly on the to-be-installed nl1e
- * . be responsible for flushing the TLB
- * . check PTE being installed isn't DISALLOWED
- */
-
+int update_grant_pte_mapping(
+ unsigned long pte_addr, l1_pgentry_t _nl1e,
+ struct domain *d, struct vcpu *v)
+{
int rc = GNTST_okay;
void *va;
unsigned long gpfn, mfn;
struct pfn_info *page;
- struct domain_mmap_cache mapcache, sh_mapcache;
u32 type_info;
- l1_pgentry_t ol1e;
-
- /* Grant tables and shadow mode don't currently work together. */
- ASSERT( !shadow_mode_refcounts(d) );
-
- /* There shouldn't be any strange bits set on the PTE. */
- ASSERT( (l1e_get_flags(_nl1e) & L1_DISALLOW_MASK) == 0);
-
- cleanup_writable_pagetable(d);
+ l1_pgentry_t ol1e;
- domain_mmap_cache_init(&mapcache);
- domain_mmap_cache_init(&sh_mapcache);
+ ASSERT(spin_is_locked(&d->big_lock));
+ ASSERT(!shadow_mode_refcounts(d));
+ ASSERT((l1e_get_flags(_nl1e) & L1_DISALLOW_MASK) == 0);
gpfn = pte_addr >> PAGE_SHIFT;
mfn = __gpfn_to_mfn(d, gpfn);
@@ -2306,88 +2290,61 @@ int update_grant_va_mapping_pte(unsigned long pte_addr,
if ( unlikely(!get_page_from_pagenr(mfn, current->domain)) )
{
MEM_LOG("Could not get page for normal update");
- rc = -EINVAL;
- goto failed_norefs;
+ return GNTST_general_error;
}
- va = map_domain_page_with_cache(mfn, &mapcache);
- va = (void *)((unsigned long)va +
- (unsigned long)(pte_addr & ~PAGE_MASK));
- page = &frame_table[mfn];
+ va = map_domain_page(mfn);
+ va = (void *)((unsigned long)va + (pte_addr & ~PAGE_MASK));
+ page = pfn_to_page(mfn);
type_info = page->u.inuse.type_info;
- if ( (type_info & PGT_type_mask) != PGT_l1_page_table) {
+ if ( ((type_info & PGT_type_mask) != PGT_l1_page_table) ||
+ !get_page_type(page, type_info & (PGT_type_mask|PGT_va_mask)) )
+ {
DPRINTK("Grant map attempted to update a non-L1 page\n");
- rc = -EINVAL;
+ rc = GNTST_general_error;
goto failed;
}
- if ( likely(get_page_type(page, type_info & (PGT_type_mask|PGT_va_mask))) )
+ if ( __copy_from_user(&ol1e, (l1_pgentry_t *)va, sizeof(ol1e)) ||
+ !update_l1e(va, ol1e, _nl1e) )
{
+ put_page_type(page);
+ rc = GNTST_general_error;
+ goto failed;
+ }
- if ( unlikely(__copy_from_user(&ol1e, (l1_pgentry_t *)va,
- sizeof(ol1e)) != 0) ) {
- put_page_type(page);
- rc = -EINVAL;
- goto failed;
- }
+ put_page_from_l1e(ol1e, d);
- if ( update_l1e(va, ol1e, _nl1e) )
- {
- put_page_from_l1e(ol1e, d);
+ rc = (l1e_get_flags(ol1e) & _PAGE_PRESENT) ? GNTST_flush_all : GNTST_okay;
- if ( l1e_get_flags(ol1e) & _PAGE_PRESENT )
- rc = GNTST_flush_all; /* We don't know what vaddr to flush */
- else
- rc = GNTST_okay; /* Caller need not invalidate TLB entry */
-
- if ( unlikely(shadow_mode_enabled(d)) )
- shadow_l1_normal_pt_update(d, pte_addr, _nl1e, &sh_mapcache);
- }
- else
- rc = -EINVAL;
-
- put_page_type(page);
+ if ( unlikely(shadow_mode_enabled(d)) )
+ {
+ struct domain_mmap_cache sh_mapcache;
+ domain_mmap_cache_init(&sh_mapcache);
+ shadow_l1_normal_pt_update(d, pte_addr, _nl1e, &sh_mapcache);
+ domain_mmap_cache_destroy(&sh_mapcache);
}
+ put_page_type(page);
+
failed:
- unmap_domain_page_with_cache(va, &mapcache);
+ unmap_domain_page(va);
put_page(page);
-
- failed_norefs:
- domain_mmap_cache_destroy(&mapcache);
- domain_mmap_cache_destroy(&sh_mapcache);
-
return rc;
}
-
-
-int clear_grant_va_mapping_pte(unsigned long addr, unsigned long frame,
- struct domain *d)
+int clear_grant_pte_mapping(
+ unsigned long addr, unsigned long frame, struct domain *d)
{
- /* Caller must:
- * . own d's BIGLOCK
- * . already have 'get_page' correctly on the to-be-installed nl1e
- * . be responsible for flushing the TLB
- * . check PTE being installed isn't DISALLOWED
- */
-
int rc = GNTST_okay;
void *va;
unsigned long gpfn, mfn;
struct pfn_info *page;
- struct domain_mmap_cache mapcache, sh_mapcache;
u32 type_info;
- l1_pgentry_t ol1e;
-
- /* Grant tables and shadow mode don't work together. */
- ASSERT( !shadow_mode_refcounts(d) );
-
- cleanup_writable_pagetable(d);
+ l1_pgentry_t ol1e;
- domain_mmap_cache_init(&mapcache);
- domain_mmap_cache_init(&sh_mapcache);
+ ASSERT(!shadow_mode_refcounts(d));
gpfn = addr >> PAGE_SHIFT;
mfn = __gpfn_to_mfn(d, gpfn);
@@ -2395,119 +2352,91 @@ int clear_grant_va_mapping_pte(unsigned long addr, unsigned long frame,
if ( unlikely(!get_page_from_pagenr(mfn, current->domain)) )
{
MEM_LOG("Could not get page for normal update");
- rc = -EINVAL;
- goto failed_norefs;
+ return GNTST_general_error;
}
- va = map_domain_page_with_cache(mfn, &mapcache);
- va = (void *)((unsigned long)va +
- (unsigned long)(addr & ~PAGE_MASK));
- page = &frame_table[mfn];
+ va = map_domain_page(mfn);
+ va = (void *)((unsigned long)va + (addr & ~PAGE_MASK));
+ page = pfn_to_page(mfn);
type_info = page->u.inuse.type_info;
- if ( (type_info & PGT_type_mask) != PGT_l1_page_table) {
+ if ( ((type_info & PGT_type_mask) != PGT_l1_page_table) ||
+ !get_page_type(page, type_info & (PGT_type_mask|PGT_va_mask)) )
+ {
DPRINTK("Grant map attempted to update a non-L1 page\n");
- rc = -EINVAL;
+ rc = GNTST_general_error;
goto failed;
}
- if ( likely(get_page_type(page, type_info & (PGT_type_mask|PGT_va_mask))) )
+ if ( __copy_from_user(&ol1e, (l1_pgentry_t *)va, sizeof(ol1e)) )
{
- if ( unlikely(__copy_from_user(&ol1e, (l1_pgentry_t *)va,
- sizeof(ol1e)) != 0) )
- {
- rc = -EINVAL;
- put_page_type(page);
- goto failed;
- }
+ put_page_type(page);
+ rc = GNTST_general_error;
+ goto failed;
+ }
- /*
- * Check that the virtual address supplied is actually mapped to frame.
- */
- if ( unlikely((l1e_get_intpte(ol1e) >> PAGE_SHIFT) != frame ))
- {
- DPRINTK("PTE entry %lx for address %lx doesn't match frame %lx\n",
- (unsigned long)l1e_get_intpte(ol1e), addr, frame);
- rc = -EINVAL;
- put_page_type(page);
- goto failed;
- }
+ /* Check that the virtual address supplied is actually mapped to frame. */
+ if ( unlikely((l1e_get_intpte(ol1e) >> PAGE_SHIFT) != frame) )
+ {
+ DPRINTK("PTE entry %lx for address %lx doesn't match frame %lx\n",
+ (unsigned long)l1e_get_intpte(ol1e), addr, frame);
+ put_page_type(page);
+ rc = GNTST_general_error;
+ goto failed;
+ }
- /* Delete pagetable entry. */
- if ( unlikely(__put_user(0, (unsigned long *)va)))
- {
- DPRINTK("Cannot delete PTE entry at %p.\n", va);
- rc = -EINVAL;
- } else {
- if ( unlikely(shadow_mode_enabled(d)) )
- shadow_l1_normal_pt_update(d, addr, l1e_empty(),
- &sh_mapcache);
- }
+ /* Delete pagetable entry. */
+ if ( unlikely(__put_user(0, (unsigned long *)va)))
+ {
+ DPRINTK("Cannot delete PTE entry at %p.\n", va);
put_page_type(page);
+ rc = GNTST_general_error;
+ goto failed;
}
- failed:
- unmap_domain_page_with_cache(va, &mapcache);
- put_page(page);
+ if ( unlikely(shadow_mode_enabled(d)) )
+ {
+ struct domain_mmap_cache sh_mapcache;
+ domain_mmap_cache_init(&sh_mapcache);
+ shadow_l1_normal_pt_update(d, addr, l1e_empty(), &sh_mapcache);
+ domain_mmap_cache_destroy(&sh_mapcache);
+ }
- failed_norefs:
- domain_mmap_cache_destroy(&mapcache);
- domain_mmap_cache_destroy(&sh_mapcache);
+ put_page_type(page);
+ failed:
+ unmap_domain_page(va);
+ put_page(page);
return rc;
}
-
-/* This function assumes the caller is holding the domain's BIGLOCK
- * and is running in a shadow mode
- */
-int update_grant_va_mapping(unsigned long va,
- l1_pgentry_t _nl1e,
- struct domain *d,
- struct vcpu *v)
-{
- /* Caller must:
- * . own d's BIGLOCK
- * . already have 'get_page' correctly on the to-be-installed nl1e
- * . be responsible for flushing the TLB
- * . check PTE being installed isn't DISALLOWED
- */
-
- int rc = GNTST_okay;
- l1_pgentry_t *pl1e;
- l1_pgentry_t ol1e;
+int update_grant_va_mapping(
+ unsigned long va, l1_pgentry_t _nl1e, struct domain *d, struct vcpu *v)
+{
+ int rc = GNTST_okay;
+ l1_pgentry_t *pl1e, ol1e;
- cleanup_writable_pagetable(d);
+ ASSERT(spin_is_locked(&d->big_lock));
+ ASSERT(!shadow_mode_refcounts(d));
+ ASSERT((l1e_get_flags(_nl1e) & L1_DISALLOW_MASK) == 0);
- // This is actually overkill - we don't need to sync the L1 itself,
- // just everything involved in getting to this L1 (i.e. we need
- // linear_pg_table[l1_linear_offset(va)] to be in sync)...
- //
+ /*
+ * This is actually overkill - we don't need to sync the L1 itself,
+ * just everything involved in getting to this L1 (i.e. we need
+ * linear_pg_table[l1_linear_offset(va)] to be in sync)...
+ */
__shadow_sync_va(v, va);
pl1e = &linear_pg_table[l1_linear_offset(va)];
- if ( unlikely(__copy_from_user(&ol1e, pl1e, sizeof(ol1e)) != 0) )
- rc = -EINVAL;
- else if ( !shadow_mode_refcounts(d) )
- {
- if ( update_l1e(pl1e, ol1e, _nl1e) )
- {
- put_page_from_l1e(ol1e, d);
- if ( l1e_get_flags(ol1e) & _PAGE_PRESENT )
- rc = GNTST_flush_one;
- else
- rc = GNTST_okay; /* Caller need not invalidate TLB entry */
- }
- else
- rc = -EINVAL;
- }
- else
- {
- printk("grant tables and shadow mode currently don't work together\n");
- BUG();
- }
+ if ( unlikely(__copy_from_user(&ol1e, pl1e, sizeof(ol1e)) != 0) ||
+ !update_l1e(pl1e, ol1e, _nl1e) )
+ return GNTST_general_error;
+
+ put_page_from_l1e(ol1e, d);
+
+ rc = (l1e_get_flags(ol1e) & _PAGE_PRESENT) ? GNTST_flush_one : GNTST_okay;
if ( unlikely(shadow_mode_enabled(d)) )
shadow_do_update_va_mapping(va, _nl1e, v);
@@ -2517,15 +2446,15 @@ int update_grant_va_mapping(unsigned long va,
int clear_grant_va_mapping(unsigned long addr, unsigned long frame)
{
- l1_pgentry_t *pl1e;
- unsigned long _ol1e;
+ l1_pgentry_t *pl1e;
+ unsigned long _ol1e;
pl1e = &linear_pg_table[l1_linear_offset(addr)];
if ( unlikely(__get_user(_ol1e, (unsigned long *)pl1e) != 0) )
{
DPRINTK("Could not find PTE entry for address %lx\n", addr);
- return -EINVAL;
+ return GNTST_general_error;
}
/*
@@ -2536,14 +2465,14 @@ int clear_grant_va_mapping(unsigned long addr, unsigned long frame)
{
DPRINTK("PTE entry %lx for address %lx doesn't match frame %lx\n",
_ol1e, addr, frame);
- return -EINVAL;
+ return GNTST_general_error;
}
/* Delete pagetable entry. */
if ( unlikely(__put_user(0, (unsigned long *)pl1e)))
{
DPRINTK("Cannot delete PTE entry at %p.\n", (unsigned long *)pl1e);
- return -EINVAL;
+ return GNTST_general_error;
}
return 0;
@@ -2583,10 +2512,11 @@ int do_update_va_mapping(unsigned long va, u64 val64,
(shadow_mode_translate(d) ||
shadow_mode_translate(percpu_info[cpu].foreign))) )
{
- // The foreign domain's pfn's are in a different namespace.
- // There's not enough information in just a gpte to figure out
- // how to (re-)shadow this entry.
- //
+ /*
+ * The foreign domain's pfn's are in a different namespace. There's
+ * not enough information in just a gpte to figure out how to
+ * (re-)shadow this entry.
+ */
domain_crash();
}
@@ -3054,7 +2984,7 @@ void ptwr_flush(struct domain *d, const int which)
*/
BUG();
}
- PTWR_PRINTK("[%c] disconnected_l1va at %p is %lx\n",
+ PTWR_PRINTK("[%c] disconnected_l1va at %p is %"PRIpte"\n",
PTWR_PRINT_WHICH, ptep, pte.l1);
l1e_remove_flags(pte, _PAGE_RW);
@@ -3072,7 +3002,7 @@ void ptwr_flush(struct domain *d, const int which)
/* Ensure that there are no stale writable mappings in any TLB. */
/* NB. INVLPG is a serialising instruction: flushes pending updates. */
flush_tlb_one_mask(d->cpumask, l1va);
- PTWR_PRINTK("[%c] disconnected_l1va at %p now %lx\n",
+ PTWR_PRINTK("[%c] disconnected_l1va at %p now %"PRIpte"\n",
PTWR_PRINT_WHICH, ptep, pte.l1);
/*
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index b5bec9e229..785cfa97a0 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -266,7 +266,6 @@ __gnttab_activate_grant_ref(
spin_unlock(&granting_d->grant_table->lock);
-
if ( (addr != 0) && (dev_hst_ro_flags & GNTMAP_host_map) )
{
/* Write update into the pagetable. */
@@ -278,14 +277,10 @@ __gnttab_activate_grant_ref(
if ( !(dev_hst_ro_flags & GNTMAP_readonly) )
l1e_add_flags(pte,_PAGE_RW);
- if (!(dev_hst_ro_flags & GNTMAP_contains_pte))
- {
- rc = update_grant_va_mapping( addr, pte,
- mapping_d, mapping_ed );
- } else {
- rc = update_grant_va_mapping_pte( addr, pte,
- mapping_d, mapping_ed );
- }
+ if ( dev_hst_ro_flags & GNTMAP_contains_pte )
+ rc = update_grant_pte_mapping(addr, pte, mapping_d, mapping_ed);
+ else
+ rc = update_grant_va_mapping(addr, pte, mapping_d, mapping_ed);
/* IMPORTANT: rc indicates the degree of TLB flush that is required.
* GNTST_flush_one (1) or GNTST_flush_all (2). This is done in the
@@ -586,11 +581,13 @@ __gnttab_unmap_grant_ref(
(flags & GNTMAP_host_map) &&
((act->pin & (GNTPIN_hstw_mask | GNTPIN_hstr_mask)) > 0))
{
- if (flags & GNTMAP_contains_pte)
+ if ( flags & GNTMAP_contains_pte )
{
- if ( (rc = clear_grant_va_mapping_pte(addr, frame, ld)) < 0 )
+ if ( (rc = clear_grant_pte_mapping(addr, frame, ld)) < 0 )
goto unmap_out;
- } else {
+ }
+ else
+ {
if ( (rc = clear_grant_va_mapping(addr, frame)) < 0 )
goto unmap_out;
}
@@ -961,11 +958,14 @@ do_grant_table_op(
unsigned int cmd, void *uop, unsigned int count)
{
long rc;
+ struct domain *d = current->domain;
if ( count > 512 )
return -EINVAL;
- LOCK_BIGLOCK(current->domain);
+ LOCK_BIGLOCK(d);
+
+ sync_pagetable_state(d);
rc = -EFAULT;
switch ( cmd )
@@ -1001,7 +1001,7 @@ do_grant_table_op(
}
out:
- UNLOCK_BIGLOCK(current->domain);
+ UNLOCK_BIGLOCK(d);
return rc;
}
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index b0389df3dd..3c378a28df 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -377,17 +377,14 @@ void propagate_page_fault(unsigned long addr, u16 error_code);
* Caller must own d's BIGLOCK, is responsible for flushing the TLB, and must
* hold a reference to the page.
*/
-int update_grant_va_mapping(unsigned long va,
- l1_pgentry_t _nl1e,
- struct domain *d,
- struct vcpu *v);
-int update_grant_va_mapping_pte(unsigned long pte_addr,
- l1_pgentry_t _nl1e,
- struct domain *d,
- struct vcpu *v);
-
+int update_grant_va_mapping(
+ unsigned long va, l1_pgentry_t _nl1e,
+ struct domain *d, struct vcpu *v);
+int update_grant_pte_mapping(
+ unsigned long pte_addr, l1_pgentry_t _nl1e,
+ struct domain *d, struct vcpu *v);
int clear_grant_va_mapping(unsigned long addr, unsigned long frame);
-int clear_grant_va_mapping_pte(unsigned long addr, unsigned long frame,
- struct domain *d);
+int clear_grant_pte_mapping(
+ unsigned long addr, unsigned long frame, struct domain *d);
#endif /* __ASM_X86_MM_H__ */
diff --git a/xen/include/public/grant_table.h b/xen/include/public/grant_table.h
index b70bb65856..d3bf9ed9d3 100644
--- a/xen/include/public/grant_table.h
+++ b/xen/include/public/grant_table.h
@@ -261,8 +261,6 @@ typedef struct {
/*
* Values for error status returns. All errors are -ve.
*/
-#define GNTST_flush_all (2) /* Success, need to flush entire TLB. */
-#define GNTST_flush_one (1) /* Success, need to flush a vaddr. */
#define GNTST_okay (0) /* Normal return. */
#define GNTST_general_error (-1) /* General undefined error. */
#define GNTST_bad_domain (-2) /* Unrecognsed domain id. */
diff --git a/xen/include/xen/grant_table.h b/xen/include/xen/grant_table.h
index 1250c5abaf..0e9e7f71d1 100644
--- a/xen/include/xen/grant_table.h
+++ b/xen/include/xen/grant_table.h
@@ -53,8 +53,8 @@ typedef struct {
#define ORDER_GRANT_FRAMES 2
#define NR_GRANT_FRAMES (1U << ORDER_GRANT_FRAMES)
-#define NR_GRANT_ENTRIES (NR_GRANT_FRAMES * PAGE_SIZE / sizeof(grant_entry_t))
-
+#define NR_GRANT_ENTRIES \
+ ((NR_GRANT_FRAMES << PAGE_SHIFT) / sizeof(grant_entry_t))
/*
* Tracks a mapping of another domain's grant reference. Each domain has a
@@ -65,8 +65,8 @@ typedef struct {
domid_t domid; /* granting domain */
} grant_mapping_t;
#define MAPTRACK_GNTMAP_MASK 0x1f
-#define MAPTRACK_REF_SHIFT 5
-#define MAPTRACK_MAX_ENTRIES ( 1 << (16 - MAPTRACK_REF_SHIFT) )
+#define MAPTRACK_REF_SHIFT 5
+#define MAPTRACK_MAX_ENTRIES (1 << (16 - MAPTRACK_REF_SHIFT))
/* Per-domain grant information. */
typedef struct {
@@ -109,10 +109,15 @@ gnttab_prepare_for_transfer(
/* Notify 'rd' of a completed transfer via an already-locked grant entry. */
void
gnttab_notify_transfer(
- struct domain *rd, struct domain *ld, grant_ref_t ref, unsigned long frame);
+ struct domain *rd, struct domain *ld,
+ grant_ref_t ref, unsigned long frame);
-/* Pre-domain destruction release of granted device mappings of other domains.*/
+/* Domain death release of granted device mappings of other domains.*/
void
gnttab_release_dev_mappings(grant_table_t *gt);
+/* Extra GNTST_ values, for internal use only. */
+#define GNTST_flush_all (2) /* Success, need to flush entire TLB. */
+#define GNTST_flush_one (1) /* Success, need to flush a vaddr. */
+
#endif /* __XEN_GRANT_H__ */
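
Since GNTST_flush_one and GNTST_flush_all are now internal-only, the grant-table code is expected to consume them before any status is returned to the guest (see the "rc indicates the degree of TLB flush that is required" comment in the grant_table.c hunk above). A hypothetical sketch of such a consumer — the flush_tlb_one_mask() call matches a call visible elsewhere in this patch, while flush_tlb_mask() and the variable names ld/addr are assumptions, not code from this commit:

    /* Hypothetical flush handling for the internal GNTST_flush_* codes. */
    switch ( rc )
    {
    case GNTST_flush_one:
        /* Helper changed one PTE at a known vaddr: flush just that entry. */
        flush_tlb_one_mask(ld->cpumask, addr);   /* ld, addr: assumed names */
        rc = GNTST_okay;
        break;
    case GNTST_flush_all:
        /* PTE was named by machine address, vaddr unknown: flush everything. */
        flush_tlb_mask(ld->cpumask);             /* assumed API name */
        rc = GNTST_okay;
        break;
    default:
        break;    /* GNTST_okay or a -ve error code: nothing to flush. */
    }
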