author     kfraser@localhost.localdomain <kfraser@localhost.localdomain>  2007-01-22 15:42:13 +0000
committer  kfraser@localhost.localdomain <kfraser@localhost.localdomain>  2007-01-22 15:42:13 +0000
commit     44a156d31f9099cd8bc9c11e3a3f3cc9c867dd08 (patch)
tree       ad797da68cb4dae2b237738f921de76814b0c39a /linux-2.6-xen-sparse/arch
parent     2bdecd53829f618ae71e2f67e5cd1c122643eb8e (diff)
linux/i386: relax highpte pinning/write-protecting
Signed-off-by: Jan Beulich <jbeulich@novell.com>
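
Instead of eagerly pinning every highmem L1 page table at allocation time (MMUEXT_PIN_L1_TABLE) and unpinning it on free, the patch tracks the pin state of page-table pages with the PG_pinned page flag: kmap_atomic_pte() maps a page-table page read-only only while it is actually pinned, pte_alloc_one()/pte_free() lose their per-page hypercalls, and pgd_walk() now reports whether any highmem page-table page changed pin state so that __pgd_pin()/__pgd_unpin() can flush stale atomic kmaps via kmap_flush_unused().

As an illustration, a minimal sketch of the relaxed mapping decision, i.e. the post-patch kmap_atomic_pte() reconstructed from the first hunk below (__kmap_atomic(), kmap_prot and PG_pinned are the existing i386/Xen definitions, not new names):

    /* Map a page-table page for temporary access.  Only a page that is
     * currently pinned must be mapped read-only; an unpinned one keeps
     * the normal, writable kmap protection. */
    void *kmap_atomic_pte(struct page *page, enum km_type type)
    {
            return __kmap_atomic(page, type,
                                 test_bit(PG_pinned, &page->flags)
                                 ? PAGE_KERNEL_RO : kmap_prot);
    }
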
Diffstat (limited to 'linux-2.6-xen-sparse/arch')
-rw-r--r--  linux-2.6-xen-sparse/arch/i386/mm/highmem-xen.c |  4
-rw-r--r--  linux-2.6-xen-sparse/arch/i386/mm/pgtable-xen.c | 52
2 files changed, 25 insertions(+), 31 deletions(-)
diff --git a/linux-2.6-xen-sparse/arch/i386/mm/highmem-xen.c b/linux-2.6-xen-sparse/arch/i386/mm/highmem-xen.c
index ee7deae2a7..20838cce53 100644
--- a/linux-2.6-xen-sparse/arch/i386/mm/highmem-xen.c
+++ b/linux-2.6-xen-sparse/arch/i386/mm/highmem-xen.c
@@ -55,7 +55,9 @@ void *kmap_atomic(struct page *page, enum km_type type)
 /* Same as kmap_atomic but with PAGE_KERNEL_RO page protection. */
 void *kmap_atomic_pte(struct page *page, enum km_type type)
 {
-        return __kmap_atomic(page, type, PAGE_KERNEL_RO);
+        return __kmap_atomic(page, type,
+                             test_bit(PG_pinned, &page->flags)
+                             ? PAGE_KERNEL_RO : kmap_prot);
 }
 
 void kunmap_atomic(void *kvaddr, enum km_type type)
diff --git a/linux-2.6-xen-sparse/arch/i386/mm/pgtable-xen.c b/linux-2.6-xen-sparse/arch/i386/mm/pgtable-xen.c
index 2af24831ec..18f27c7cde 100644
--- a/linux-2.6-xen-sparse/arch/i386/mm/pgtable-xen.c
+++ b/linux-2.6-xen-sparse/arch/i386/mm/pgtable-xen.c
@@ -25,7 +25,6 @@
 #include <asm/mmu_context.h>
 
 #include <xen/features.h>
-#include <xen/foreign_page.h>
 #include <asm/hypervisor.h>
 
 static void pgd_test_and_unpin(pgd_t *pgd);
@@ -239,14 +238,6 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 
 #ifdef CONFIG_HIGHPTE
         pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
-        if (pte && PageHighMem(pte)) {
-                struct mmuext_op op;
-
-                kmap_flush_unused();
-                op.cmd = MMUEXT_PIN_L1_TABLE;
-                op.arg1.mfn = pfn_to_mfn(page_to_pfn(pte));
-                BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-        }
 #else
         pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
 #endif
@@ -267,13 +258,8 @@ void pte_free(struct page *pte)
                 if (!pte_write(*virt_to_ptep(va)))
                         BUG_ON(HYPERVISOR_update_va_mapping(
                                 va, pfn_pte(pfn, PAGE_KERNEL), 0));
-        } else {
-                struct mmuext_op op;
-
-                op.cmd = MMUEXT_UNPIN_TABLE;
-                op.arg1.mfn = pfn_to_mfn(pfn);
-                BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-        }
+        } else
+                clear_bit(PG_pinned, &pte->flags);
 
         ClearPageForeign(pte);
         init_page_count(pte);
@@ -587,46 +573,48 @@ void make_pages_writable(void *va, unsigned int nr, unsigned int feature)
         }
 }
 
-static inline void pgd_walk_set_prot(void *pt, pgprot_t flags)
+static inline int pgd_walk_set_prot(struct page *page, pgprot_t flags)
 {
-        struct page *page = virt_to_page(pt);
         unsigned long pfn = page_to_pfn(page);
 
         if (PageHighMem(page))
-                return;
+                return pgprot_val(flags) & _PAGE_RW
+                       ? test_and_clear_bit(PG_pinned, &page->flags)
+                       : !test_and_set_bit(PG_pinned, &page->flags);
+
         BUG_ON(HYPERVISOR_update_va_mapping(
                 (unsigned long)__va(pfn << PAGE_SHIFT),
                 pfn_pte(pfn, flags), 0));
+
+        return 0;
 }
 
-static void pgd_walk(pgd_t *pgd_base, pgprot_t flags)
+static int pgd_walk(pgd_t *pgd_base, pgprot_t flags)
 {
         pgd_t *pgd = pgd_base;
         pud_t *pud;
         pmd_t *pmd;
-        pte_t *pte;
-        int g, u, m;
+        int g, u, m, flush;
 
         if (xen_feature(XENFEAT_auto_translated_physmap))
-                return;
+                return 0;
 
-        for (g = 0; g < USER_PTRS_PER_PGD; g++, pgd++) {
+        for (g = 0, flush = 0; g < USER_PTRS_PER_PGD; g++, pgd++) {
                 if (pgd_none(*pgd))
                         continue;
                 pud = pud_offset(pgd, 0);
                 if (PTRS_PER_PUD > 1) /* not folded */
-                        pgd_walk_set_prot(pud,flags);
+                        flush |= pgd_walk_set_prot(virt_to_page(pud),flags);
                 for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
                         if (pud_none(*pud))
                                 continue;
                         pmd = pmd_offset(pud, 0);
                         if (PTRS_PER_PMD > 1) /* not folded */
-                                pgd_walk_set_prot(pmd,flags);
+                                flush |= pgd_walk_set_prot(virt_to_page(pmd),flags);
                         for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
                                 if (pmd_none(*pmd))
                                         continue;
-                                pte = pte_offset_kernel(pmd,0);
-                                pgd_walk_set_prot(pte,flags);
+                                flush |= pgd_walk_set_prot(pmd_page(*pmd),flags);
                         }
                 }
         }
@@ -635,11 +623,14 @@ static void pgd_walk(pgd_t *pgd_base, pgprot_t flags)
                 (unsigned long)pgd_base,
                 pfn_pte(virt_to_phys(pgd_base)>>PAGE_SHIFT, flags),
                 UVMF_TLB_FLUSH));
+
+        return flush;
 }
 
 static void __pgd_pin(pgd_t *pgd)
 {
-        pgd_walk(pgd, PAGE_KERNEL_RO);
+        if (pgd_walk(pgd, PAGE_KERNEL_RO))
+                kmap_flush_unused();
         xen_pgd_pin(__pa(pgd));
         set_bit(PG_pinned, &virt_to_page(pgd)->flags);
 }
@@ -647,7 +638,8 @@ static void __pgd_pin(pgd_t *pgd)
 static void __pgd_unpin(pgd_t *pgd)
 {
         xen_pgd_unpin(__pa(pgd));
-        pgd_walk(pgd, PAGE_KERNEL);
+        if (pgd_walk(pgd, PAGE_KERNEL))
+                kmap_flush_unused();
         clear_bit(PG_pinned, &virt_to_page(pgd)->flags);
 }
 
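
Taken together, the pgtable-xen.c hunks defer any required kmap flush to pin/unpin time: pgd_walk_set_prot() only toggles PG_pinned for highmem page-table pages (their kernel mapping cannot be changed via update_va_mapping) and reports whether the pin state actually changed, and the callers flush unused atomic kmaps only in that case. A simplified, consolidated post-patch sketch of this flow, assembled from the hunks above:

    /* Returns non-zero if a highmem page-table page changed pin state,
     * i.e. if previously established atomic kmaps may now carry the
     * wrong protection and need to be flushed. */
    static inline int pgd_walk_set_prot(struct page *page, pgprot_t flags)
    {
            unsigned long pfn = page_to_pfn(page);

            if (PageHighMem(page))
                    /* Record the new pin state; report whether it changed. */
                    return pgprot_val(flags) & _PAGE_RW
                           ? test_and_clear_bit(PG_pinned, &page->flags)
                           : !test_and_set_bit(PG_pinned, &page->flags);

            /* Lowmem: switch the kernel mapping between RW and RO directly. */
            BUG_ON(HYPERVISOR_update_va_mapping(
                    (unsigned long)__va(pfn << PAGE_SHIFT),
                    pfn_pte(pfn, flags), 0));

            return 0;
    }

    static void __pgd_pin(pgd_t *pgd)
    {
            if (pgd_walk(pgd, PAGE_KERNEL_RO))  /* some PTE page newly pinned */
                    kmap_flush_unused();        /* drop stale writable kmaps */
            xen_pgd_pin(__pa(pgd));
            set_bit(PG_pinned, &virt_to_page(pgd)->flags);
    }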