about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author   kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>  2003-10-14 15:27:17 +0000
committer kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>  2003-10-14 15:27:17 +0000
commit   c9b8f195ee70cd1ae90a10cc3c1ae3bc62c5c93e (patch)
tree     c7769611ba918f002b9aa33bafbdb71703d58ce8
parent   ffc277e54b2bc84b397592a272181c3768599395 (diff)
download xen-c9b8f195ee70cd1ae90a10cc3c1ae3bc62c5c93e.tar.gz
         xen-c9b8f195ee70cd1ae90a10cc3c1ae3bc62c5c93e.tar.bz2
         xen-c9b8f195ee70cd1ae90a10cc3c1ae3bc62c5c93e.zip
bitkeeper revision 1.516 (3f8c15d525UM6tJE0aWBdRe-3P9cVw)
memory.c, traps.c: Preload the first page of the guest LDT when the shadow mapping is invalidated for any reason.
-rw-r--r--  xen/arch/i386/traps.c  55
-rw-r--r--  xen/common/memory.c    85
2 files changed, 77 insertions, 63 deletions
diff --git a/xen/arch/i386/traps.c b/xen/arch/i386/traps.c
index c645d3e00e..33bc451d03 100644
--- a/xen/arch/i386/traps.c
+++ b/xen/arch/i386/traps.c
@@ -286,10 +286,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs, long error_code)
{
struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();
trap_info_t *ti;
- unsigned long addr, off, fixup, l1e, *ldt_page;
+ unsigned long addr, fixup;
struct task_struct *p = current;
- struct pfn_info *page;
- int i;
+ extern int map_ldt_shadow_page(unsigned int);
__asm__ __volatile__ ("movl %%cr2,%0" : "=r" (addr) : );
@@ -320,57 +319,11 @@ asmlinkage void do_page_fault(struct pt_regs *regs, long error_code)
fault_in_xen_space:
if ( (addr < LDT_VIRT_START) ||
- (addr >= (LDT_VIRT_START + (p->mm.ldt_ents*LDT_ENTRY_SIZE))) )
+ (addr >= (LDT_VIRT_START + (p->mm.ldt_ents*LDT_ENTRY_SIZE))) ||
+ map_ldt_shadow_page((addr - LDT_VIRT_START) >> PAGE_SHIFT) )
goto propagate_fault;
-
- off = addr - LDT_VIRT_START;
- addr = p->mm.ldt_base + off;
-
- spin_lock(&p->page_lock);
-
- __get_user(l1e, (unsigned long *)(linear_pg_table+(addr>>PAGE_SHIFT)));
- if ( !(l1e & _PAGE_PRESENT) )
- goto unlock_and_propagate_fault;
-
- page = frame_table + (l1e >> PAGE_SHIFT);
- if ( (page->flags & PG_type_mask) != PGT_ldt_page )
- {
- if ( page->type_count != 0 )
- goto unlock_and_propagate_fault;
-
- /* Check all potential LDT entries in the page. */
- ldt_page = (unsigned long *)(addr & PAGE_MASK);
- for ( i = 0; i < 512; i++ )
- if ( !check_descriptor(ldt_page[i*2], ldt_page[i*2+1]) )
- goto unlock_and_propagate_fault;
-
- if ( page->flags & PG_need_flush )
- {
- perfc_incrc(need_flush_tlb_flush);
- local_flush_tlb();
- page->flags &= ~PG_need_flush;
- }
-
- page->flags &= ~PG_type_mask;
- page->flags |= PGT_ldt_page;
- }
-
- /* Success! */
- get_page_type(page);
- get_page_tot(page);
- p->mm.perdomain_pt[l1_table_offset(off)+16] = mk_l1_pgentry(l1e|_PAGE_RW);
- p->mm.shadow_ldt_mapcnt++;
-
- spin_unlock(&p->page_lock);
return;
-
- unlock_and_propagate_fault:
-
- spin_unlock(&p->page_lock);
- goto propagate_fault;
-
-
fault_in_hypervisor:
if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
diff --git a/xen/common/memory.c b/xen/common/memory.c
index b230c7cb7c..405bce903f 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -172,8 +172,10 @@ spinlock_t free_list_lock = SPIN_LOCK_UNLOCKED;
unsigned int free_pfns;
/* Used to defer flushing of memory structures. */
-static int flush_tlb[NR_CPUS] __cacheline_aligned;
-
+static struct {
+ int flush_tlb;
+ int refresh_ldt;
+} deferred_op[NR_CPUS] __cacheline_aligned;
/*
* init_frametable:
@@ -186,7 +188,7 @@ void __init init_frametable(unsigned long nr_pages)
unsigned long page_index;
unsigned long flags;
- memset(flush_tlb, 0, sizeof(flush_tlb));
+ memset(deferred_op, 0, sizeof(deferred_op));
max_page = nr_pages;
frame_table_size = nr_pages * sizeof(struct pfn_info);
@@ -213,7 +215,7 @@ void __init init_frametable(unsigned long nr_pages)
static void __invalidate_shadow_ldt(void)
{
- int i;
+ int i, cpu = smp_processor_id();
unsigned long pfn;
struct pfn_info *page;
@@ -233,7 +235,8 @@ static void __invalidate_shadow_ldt(void)
}
/* Dispose of the (now possibly invalid) mappings from the TLB. */
- flush_tlb[smp_processor_id()] = 1;
+ deferred_op[cpu].flush_tlb = 1;
+ deferred_op[cpu].refresh_ldt = 1;
}
@@ -244,6 +247,58 @@ static inline void invalidate_shadow_ldt(void)
}
+/* Map shadow page at offset @off. Returns 0 on success. */
+int map_ldt_shadow_page(unsigned int off)
+{
+ struct task_struct *p = current;
+ unsigned long addr = p->mm.ldt_base + (off << PAGE_SHIFT);
+ unsigned long l1e, *ldt_page;
+ struct pfn_info *page;
+ int i, ret = -1;
+
+ spin_lock(&p->page_lock);
+
+ __get_user(l1e, (unsigned long *)(linear_pg_table+(addr>>PAGE_SHIFT)));
+ if ( unlikely(!(l1e & _PAGE_PRESENT)) )
+ goto out;
+
+ page = frame_table + (l1e >> PAGE_SHIFT);
+ if ( unlikely((page->flags & PG_type_mask) != PGT_ldt_page) )
+ {
+ if ( unlikely(page->type_count != 0) )
+ goto out;
+
+ /* Check all potential LDT entries in the page. */
+ ldt_page = (unsigned long *)addr;
+ for ( i = 0; i < 512; i++ )
+ if ( unlikely(!check_descriptor(ldt_page[i*2], ldt_page[i*2+1])) )
+ goto out;
+
+ if ( unlikely(page->flags & PG_need_flush) )
+ {
+ perfc_incrc(need_flush_tlb_flush);
+ __write_cr3_counted(pagetable_val(p->mm.pagetable));
+ page->flags &= ~PG_need_flush;
+ }
+
+ page->flags &= ~PG_type_mask;
+ page->flags |= PGT_ldt_page;
+ }
+
+ /* Success! */
+ get_page_type(page);
+ get_page_tot(page);
+ p->mm.perdomain_pt[l1_table_offset(off)+16] = mk_l1_pgentry(l1e|_PAGE_RW);
+ p->mm.shadow_ldt_mapcnt++;
+
+ ret = 0;
+
+ out:
+ spin_unlock(&p->page_lock);
+ return ret;
+}
+
+
/* Return original refcnt, or -1 on error. */
static int inc_page_refcnt(unsigned long page_nr, unsigned int type)
{
@@ -274,7 +329,7 @@ static int inc_page_refcnt(unsigned long page_nr, unsigned int type)
if ( unlikely(flags & PG_need_flush) )
{
- flush_tlb[smp_processor_id()] = 1;
+ deferred_op[smp_processor_id()].flush_tlb = 1;
page->flags &= ~PG_need_flush;
perfc_incrc(need_flush_tlb_flush);
}
@@ -628,7 +683,7 @@ static int mod_l1_entry(l1_pgentry_t *p_l1_entry, l1_pgentry_t new_l1_entry)
static int do_extended_command(unsigned long ptr, unsigned long val)
{
- int err = 0;
+ int err = 0, cpu = smp_processor_id();
unsigned int cmd = val & PGEXT_CMD_MASK;
unsigned long pfn = ptr >> PAGE_SHIFT;
struct pfn_info *page = frame_table + pfn;
@@ -694,7 +749,7 @@ static int do_extended_command(unsigned long ptr, unsigned long val)
put_l2_table(pagetable_val(current->mm.pagetable) >> PAGE_SHIFT);
current->mm.pagetable = mk_pagetable(pfn << PAGE_SHIFT);
invalidate_shadow_ldt();
- flush_tlb[smp_processor_id()] = 1;
+ deferred_op[cpu].flush_tlb = 1;
}
else
{
@@ -703,7 +758,7 @@ static int do_extended_command(unsigned long ptr, unsigned long val)
break;
case PGEXT_TLB_FLUSH:
- flush_tlb[smp_processor_id()] = 1;
+ deferred_op[cpu].flush_tlb = 1;
break;
case PGEXT_INVLPG:
@@ -729,6 +784,7 @@ static int do_extended_command(unsigned long ptr, unsigned long val)
current->mm.ldt_base = ptr;
current->mm.ldt_ents = ents;
load_LDT(current);
+ deferred_op[cpu].refresh_ldt = (ents != 0);
}
break;
}
@@ -748,7 +804,7 @@ int do_process_page_updates(page_update_request_t *ureqs, int count)
page_update_request_t req;
unsigned long flags, pfn, l1e;
struct pfn_info *page;
- int err = 0, i;
+ int err = 0, i, cpu = smp_processor_id();
unsigned int cmd;
unsigned long cr0 = 0;
@@ -884,11 +940,16 @@ int do_process_page_updates(page_update_request_t *ureqs, int count)
ureqs++;
}
- if ( flush_tlb[smp_processor_id()] )
+ if ( deferred_op[cpu].flush_tlb )
{
- flush_tlb[smp_processor_id()] = 0;
+ deferred_op[cpu].flush_tlb = 0;
__write_cr3_counted(pagetable_val(current->mm.pagetable));
+ }
+ if ( deferred_op[cpu].refresh_ldt )
+ {
+ deferred_op[cpu].refresh_ldt = 0;
+ (void)map_ldt_shadow_page(0);
}
if ( cr0 != 0 )