31 files changed, 856 insertions, 843 deletions
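This changeset is a mechanical consolidation: per-vcpu x86 state moves out of the old ed->thread (struct thread_struct) and ed->mm (struct mm_struct) into a single ed->arch container, while domain-wide state -- the per-domain page table and all shadow-pagetable bookkeeping -- moves to d->arch. A rough sketch of the resulting layout, reconstructed purely from the field accesses in the hunks below; the real definitions live in the asm-x86 headers, which are not part of this diff, so grouping, sizes and types here are approximate:

    /* Illustrative reconstruction only, not the actual header. */
    struct arch_exec_domain {
        execution_context_t user_ctxt;        /* was ed->thread.user_ctxt */
        void (*schedule_tail)(struct exec_domain *);
        trap_info_t         traps[256];       /* array size assumed       */
        struct trap_bounce  trap_bounce;
        unsigned long       ldt_base, ldt_ents;      /* was ed->mm.*      */
        unsigned long       guestos_ss, guestos_sp;
        unsigned long       debugreg[8];
        unsigned long       event_selector, event_address;
        unsigned long       failsafe_selector, failsafe_address;
        unsigned long       guest_cr2;
        pagetable_t         pagetable;        /* guest CR3                */
        pagetable_t         monitor_table, shadow_table;
        l1_pgentry_t       *perdomain_ptes;
        l2_pgentry_t       *guest_pl2e_cache; /* VMX only                 */
        struct arch_vmx_struct arch_vmx;      /* VMX only                 */
        /* i387 save area, gdt, io_bitmap and fast-trap fields move here too */
    };

    struct arch_domain {
        l1_pgentry_t *mm_perdomain_pt;        /* was d->mm_perdomain_pt   */
        unsigned int  shadow_mode;            /* was per-mm_struct        */
        spinlock_t    shadow_lock;
        struct shadow_status *shadow_ht, *shadow_ht_free, *shadow_ht_extras;
        unsigned long *shadow_dirty_bitmap;
        unsigned int   shadow_dirty_bitmap_size;
        unsigned int   shadow_page_count, shadow_fault_count;
        /* ... plus the remaining shadow_* counters used below ... */
    };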
diff --git a/xen/arch/x86/dom0_ops.c b/xen/arch/x86/dom0_ops.c
index 8b06d8bc2c..7418321346 100644
--- a/xen/arch/x86/dom0_ops.c
+++ b/xen/arch/x86/dom0_ops.c
@@ -340,49 +340,50 @@ long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op)
     return ret;
 }
 
-void arch_getdomaininfo_ctxt(struct exec_domain *d, full_execution_context_t *c)
+void arch_getdomaininfo_ctxt(
+    struct exec_domain *ed, full_execution_context_t *c)
 {
     int i;
 
     c->flags = 0;
     memcpy(&c->cpu_ctxt,
-           &d->thread.user_ctxt,
-           sizeof(d->thread.user_ctxt));
-    if ( test_bit(EDF_DONEFPUINIT, &d->ed_flags) )
+           &ed->arch.user_ctxt,
+           sizeof(ed->arch.user_ctxt));
+    if ( test_bit(EDF_DONEFPUINIT, &ed->ed_flags) )
         c->flags |= ECF_I387_VALID;
     memcpy(&c->fpu_ctxt,
-           &d->thread.i387,
-           sizeof(d->thread.i387));
+           &ed->arch.i387,
+           sizeof(ed->arch.i387));
     memcpy(&c->trap_ctxt,
-           d->thread.traps,
-           sizeof(d->thread.traps));
+           ed->arch.traps,
+           sizeof(ed->arch.traps));
 #ifdef ARCH_HAS_FAST_TRAP
-    if ( (d->thread.fast_trap_desc.a == 0) &&
-         (d->thread.fast_trap_desc.b == 0) )
+    if ( (ed->arch.fast_trap_desc.a == 0) &&
+         (ed->arch.fast_trap_desc.b == 0) )
         c->fast_trap_idx = 0;
     else
         c->fast_trap_idx =
-            d->thread.fast_trap_idx;
+            ed->arch.fast_trap_idx;
 #endif
-    c->ldt_base = d->mm.ldt_base;
-    c->ldt_ents = d->mm.ldt_ents;
+    c->ldt_base = ed->arch.ldt_base;
+    c->ldt_ents = ed->arch.ldt_ents;
     c->gdt_ents = 0;
-    if ( GET_GDT_ADDRESS(d) == GDT_VIRT_START(d) )
+    if ( GET_GDT_ADDRESS(ed) == GDT_VIRT_START(ed) )
     {
         for ( i = 0; i < 16; i++ )
             c->gdt_frames[i] =
-                l1_pgentry_to_pagenr(d->mm.perdomain_ptes[i]);
-        c->gdt_ents = GET_GDT_ENTRIES(d);
+                l1_pgentry_to_pagenr(ed->arch.perdomain_ptes[i]);
+        c->gdt_ents = GET_GDT_ENTRIES(ed);
     }
-    c->guestos_ss  = d->thread.guestos_ss;
-    c->guestos_esp = d->thread.guestos_sp;
+    c->guestos_ss  = ed->arch.guestos_ss;
+    c->guestos_esp = ed->arch.guestos_sp;
     c->pt_base =
-        pagetable_val(d->mm.pagetable);
+        pagetable_val(ed->arch.pagetable);
     memcpy(c->debugreg,
-           d->thread.debugreg,
-           sizeof(d->thread.debugreg));
-    c->event_callback_cs     = d->thread.event_selector;
-    c->event_callback_eip    = d->thread.event_address;
-    c->failsafe_callback_cs  = d->thread.failsafe_selector;
-    c->failsafe_callback_eip = d->thread.failsafe_address;
+           ed->arch.debugreg,
+           sizeof(ed->arch.debugreg));
+    c->event_callback_cs     = ed->arch.event_selector;
+    c->event_callback_eip    = ed->arch.event_address;
+    c->failsafe_callback_cs  = ed->arch.failsafe_selector;
+    c->failsafe_callback_eip = ed->arch.failsafe_address;
 }
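arch_getdomaininfo_ctxt() above and arch_final_setup_guestos() in domain.c below are mirror images: one serializes a vcpu's ed->arch into a full_execution_context_t, the other rebuilds ed->arch from one. A minimal sketch of the round trip (src/dst are hypothetical vcpus; error handling elided):

    full_execution_context_t c;
    arch_getdomaininfo_ctxt(src, &c);        /* src->arch --> c                 */
    /* c is what crosses the dom0_op hypercall boundary */
    rc = arch_final_setup_guestos(dst, &c);  /* c --> dst->arch, rc < 0 on error */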
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 1fa6a0f0e5..f456a7b79a 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1,3 +1,4 @@
+/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
 /******************************************************************************
  * arch/x86/domain.c
  *
@@ -231,7 +232,7 @@ void arch_free_exec_domain_struct(struct exec_domain *ed)
 
 void free_perdomain_pt(struct domain *d)
 {
-    free_xenheap_page((unsigned long)d->mm_perdomain_pt);
+    free_xenheap_page((unsigned long)d->arch.mm_perdomain_pt);
 }
 
 static void continue_idle_task(struct exec_domain *ed)
@@ -248,15 +249,15 @@ void arch_do_createdomain(struct exec_domain *ed)
 {
     struct domain *d = ed->domain;
 
-    SET_DEFAULT_FAST_TRAP(&ed->thread);
+    SET_DEFAULT_FAST_TRAP(&ed->arch);
 
     if ( d->id == IDLE_DOMAIN_ID )
     {
-        ed->thread.schedule_tail = continue_idle_task;
+        ed->arch.schedule_tail = continue_idle_task;
     }
     else
     {
-        ed->thread.schedule_tail = continue_nonidle_task;
+        ed->arch.schedule_tail = continue_nonidle_task;
 
         d->shared_info = (void *)alloc_xenheap_page();
         memset(d->shared_info, 0, PAGE_SIZE);
@@ -265,36 +266,37 @@ void arch_do_createdomain(struct exec_domain *ed)
         machine_to_phys_mapping[virt_to_phys(d->shared_info) >>
                                 PAGE_SHIFT] = INVALID_P2M_ENTRY;
 
-        d->mm_perdomain_pt = (l1_pgentry_t *)alloc_xenheap_page();
-        memset(d->mm_perdomain_pt, 0, PAGE_SIZE);
-        machine_to_phys_mapping[virt_to_phys(d->mm_perdomain_pt) >>
+        d->arch.mm_perdomain_pt = (l1_pgentry_t *)alloc_xenheap_page();
+        memset(d->arch.mm_perdomain_pt, 0, PAGE_SIZE);
+        machine_to_phys_mapping[virt_to_phys(d->arch.mm_perdomain_pt) >>
                                 PAGE_SHIFT] = INVALID_P2M_ENTRY;
 
-        ed->mm.perdomain_ptes = d->mm_perdomain_pt;
+        ed->arch.perdomain_ptes = d->arch.mm_perdomain_pt;
     }
 }
 
 void arch_do_boot_vcpu(struct exec_domain *ed)
 {
     struct domain *d = ed->domain;
-    ed->thread.schedule_tail = d->exec_domain[0]->thread.schedule_tail;
-    ed->mm.perdomain_ptes = d->mm_perdomain_pt + (ed->eid << PDPT_VCPU_SHIFT);
+    ed->arch.schedule_tail = d->exec_domain[0]->arch.schedule_tail;
+    ed->arch.perdomain_ptes =
+        d->arch.mm_perdomain_pt + (ed->eid << PDPT_VCPU_SHIFT);
 }
 
 #ifdef CONFIG_VMX
 void arch_vmx_do_resume(struct exec_domain *ed)
 {
-    u64 vmcs_phys_ptr = (u64) virt_to_phys(ed->thread.arch_vmx.vmcs);
+    u64 vmcs_phys_ptr = (u64) virt_to_phys(ed->arch.arch_vmx.vmcs);
 
-    load_vmcs(&ed->thread.arch_vmx, vmcs_phys_ptr);
+    load_vmcs(&ed->arch.arch_vmx, vmcs_phys_ptr);
     vmx_do_resume(ed);
     reset_stack_and_jump(vmx_asm_do_resume);
 }
 
 void arch_vmx_do_launch(struct exec_domain *ed)
 {
-    u64 vmcs_phys_ptr = (u64) virt_to_phys(ed->thread.arch_vmx.vmcs);
+    u64 vmcs_phys_ptr = (u64) virt_to_phys(ed->arch.arch_vmx.vmcs);
 
-    load_vmcs(&ed->thread.arch_vmx, vmcs_phys_ptr);
+    load_vmcs(&ed->arch.arch_vmx, vmcs_phys_ptr);
     vmx_do_launch(ed);
     reset_stack_and_jump(vmx_asm_do_launch);
 }
@@ -304,7 +306,6 @@ static void monitor_mk_pagetable(struct exec_domain *ed)
     unsigned long mpfn;
     l2_pgentry_t *mpl2e;
     struct pfn_info *mpfn_info;
-    struct mm_struct *m = &ed->mm;
     struct domain *d = ed->domain;
 
     mpfn_info = alloc_domheap_page(NULL);
@@ -318,11 +319,11 @@ static void monitor_mk_pagetable(struct exec_domain *ed)
            &idle_pg_table[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
            HYPERVISOR_ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));
 
-    m->monitor_table = mk_pagetable(mpfn << L1_PAGETABLE_SHIFT);
-    m->shadow_mode = SHM_full_32;
+    ed->arch.monitor_table = mk_pagetable(mpfn << L1_PAGETABLE_SHIFT);
+    d->arch.shadow_mode = SHM_full_32;
 
     mpl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
-        mk_l2_pgentry((__pa(d->mm_perdomain_pt) & PAGE_MASK)
+        mk_l2_pgentry((__pa(d->arch.mm_perdomain_pt) & PAGE_MASK)
                       | __PAGE_HYPERVISOR);
 
     unmap_domain_mem(mpl2e);
@@ -333,13 +334,12 @@ static void monitor_mk_pagetable(struct exec_domain *ed)
  */
 static void monitor_rm_pagetable(struct exec_domain *ed)
 {
-    struct mm_struct *m = &ed->mm;
     l2_pgentry_t *mpl2e;
     unsigned long mpfn;
 
-    ASSERT( pagetable_val(m->monitor_table) );
+    ASSERT( pagetable_val(ed->arch.monitor_table) );
 
-    mpl2e = (l2_pgentry_t *) map_domain_mem(pagetable_val(m->monitor_table));
+    mpl2e = (l2_pgentry_t *) map_domain_mem(pagetable_val(ed->arch.monitor_table));
 
     /*
      * First get the pfn for guest_pl2e_cache by looking at monitor_table
     */
@@ -352,10 +352,10 @@ static void monitor_rm_pagetable(struct exec_domain *ed)
 
     /*
     * Then free monitor_table.
     */
-    mpfn = (pagetable_val(m->monitor_table)) >> PAGE_SHIFT;
+    mpfn = (pagetable_val(ed->arch.monitor_table)) >> PAGE_SHIFT;
     free_domheap_page(&frame_table[mpfn]);
 
-    m->monitor_table = mk_pagetable(0);
+    ed->arch.monitor_table = mk_pagetable(0);
 }
 
 static int vmx_final_setup_guestos(struct exec_domain *ed,
@@ -375,21 +375,21 @@ static int vmx_final_setup_guestos(struct exec_domain *ed,
         return -ENOMEM;
     }
 
-    memset(&ed->thread.arch_vmx, 0, sizeof (struct arch_vmx_struct));
+    memset(&ed->arch.arch_vmx, 0, sizeof (struct arch_vmx_struct));
 
-    ed->thread.arch_vmx.vmcs = vmcs;
-    error = construct_vmcs(&ed->thread.arch_vmx, context, full_context, VMCS_USE_HOST_ENV);
+    ed->arch.arch_vmx.vmcs = vmcs;
+    error = construct_vmcs(&ed->arch.arch_vmx, context, full_context, VMCS_USE_HOST_ENV);
     if (error < 0) {
         printk("Failed to construct a new VMCS\n");
         goto out;
     }
 
     monitor_mk_pagetable(ed);
-    ed->thread.schedule_tail = arch_vmx_do_launch;
-    clear_bit(VMX_CPU_STATE_PG_ENABLED, &ed->thread.arch_vmx.cpu_state);
+    ed->arch.schedule_tail = arch_vmx_do_launch;
+    clear_bit(VMX_CPU_STATE_PG_ENABLED, &ed->arch.arch_vmx.cpu_state);
 
 #if defined (__i386)
-    ed->thread.arch_vmx.vmx_platform.real_mode_data =
+    ed->arch.arch_vmx.vmx_platform.real_mode_data =
         (unsigned long *) context->esi;
 #endif
 
@@ -406,12 +406,13 @@ static int vmx_final_setup_guestos(struct exec_domain *ed,
 
 out:
     free_vmcs(vmcs);
-    ed->thread.arch_vmx.vmcs = 0;
+    ed->arch.arch_vmx.vmcs = 0;
     return error;
 }
 #endif
 
-int arch_final_setup_guestos(struct exec_domain *d, full_execution_context_t *c)
+int arch_final_setup_guestos(
+    struct exec_domain *d, full_execution_context_t *c)
 {
     unsigned long phys_basetab;
     int i, rc;
@@ -420,13 +421,13 @@ int arch_final_setup_guestos(struct exec_domain *d, full_execution_context_t *c)
     if ( c->flags & ECF_I387_VALID )
         set_bit(EDF_DONEFPUINIT, &d->ed_flags);
 
-    memcpy(&d->thread.user_ctxt,
+    memcpy(&d->arch.user_ctxt,
            &c->cpu_ctxt,
-           sizeof(d->thread.user_ctxt));
+           sizeof(d->arch.user_ctxt));
 
     /* Clear IOPL for unprivileged domains. */
     if (!IS_PRIV(d->domain))
-        d->thread.user_ctxt.eflags &= 0xffffcfff;
+        d->arch.user_ctxt.eflags &= 0xffffcfff;
 
     /*
     * This is sufficient! If the descriptor DPL differs from CS RPL then we'll
@@ -434,37 +435,37 @@ int arch_final_setup_guestos(struct exec_domain *d, full_execution_context_t *c)
     * If SS RPL or DPL differs from CS RPL then we'll #GP.
     */
     if (!(c->flags & ECF_VMX_GUEST)) 
-        if ( ((d->thread.user_ctxt.cs & 3) == 0) ||
-             ((d->thread.user_ctxt.ss & 3) == 0) )
+        if ( ((d->arch.user_ctxt.cs & 3) == 0) ||
+             ((d->arch.user_ctxt.ss & 3) == 0) )
             return -EINVAL;
 
-    memcpy(&d->thread.i387,
+    memcpy(&d->arch.i387,
            &c->fpu_ctxt,
-           sizeof(d->thread.i387));
+           sizeof(d->arch.i387));
 
-    memcpy(d->thread.traps,
+    memcpy(d->arch.traps,
            &c->trap_ctxt,
-           sizeof(d->thread.traps));
+           sizeof(d->arch.traps));
 
     if ( (rc = (int)set_fast_trap(d, c->fast_trap_idx)) != 0 )
         return rc;
 
-    d->mm.ldt_base = c->ldt_base;
-    d->mm.ldt_ents = c->ldt_ents;
+    d->arch.ldt_base = c->ldt_base;
+    d->arch.ldt_ents = c->ldt_ents;
 
-    d->thread.guestos_ss = c->guestos_ss;
-    d->thread.guestos_sp = c->guestos_esp;
+    d->arch.guestos_ss = c->guestos_ss;
+    d->arch.guestos_sp = c->guestos_esp;
 
     for ( i = 0; i < 8; i++ )
         (void)set_debugreg(d, i, c->debugreg[i]);
 
-    d->thread.event_selector    = c->event_callback_cs;
-    d->thread.event_address     = c->event_callback_eip;
-    d->thread.failsafe_selector = c->failsafe_callback_cs;
-    d->thread.failsafe_address  = c->failsafe_callback_eip;
+    d->arch.event_selector    = c->event_callback_cs;
+    d->arch.event_address     = c->event_callback_eip;
+    d->arch.failsafe_selector = c->failsafe_callback_cs;
+    d->arch.failsafe_address  = c->failsafe_callback_eip;
 
     phys_basetab = c->pt_base;
-    d->mm.pagetable = mk_pagetable(phys_basetab);
+    d->arch.pagetable = mk_pagetable(phys_basetab);
 
     if ( !get_page_and_type(&frame_table[phys_basetab>>PAGE_SHIFT], d->domain,
                             PGT_base_page_table) )
         return -EINVAL;
@@ -494,7 +495,7 @@ void new_thread(struct exec_domain *d,
                 unsigned long start_stack,
                 unsigned long start_info)
 {
-    execution_context_t *ec = &d->thread.user_ctxt;
+    execution_context_t *ec = &d->arch.user_ctxt;
 
     /*
      * Initial register values:
@@ -519,19 +520,18 @@ void new_thread(struct exec_domain *d,
 /*
  * This special macro can be used to load a debugging register
  */
-#define loaddebug(thread,register) \
-        __asm__("mov %0,%%db" #register \
+#define loaddebug(_ed,_reg) \
+        __asm__("mov %0,%%db" #_reg \
                 : /* no output */ \
-                :"r" (thread->debugreg[register]))
+                :"r" ((_ed)->debugreg[_reg]))
 
 void switch_to(struct exec_domain *prev_p, struct exec_domain *next_p)
 {
-    struct thread_struct *next = &next_p->thread;
     struct tss_struct *tss = init_tss + smp_processor_id();
     execution_context_t *stack_ec = get_execution_context();
     int i;
 #ifdef CONFIG_VMX
-    unsigned long vmx_domain = next_p->thread.arch_vmx.flags;
+    unsigned long vmx_domain = next_p->arch.arch_vmx.flags;
 #endif
 
     __cli();
@@ -539,73 +539,73 @@ void switch_to(struct exec_domain *prev_p, struct exec_domain *next_p)
     /* Switch guest general-register state. */
     if ( !is_idle_task(prev_p->domain) )
     {
-        memcpy(&prev_p->thread.user_ctxt,
+        memcpy(&prev_p->arch.user_ctxt,
                stack_ec,
                sizeof(*stack_ec));
         unlazy_fpu(prev_p);
-        CLEAR_FAST_TRAP(&prev_p->thread);
+        CLEAR_FAST_TRAP(&prev_p->arch);
     }
 
     if ( !is_idle_task(next_p->domain) )
     {
         memcpy(stack_ec,
-               &next_p->thread.user_ctxt,
+               &next_p->arch.user_ctxt,
                sizeof(*stack_ec));
 
         /* Maybe switch the debug registers.
        */
-        if ( unlikely(next->debugreg[7]) )
+        if ( unlikely(next_p->arch.debugreg[7]) )
        {
-            loaddebug(next, 0);
-            loaddebug(next, 1);
-            loaddebug(next, 2);
-            loaddebug(next, 3);
+            loaddebug(&next_p->arch, 0);
+            loaddebug(&next_p->arch, 1);
+            loaddebug(&next_p->arch, 2);
+            loaddebug(&next_p->arch, 3);
             /* no 4 and 5 */
-            loaddebug(next, 6);
-            loaddebug(next, 7);
+            loaddebug(&next_p->arch, 6);
+            loaddebug(&next_p->arch, 7);
        }
 
 #ifdef CONFIG_VMX
        if ( vmx_domain )
        {
            /* Switch page tables. */
-            write_ptbase(&next_p->mm);
+            write_ptbase(next_p);
 
            set_current(next_p);
            /* Switch GDT and LDT. */
-            __asm__ __volatile__ ("lgdt %0" : "=m" (*next_p->mm.gdt));
+            __asm__ __volatile__ ("lgdt %0" : "=m" (*next_p->arch.gdt));
 
            __sti();
            return;
        }
 #endif
 
-        SET_FAST_TRAP(&next_p->thread);
+        SET_FAST_TRAP(&next_p->arch);
 
 #ifdef __i386__
        /* Switch the guest OS ring-1 stack. */
-        tss->esp1 = next->guestos_sp;
-        tss->ss1  = next->guestos_ss;
+        tss->esp1 = next_p->arch.guestos_sp;
+        tss->ss1  = next_p->arch.guestos_ss;
 #endif
 
        /* Switch page tables. */
-        write_ptbase(&next_p->mm);
+        write_ptbase(next_p);
    }
 
-    if ( unlikely(prev_p->thread.io_bitmap != NULL) )
+    if ( unlikely(prev_p->arch.io_bitmap != NULL) )
    {
-        for ( i = 0; i < sizeof(prev_p->thread.io_bitmap_sel) * 8; i++ )
-            if ( !test_bit(i, &prev_p->thread.io_bitmap_sel) )
+        for ( i = 0; i < sizeof(prev_p->arch.io_bitmap_sel) * 8; i++ )
+            if ( !test_bit(i, &prev_p->arch.io_bitmap_sel) )
                memset(&tss->io_bitmap[i * IOBMP_BYTES_PER_SELBIT],
                       ~0U, IOBMP_BYTES_PER_SELBIT);
        tss->bitmap = IOBMP_INVALID_OFFSET;
    }
 
-    if ( unlikely(next_p->thread.io_bitmap != NULL) )
+    if ( unlikely(next_p->arch.io_bitmap != NULL) )
    {
-        for ( i = 0; i < sizeof(next_p->thread.io_bitmap_sel) * 8; i++ )
-            if ( !test_bit(i, &next_p->thread.io_bitmap_sel) )
+        for ( i = 0; i < sizeof(next_p->arch.io_bitmap_sel) * 8; i++ )
+            if ( !test_bit(i, &next_p->arch.io_bitmap_sel) )
                memcpy(&tss->io_bitmap[i * IOBMP_BYTES_PER_SELBIT],
-                       &next_p->thread.io_bitmap[i * IOBMP_BYTES_PER_SELBIT],
+                       &next_p->arch.io_bitmap[i * IOBMP_BYTES_PER_SELBIT],
                       IOBMP_BYTES_PER_SELBIT);
        tss->bitmap = IOBMP_OFFSET;
    }
@@ -613,7 +613,7 @@ void switch_to(struct exec_domain *prev_p, struct exec_domain *next_p)
    set_current(next_p);
 
    /* Switch GDT and LDT. */
-    __asm__ __volatile__ ("lgdt %0" : "=m" (*next_p->mm.gdt));
+    __asm__ __volatile__ ("lgdt %0" : "=m" (*next_p->arch.gdt));
    load_LDT(next_p);
 
    __sti();
@@ -731,9 +731,9 @@ static void vmx_domain_relinquish_memory(struct exec_domain *ed)
    /*
     * Free VMCS
    */
-    ASSERT(ed->thread.arch_vmx.vmcs);
-    free_vmcs(ed->thread.arch_vmx.vmcs);
-    ed->thread.arch_vmx.vmcs = 0;
+    ASSERT(ed->arch.arch_vmx.vmcs);
+    free_vmcs(ed->arch.arch_vmx.vmcs);
+    ed->arch.arch_vmx.vmcs = 0;
 
    monitor_rm_pagetable(ed);
 
@@ -744,7 +744,7 @@ static void vmx_domain_relinquish_memory(struct exec_domain *ed)
    for (i = 0; i < ENTRIES_PER_L1_PAGETABLE; i++) {
        unsigned long l1e;
 
-        l1e = l1_pgentry_val(d->mm_perdomain_pt[i]);
+        l1e = l1_pgentry_val(d->arch.mm_perdomain_pt[i]);
        if (l1e & _PAGE_PRESENT) {
            pfn = l1e >> PAGE_SHIFT;
            free_domheap_page(&frame_table[pfn]);
@@ -768,8 +768,8 @@ void domain_relinquish_memory(struct domain *d)
    /* Drop the in-use reference to the page-table base.
    */
    for_each_exec_domain ( d, ed )
    {
-        if ( pagetable_val(ed->mm.pagetable) != 0 )
-            put_page_and_type(&frame_table[pagetable_val(ed->mm.pagetable) >>
+        if ( pagetable_val(ed->arch.pagetable) != 0 )
+            put_page_and_type(&frame_table[pagetable_val(ed->arch.pagetable) >>
                                            PAGE_SHIFT]);
    }
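The rewritten loaddebug() macro works because #_reg stringizes the literal register index, so one macro body covers db0 through db7. For instance, the call in switch_to() above expands roughly to:

    /* loaddebug(&next_p->arch, 7) after preprocessing (approximate): */
    __asm__("mov %0,%%db7" : /* no output */ : "r" ((&next_p->arch)->debugreg[7]));

DR4 and DR5 are skipped because they have no architectural register behind them, and DR7 is loaded last so the breakpoint addresses are in place before the control bits arm them.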
diff --git a/xen/arch/x86/i387.c b/xen/arch/x86/i387.c
index fe7bae3d99..902da0b57f 100644
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -1,3 +1,4 @@
+/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
 /*
  *  linux/arch/i386/kernel/i387.c
  *
@@ -24,10 +25,10 @@ static inline void __save_init_fpu( struct exec_domain *tsk )
 {
     if ( cpu_has_fxsr ) {
         asm volatile( "fxsave %0 ; fnclex"
-                      : "=m" (tsk->thread.i387) );
+                      : "=m" (tsk->arch.i387) );
     } else {
         asm volatile( "fnsave %0 ; fwait"
-                      : "=m" (tsk->thread.i387) );
+                      : "=m" (tsk->arch.i387) );
     }
     clear_bit(EDF_USEDFPU, &tsk->ed_flags);
 }
@@ -48,9 +49,9 @@ void restore_fpu( struct exec_domain *tsk )
 {
     if ( cpu_has_fxsr ) {
         asm volatile( "fxrstor %0"
-                      : : "m" (tsk->thread.i387) );
+                      : : "m" (tsk->arch.i387) );
     } else {
         asm volatile( "frstor %0"
-                      : : "m" (tsk->thread.i387) );
+                      : : "m" (tsk->arch.i387) );
     }
 }
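The i387 hunks only retarget the lazy-FPU save area to tsk->arch.i387; the logic is unchanged: fxsave (which leaves FPU state intact, hence the trailing fnclex to clear pending exceptions) on fxsr-capable CPUs, and the legacy fnsave/fwait pair (fnsave reinitializes the FPU as a side effect) otherwise. Condensed, with the same cpu_has_fxsr predicate:

    if ( cpu_has_fxsr )
        asm volatile ( "fxsave %0 ; fnclex" : "=m" (tsk->arch.i387) );
    else
        asm volatile ( "fnsave %0 ; fwait"  : "=m" (tsk->arch.i387) );
    clear_bit(EDF_USEDFPU, &tsk->ed_flags);  /* live state now resides in memory */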
diff --git a/xen/arch/x86/idle0_task.c b/xen/arch/x86/idle0_task.c
index cc31d7df1b..b3cb95b81b 100644
--- a/xen/arch/x86/idle0_task.c
+++ b/xen/arch/x86/idle0_task.c
@@ -1,24 +1,19 @@
+/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
+
 #include <xen/config.h>
 #include <xen/sched.h>
 #include <asm/desc.h>
 
-#define IDLE0_EXEC_DOMAIN(_ed,_d)    \
-{                                    \
-    processor:   0,                  \
-    mm:          IDLE0_MM,           \
-    thread:      INIT_THREAD,        \
-    domain:      (_d)                \
-}
-
-#define IDLE0_DOMAIN(_t)             \
-{                                    \
-    id:          IDLE_DOMAIN_ID,     \
-    d_flags:     1<<DF_IDLETASK,     \
-    refcnt:      ATOMIC_INIT(1)      \
-}
+struct domain idle0_domain = {
+    id:          IDLE_DOMAIN_ID,
+    d_flags:     1<<DF_IDLETASK,
+    refcnt:      ATOMIC_INIT(1)
+};
 
-struct domain idle0_domain = IDLE0_DOMAIN(idle0_domain);
-struct exec_domain idle0_exec_domain = IDLE0_EXEC_DOMAIN(idle0_exec_domain,
-                                                         &idle0_domain);
+struct exec_domain idle0_exec_domain = {
+    processor:   0,
+    domain:      &idle0_domain,
+    arch:        IDLE0_ARCH_EXEC_DOMAIN
+};
 
 struct tss_struct init_tss[NR_CPUS];

diff --git a/xen/arch/x86/memory.c b/xen/arch/x86/memory.c
index 44e1275e54..c532700e3c 100644
--- a/xen/arch/x86/memory.c
+++ b/xen/arch/x86/memory.c
@@ -1,3 +1,4 @@
+/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
 /******************************************************************************
  * arch/x86/memory.c
  *
@@ -193,19 +194,41 @@ void arch_init_memory(void)
     subarch_init_memory(dom_xen);
 }
 
+void write_ptbase(struct exec_domain *ed)
+{
+    struct domain *d = ed->domain;
+    unsigned long pa;
+
+#ifdef CONFIG_VMX
+    if ( unlikely(d->arch.shadow_mode) )
+        pa = ((d->arch.shadow_mode == SHM_full_32) ?
+              pagetable_val(ed->arch.monitor_table) :
+              pagetable_val(ed->arch.shadow_table));
+    else
+        pa = pagetable_val(ed->arch.pagetable);
+#else
+    if ( unlikely(d->arch.shadow_mode) )
+        pa = pagetable_val(ed->arch.shadow_table);
+    else
+        pa = pagetable_val(ed->arch.pagetable);
+#endif
+
+    write_cr3(pa);
+}
+
 static void __invalidate_shadow_ldt(struct exec_domain *d)
 {
     int i;
     unsigned long pfn;
     struct pfn_info *page;
 
-    d->mm.shadow_ldt_mapcnt = 0;
+    d->arch.shadow_ldt_mapcnt = 0;
 
     for ( i = 16; i < 32; i++ )
     {
-        pfn = l1_pgentry_to_pagenr(d->mm.perdomain_ptes[i]);
+        pfn = l1_pgentry_to_pagenr(d->arch.perdomain_ptes[i]);
         if ( pfn == 0 ) continue;
-        d->mm.perdomain_ptes[i] = mk_l1_pgentry(0);
+        d->arch.perdomain_ptes[i] = mk_l1_pgentry(0);
         page = &frame_table[pfn];
         ASSERT_PAGE_IS_TYPE(page, PGT_ldt_page);
         ASSERT_PAGE_IS_DOMAIN(page, d->domain);
@@ -219,7 +242,7 @@ static void __invalidate_shadow_ldt(struct exec_domain *d)
 
 static inline void invalidate_shadow_ldt(struct exec_domain *d)
 {
-    if ( d->mm.shadow_ldt_mapcnt != 0 )
+    if ( d->arch.shadow_ldt_mapcnt != 0 )
         __invalidate_shadow_ldt(d);
 }
 
@@ -252,7 +275,7 @@ int map_ldt_shadow_page(unsigned int off)
     if ( unlikely(in_irq()) )
         BUG();
 
-    __get_user(l1e, (unsigned long *)&linear_pg_table[(ed->mm.ldt_base >>
+    __get_user(l1e, (unsigned long *)&linear_pg_table[(ed->arch.ldt_base >>
                                                        PAGE_SHIFT) + off]);
 
     if ( unlikely(!(l1e & _PAGE_PRESENT)) ||
@@ -260,8 +283,8 @@ int map_ldt_shadow_page(unsigned int off)
                                                      d, PGT_ldt_page)) )
         return 0;
 
-    ed->mm.perdomain_ptes[off + 16] = mk_l1_pgentry(l1e | _PAGE_RW);
-    ed->mm.shadow_ldt_mapcnt++;
+    ed->arch.perdomain_ptes[off + 16] = mk_l1_pgentry(l1e | _PAGE_RW);
+    ed->arch.shadow_ldt_mapcnt++;
 
     return 1;
 }
@@ -512,7 +535,7 @@ static int alloc_l2_table(struct pfn_info *page)
     pl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
         mk_l2_pgentry((page_nr << PAGE_SHIFT) | __PAGE_HYPERVISOR);
     pl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
-        mk_l2_pgentry(__pa(page_get_owner(page)->mm_perdomain_pt) |
+        mk_l2_pgentry(__pa(page_get_owner(page)->arch.mm_perdomain_pt) |
                       __PAGE_HYPERVISOR);
 #endif
 
@@ -747,11 +770,11 @@ void free_page_type(struct pfn_info *page, unsigned int type)
         BUG();
     }
 
-    if ( unlikely(d->exec_domain[0]->mm.shadow_mode) &&
-         (get_shadow_status(&d->exec_domain[0]->mm, page_to_pfn(page)) & PSH_shadowed) )
+    if ( unlikely(d->arch.shadow_mode) &&
+         (get_shadow_status(d, page_to_pfn(page)) & PSH_shadowed) )
     {
         unshadow_table(page_to_pfn(page), type);
-        put_shadow_status(&d->exec_domain[0]->mm);
+        put_shadow_status(d);
     }
 }
 
@@ -922,12 +945,12 @@ int new_guest_cr3(unsigned long pfn)
         invalidate_shadow_ldt(ed);
 
         percpu_info[cpu].deferred_ops &= ~DOP_FLUSH_TLB;
-        old_base_pfn = pagetable_val(ed->mm.pagetable) >> PAGE_SHIFT;
-        ed->mm.pagetable = mk_pagetable(pfn << PAGE_SHIFT);
+        old_base_pfn = pagetable_val(ed->arch.pagetable) >> PAGE_SHIFT;
+        ed->arch.pagetable = mk_pagetable(pfn << PAGE_SHIFT);
 
-        shadow_mk_pagetable(&ed->mm);
+        shadow_mk_pagetable(ed);
 
-        write_ptbase(&ed->mm);
+        write_ptbase(ed);
 
         put_page_and_type(&frame_table[old_base_pfn]);
     }
@@ -1038,12 +1061,12 @@ static int do_extended_command(unsigned long ptr, unsigned long val)
             okay = 0;
             MEM_LOG("Bad args to SET_LDT: ptr=%08lx, ents=%08lx", ptr, ents);
         }
-        else if ( (ed->mm.ldt_ents != ents) ||
-                  (ed->mm.ldt_base != ptr) )
+        else if ( (ed->arch.ldt_ents != ents) ||
+                  (ed->arch.ldt_base != ptr) )
         {
             invalidate_shadow_ldt(ed);
-            ed->mm.ldt_base = ptr;
-            ed->mm.ldt_ents = ents;
+            ed->arch.ldt_base = ptr;
+            ed->arch.ldt_ents = ents;
             load_LDT(ed);
             percpu_info[cpu].deferred_ops &= ~DOP_RELOAD_LDT;
             if ( ents != 0 )
@@ -1409,13 +1432,13 @@ int do_mmu_update(
                     okay = mod_l1_entry((l1_pgentry_t *)va,
                                         mk_l1_pgentry(req.val));
 
-                    if ( unlikely(ed->mm.shadow_mode) && okay &&
-                         (get_shadow_status(&ed->mm, page-frame_table) &
+                    if ( unlikely(d->arch.shadow_mode) && okay &&
+                         (get_shadow_status(d, page-frame_table) &
                           PSH_shadowed) )
                     {
                         shadow_l1_normal_pt_update(
                             req.ptr, req.val, &prev_spfn, &prev_spl1e);
-                        put_shadow_status(&ed->mm);
+                        put_shadow_status(d);
                     }
 
                     put_page_type(page);
@@ -1428,12 +1451,12 @@ int do_mmu_update(
                                         mk_l2_pgentry(req.val),
                                         pfn);
 
-                    if ( unlikely(ed->mm.shadow_mode) && okay &&
-                         (get_shadow_status(&ed->mm, page-frame_table) &
+                    if ( unlikely(d->arch.shadow_mode) && okay &&
+                         (get_shadow_status(d, page-frame_table) &
                           PSH_shadowed) )
                     {
                         shadow_l2_normal_pt_update(req.ptr, req.val);
-                        put_shadow_status(&ed->mm);
+                        put_shadow_status(d);
                     }
 
                     put_page_type(page);
@@ -1466,9 +1489,9 @@ int do_mmu_update(
              * If in log-dirty mode, mark the corresponding pseudo-physical
              * page as dirty.
             */
-            if ( unlikely(ed->mm.shadow_mode == SHM_logdirty) &&
-                 mark_dirty(&ed->mm, pfn) )
-                ed->mm.shadow_dirty_block_count++;
+            if ( unlikely(d->arch.shadow_mode == SHM_logdirty) &&
+                 mark_dirty(d, pfn) )
+                d->arch.shadow_dirty_block_count++;
 
             put_page(&frame_table[pfn]);
             break;
@@ -1555,11 +1578,11 @@ int do_update_va_mapping(unsigned long page_nr,
                                 mk_l1_pgentry(val))) )
         err = -EINVAL;
 
-    if ( unlikely(ed->mm.shadow_mode) )
+    if ( unlikely(d->arch.shadow_mode) )
     {
         unsigned long sval;
 
-        l1pte_propagate_from_guest(&ed->mm, &val, &sval);
+        l1pte_propagate_from_guest(d, &val, &sval);
 
         if ( unlikely(__put_user(sval, ((unsigned long *)(
             &shadow_linear_pg_table[page_nr])))) )
@@ -1576,10 +1599,10 @@ int do_update_va_mapping(unsigned long page_nr,
          * the PTE in the PT-holding page. We need the machine frame number
         * for this.
         */
-        if ( ed->mm.shadow_mode == SHM_logdirty )
-            mark_dirty(&current->mm, va_to_l1mfn(page_nr << PAGE_SHIFT));
+        if ( d->arch.shadow_mode == SHM_logdirty )
+            mark_dirty(d, va_to_l1mfn(page_nr << PAGE_SHIFT));
 
-        check_pagetable(&ed->mm, ed->mm.pagetable, "va"); /* debug */
+        check_pagetable(d, ed->arch.pagetable, "va"); /* debug */
     }
 
     deferred_ops = percpu_info[cpu].deferred_ops;
@@ -1673,15 +1696,15 @@ void ptwr_flush(const int which)
                 PTWR_PRINT_WHICH, ptep, pte);
     pte &= ~_PAGE_RW;
 
-    if ( unlikely(ed->mm.shadow_mode) )
+    if ( unlikely(d->arch.shadow_mode) )
     {
         /* Write-protect the p.t. page in the shadow page table. */
-        l1pte_propagate_from_guest(&ed->mm, &pte, &spte);
+        l1pte_propagate_from_guest(d, &pte, &spte);
         __put_user(
             spte, (unsigned long *)&shadow_linear_pg_table[l1va>>PAGE_SHIFT]);
 
         /* Is the p.t. page itself shadowed? Map it into Xen space if so.
        */
-        sstat = get_shadow_status(&ed->mm, pte >> PAGE_SHIFT);
+        sstat = get_shadow_status(d, pte >> PAGE_SHIFT);
         if ( sstat & PSH_shadowed )
             sl1e = map_domain_mem((sstat & PSH_pfn_mask) << PAGE_SHIFT);
     }
@@ -1730,7 +1753,7 @@ void ptwr_flush(const int which)
         {
             if ( unlikely(sl1e != NULL) )
                 l1pte_propagate_from_guest(
-                    &ed->mm, &l1_pgentry_val(nl1e),
+                    d, &l1_pgentry_val(nl1e),
                     &l1_pgentry_val(sl1e[i]));
             put_page_type(&frame_table[l1_pgentry_to_pagenr(nl1e)]);
         }
@@ -1754,7 +1777,7 @@ void ptwr_flush(const int which)
 
         if ( unlikely(sl1e != NULL) )
             l1pte_propagate_from_guest(
-                &ed->mm, &l1_pgentry_val(nl1e), &l1_pgentry_val(sl1e[i]));
+                d, &l1_pgentry_val(nl1e), &l1_pgentry_val(sl1e[i]));
 
         if ( unlikely(l1_pgentry_val(ol1e) & _PAGE_PRESENT) )
             put_page_from_l1e(ol1e, d);
@@ -1765,7 +1788,7 @@ void ptwr_flush(const int which)
      * STEP 3. Reattach the L1 p.t. page into the current address space.
     */
 
-    if ( (which == PTWR_PT_ACTIVE) && likely(!ed->mm.shadow_mode) )
+    if ( (which == PTWR_PT_ACTIVE) && likely(!d->arch.shadow_mode) )
     {
         pl2e = &linear_l2_table[ptwr_info[cpu].ptinfo[which].l2_idx];
         *pl2e = mk_l2_pgentry(l2_pgentry_val(*pl2e) | _PAGE_PRESENT);
@@ -1780,7 +1803,7 @@ void ptwr_flush(const int which)
     if ( unlikely(sl1e != NULL) )
     {
         unmap_domain_mem(sl1e);
-        put_shadow_status(&ed->mm);
+        put_shadow_status(d);
     }
 }
 
@@ -1868,7 +1891,8 @@ int ptwr_do_page_fault(unsigned long addr)
     ptwr_info[cpu].ptinfo[which].l2_idx = l2_idx;
 
     /* For safety, disconnect the L1 p.t. page from current space. */
-    if ( (which == PTWR_PT_ACTIVE) && likely(!current->mm.shadow_mode) )
+    if ( (which == PTWR_PT_ACTIVE) &&
+         likely(!current->domain->arch.shadow_mode) )
     {
         *pl2e = mk_l2_pgentry(l2e & ~_PAGE_PRESENT);
 #if 1
@@ -2059,7 +2083,7 @@ void audit_domain(struct domain *d)
     synchronise_pagetables(~0UL);
 
     printk("pt base=%lx sh_info=%x\n",
-           pagetable_val(d->exec_domain[0]->mm.pagetable)>>PAGE_SHIFT,
+           pagetable_val(d->exec_domain[0]->arch.pagetable)>>PAGE_SHIFT,
            virt_to_page(d->shared_info)-frame_table);
 
     spin_lock(&d->page_alloc_lock);
@@ -2109,7 +2133,7 @@ void audit_domain(struct domain *d)
 
     /* PHASE 1 */
 
-    adjust(&frame_table[pagetable_val(d->exec_domain[0]->mm.pagetable)>>PAGE_SHIFT], -1, 1);
+    adjust(&frame_table[pagetable_val(d->exec_domain[0]->arch.pagetable)>>PAGE_SHIFT], -1, 1);
 
     list_ent = d->page_list.next;
     for ( i = 0; (list_ent != &d->page_list); i++ )
@@ -2353,7 +2377,8 @@ void audit_domain(struct domain *d)
 
     spin_unlock(&d->page_alloc_lock);
 
-    adjust(&frame_table[pagetable_val(d->exec_domain[0]->mm.pagetable)>>PAGE_SHIFT], 1, 1);
+    adjust(&frame_table[pagetable_val(
+        d->exec_domain[0]->arch.pagetable)>>PAGE_SHIFT], 1, 1);
 
     printk("Audit %d: Done. ctot=%d ttot=%d\n", d->id, ctot, ttot );
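The new write_ptbase() above becomes the single chokepoint for loading CR3: every caller that used to pass &ed->mm now passes the exec_domain itself, and the function picks the right physical base for that vcpu. The decision it encodes, condensed (in non-CONFIG_VMX builds the monitor_table branch does not exist):

    if ( !d->arch.shadow_mode )
        pa = pagetable_val(ed->arch.pagetable);      /* bare guest page table */
    else if ( d->arch.shadow_mode == SHM_full_32 )
        pa = pagetable_val(ed->arch.monitor_table);  /* VMX full-shadow mode  */
    else
        pa = pagetable_val(ed->arch.shadow_table);   /* ordinary shadow mode  */
    write_cr3(pa);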
*/ - write_ptbase(¤t->mm); + write_ptbase(current); init_idle_task(); } diff --git a/xen/arch/x86/shadow.c b/xen/arch/x86/shadow.c index 2fc8bb4f55..1ac4109155 100644 --- a/xen/arch/x86/shadow.c +++ b/xen/arch/x86/shadow.c @@ -28,9 +28,9 @@ hypercall lock anyhow (at least initially). ********/ static inline void free_shadow_page( - struct mm_struct *m, struct pfn_info *page) + struct domain *d, struct pfn_info *page) { - m->shadow_page_count--; + d->arch.shadow_page_count--; switch ( page->u.inuse.type_info & PGT_type_mask ) { @@ -51,7 +51,7 @@ static inline void free_shadow_page( free_domheap_page(page); } -static void free_shadow_state(struct mm_struct *m) +static void free_shadow_state(struct domain *d) { int i, free = 0; struct shadow_status *x, *n; @@ -61,19 +61,19 @@ static void free_shadow_state(struct mm_struct *m) * e.g., You are expected to have paused the domain and synchronized CR3. */ - shadow_audit(m, 1); + shadow_audit(d, 1); /* Free each hash chain in turn. */ for ( i = 0; i < shadow_ht_buckets; i++ ) { /* Skip empty buckets. */ - x = &m->shadow_ht[i]; + x = &d->arch.shadow_ht[i]; if ( x->pfn == 0 ) continue; /* Free the head page. */ free_shadow_page( - m, &frame_table[x->spfn_and_flags & PSH_pfn_mask]); + d, &frame_table[x->spfn_and_flags & PSH_pfn_mask]); /* Reinitialise the head node. */ x->pfn = 0; @@ -88,7 +88,7 @@ static void free_shadow_state(struct mm_struct *m) { /* Free the shadow page. */ free_shadow_page( - m, &frame_table[x->spfn_and_flags & PSH_pfn_mask]); + d, &frame_table[x->spfn_and_flags & PSH_pfn_mask]); /* Re-initialise the chain node. */ x->pfn = 0; @@ -96,20 +96,20 @@ static void free_shadow_state(struct mm_struct *m) /* Add to the free list. */ n = x->next; - x->next = m->shadow_ht_free; - m->shadow_ht_free = x; + x->next = d->arch.shadow_ht_free; + d->arch.shadow_ht_free = x; free++; } - shadow_audit(m, 0); + shadow_audit(d, 0); } SH_LOG("Free shadow table. Freed=%d.", free); } static inline int clear_shadow_page( - struct mm_struct *m, struct shadow_status *x) + struct domain *d, struct shadow_status *x) { unsigned long *p; int restart = 0; @@ -120,7 +120,7 @@ static inline int clear_shadow_page( /* We clear L2 pages by zeroing the guest entries. */ case PGT_l2_page_table: p = map_domain_mem((spage - frame_table) << PAGE_SHIFT); - if (m->shadow_mode == SHM_full_32) + if (d->arch.shadow_mode == SHM_full_32) memset(p, 0, ENTRIES_PER_L2_PAGETABLE * sizeof(*p)); else memset(p, 0, DOMAIN_ENTRIES_PER_L2_PAGETABLE * sizeof(*p)); @@ -129,8 +129,8 @@ static inline int clear_shadow_page( /* We clear L1 pages by freeing them: no benefit from zeroing them. */ case PGT_l1_page_table: - delete_shadow_status(m, x->pfn); - free_shadow_page(m, spage); + delete_shadow_status(d, x->pfn); + free_shadow_page(d, spage); restart = 1; /* We need to go to start of list again. */ break; } @@ -138,29 +138,29 @@ static inline int clear_shadow_page( return restart; } -static void clear_shadow_state(struct mm_struct *m) +static void clear_shadow_state(struct domain *d) { int i; struct shadow_status *x; - shadow_audit(m, 1); + shadow_audit(d, 1); for ( i = 0; i < shadow_ht_buckets; i++ ) { retry: /* Skip empty buckets. */ - x = &m->shadow_ht[i]; + x = &d->arch.shadow_ht[i]; if ( x->pfn == 0 ) continue; - if ( clear_shadow_page(m, x) ) + if ( clear_shadow_page(d, x) ) goto retry; for ( x = x->next; x != NULL; x = x->next ) - if ( clear_shadow_page(m, x) ) + if ( clear_shadow_page(d, x) ) goto retry; - shadow_audit(m, 0); + shadow_audit(d, 0); } SH_VLOG("Scan shadow table. 
l1=%d l2=%d", @@ -172,119 +172,118 @@ void shadow_mode_init(void) { } -int shadow_mode_enable(struct domain *p, unsigned int mode) +int shadow_mode_enable(struct domain *d, unsigned int mode) { - struct mm_struct *m = &p->exec_domain[0]->mm; - - m->shadow_ht = xmalloc_array(struct shadow_status, shadow_ht_buckets); - if ( m->shadow_ht == NULL ) + d->arch.shadow_ht = xmalloc_array(struct shadow_status, shadow_ht_buckets); + if ( d->arch.shadow_ht == NULL ) goto nomem; - memset(m->shadow_ht, 0, shadow_ht_buckets * sizeof(struct shadow_status)); + memset(d->arch.shadow_ht, 0, + shadow_ht_buckets * sizeof(struct shadow_status)); if ( mode == SHM_logdirty ) { - m->shadow_dirty_bitmap_size = (p->max_pages + 63) & ~63; - m->shadow_dirty_bitmap = - xmalloc_array(unsigned long, m->shadow_dirty_bitmap_size / + d->arch.shadow_dirty_bitmap_size = (d->max_pages + 63) & ~63; + d->arch.shadow_dirty_bitmap = + xmalloc_array(unsigned long, d->arch.shadow_dirty_bitmap_size / (8 * sizeof(unsigned long))); - if ( m->shadow_dirty_bitmap == NULL ) + if ( d->arch.shadow_dirty_bitmap == NULL ) { - m->shadow_dirty_bitmap_size = 0; + d->arch.shadow_dirty_bitmap_size = 0; goto nomem; } - memset(m->shadow_dirty_bitmap, 0, m->shadow_dirty_bitmap_size/8); + memset(d->arch.shadow_dirty_bitmap, 0, + d->arch.shadow_dirty_bitmap_size/8); } - m->shadow_mode = mode; + d->arch.shadow_mode = mode; - __shadow_mk_pagetable(m); + __shadow_mk_pagetable(d->exec_domain[0]); /* XXX SMP */ return 0; nomem: - if ( m->shadow_ht != NULL ) - xfree( m->shadow_ht ); - m->shadow_ht = NULL; + if ( d->arch.shadow_ht != NULL ) + xfree(d->arch.shadow_ht); + d->arch.shadow_ht = NULL; return -ENOMEM; } void __shadow_mode_disable(struct domain *d) { - struct mm_struct *m = &d->exec_domain[0]->mm; struct shadow_status *x, *n; - free_shadow_state(m); - m->shadow_mode = 0; + free_shadow_state(d); + d->arch.shadow_mode = 0; SH_VLOG("freed tables count=%d l1=%d l2=%d", - m->shadow_page_count, perfc_value(shadow_l1_pages), + d->arch.shadow_page_count, perfc_value(shadow_l1_pages), perfc_value(shadow_l2_pages)); - n = m->shadow_ht_extras; + n = d->arch.shadow_ht_extras; while ( (x = n) != NULL ) { - m->shadow_extras_count--; + d->arch.shadow_extras_count--; n = *((struct shadow_status **)(&x[shadow_ht_extra_size])); xfree(x); } - m->shadow_ht_extras = NULL; - ASSERT(m->shadow_extras_count == 0); - SH_LOG("freed extras, now %d", m->shadow_extras_count); + d->arch.shadow_ht_extras = NULL; + ASSERT(d->arch.shadow_extras_count == 0); + SH_LOG("freed extras, now %d", d->arch.shadow_extras_count); - if ( m->shadow_dirty_bitmap != NULL ) + if ( d->arch.shadow_dirty_bitmap != NULL ) { - xfree(m->shadow_dirty_bitmap); - m->shadow_dirty_bitmap = 0; - m->shadow_dirty_bitmap_size = 0; + xfree(d->arch.shadow_dirty_bitmap); + d->arch.shadow_dirty_bitmap = 0; + d->arch.shadow_dirty_bitmap_size = 0; } - xfree(m->shadow_ht); - m->shadow_ht = NULL; + xfree(d->arch.shadow_ht); + d->arch.shadow_ht = NULL; } static int shadow_mode_table_op( struct domain *d, dom0_shadow_control_t *sc) { unsigned int op = sc->op; - struct mm_struct *m = &d->exec_domain[0]->mm; int i, rc = 0; - ASSERT(spin_is_locked(&m->shadow_lock)); + ASSERT(spin_is_locked(&d->arch.shadow_lock)); SH_VLOG("shadow mode table op %08lx %08lx count %d", - pagetable_val(m->pagetable), pagetable_val(m->shadow_table), - m->shadow_page_count); + pagetable_val(d->exec_domain[0]->arch.pagetable), /* XXX SMP */ + pagetable_val(d->exec_domain[0]->arch.shadow_table), /* XXX SMP */ + d->arch.shadow_page_count); - 
shadow_audit(m, 1); + shadow_audit(d, 1); switch ( op ) { case DOM0_SHADOW_CONTROL_OP_FLUSH: - free_shadow_state(m); + free_shadow_state(d); - m->shadow_fault_count = 0; - m->shadow_dirty_count = 0; - m->shadow_dirty_net_count = 0; - m->shadow_dirty_block_count = 0; + d->arch.shadow_fault_count = 0; + d->arch.shadow_dirty_count = 0; + d->arch.shadow_dirty_net_count = 0; + d->arch.shadow_dirty_block_count = 0; break; case DOM0_SHADOW_CONTROL_OP_CLEAN: - clear_shadow_state(m); + clear_shadow_state(d); - sc->stats.fault_count = m->shadow_fault_count; - sc->stats.dirty_count = m->shadow_dirty_count; - sc->stats.dirty_net_count = m->shadow_dirty_net_count; - sc->stats.dirty_block_count = m->shadow_dirty_block_count; + sc->stats.fault_count = d->arch.shadow_fault_count; + sc->stats.dirty_count = d->arch.shadow_dirty_count; + sc->stats.dirty_net_count = d->arch.shadow_dirty_net_count; + sc->stats.dirty_block_count = d->arch.shadow_dirty_block_count; - m->shadow_fault_count = 0; - m->shadow_dirty_count = 0; - m->shadow_dirty_net_count = 0; - m->shadow_dirty_block_count = 0; + d->arch.shadow_fault_count = 0; + d->arch.shadow_dirty_count = 0; + d->arch.shadow_dirty_net_count = 0; + d->arch.shadow_dirty_block_count = 0; if ( (d->max_pages > sc->pages) || (sc->dirty_bitmap == NULL) || - (m->shadow_dirty_bitmap == NULL) ) + (d->arch.shadow_dirty_bitmap == NULL) ) { rc = -EINVAL; break; @@ -300,34 +299,35 @@ static int shadow_mode_table_op( if (copy_to_user( sc->dirty_bitmap + (i/(8*sizeof(unsigned long))), - m->shadow_dirty_bitmap +(i/(8*sizeof(unsigned long))), + d->arch.shadow_dirty_bitmap +(i/(8*sizeof(unsigned long))), bytes)) { // copy_to_user can fail when copying to guest app memory. // app should zero buffer after mallocing, and pin it rc = -EINVAL; memset( - m->shadow_dirty_bitmap + (i/(8*sizeof(unsigned long))), + d->arch.shadow_dirty_bitmap + + (i/(8*sizeof(unsigned long))), 0, (d->max_pages/8) - (i/(8*sizeof(unsigned long)))); break; } memset( - m->shadow_dirty_bitmap + (i/(8*sizeof(unsigned long))), + d->arch.shadow_dirty_bitmap + (i/(8*sizeof(unsigned long))), 0, bytes); } break; case DOM0_SHADOW_CONTROL_OP_PEEK: - sc->stats.fault_count = m->shadow_fault_count; - sc->stats.dirty_count = m->shadow_dirty_count; - sc->stats.dirty_net_count = m->shadow_dirty_net_count; - sc->stats.dirty_block_count = m->shadow_dirty_block_count; + sc->stats.fault_count = d->arch.shadow_fault_count; + sc->stats.dirty_count = d->arch.shadow_dirty_count; + sc->stats.dirty_net_count = d->arch.shadow_dirty_net_count; + sc->stats.dirty_block_count = d->arch.shadow_dirty_block_count; if ( (d->max_pages > sc->pages) || (sc->dirty_bitmap == NULL) || - (m->shadow_dirty_bitmap == NULL) ) + (d->arch.shadow_dirty_bitmap == NULL) ) { rc = -EINVAL; break; @@ -335,7 +335,7 @@ static int shadow_mode_table_op( sc->pages = d->max_pages; if (copy_to_user( - sc->dirty_bitmap, m->shadow_dirty_bitmap, (d->max_pages+7)/8)) + sc->dirty_bitmap, d->arch.shadow_dirty_bitmap, (d->max_pages+7)/8)) { rc = -EINVAL; break; @@ -348,9 +348,9 @@ static int shadow_mode_table_op( break; } - SH_VLOG("shadow mode table op : page count %d", m->shadow_page_count); - shadow_audit(m, 1); - __shadow_mk_pagetable(m); + SH_VLOG("shadow mode table op : page count %d", d->arch.shadow_page_count); + shadow_audit(d, 1); + __shadow_mk_pagetable(d->exec_domain[0]); /* XXX SMP */ return rc; } @@ -368,7 +368,7 @@ int shadow_mode_control(struct domain *d, dom0_shadow_control_t *sc) domain_pause(d); synchronise_pagetables(~0UL); - 
shadow_lock(&d->exec_domain[0]->mm); + shadow_lock(d); switch ( op ) { @@ -387,27 +387,27 @@ int shadow_mode_control(struct domain *d, dom0_shadow_control_t *sc) break; default: - rc = shadow_mode(d->exec_domain[0]) ? shadow_mode_table_op(d, sc) : -EINVAL; + rc = shadow_mode(d) ? shadow_mode_table_op(d, sc) : -EINVAL; break; } - shadow_unlock(&d->exec_domain[0]->mm); + shadow_unlock(d); domain_unpause(d); return rc; } -static inline struct pfn_info *alloc_shadow_page(struct mm_struct *m) +static inline struct pfn_info *alloc_shadow_page(struct domain *d) { struct pfn_info *page = alloc_domheap_page(NULL); - m->shadow_page_count++; + d->arch.shadow_page_count++; if ( unlikely(page == NULL) ) { printk("Couldn't alloc shadow page! count=%d\n", - m->shadow_page_count); + d->arch.shadow_page_count); SH_VLOG("Shadow tables l1=%d l2=%d", perfc_value(shadow_l1_pages), perfc_value(shadow_l2_pages)); @@ -431,35 +431,35 @@ void unshadow_table(unsigned long gpfn, unsigned int type) * guests there won't be a race here as this CPU was the one that * cmpxchg'ed the page to invalid. */ - spfn = __shadow_status(&d->exec_domain[0]->mm, gpfn) & PSH_pfn_mask; - delete_shadow_status(&d->exec_domain[0]->mm, gpfn); - free_shadow_page(&d->exec_domain[0]->mm, &frame_table[spfn]); + spfn = __shadow_status(d, gpfn) & PSH_pfn_mask; + delete_shadow_status(d, gpfn); + free_shadow_page(d, &frame_table[spfn]); } #ifdef CONFIG_VMX -void vmx_shadow_clear_state(struct mm_struct *m) +void vmx_shadow_clear_state(struct domain *d) { SH_VVLOG("vmx_clear_shadow_state: \n"); - clear_shadow_state(m); + clear_shadow_state(d); } #endif unsigned long shadow_l2_table( - struct mm_struct *m, unsigned long gpfn) + struct domain *d, unsigned long gpfn) { struct pfn_info *spfn_info; unsigned long spfn; l2_pgentry_t *spl2e = 0; unsigned long guest_gpfn; - __get_machine_to_phys(m, guest_gpfn, gpfn); + __get_machine_to_phys(d, guest_gpfn, gpfn); SH_VVLOG("shadow_l2_table( %08lx )", gpfn); perfc_incrc(shadow_l2_table_count); - if ( (spfn_info = alloc_shadow_page(m)) == NULL ) + if ( (spfn_info = alloc_shadow_page(d)) == NULL ) BUG(); /* XXX Deal gracefully with failure. */ spfn_info->u.inuse.type_info = PGT_l2_page_table; @@ -467,13 +467,13 @@ unsigned long shadow_l2_table( spfn = spfn_info - frame_table; /* Mark pfn as being shadowed; update field to point at shadow. */ - set_shadow_status(m, guest_gpfn, spfn | PSH_shadowed); + set_shadow_status(d, guest_gpfn, spfn | PSH_shadowed); #ifdef __i386__ /* Install hypervisor and 2x linear p.t. mapings. 
*/ - if ( m->shadow_mode == SHM_full_32 ) + if ( d->arch.shadow_mode == SHM_full_32 ) { - vmx_update_shadow_state(m, gpfn, spfn); + vmx_update_shadow_state(d->exec_domain[0], gpfn, spfn); } else { @@ -494,12 +494,12 @@ unsigned long shadow_l2_table( spl2e[SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] = mk_l2_pgentry((spfn << PAGE_SHIFT) | __PAGE_HYPERVISOR); spl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] = - mk_l2_pgentry(__pa(page_get_owner(&frame_table[gpfn])->mm_perdomain_pt) | + mk_l2_pgentry(__pa(page_get_owner(&frame_table[gpfn])->arch.mm_perdomain_pt) | __PAGE_HYPERVISOR); } #endif - if ( m->shadow_mode != SHM_full_32 ) + if ( d->arch.shadow_mode != SHM_full_32 ) unmap_domain_mem(spl2e); SH_VLOG("shadow_l2_table( %08lx -> %08lx)", gpfn, spfn); @@ -508,22 +508,23 @@ unsigned long shadow_l2_table( static void shadow_map_l1_into_current_l2(unsigned long va) { - struct mm_struct *m = ¤t->mm; + struct exec_domain *ed = current; + struct domain *d = ed->domain; unsigned long *gpl1e, *spl1e, gpl2e, spl2e, gl1pfn, sl1pfn=0, sl1ss; struct pfn_info *sl1pfn_info; int i; - __guest_get_pl2e(m, va, &gpl2e); + __guest_get_pl2e(ed, va, &gpl2e); gl1pfn = gpl2e >> PAGE_SHIFT; - sl1ss = __shadow_status(m, gl1pfn); + sl1ss = __shadow_status(d, gl1pfn); if ( !(sl1ss & PSH_shadowed) ) { /* This L1 is NOT already shadowed so we need to shadow it. */ SH_VVLOG("4a: l1 not shadowed ( %08lx )", sl1pfn); - sl1pfn_info = alloc_shadow_page(m); + sl1pfn_info = alloc_shadow_page(d); sl1pfn_info->u.inuse.type_info = PGT_l1_page_table; sl1pfn = sl1pfn_info - frame_table; @@ -531,12 +532,12 @@ static void shadow_map_l1_into_current_l2(unsigned long va) perfc_incrc(shadow_l1_table_count); perfc_incr(shadow_l1_pages); - set_shadow_status(m, gl1pfn, PSH_shadowed | sl1pfn); + set_shadow_status(d, gl1pfn, PSH_shadowed | sl1pfn); - l2pde_general(m, &gpl2e, &spl2e, sl1pfn); + l2pde_general(d, &gpl2e, &spl2e, sl1pfn); - __guest_set_pl2e(m, va, gpl2e); - __shadow_set_pl2e(m, va, spl2e); + __guest_set_pl2e(ed, va, gpl2e); + __shadow_set_pl2e(ed, va, spl2e); gpl1e = (unsigned long *) &(linear_pg_table[ (va>>L1_PAGETABLE_SHIFT) & ~(ENTRIES_PER_L1_PAGETABLE-1)]); @@ -545,7 +546,7 @@ static void shadow_map_l1_into_current_l2(unsigned long va) (va>>L1_PAGETABLE_SHIFT) & ~(ENTRIES_PER_L1_PAGETABLE-1)]); for ( i = 0; i < ENTRIES_PER_L1_PAGETABLE; i++ ) - l1pte_propagate_from_guest(m, &gpl1e[i], &spl1e[i]); + l1pte_propagate_from_guest(d, &gpl1e[i], &spl1e[i]); } else { @@ -553,20 +554,20 @@ static void shadow_map_l1_into_current_l2(unsigned long va) SH_VVLOG("4b: was shadowed, l2 missing ( %08lx )", sl1pfn); sl1pfn = sl1ss & PSH_pfn_mask; - l2pde_general(m, &gpl2e, &spl2e, sl1pfn); - __guest_set_pl2e(m, va, gpl2e); - __shadow_set_pl2e(m, va, spl2e); + l2pde_general(d, &gpl2e, &spl2e, sl1pfn); + __guest_set_pl2e(ed, va, gpl2e); + __shadow_set_pl2e(ed, va, spl2e); } } #ifdef CONFIG_VMX -void vmx_shadow_invlpg(struct mm_struct *m, unsigned long va) +void vmx_shadow_invlpg(struct domain *d, unsigned long va) { unsigned long gpte, spte, host_pfn; if (__put_user(0L, (unsigned long *) &shadow_linear_pg_table[va >> PAGE_SHIFT])) { - vmx_shadow_clear_state(m); + vmx_shadow_clear_state(d); return; } @@ -588,11 +589,12 @@ void vmx_shadow_invlpg(struct mm_struct *m, unsigned long va) int shadow_fault(unsigned long va, long error_code) { unsigned long gpte, spte; - struct mm_struct *m = ¤t->mm; + struct exec_domain *ed = current; + struct domain *d = ed->domain; SH_VVLOG("shadow_fault( va=%08lx, code=%ld )", va, error_code ); - 
check_pagetable(m, current->mm.pagetable, "pre-sf"); + check_pagetable(d, ed->arch.pagetable, "pre-sf"); /* * STEP 1. A fast-reject set of checks with no locking. @@ -621,20 +623,20 @@ int shadow_fault(unsigned long va, long error_code) * STEP 2. Take the shadow lock and re-check the guest PTE. */ - shadow_lock(m); + shadow_lock(d); if ( unlikely(__get_user(gpte, (unsigned long *) &linear_pg_table[va >> PAGE_SHIFT])) ) { SH_VVLOG("shadow_fault - EXIT: read gpte faulted" ); - shadow_unlock(m); + shadow_unlock(d); return 0; } if ( unlikely(!(gpte & _PAGE_PRESENT)) ) { SH_VVLOG("shadow_fault - EXIT: gpte not present (%lx)",gpte ); - shadow_unlock(m); + shadow_unlock(d); return 0; } @@ -645,15 +647,15 @@ int shadow_fault(unsigned long va, long error_code) { /* Write fault on a read-only mapping. */ SH_VVLOG("shadow_fault - EXIT: wr fault on RO page (%lx)", gpte); - shadow_unlock(m); + shadow_unlock(d); return 0; } - l1pte_write_fault(m, &gpte, &spte); + l1pte_write_fault(d, &gpte, &spte); } else { - l1pte_read_fault(m, &gpte, &spte); + l1pte_read_fault(d, &gpte, &spte); } /* @@ -678,11 +680,11 @@ int shadow_fault(unsigned long va, long error_code) } perfc_incrc(shadow_fixup_count); - m->shadow_fault_count++; + d->arch.shadow_fault_count++; - shadow_unlock(m); + shadow_unlock(d); - check_pagetable(m, current->mm.pagetable, "post-sf"); + check_pagetable(d, ed->arch.pagetable, "post-sf"); return EXCRET_fault_fixed; } @@ -700,7 +702,7 @@ void shadow_l1_normal_pt_update( "prev_spfn=%08lx, prev_spl1e=%p\n", pa, gpte, prev_spfn, prev_spl1e); - spfn = __shadow_status(¤t->mm, pa >> PAGE_SHIFT) & PSH_pfn_mask; + spfn = __shadow_status(current->domain, pa >> PAGE_SHIFT) & PSH_pfn_mask; if ( spfn == prev_spfn ) { @@ -715,7 +717,7 @@ void shadow_l1_normal_pt_update( *prev_spl1e_ptr = spl1e; } - l1pte_propagate_from_guest(¤t->mm, &gpte, &spte); + l1pte_propagate_from_guest(current->domain, &gpte, &spte); spl1e[(pa & ~PAGE_MASK) / sizeof(l1_pgentry_t)] = mk_l1_pgentry(spte); } @@ -728,13 +730,13 @@ void shadow_l2_normal_pt_update(unsigned long pa, unsigned long gpte) /* N.B. To get here, we know the l2 page *must* be shadowed. */ SH_VVLOG("shadow_l2_normal_pt_update pa=%08lx, gpte=%08lx",pa,gpte); - spfn = __shadow_status(¤t->mm, pa >> PAGE_SHIFT) & PSH_pfn_mask; + spfn = __shadow_status(current->domain, pa >> PAGE_SHIFT) & PSH_pfn_mask; s_sh = (gpte & _PAGE_PRESENT) ? - __shadow_status(¤t->mm, gpte >> PAGE_SHIFT) : 0; + __shadow_status(current->domain, gpte >> PAGE_SHIFT) : 0; /* XXXX Should mark guest pte as DIRTY and ACCESSED too! 
*/ - l2pde_general(¤t->mm, &gpte, &spte, s_sh); + l2pde_general(current->domain, &gpte, &spte, s_sh); spl2e = (l2_pgentry_t *)map_domain_mem(spfn << PAGE_SHIFT); spl2e[(pa & ~PAGE_MASK) / sizeof(l2_pgentry_t)] = mk_l2_pgentry(spte); unmap_domain_mem(spl2e); @@ -761,13 +763,11 @@ char * sh_check_name; } while ( 0 ) static int check_pte( - struct mm_struct *m, unsigned long gpte, unsigned long spte, + struct domain *d, unsigned long gpte, unsigned long spte, int level, int i) { unsigned long mask, gpfn, spfn; -#ifdef CONFIG_VMX unsigned long guest_gpfn; -#endif if ( (spte == 0) || (spte == 0xdeadface) || (spte == 0x00000E00) ) return 1; /* always safe */ @@ -811,18 +811,18 @@ static int check_pte( if ( level < 2 ) FAIL("Shadow in L1 entry?"); - if (m->shadow_mode == SHM_full_32) { + if (d->arch.shadow_mode == SHM_full_32) { guest_gpfn = phys_to_machine_mapping[gpfn]; - if ( __shadow_status(m, guest_gpfn) != (PSH_shadowed | spfn) ) + if ( __shadow_status(d, guest_gpfn) != (PSH_shadowed | spfn) ) FAIL("spfn problem g.sf=%08lx", - __shadow_status(m, guest_gpfn) ); + __shadow_status(d, guest_gpfn) ); } else { - if ( __shadow_status(m, gpfn) != (PSH_shadowed | spfn) ) + if ( __shadow_status(d, gpfn) != (PSH_shadowed | spfn) ) FAIL("spfn problem g.sf=%08lx", - __shadow_status(m, gpfn) ); + __shadow_status(d, gpfn) ); } } @@ -832,7 +832,7 @@ static int check_pte( static int check_l1_table( - struct mm_struct *m, unsigned long va, + struct domain *d, unsigned long va, unsigned long g2, unsigned long s2) { int i; @@ -842,7 +842,7 @@ static int check_l1_table( spl1e = map_domain_mem(s2 << PAGE_SHIFT); for ( i = 0; i < ENTRIES_PER_L1_PAGETABLE; i++ ) - check_pte(m, gpl1e[i], spl1e[i], 1, i); + check_pte(d, gpl1e[i], spl1e[i], 1, i); unmap_domain_mem(spl1e); unmap_domain_mem(gpl1e); @@ -856,11 +856,11 @@ static int check_l1_table( BUG(); \ } while ( 0 ) -int check_pagetable(struct mm_struct *m, pagetable_t pt, char *s) +int check_pagetable(struct domain *d, pagetable_t pt, char *s) { unsigned long gptbase = pagetable_val(pt); unsigned long gpfn, spfn; - int i; + unsigned long i; l2_pgentry_t *gpl2e, *spl2e; unsigned long host_gpfn = 0; @@ -872,22 +872,22 @@ int check_pagetable(struct mm_struct *m, pagetable_t pt, char *s) gpfn = gptbase >> PAGE_SHIFT; - __get_phys_to_machine(m, host_gpfn, gpfn); + __get_phys_to_machine(d, host_gpfn, gpfn); - if ( ! (__shadow_status(m, gpfn) & PSH_shadowed) ) + if ( ! (__shadow_status(d, gpfn) & PSH_shadowed) ) { printk("%s-PT %08lx not shadowed\n", s, gptbase); - if( __shadow_status(m, gpfn) != 0 ) BUG(); + if( __shadow_status(d, gpfn) != 0 ) BUG(); return 0; } - spfn = __shadow_status(m, gpfn) & PSH_pfn_mask; + spfn = __shadow_status(d, gpfn) & PSH_pfn_mask; - if ( ! __shadow_status(m, gpfn) == (PSH_shadowed | spfn) ) + if ( ! 
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 399fd3b7ec..55d8917725 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -1,3 +1,4 @@
+/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
 /*
  *	x86 SMP booting functions
  *
@@ -662,7 +663,7 @@ static void __init do_boot_cpu (int apicid)
 
     set_bit(DF_IDLETASK, &idle->d_flags);
 
-    ed->mm.pagetable = mk_pagetable(__pa(idle_pg_table));
+    ed->arch.pagetable = mk_pagetable(__pa(idle_pg_table));
 
     map_cpu_to_boot_apicid(cpu, apicid);

diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 55c767bdca..9cd16fdb27 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1,3 +1,4 @@
+/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
 /******************************************************************************
  * arch/x86/traps.c
  *
@@ -139,7 +140,7 @@ static inline int do_trap(int trapnr, char *str,
                           int use_error_code)
 {
     struct exec_domain *ed = current;
-    struct trap_bounce *tb = &ed->thread.trap_bounce;
+    struct trap_bounce *tb = &ed->arch.trap_bounce;
     trap_info_t *ti;
     unsigned long fixup;
 
@@ -148,7 +149,7 @@ static inline int do_trap(int trapnr, char *str,
     if ( !GUEST_FAULT(regs) )
         goto xen_fault;
 
-    ti = current->thread.traps + trapnr;
+    ti = current->arch.traps + trapnr;
     tb->flags = TBF_EXCEPTION;
     tb->cs    = ti->cs;
     tb->eip   = ti->address;
@@ -206,7 +207,7 @@ DO_ERROR_NOCODE(19, "simd error", simd_coprocessor_error)
 asmlinkage int do_int3(struct xen_regs *regs)
 {
     struct exec_domain *ed = current;
-    struct trap_bounce *tb = &ed->thread.trap_bounce;
+    struct trap_bounce *tb = &ed->arch.trap_bounce;
     trap_info_t *ti;
 
     DEBUGGER_trap_entry(TRAP_int3, regs);
@@ -218,7 +219,7 @@ asmlinkage int do_int3(struct xen_regs *regs)
         panic("CPU%d FATAL TRAP: vector = 3 (Int3)\n", smp_processor_id());
     }
 
-    ti = current->thread.traps + 3;
+    ti = current->arch.traps + 3;
     tb->flags = TBF_EXCEPTION;
     tb->cs    = ti->cs;
     tb->eip   = ti->address;
@@ -237,9 +238,9 @@ void propagate_page_fault(unsigned long addr, u16 error_code)
 {
     trap_info_t *ti;
     struct exec_domain *ed = current;
-    struct trap_bounce *tb = &ed->thread.trap_bounce;
+    struct trap_bounce *tb = &ed->arch.trap_bounce;
 
-    ti = ed->thread.traps + 14;
+    ti = ed->arch.traps + 14;
     tb->flags = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE | TBF_EXCEPTION_CR2;
     tb->cr2        = addr;
     tb->error_code = error_code;
@@ -248,7 +249,7 @@ void propagate_page_fault(unsigned long addr, u16 error_code)
     if ( TI_GET_IF(ti) )
         ed->vcpu_info->evtchn_upcall_mask = 1;
 
-    ed->mm.guest_cr2 = addr;
+    ed->arch.guest_cr2 = addr;
 }
 
 asmlinkage int do_page_fault(struct xen_regs *regs)
@@ -282,7 +283,7 @@ asmlinkage int do_page_fault(struct xen_regs *regs)
              ((regs->error_code & 3) == 3) && /* write-protection fault */
              ptwr_do_page_fault(addr) )
         {
-            if ( unlikely(ed->mm.shadow_mode) )
+            if ( unlikely(d->arch.shadow_mode) )
                 (void)shadow_fault(addr, regs->error_code);
             UNLOCK_BIGLOCK(d);
             return EXCRET_fault_fixed;
@@ -290,12 +291,12 @@ asmlinkage int do_page_fault(struct xen_regs *regs)
         UNLOCK_BIGLOCK(d);
     }
 
-    if ( unlikely(ed->mm.shadow_mode) &&
+    if ( unlikely(d->arch.shadow_mode) &&
          (addr < PAGE_OFFSET) && shadow_fault(addr, regs->error_code) )
         return EXCRET_fault_fixed;
 
     if ( unlikely(addr >= LDT_VIRT_START(ed)) &&
-         (addr < (LDT_VIRT_START(ed) + (ed->mm.ldt_ents*LDT_ENTRY_SIZE))) )
+         (addr < (LDT_VIRT_START(ed) + (ed->arch.ldt_ents*LDT_ENTRY_SIZE))) )
     {
         /*
         * Copy a mapping from the guest's LDT, if it is valid. Otherwise we
        */
        LOCK_BIGLOCK(d);
@@ -303,7 +304,7 @@ asmlinkage int do_page_fault(struct xen_regs *regs)
        off  = addr - LDT_VIRT_START(ed);
-        addr = ed->mm.ldt_base + off;
+        addr = ed->arch.ldt_base + off;
        ret = map_ldt_shadow_page(off >> PAGE_SHIFT);
        UNLOCK_BIGLOCK(d);
        if ( likely(ret) )
@@ -321,7 +322,7 @@ asmlinkage int do_page_fault(struct xen_regs *regs)
    if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
    {
        perfc_incrc(copy_user_faults);
-        if ( !ed->mm.shadow_mode )
+        if ( !d->arch.shadow_mode )
            DPRINTK("Page fault: %p -> %p\n", regs->eip, fixup);
        regs->eip = fixup;
        return 0;
@@ -388,11 +389,11 @@ static int emulate_privileged_op(struct xen_regs *regs)
            break;
 
        case 2: /* Read CR2 */
-            *reg = ed->mm.guest_cr2;
+            *reg = ed->arch.guest_cr2;
            break;
 
        case 3: /* Read CR3 */
-            *reg = pagetable_val(ed->mm.pagetable);
+            *reg = pagetable_val(ed->arch.pagetable);
            break;
 
        default:
@@ -415,7 +416,7 @@ static int emulate_privileged_op(struct xen_regs *regs)
            break;
 
        case 2: /* Write CR2 */
-            ed->mm.guest_cr2 = *reg;
+            ed->arch.guest_cr2 = *reg;
            break;
 
        case 3: /* Write CR3 */
@@ -465,7 +466,7 @@ static int emulate_privileged_op(struct xen_regs *regs)
 asmlinkage int do_general_protection(struct xen_regs *regs)
 {
     struct exec_domain *ed = current;
-    struct trap_bounce *tb = &ed->thread.trap_bounce;
+    struct trap_bounce *tb = &ed->arch.trap_bounce;
     trap_info_t *ti;
     unsigned long fixup;
 
@@ -500,7 +501,7 @@ asmlinkage int do_general_protection(struct xen_regs *regs)
     if ( (regs->error_code & 3) == 2 )
     {
         /* This fault must be due to <INT n> instruction. */
-        ti = current->thread.traps + (regs->error_code>>3);
+        ti = current->arch.traps + (regs->error_code>>3);
         if ( TI_GET_DPL(ti) >= (VM86_MODE(regs) ? 3 : (regs->cs & 3)) )
         {
             tb->flags = TBF_EXCEPTION;
@@ -523,7 +524,7 @@ asmlinkage int do_general_protection(struct xen_regs *regs)
 #endif
 
     /* Pass on GPF as is.
    */
-    ti = current->thread.traps + 13;
+    ti = current->arch.traps + 13;
     tb->flags      = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE;
     tb->error_code = regs->error_code;
  finish_propagation:
@@ -615,10 +616,10 @@ asmlinkage int math_state_restore(struct xen_regs *regs)
 
     if ( test_and_clear_bit(EDF_GUEST_STTS, &current->ed_flags) )
     {
-        struct trap_bounce *tb = &current->thread.trap_bounce;
+        struct trap_bounce *tb = &current->arch.trap_bounce;
         tb->flags = TBF_EXCEPTION;
-        tb->cs    = current->thread.traps[7].cs;
-        tb->eip   = current->thread.traps[7].address;
+        tb->cs    = current->arch.traps[7].cs;
+        tb->eip   = current->arch.traps[7].address;
     }
 
     return EXCRET_fault_fixed;
@@ -628,7 +629,7 @@ asmlinkage int do_debug(struct xen_regs *regs)
 {
     unsigned long condition;
     struct exec_domain *d = current;
-    struct trap_bounce *tb = &d->thread.trap_bounce;
+    struct trap_bounce *tb = &d->arch.trap_bounce;
 
     DEBUGGER_trap_entry(TRAP_debug, regs);
 
@@ -636,7 +637,7 @@ asmlinkage int do_debug(struct xen_regs *regs)
 
     /* Mask out spurious debug traps due to lazy DR7 setting */
     if ( (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) &&
-         (d->thread.debugreg[7] == 0) )
+         (d->arch.debugreg[7] == 0) )
     {
         __asm__("mov %0,%%db7" : : "r" (0UL));
         goto out;
@@ -656,11 +657,11 @@ asmlinkage int do_debug(struct xen_regs *regs)
     }
 
     /* Save debug status register where guest OS can peek at it */
-    d->thread.debugreg[6] = condition;
+    d->arch.debugreg[6] = condition;
 
     tb->flags = TBF_EXCEPTION;
-    tb->cs    = d->thread.traps[1].cs;
-    tb->eip   = d->thread.traps[1].address;
+    tb->cs    = d->arch.traps[1].cs;
+    tb->eip   = d->arch.traps[1].address;
 
  out:
     return EXCRET_not_a_fault;
@@ -759,7 +760,7 @@ void __init trap_init(void)
 long do_set_trap_table(trap_info_t *traps)
 {
     trap_info_t cur;
-    trap_info_t *dst = current->thread.traps;
+    trap_info_t *dst = current->arch.traps;
 
     LOCK_BIGLOCK(current->domain);
 
@@ -798,10 +799,10 @@ long do_set_callbacks(unsigned long event_selector,
     if ( !VALID_CODESEL(event_selector) || !VALID_CODESEL(failsafe_selector) )
         return -EPERM;
 
-    d->thread.event_selector    = event_selector;
-    d->thread.event_address     = event_address;
-    d->thread.failsafe_selector = failsafe_selector;
-    d->thread.failsafe_address  = failsafe_address;
+    d->arch.event_selector    = event_selector;
+    d->arch.event_address     = event_address;
+    d->arch.failsafe_selector = failsafe_selector;
+    d->arch.failsafe_address  = failsafe_address;
 
     return 0;
 }
@@ -876,7 +877,7 @@ long set_debugreg(struct exec_domain *p, int reg, unsigned long value)
         return -EINVAL;
     }
 
-    p->thread.debugreg[reg] = value;
+    p->arch.debugreg[reg] = value;
     return 0;
 }
 
@@ -888,5 +889,5 @@ long do_set_debugreg(int reg, unsigned long value)
 unsigned long do_get_debugreg(int reg)
 {
     if ( (reg < 0) || (reg > 7) ) return -EINVAL;
-    return current->thread.debugreg[reg];
+    return current->arch.debugreg[reg];
 }
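traps.c repeats one idiom throughout: look up the guest's registered handler in ed->arch.traps[] and queue it in ed->arch.trap_bounce for delivery on the way back out to the guest. Condensed from do_trap()/do_int3()/propagate_page_fault() above (TBF_* flags as in the hunks):

    /* bounce exception `trapnr` to the guest's registered handler */
    trap_info_t *ti        = current->arch.traps + trapnr;
    struct trap_bounce *tb = &current->arch.trap_bounce;
    tb->flags = TBF_EXCEPTION;          /* | TBF_EXCEPTION_ERRCODE if applicable */
    tb->cs    = ti->cs;
    tb->eip   = ti->address;
    if ( TI_GET_IF(ti) )                /* handler asked for events masked */
        current->vcpu_info->evtchn_upcall_mask = 1;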
*/ - __guest_get_pl2e(m, va, &gpde); + __guest_get_pl2e(ed, va, &gpde); if (!(gpde & _PAGE_PRESENT)) return 0; index = (va >> L2_PAGETABLE_SHIFT); - if (!l2_pgentry_val(m->guest_pl2e_cache[index])) { + if (!l2_pgentry_val(ed->arch.guest_pl2e_cache[index])) { pfn = phys_to_machine_mapping[gpde >> PAGE_SHIFT]; VMX_DBG_LOG(DBG_LEVEL_VMMU, "vmx_do_page_fault: pagetable = %lx\n", - pagetable_val(m->pagetable)); + pagetable_val(ed->arch.pagetable)); - m->guest_pl2e_cache[index] = + ed->arch.guest_pl2e_cache[index] = mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR); } @@ -246,18 +246,18 @@ static void vmx_dr_access (unsigned long exit_qualification, struct xen_regs *re case TYPE_MOV_TO_DR: /* don't need to check the range */ if (reg != REG_ESP) - ed->thread.debugreg[reg] = *reg_p; + ed->arch.debugreg[reg] = *reg_p; else { unsigned long value; __vmread(GUEST_ESP, &value); - ed->thread.debugreg[reg] = value; + ed->arch.debugreg[reg] = value; } break; case TYPE_MOV_FROM_DR: if (reg != REG_ESP) - *reg_p = ed->thread.debugreg[reg]; + *reg_p = ed->arch.debugreg[reg]; else { - __vmwrite(GUEST_ESP, ed->thread.debugreg[reg]); + __vmwrite(GUEST_ESP, ed->arch.debugreg[reg]); } break; } @@ -270,7 +270,7 @@ static void vmx_dr_access (unsigned long exit_qualification, struct xen_regs *re static void vmx_vmexit_do_invlpg(unsigned long va) { unsigned long eip; - struct exec_domain *d = current; + struct exec_domain *ed = current; unsigned int index; __vmread(GUEST_EIP, &eip); @@ -282,31 +282,31 @@ static void vmx_vmexit_do_invlpg(unsigned long va) * We do the safest things first, then try to update the shadow * copying from guest */ - vmx_shadow_invlpg(&d->mm, va); + vmx_shadow_invlpg(ed->domain, va); index = (va >> L2_PAGETABLE_SHIFT); - d->mm.guest_pl2e_cache[index] = mk_l2_pgentry(0); /* invalidate pgd cache */ + ed->arch.guest_pl2e_cache[index] = + mk_l2_pgentry(0); /* invalidate pgd cache */ } -static inline void guest_pl2e_cache_invalidate(struct mm_struct *m) +static inline void guest_pl2e_cache_invalidate(struct exec_domain *ed) { /* * Need to optimize this */ - memset(m->guest_pl2e_cache, 0, PAGE_SIZE); + memset(ed->arch.guest_pl2e_cache, 0, PAGE_SIZE); } inline unsigned long gva_to_gpa(unsigned long gva) { unsigned long gpde, gpte, pfn, index; - struct exec_domain *d = current; - struct mm_struct *m = &d->mm; + struct exec_domain *ed = current; - __guest_get_pl2e(m, gva, &gpde); + __guest_get_pl2e(ed, gva, &gpde); index = (gva >> L2_PAGETABLE_SHIFT); pfn = phys_to_machine_mapping[gpde >> PAGE_SHIFT]; - m->guest_pl2e_cache[index] = + ed->arch.guest_pl2e_cache[index] = mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR); if ( unlikely(__get_user(gpte, (unsigned long *) @@ -350,14 +350,14 @@ static void vmx_io_instruction(struct xen_regs *regs, return; } - vio = (vcpu_iodata_t *) d->thread.arch_vmx.vmx_platform.shared_page_va; + vio = (vcpu_iodata_t *) d->arch.arch_vmx.vmx_platform.shared_page_va; if (vio == 0) { VMX_DBG_LOG(DBG_LEVEL_1, "bad shared page: %lx\n", (unsigned long) vio); domain_crash(); } p = &vio->vp_ioreq; p->dir = test_bit(3, &exit_qualification); - set_bit(ARCH_VMX_IO_WAIT, &d->thread.arch_vmx.flags); + set_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags); p->pdata_valid = 0; p->count = 1; @@ -443,40 +443,40 @@ static void mov_to_cr(int gp, int cr, struct xen_regs *regs) __vmwrite(CR0_READ_SHADOW, value); if (value & (X86_CR0_PE | X86_CR0_PG) && - !test_bit(VMX_CPU_STATE_PG_ENABLED, &d->thread.arch_vmx.cpu_state)) { + !test_bit(VMX_CPU_STATE_PG_ENABLED, 
&d->arch.arch_vmx.cpu_state)) { /* * Enable paging */ - set_bit(VMX_CPU_STATE_PG_ENABLED, &d->thread.arch_vmx.cpu_state); + set_bit(VMX_CPU_STATE_PG_ENABLED, &d->arch.arch_vmx.cpu_state); /* * The guest CR3 must be pointing to the guest physical. */ if (!(pfn = phys_to_machine_mapping[ - d->thread.arch_vmx.cpu_cr3 >> PAGE_SHIFT])) + d->arch.arch_vmx.cpu_cr3 >> PAGE_SHIFT])) { VMX_DBG_LOG(DBG_LEVEL_VMMU, "Invalid CR3 value = %lx\n", - d->thread.arch_vmx.cpu_cr3); + d->arch.arch_vmx.cpu_cr3); domain_crash(); /* need to take a clean path */ } - old_base_pfn = pagetable_val(d->mm.pagetable) >> PAGE_SHIFT; + old_base_pfn = pagetable_val(d->arch.pagetable) >> PAGE_SHIFT; /* * Now mm.pagetable points to machine physical. */ - d->mm.pagetable = mk_pagetable(pfn << PAGE_SHIFT); + d->arch.pagetable = mk_pagetable(pfn << PAGE_SHIFT); VMX_DBG_LOG(DBG_LEVEL_VMMU, "New mm.pagetable = %lx\n", (unsigned long) (pfn << PAGE_SHIFT)); - shadow_lock(&d->mm); + shadow_lock(d->domain); shadow_mode_enable(d->domain, SHM_full_32); - shadow_unlock(&d->mm); + shadow_unlock(d->domain); - __vmwrite(GUEST_CR3, pagetable_val(d->mm.shadow_table)); + __vmwrite(GUEST_CR3, pagetable_val(d->arch.shadow_table)); /* * mm->shadow_table should hold the next CR3 for shadow */ VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, pfn = %lx\n", - d->thread.arch_vmx.cpu_cr3, pfn); + d->arch.arch_vmx.cpu_cr3, pfn); put_page_and_type(&frame_table[old_base_pfn]); } @@ -489,26 +489,26 @@ static void mov_to_cr(int gp, int cr, struct xen_regs *regs) /* * If paging is not enabled yet, simply copy the valut to CR3. */ - if (!test_bit(VMX_CPU_STATE_PG_ENABLED, &d->thread.arch_vmx.cpu_state)) { - d->thread.arch_vmx.cpu_cr3 = value; + if (!test_bit(VMX_CPU_STATE_PG_ENABLED, &d->arch.arch_vmx.cpu_state)) { + d->arch.arch_vmx.cpu_cr3 = value; return; } - guest_pl2e_cache_invalidate(&d->mm); + guest_pl2e_cache_invalidate(d); /* * We make a new one if the shadow does not exist. */ - if (value == d->thread.arch_vmx.cpu_cr3) { + if (value == d->arch.arch_vmx.cpu_cr3) { /* * This is simple TLB flush, implying the guest has * removed some translation or changed page attributes. * We simply invalidate the shadow. */ pfn = phys_to_machine_mapping[value >> PAGE_SHIFT]; - if ((pfn << PAGE_SHIFT) != pagetable_val(d->mm.pagetable)) + if ((pfn << PAGE_SHIFT) != pagetable_val(d->arch.pagetable)) __vmx_bug(regs); - vmx_shadow_clear_state(&d->mm); - shadow_invalidate(&d->mm); + vmx_shadow_clear_state(d->domain); + shadow_invalidate(d); } else { /* * If different, make a shadow. Check if the PDBR is valid @@ -522,16 +522,16 @@ static void mov_to_cr(int gp, int cr, struct xen_regs *regs) domain_crash(); /* need to take a clean path */ } pfn = phys_to_machine_mapping[value >> PAGE_SHIFT]; - vmx_shadow_clear_state(&d->mm); - d->mm.pagetable = mk_pagetable(pfn << PAGE_SHIFT); - shadow_mk_pagetable(&d->mm); + vmx_shadow_clear_state(d->domain); + d->arch.pagetable = mk_pagetable(pfn << PAGE_SHIFT); + shadow_mk_pagetable(d); /* * mm->shadow_table should hold the next CR3 for shadow */ - d->thread.arch_vmx.cpu_cr3 = value; + d->arch.arch_vmx.cpu_cr3 = value; VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx\n", value); - __vmwrite(GUEST_CR3, pagetable_val(d->mm.shadow_table)); + __vmwrite(GUEST_CR3, pagetable_val(d->arch.shadow_table)); } break; } @@ -549,9 +549,9 @@ static void mov_to_cr(int gp, int cr, struct xen_regs *regs) * all TLB entries except global entries. 
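Condensed, the CR3-write path above makes one decision: an unchanged value is a guest TLB flush, a new value is a page-table switch. With the new split applied (note that in vmx.c `d' names the current exec_domain, so domain-wide shadow calls take d->domain):

    if ( value == d->arch.arch_vmx.cpu_cr3 )
    {
        /* Same PDBR: guest asked for a TLB flush; drop the stale shadow. */
        vmx_shadow_clear_state(d->domain);
        shadow_invalidate(d);
    }
    else
    {
        /* New PDBR: rebuild the shadow and point the VMCS at it. */
        pfn = phys_to_machine_mapping[value >> PAGE_SHIFT];
        vmx_shadow_clear_state(d->domain);
        d->arch.pagetable = mk_pagetable(pfn << PAGE_SHIFT);
        shadow_mk_pagetable(d);
        d->arch.arch_vmx.cpu_cr3 = value;
        __vmwrite(GUEST_CR3, pagetable_val(d->arch.shadow_table));
    }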
*/ if ((old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE)) { - vmx_shadow_clear_state(&d->mm); - shadow_invalidate(&d->mm); - guest_pl2e_cache_invalidate(&d->mm); + vmx_shadow_clear_state(d->domain); + shadow_invalidate(d); + guest_pl2e_cache_invalidate(d); } break; default: @@ -576,7 +576,7 @@ static void mov_from_cr(int cr, int gp, struct xen_regs *regs) if (cr != 3) __vmx_bug(regs); - value = (unsigned long) d->thread.arch_vmx.cpu_cr3; + value = (unsigned long) d->arch.arch_vmx.cpu_cr3; ASSERT(value); switch (gp) { @@ -799,7 +799,7 @@ asmlinkage void vmx_vmexit_handler(struct xen_regs regs) "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx\n", regs.eax, regs.ebx, regs.ecx, regs.edx, regs.esi, regs.edi); - d->thread.arch_vmx.vmx_platform.mpci.inst_decoder_regs = ®s; + d->arch.arch_vmx.vmx_platform.mpci.inst_decoder_regs = ®s; if (!(error = vmx_do_page_fault(va, error_code))) { /* @@ -813,7 +813,7 @@ asmlinkage void vmx_vmexit_handler(struct xen_regs regs) VECTOR_PG); __vmwrite(VM_ENTRY_INTR_INFO_FIELD, intr_fields); __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); - d->thread.arch_vmx.cpu_cr2 = va; + d->arch.arch_vmx.cpu_cr2 = va; } break; } @@ -935,5 +935,5 @@ asmlinkage void load_cr2(void) struct exec_domain *d = current; local_irq_disable(); - asm volatile("movl %0,%%cr2": :"r" (d->thread.arch_vmx.cpu_cr2)); + asm volatile("movl %0,%%cr2": :"r" (d->arch.arch_vmx.cpu_cr2)); } diff --git a/xen/arch/x86/vmx_io.c b/xen/arch/x86/vmx_io.c index 652bf3613d..d3585c2fc5 100644 --- a/xen/arch/x86/vmx_io.c +++ b/xen/arch/x86/vmx_io.c @@ -1,3 +1,4 @@ +/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */ /* * vmx_io.c: handling I/O, interrupts related VMX entry/exit * Copyright (c) 2004, Intel Corporation. @@ -178,7 +179,7 @@ void vmx_io_assist(struct exec_domain *ed) struct mi_per_cpu_info *mpci_p; struct xen_regs *inst_decoder_regs; - mpci_p = &ed->thread.arch_vmx.vmx_platform.mpci; + mpci_p = &ed->arch.arch_vmx.vmx_platform.mpci; inst_decoder_regs = mpci_p->inst_decoder_regs; /* clear the pending event */ @@ -187,7 +188,7 @@ void vmx_io_assist(struct exec_domain *ed) clear_bit(IOPACKET_PORT>>5, &ed->vcpu_info->evtchn_pending_sel); clear_bit(IOPACKET_PORT, &d->shared_info->evtchn_pending[0]); - vio = (vcpu_iodata_t *) ed->thread.arch_vmx.vmx_platform.shared_page_va; + vio = (vcpu_iodata_t *) ed->arch.arch_vmx.vmx_platform.shared_page_va; if (vio == 0) { VMX_DBG_LOG(DBG_LEVEL_1, "bad shared page: %lx\n", (unsigned long) vio); @@ -195,14 +196,14 @@ void vmx_io_assist(struct exec_domain *ed) } p = &vio->vp_ioreq; /* clear IO wait VMX flag */ - if (test_bit(ARCH_VMX_IO_WAIT, &ed->thread.arch_vmx.flags)) { + if (test_bit(ARCH_VMX_IO_WAIT, &ed->arch.arch_vmx.flags)) { if (p->state != STATE_IORESP_READY) { printk("got a false I/O reponse\n"); do_block(); } else { p->state = STATE_INVALID; } - clear_bit(ARCH_VMX_IO_WAIT, &ed->thread.arch_vmx.flags); + clear_bit(ARCH_VMX_IO_WAIT, &ed->arch.arch_vmx.flags); } else { return; } @@ -218,10 +219,10 @@ void vmx_io_assist(struct exec_domain *ed) } int size = -1, index = -1; - size = operand_size(ed->thread.arch_vmx.vmx_platform.mpci.mmio_target); - index = operand_index(ed->thread.arch_vmx.vmx_platform.mpci.mmio_target); + size = operand_size(ed->arch.arch_vmx.vmx_platform.mpci.mmio_target); + index = operand_index(ed->arch.arch_vmx.vmx_platform.mpci.mmio_target); - if (ed->thread.arch_vmx.vmx_platform.mpci.mmio_target & WZEROEXTEND) { + if (ed->arch.arch_vmx.vmx_platform.mpci.mmio_target & WZEROEXTEND) { p->u.data = 
p->u.data & 0xffff; } set_reg_value(size, index, 0, (struct xen_regs *)ec, p->u.data); @@ -301,7 +302,7 @@ static inline int find_highest_pending_irq(struct exec_domain *d) { vcpu_iodata_t *vio; - vio = (vcpu_iodata_t *) d->thread.arch_vmx.vmx_platform.shared_page_va; + vio = (vcpu_iodata_t *) d->arch.arch_vmx.vmx_platform.shared_page_va; if (vio == 0) { VMX_DBG_LOG(DBG_LEVEL_1, "bad shared page: %lx\n", (unsigned long) vio); @@ -315,7 +316,7 @@ static inline void clear_highest_bit(struct exec_domain *d, int vector) { vcpu_iodata_t *vio; - vio = (vcpu_iodata_t *) d->thread.arch_vmx.vmx_platform.shared_page_va; + vio = (vcpu_iodata_t *) d->arch.arch_vmx.vmx_platform.shared_page_va; if (vio == 0) { VMX_DBG_LOG(DBG_LEVEL_1, "bad shared page: %lx\n", (unsigned long) vio); @@ -363,15 +364,15 @@ void vmx_intr_assist(struct exec_domain *d) void vmx_do_resume(struct exec_domain *d) { - __vmwrite(HOST_CR3, pagetable_val(d->mm.monitor_table)); - __vmwrite(GUEST_CR3, pagetable_val(d->mm.shadow_table)); + __vmwrite(HOST_CR3, pagetable_val(d->arch.monitor_table)); + __vmwrite(GUEST_CR3, pagetable_val(d->arch.shadow_table)); __vmwrite(HOST_ESP, (unsigned long) get_stack_top()); if (event_pending(d)) { if (test_bit(IOPACKET_PORT, &d->domain->shared_info->evtchn_pending[0])) vmx_io_assist(d); - else if (test_bit(ARCH_VMX_IO_WAIT, &d->thread.arch_vmx.flags)) { + else if (test_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags)) { printk("got an event while blocked on I/O\n"); do_block(); } @@ -382,6 +383,6 @@ void vmx_do_resume(struct exec_domain *d) * a response to ioreq_t is not ok. */ } - if (!test_bit(ARCH_VMX_IO_WAIT, &d->thread.arch_vmx.flags)) + if (!test_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags)) vmx_intr_assist(d); } diff --git a/xen/arch/x86/vmx_platform.c b/xen/arch/x86/vmx_platform.c index e4875dc042..5be1c182be 100644 --- a/xen/arch/x86/vmx_platform.c +++ b/xen/arch/x86/vmx_platform.c @@ -1,3 +1,4 @@ +/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */ /* * vmx_platform.c: handling x86 platform related MMIO instructions * Copyright (c) 2004, Intel Corporation. @@ -420,9 +421,9 @@ static void send_mmio_req(unsigned long gpa, extern long evtchn_send(int lport); extern long do_block(void); - mpci_p = ¤t->thread.arch_vmx.vmx_platform.mpci; + mpci_p = ¤t->arch.arch_vmx.vmx_platform.mpci; inst_decoder_regs = mpci_p->inst_decoder_regs; - vio = (vcpu_iodata_t *) d->thread.arch_vmx.vmx_platform.shared_page_va; + vio = (vcpu_iodata_t *) d->arch.arch_vmx.vmx_platform.shared_page_va; if (vio == NULL) { printk("bad shared page\n"); @@ -430,7 +431,7 @@ static void send_mmio_req(unsigned long gpa, } p = &vio->vp_ioreq; - set_bit(ARCH_VMX_IO_WAIT, &d->thread.arch_vmx.flags); + set_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags); p->dir = dir; p->pdata_valid = pvalid; p->count = 1; @@ -470,7 +471,7 @@ void handle_mmio(unsigned long va, unsigned long gpte, unsigned long gpa) unsigned char inst[MAX_INST_LEN]; int ret; - mpci_p = ¤t->thread.arch_vmx.vmx_platform.mpci; + mpci_p = ¤t->arch.arch_vmx.vmx_platform.mpci; inst_decoder_regs = mpci_p->inst_decoder_regs; __vmread(GUEST_EIP, &eip); diff --git a/xen/arch/x86/vmx_vmcs.c b/xen/arch/x86/vmx_vmcs.c index b7e71c935d..72096c7043 100644 --- a/xen/arch/x86/vmx_vmcs.c +++ b/xen/arch/x86/vmx_vmcs.c @@ -1,3 +1,4 @@ +/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */ /* * vmx_vmcs.c: VMCS management * Copyright (c) 2004, Intel Corporation. 
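Read together, the vmx_io.c and vmx_platform.c hunks keep one protocol intact across the renames: the vcpu raises ARCH_VMX_IO_WAIT in arch.arch_vmx.flags, posts a request in the shared page, kicks the device model, and blocks; vmx_io_assist() later consumes the response and clears the flag. The issuing side, sketched from send_mmio_req() (the request-state handoff value is an assumption; only the response states appear in this section):

    vio = (vcpu_iodata_t *)ed->arch.arch_vmx.vmx_platform.shared_page_va;
    p   = &vio->vp_ioreq;

    set_bit(ARCH_VMX_IO_WAIT, &ed->arch.arch_vmx.flags);  /* was ed->thread... */
    p->dir   = dir;
    p->count = 1;
    /* p->state = STATE_IOREQ_READY;  <- assumed, not shown in this section */

    evtchn_send(IOPACKET_PORT);  /* extern declared in send_mmio_req() */
    do_block();                  /* resumed once the state is STATE_IORESP_READY */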
@@ -137,7 +138,7 @@ int vmx_setup_platform(struct exec_domain *d, execution_context_t *context) mpfn = phys_to_machine_mapping[gpfn]; p = map_domain_mem(mpfn << PAGE_SHIFT); - d->thread.arch_vmx.vmx_platform.shared_page_va = (unsigned long) p; + d->arch.arch_vmx.vmx_platform.shared_page_va = (unsigned long) p; return 0; } @@ -159,7 +160,7 @@ static int add_mapping_perdomain(struct exec_domain *d, unsigned long gpfn, if (gpfn > ENTRIES_PER_L2_PAGETABLE * ENTRIES_PER_L1_PAGETABLE) return -1; - if (!(l1_pgentry_val(d->domain->mm_perdomain_pt[ + if (!(l1_pgentry_val(d->domain->arch.mm_perdomain_pt[ gpfn >> (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT)]) & _PAGE_PRESENT)) { page = (struct pfn_info *) alloc_domheap_page(NULL); @@ -168,7 +169,7 @@ static int add_mapping_perdomain(struct exec_domain *d, unsigned long gpfn, } pfn = (unsigned long) (page - frame_table); - d->domain->mm_perdomain_pt[gpfn >> (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT)] = + d->domain->arch.mm_perdomain_pt[gpfn >> (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT)] = mk_l1_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR); } phys_to_machine_mapping[gpfn] = mpfn; @@ -190,18 +191,18 @@ void vmx_do_launch(struct exec_domain *ed) struct domain *d = ed->domain; cpu = smp_processor_id(); - ed->mm.min_pfn = ed->mm.max_pfn = 0; + d->arch.min_pfn = d->arch.max_pfn = 0; spin_lock(&d->page_alloc_lock); list_ent = d->page_list.next; - mpl2e = (l2_pgentry_t *)map_domain_mem(pagetable_val(ed->mm.monitor_table)); + mpl2e = (l2_pgentry_t *)map_domain_mem(pagetable_val(ed->arch.monitor_table)); for ( i = 0; list_ent != &d->page_list; i++ ) { pfn = list_entry(list_ent, struct pfn_info, list) - frame_table; - ed->mm.min_pfn = min(ed->mm.min_pfn, pfn); - ed->mm.max_pfn = max(ed->mm.max_pfn, pfn); + d->arch.min_pfn = min(d->arch.min_pfn, pfn); + d->arch.max_pfn = max(d->arch.max_pfn, pfn); list_ent = frame_table[pfn].list.next; add_mapping_perdomain(ed, i, pfn); } @@ -219,7 +220,7 @@ void vmx_do_launch(struct exec_domain *ed) guest_pl2e_cache = map_domain_mem(pfn << PAGE_SHIFT); memset(guest_pl2e_cache, 0, PAGE_SIZE); /* clean it up */ - ed->mm.guest_pl2e_cache = guest_pl2e_cache; + ed->arch.guest_pl2e_cache = guest_pl2e_cache; unmap_domain_mem(mpl2e); @@ -245,12 +246,12 @@ void vmx_do_launch(struct exec_domain *ed) error |= __vmwrite(GUEST_TR_BASE, 0); error |= __vmwrite(GUEST_TR_LIMIT, 0xff); - ed->mm.shadow_table = ed->mm.pagetable; - __vmwrite(GUEST_CR3, pagetable_val(ed->mm.pagetable)); - __vmwrite(HOST_CR3, pagetable_val(ed->mm.monitor_table)); + ed->arch.shadow_table = ed->arch.pagetable; + __vmwrite(GUEST_CR3, pagetable_val(ed->arch.pagetable)); + __vmwrite(HOST_CR3, pagetable_val(ed->arch.monitor_table)); __vmwrite(HOST_ESP, (unsigned long) get_stack_top()); - ed->thread.schedule_tail = arch_vmx_do_resume; + ed->arch.schedule_tail = arch_vmx_do_resume; } /* diff --git a/xen/arch/x86/x86_32/asm-offsets.c b/xen/arch/x86/x86_32/asm-offsets.c index 8c51181cd8..839893b793 100644 --- a/xen/arch/x86/x86_32/asm-offsets.c +++ b/xen/arch/x86/x86_32/asm-offsets.c @@ -39,12 +39,12 @@ void __dummy__(void) OFFSET(EDOMAIN_processor, struct exec_domain, processor); OFFSET(EDOMAIN_vcpu_info, struct exec_domain, vcpu_info); - OFFSET(EDOMAIN_event_sel, struct exec_domain, thread.event_selector); - OFFSET(EDOMAIN_event_addr, struct exec_domain, thread.event_address); - OFFSET(EDOMAIN_failsafe_sel, struct exec_domain, thread.failsafe_selector); - OFFSET(EDOMAIN_failsafe_addr, struct exec_domain, thread.failsafe_address); - OFFSET(EDOMAIN_trap_bounce, struct 
exec_domain, thread.trap_bounce); - OFFSET(EDOMAIN_thread_flags, struct exec_domain, thread.flags); + OFFSET(EDOMAIN_event_sel, struct exec_domain, arch.event_selector); + OFFSET(EDOMAIN_event_addr, struct exec_domain, arch.event_address); + OFFSET(EDOMAIN_failsafe_sel, struct exec_domain, arch.failsafe_selector); + OFFSET(EDOMAIN_failsafe_addr, struct exec_domain, arch.failsafe_address); + OFFSET(EDOMAIN_trap_bounce, struct exec_domain, arch.trap_bounce); + OFFSET(EDOMAIN_thread_flags, struct exec_domain, arch.flags); BLANK(); OFFSET(VCPUINFO_upcall_pending, vcpu_info_t, evtchn_upcall_pending); diff --git a/xen/arch/x86/x86_32/domain_build.c b/xen/arch/x86/x86_32/domain_build.c index 9c5e51280e..e8db6ac1eb 100644 --- a/xen/arch/x86/x86_32/domain_build.c +++ b/xen/arch/x86/x86_32/domain_build.c @@ -1,3 +1,4 @@ +/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */ /****************************************************************************** * domain_build.c * @@ -216,11 +217,11 @@ int construct_dom0(struct domain *d, * We're basically forcing default RPLs to 1, so that our "what privilege * level are we returning to?" logic works. */ - ed->thread.failsafe_selector = FLAT_GUESTOS_CS; - ed->thread.event_selector = FLAT_GUESTOS_CS; - ed->thread.guestos_ss = FLAT_GUESTOS_SS; + ed->arch.failsafe_selector = FLAT_GUESTOS_CS; + ed->arch.event_selector = FLAT_GUESTOS_CS; + ed->arch.guestos_ss = FLAT_GUESTOS_SS; for ( i = 0; i < 256; i++ ) - ed->thread.traps[i].cs = FLAT_GUESTOS_CS; + ed->arch.traps[i].cs = FLAT_GUESTOS_CS; /* WARNING: The new domain must have its 'processor' field filled in! */ l2start = l2tab = (l2_pgentry_t *)mpt_alloc; mpt_alloc += PAGE_SIZE; @@ -228,8 +229,8 @@ int construct_dom0(struct domain *d, l2tab[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] = mk_l2_pgentry((unsigned long)l2start | __PAGE_HYPERVISOR); l2tab[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] = - mk_l2_pgentry(__pa(d->mm_perdomain_pt) | __PAGE_HYPERVISOR); - ed->mm.pagetable = mk_pagetable((unsigned long)l2start); + mk_l2_pgentry(__pa(d->arch.mm_perdomain_pt) | __PAGE_HYPERVISOR); + ed->arch.pagetable = mk_pagetable((unsigned long)l2start); l2tab += l2_table_offset(dsi.v_start); mfn = alloc_start >> PAGE_SHIFT; @@ -307,7 +308,7 @@ int construct_dom0(struct domain *d, /* Install the new page tables. */ __cli(); - write_ptbase(&ed->mm); + write_ptbase(ed); /* Copy the OS image. */ (void)loadelfimage(image_start); @@ -360,7 +361,7 @@ int construct_dom0(struct domain *d, *dst = '\0'; /* Reinstate the caller's page tables. */ - write_ptbase(¤t->mm); + write_ptbase(current); __sti(); /* Destroy low mappings - they were only for our convenience. 
*/ diff --git a/xen/arch/x86/x86_32/mm.c b/xen/arch/x86/x86_32/mm.c index c2dee7059c..b6d26ec8d9 100644 --- a/xen/arch/x86/x86_32/mm.c +++ b/xen/arch/x86/x86_32/mm.c @@ -1,3 +1,4 @@ +/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */ /****************************************************************************** * arch/x86/x86_32/mm.c * @@ -184,7 +185,7 @@ static void __synchronise_pagetables(void *mask) struct exec_domain *ed = current; if ( ((unsigned long)mask & (1 << ed->processor)) && is_idle_task(ed->domain) ) - write_ptbase(&ed->mm); + write_ptbase(ed); } void synchronise_pagetables(unsigned long cpu_mask) { @@ -201,8 +202,8 @@ long do_stack_switch(unsigned long ss, unsigned long esp) if ( (ss & 3) == 0 ) return -EPERM; - current->thread.guestos_ss = ss; - current->thread.guestos_sp = esp; + current->arch.guestos_ss = ss; + current->arch.guestos_sp = esp; t->ss1 = ss; t->esp1 = esp; @@ -316,9 +317,9 @@ void destroy_gdt(struct exec_domain *ed) for ( i = 0; i < 16; i++ ) { - if ( (pfn = l1_pgentry_to_pagenr(ed->mm.perdomain_ptes[i])) != 0 ) + if ( (pfn = l1_pgentry_to_pagenr(ed->arch.perdomain_ptes[i])) != 0 ) put_page_and_type(&frame_table[pfn]); - ed->mm.perdomain_ptes[i] = mk_l1_pgentry(0); + ed->arch.perdomain_ptes[i] = mk_l1_pgentry(0); } } @@ -372,7 +373,7 @@ long set_gdt(struct exec_domain *ed, /* Install the new GDT. */ for ( i = 0; i < nr_pages; i++ ) - ed->mm.perdomain_ptes[i] = + ed->arch.perdomain_ptes[i] = mk_l1_pgentry((frames[i] << PAGE_SHIFT) | __PAGE_HYPERVISOR); SET_GDT_ADDRESS(ed, GDT_VIRT_START(ed)); @@ -404,7 +405,7 @@ long do_set_gdt(unsigned long *frame_list, unsigned int entries) if ( (ret = set_gdt(current, frames, entries)) == 0 ) { local_flush_tlb(); - __asm__ __volatile__ ("lgdt %0" : "=m" (*current->mm.gdt)); + __asm__ __volatile__ ("lgdt %0" : "=m" (*current->arch.gdt)); } UNLOCK_BIGLOCK(current->domain); @@ -443,7 +444,7 @@ long do_update_descriptor( case PGT_gdt_page: /* Disallow updates of Xen-reserved descriptors in the current GDT. */ for_each_exec_domain(current->domain, ed) { - if ( (l1_pgentry_to_pagenr(ed->mm.perdomain_ptes[0]) == pfn) && + if ( (l1_pgentry_to_pagenr(ed->arch.perdomain_ptes[0]) == pfn) && (((pa&(PAGE_SIZE-1))>>3) >= FIRST_RESERVED_GDT_ENTRY) && (((pa&(PAGE_SIZE-1))>>3) <= LAST_RESERVED_GDT_ENTRY) ) goto out; diff --git a/xen/arch/x86/x86_32/seg_fixup.c b/xen/arch/x86/x86_32/seg_fixup.c index d41dcf2488..8fcc011246 100644 --- a/xen/arch/x86/x86_32/seg_fixup.c +++ b/xen/arch/x86/x86_32/seg_fixup.c @@ -1,3 +1,4 @@ +/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */ /****************************************************************************** * arch/x86/x86_32/seg_fixup.c * @@ -114,7 +115,7 @@ int get_baselimit(u16 seg, unsigned long *base, unsigned long *limit) if ( ldt ) { table = (unsigned long *)LDT_VIRT_START(d); - if ( idx >= d->mm.ldt_ents ) + if ( idx >= d->arch.ldt_ents ) goto fail; } else /* gdt */ @@ -180,10 +181,10 @@ int fixup_seg(u16 seg, unsigned long offset) if ( ldt ) { table = (unsigned long *)LDT_VIRT_START(d); - if ( idx >= d->mm.ldt_ents ) + if ( idx >= d->arch.ldt_ents ) { DPRINTK("Segment %04x out of LDT range (%ld)\n", - seg, d->mm.ldt_ents); + seg, d->arch.ldt_ents); goto fail; } } @@ -466,8 +467,8 @@ int gpf_emulate_4gb(struct xen_regs *regs) /* If requested, give a callback on otherwise unused vector 15. 
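do_set_gdt() above reloads GDTR directly from the buffer at current->arch.gdt. That buffer is the raw lgdt pseudo-descriptor (a 16-bit limit followed by the base; 6 bytes on x86_32, 10 on x86_64, per the arch_exec_domain definition later in this patch), which is all the GET/SET_GDT_* macros manipulate. Illustrative use, matching the set_gdt()/do_set_gdt() hunks:

    SET_GDT_ENTRIES(ed, entries);            /* limit word <- entries*8 - 1  */
    SET_GDT_ADDRESS(ed, GDT_VIRT_START(ed)); /* base <- per-domain mapping   */

    local_flush_tlb();
    __asm__ __volatile__ ( "lgdt %0" : "=m" (*ed->arch.gdt) );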
*/ if ( VM_ASSIST(d->domain, VMASST_TYPE_4gb_segments_notify) ) { - ti = &d->thread.traps[15]; - tb = &d->thread.trap_bounce; + ti = &d->arch.traps[15]; + tb = &d->arch.trap_bounce; tb->flags = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE; tb->error_code = pb - eip; tb->cs = ti->cs; diff --git a/xen/arch/x86/x86_32/traps.c b/xen/arch/x86/x86_32/traps.c index a4b0282c25..5337bf421a 100644 --- a/xen/arch/x86/x86_32/traps.c +++ b/xen/arch/x86/x86_32/traps.c @@ -1,3 +1,4 @@ +/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */ #include <xen/config.h> #include <xen/init.h> @@ -208,8 +209,8 @@ long set_fast_trap(struct exec_domain *p, int idx) if ( idx == 0 ) { if ( p == current ) - CLEAR_FAST_TRAP(&p->thread); - SET_DEFAULT_FAST_TRAP(&p->thread); + CLEAR_FAST_TRAP(&p->arch); + SET_DEFAULT_FAST_TRAP(&p->arch); return 0; } @@ -221,7 +222,7 @@ long set_fast_trap(struct exec_domain *p, int idx) if ( (idx != 0x80) && ((idx < 0x20) || (idx > 0x2f)) ) return -1; - ti = p->thread.traps + idx; + ti = p->arch.traps + idx; /* * We can't virtualise interrupt gates, as there's no way to get @@ -231,15 +232,15 @@ long set_fast_trap(struct exec_domain *p, int idx) return -1; if ( p == current ) - CLEAR_FAST_TRAP(&p->thread); + CLEAR_FAST_TRAP(&p->arch); - p->thread.fast_trap_idx = idx; - p->thread.fast_trap_desc.a = (ti->cs << 16) | (ti->address & 0xffff); - p->thread.fast_trap_desc.b = + p->arch.fast_trap_idx = idx; + p->arch.fast_trap_desc.a = (ti->cs << 16) | (ti->address & 0xffff); + p->arch.fast_trap_desc.b = (ti->address & 0xffff0000) | 0x8f00 | (TI_GET_DPL(ti)&3)<<13; if ( p == current ) - SET_FAST_TRAP(&p->thread); + SET_FAST_TRAP(&p->arch); return 0; } diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c index 72c3fd367c..2dd6055f0a 100644 --- a/xen/arch/x86/x86_64/asm-offsets.c +++ b/xen/arch/x86/x86_64/asm-offsets.c @@ -41,12 +41,12 @@ void __dummy__(void) OFFSET(EDOMAIN_processor, struct exec_domain, processor); OFFSET(EDOMAIN_vcpu_info, struct exec_domain, vcpu_info); - OFFSET(EDOMAIN_event_sel, struct exec_domain, thread.event_selector); - OFFSET(EDOMAIN_event_addr, struct exec_domain, thread.event_address); - OFFSET(EDOMAIN_failsafe_sel, struct exec_domain, thread.failsafe_selector); - OFFSET(EDOMAIN_failsafe_addr, struct exec_domain, thread.failsafe_address); - OFFSET(EDOMAIN_trap_bounce, struct exec_domain, thread.trap_bounce); - OFFSET(EDOMAIN_thread_flags, struct exec_domain, thread.flags); + OFFSET(EDOMAIN_event_sel, struct exec_domain, arch.event_selector); + OFFSET(EDOMAIN_event_addr, struct exec_domain, arch.event_address); + OFFSET(EDOMAIN_failsafe_sel, struct exec_domain, arch.failsafe_selector); + OFFSET(EDOMAIN_failsafe_addr, struct exec_domain, arch.failsafe_address); + OFFSET(EDOMAIN_trap_bounce, struct exec_domain, arch.trap_bounce); + OFFSET(EDOMAIN_thread_flags, struct exec_domain, arch.flags); BLANK(); OFFSET(SHINFO_upcall_pending, shared_info_t, diff --git a/xen/arch/x86/x86_64/domain_build.c b/xen/arch/x86/x86_64/domain_build.c index 6b3ec1e86c..bd15fe9eeb 100644 --- a/xen/arch/x86/x86_64/domain_build.c +++ b/xen/arch/x86/x86_64/domain_build.c @@ -1,3 +1,4 @@ +/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */ /****************************************************************************** * domain_build.c * @@ -224,11 +225,11 @@ int construct_dom0(struct domain *d, * We're basically forcing default RPLs to 1, so that our "what privilege * level are we returning to?" logic works. 
*/ - ed->thread.failsafe_selector = FLAT_GUESTOS_CS; - ed->thread.event_selector = FLAT_GUESTOS_CS; - ed->thread.guestos_ss = FLAT_GUESTOS_SS; + ed->arch.failsafe_selector = FLAT_GUESTOS_CS; + ed->arch.event_selector = FLAT_GUESTOS_CS; + ed->arch.guestos_ss = FLAT_GUESTOS_SS; for ( i = 0; i < 256; i++ ) - ed->thread.traps[i].cs = FLAT_GUESTOS_CS; + ed->arch.traps[i].cs = FLAT_GUESTOS_CS; /* WARNING: The new domain must have its 'processor' field filled in! */ phys_to_page(mpt_alloc)->u.inuse.type_info = PGT_l4_page_table; @@ -237,8 +238,8 @@ int construct_dom0(struct domain *d, l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] = mk_l4_pgentry(__pa(l4start) | __PAGE_HYPERVISOR); l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] = - mk_l4_pgentry(__pa(d->mm_perdomain_pt) | __PAGE_HYPERVISOR); - ed->mm.pagetable = mk_pagetable(__pa(l4start)); + mk_l4_pgentry(__pa(d->arch.mm_perdomain_pt) | __PAGE_HYPERVISOR); + ed->arch.pagetable = mk_pagetable(__pa(l4start)); l4tab += l4_table_offset(dsi.v_start); mfn = alloc_start >> PAGE_SHIFT; @@ -329,7 +330,7 @@ int construct_dom0(struct domain *d, /* Install the new page tables. */ __cli(); - write_ptbase(&ed->mm); + write_ptbase(ed); /* Copy the OS image. */ (void)loadelfimage(image_start); @@ -382,7 +383,7 @@ int construct_dom0(struct domain *d, *dst = '\0'; /* Reinstate the caller's page tables. */ - write_ptbase(¤t->mm); + write_ptbase(current); __sti(); /* DOM0 gets access to everything. */ diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c index f557e1fbc0..3dede2e8db 100644 --- a/xen/arch/x86/x86_64/mm.c +++ b/xen/arch/x86/x86_64/mm.c @@ -1,3 +1,4 @@ +/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */ /****************************************************************************** * arch/x86/x86_64/mm.c * @@ -220,7 +221,7 @@ static void __synchronise_pagetables(void *mask) struct exec_domain *ed = current; if ( ((unsigned long)mask & (1 << ed->processor)) && is_idle_task(ed->domain) ) - write_ptbase(&ed->mm); + write_ptbase(ed); } void synchronise_pagetables(unsigned long cpu_mask) { @@ -232,8 +233,8 @@ long do_stack_switch(unsigned long ss, unsigned long esp) { if ( (ss & 3) != 3 ) return -EPERM; - current->thread.guestos_ss = ss; - current->thread.guestos_sp = esp; + current->arch.guestos_ss = ss; + current->arch.guestos_sp = esp; return 0; } @@ -346,9 +347,9 @@ void destroy_gdt(struct exec_domain *ed) for ( i = 0; i < 16; i++ ) { - if ( (pfn = l1_pgentry_to_pagenr(ed->mm.perdomain_ptes[i])) != 0 ) + if ( (pfn = l1_pgentry_to_pagenr(ed->arch.perdomain_ptes[i])) != 0 ) put_page_and_type(&frame_table[pfn]); - ed->mm.perdomain_ptes[i] = mk_l1_pgentry(0); + ed->arch.perdomain_ptes[i] = mk_l1_pgentry(0); } } @@ -402,7 +403,7 @@ long set_gdt(struct exec_domain *ed, /* Install the new GDT. */ for ( i = 0; i < nr_pages; i++ ) - ed->mm.perdomain_ptes[i] = + ed->arch.perdomain_ptes[i] = mk_l1_pgentry((frames[i] << PAGE_SHIFT) | __PAGE_HYPERVISOR); SET_GDT_ADDRESS(ed, GDT_VIRT_START(ed)); @@ -432,7 +433,7 @@ long do_set_gdt(unsigned long *frame_list, unsigned int entries) if ( (ret = set_gdt(current, frames, entries)) == 0 ) { local_flush_tlb(); - __asm__ __volatile__ ("lgdt %0" : "=m" (*current->mm.gdt)); + __asm__ __volatile__ ("lgdt %0" : "=m" (*current->arch.gdt)); } return ret; @@ -461,7 +462,7 @@ long do_update_descriptor( { case PGT_gdt_page: /* Disallow updates of Xen-reserved descriptors in the current GDT. 
*/ - if ( (l1_pgentry_to_pagenr(current->mm.perdomain_ptes[0]) == pfn) && + if ( (l1_pgentry_to_pagenr(current->arch.perdomain_ptes[0]) == pfn) && (((pa&(PAGE_SIZE-1))>>3) >= FIRST_RESERVED_GDT_ENTRY) && (((pa&(PAGE_SIZE-1))>>3) <= LAST_RESERVED_GDT_ENTRY) ) goto out; diff --git a/xen/common/domain.c b/xen/common/domain.c index a77e77384e..d06ed7c491 100644 --- a/xen/common/domain.c +++ b/xen/common/domain.c @@ -39,13 +39,13 @@ struct domain *do_createdomain(domid_t dom_id, unsigned int cpu) atomic_set(&d->refcnt, 1); atomic_set(&ed->pausecnt, 0); - shadow_lock_init(ed); + shadow_lock_init(d); d->id = dom_id; - ed->processor = cpu; + ed->processor = cpu; d->create_time = NOW(); - memcpy(&ed->thread, &idle0_exec_domain.thread, sizeof(ed->thread)); + memcpy(&ed->arch, &idle0_exec_domain.arch, sizeof(ed->arch)); spin_lock_init(&d->time_lock); @@ -327,9 +327,9 @@ long do_boot_vcpu(unsigned long vcpu, full_execution_context_t *ctxt) ed = d->exec_domain[vcpu]; atomic_set(&ed->pausecnt, 0); - shadow_lock_init(ed); + shadow_lock_init(d); - memcpy(&ed->thread, &idle0_exec_domain.thread, sizeof(ed->thread)); + memcpy(&ed->arch, &idle0_exec_domain.arch, sizeof(ed->arch)); arch_do_boot_vcpu(ed); diff --git a/xen/common/physdev.c b/xen/common/physdev.c index 70181dfe50..d6356f4678 100644 --- a/xen/common/physdev.c +++ b/xen/common/physdev.c @@ -172,21 +172,21 @@ int physdev_pci_access_modify( /* Now, setup access to the IO ports and memory regions for the device. */ - if ( ed->thread.io_bitmap == NULL ) + if ( ed->arch.io_bitmap == NULL ) { - if ( (ed->thread.io_bitmap = xmalloc_array(u8, IOBMP_BYTES)) == NULL ) + if ( (ed->arch.io_bitmap = xmalloc_array(u8, IOBMP_BYTES)) == NULL ) { rc = -ENOMEM; goto out; } - memset(ed->thread.io_bitmap, 0xFF, IOBMP_BYTES); + memset(ed->arch.io_bitmap, 0xFF, IOBMP_BYTES); - ed->thread.io_bitmap_sel = ~0ULL; + ed->arch.io_bitmap_sel = ~0ULL; for_each_exec_domain(p, edc) { if (edc == ed) continue; - edc->thread.io_bitmap = ed->thread.io_bitmap; + edc->arch.io_bitmap = ed->arch.io_bitmap; } } @@ -204,8 +204,8 @@ int physdev_pci_access_modify( "for device %s\n", dom, r->start, r->end, pdev->slot_name); for ( j = r->start; j < r->end + 1; j++ ) { - clear_bit(j, ed->thread.io_bitmap); - clear_bit(j / IOBMP_BITS_PER_SELBIT, &ed->thread.io_bitmap_sel); + clear_bit(j, ed->arch.io_bitmap); + clear_bit(j / IOBMP_BITS_PER_SELBIT, &ed->arch.io_bitmap_sel); } } @@ -215,7 +215,7 @@ int physdev_pci_access_modify( for_each_exec_domain(p, edc) { if (edc == ed) continue; - edc->thread.io_bitmap_sel = ed->thread.io_bitmap_sel; + edc->arch.io_bitmap_sel = ed->arch.io_bitmap_sel; } out: diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h index 6eeb8dbd1c..e3b2016cab 100644 --- a/xen/include/asm-x86/domain.h +++ b/xen/include/asm-x86/domain.h @@ -1,11 +1,119 @@ +/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */ -#ifndef __X86_DOMAIN_H__ -#define __X86_DOMAIN_H__ +#ifndef __ASM_DOMAIN_H__ +#define __ASM_DOMAIN_H__ -typedef struct { -} arch_domain_t; +struct trap_bounce { + unsigned long error_code; + unsigned long cr2; + unsigned short flags; /* TBF_ */ + unsigned short cs; + unsigned long eip; +}; -typedef struct { -} arch_exec_domain_t; +struct arch_domain +{ + l1_pgentry_t *mm_perdomain_pt; -#endif /* __X86_DOMAIN_H__ */ + /* shadow mode status and controls */ + unsigned int shadow_mode; /* flags to control shadow table operation */ + spinlock_t shadow_lock; + unsigned long min_pfn; /* min host physical */ + unsigned long max_pfn; /* max host 
physical */ + + /* shadow hashtable */ + struct shadow_status *shadow_ht; + struct shadow_status *shadow_ht_free; + struct shadow_status *shadow_ht_extras; /* extra allocation units */ + unsigned int shadow_extras_count; + + /* shadow dirty bitmap */ + unsigned long *shadow_dirty_bitmap; + unsigned int shadow_dirty_bitmap_size; /* in pages, bit per page */ + + /* shadow mode stats */ + unsigned int shadow_page_count; + unsigned int shadow_fault_count; + unsigned int shadow_dirty_count; + unsigned int shadow_dirty_net_count; + unsigned int shadow_dirty_block_count; +} __cacheline_aligned; + +struct arch_exec_domain +{ + unsigned long guestos_sp; + unsigned long guestos_ss; + + unsigned long flags; /* TF_ */ + + /* Hardware debugging registers */ + unsigned long debugreg[8]; /* %%db0-7 debug registers */ + + /* floating point info */ + struct i387_state i387; + + /* general user-visible register state */ + execution_context_t user_ctxt; + + void (*schedule_tail) (struct exec_domain *); + + /* + * Return vectors pushed to us by guest OS. + * The stack frame for events is exactly that of an x86 hardware interrupt. + * The stack frame for a failsafe callback is augmented with saved values + * for segment registers %ds, %es, %fs and %gs: + * %ds, %es, %fs, %gs, %eip, %cs, %eflags [, %oldesp, %oldss] + */ + unsigned long event_selector; /* entry CS */ + unsigned long event_address; /* entry EIP */ + + unsigned long failsafe_selector; /* entry CS */ + unsigned long failsafe_address; /* entry EIP */ + + /* Bounce information for propagating an exception to guest OS. */ + struct trap_bounce trap_bounce; + + /* I/O-port access bitmap. */ + u64 io_bitmap_sel; /* Selector to tell us which part of the IO bitmap are + * "interesting" (i.e. have clear bits) */ + u8 *io_bitmap; /* Pointer to task's IO bitmap or NULL */ + + /* Trap info. */ +#ifdef ARCH_HAS_FAST_TRAP + int fast_trap_idx; + struct desc_struct fast_trap_desc; +#endif + trap_info_t traps[256]; +#ifdef CONFIG_VMX + struct arch_vmx_struct arch_vmx; /* Virtual Machine Extensions */ +#endif + + /* + * Every domain has a L1 pagetable of its own. Per-domain mappings + * are put in this table (eg. the current GDT is mapped here). + */ + l1_pgentry_t *perdomain_ptes; + pagetable_t pagetable; + + pagetable_t monitor_table; + pagetable_t shadow_table; + l2_pgentry_t *vpagetable; /* virtual address of pagetable */ + l2_pgentry_t *shadow_vtable; /* virtual address of shadow_table */ + l2_pgentry_t *guest_pl2e_cache; /* guest page directory cache */ + + /* Virtual CR2 value. Can be read/written by guest. */ + unsigned long guest_cr2; + + /* Current LDT details. */ + unsigned long ldt_base, ldt_ents, shadow_ldt_mapcnt; + /* Next entry is passed to LGDT on domain switch. */ + char gdt[10]; /* NB. 10 bytes needed for x86_64. Use 6 bytes for x86_32. 
*/ +} __cacheline_aligned; + +#define IDLE0_ARCH_EXEC_DOMAIN \ +{ \ + perdomain_ptes: 0, \ + pagetable: mk_pagetable(__pa(idle_pg_table)) \ +} + +#endif /* __ASM_DOMAIN_H__ */ diff --git a/xen/include/asm-x86/ldt.h b/xen/include/asm-x86/ldt.h index 84c456120c..d44d896de1 100644 --- a/xen/include/asm-x86/ldt.h +++ b/xen/include/asm-x86/ldt.h @@ -1,25 +1,27 @@ +/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */ + #ifndef __ARCH_LDT_H #define __ARCH_LDT_H #ifndef __ASSEMBLY__ -static inline void load_LDT(struct exec_domain *p) +static inline void load_LDT(struct exec_domain *ed) { unsigned int cpu; struct desc_struct *desc; unsigned long ents; - - if ( (ents = p->mm.ldt_ents) == 0 ) + + if ( (ents = ed->arch.ldt_ents) == 0 ) { __asm__ __volatile__ ( "lldt %%ax" : : "a" (0) ); } else { cpu = smp_processor_id(); - desc = (struct desc_struct *)GET_GDT_ADDRESS(p) + __LDT(cpu); - desc->a = ((LDT_VIRT_START(p)&0xffff)<<16) | (ents*8-1); - desc->b = (LDT_VIRT_START(p)&(0xff<<24)) | 0x8200 | - ((LDT_VIRT_START(p)&0xff0000)>>16); + desc = (struct desc_struct *)GET_GDT_ADDRESS(ed) + __LDT(cpu); + desc->a = ((LDT_VIRT_START(ed)&0xffff)<<16) | (ents*8-1); + desc->b = (LDT_VIRT_START(ed)&(0xff<<24)) | 0x8200 | + ((LDT_VIRT_START(ed)&0xff0000)>>16); __asm__ __volatile__ ( "lldt %%ax" : : "a" (__LDT(cpu)<<3) ); } } diff --git a/xen/include/asm-x86/processor.h b/xen/include/asm-x86/processor.h index 1fd7b23287..abbb21e3cf 100644 --- a/xen/include/asm-x86/processor.h +++ b/xen/include/asm-x86/processor.h @@ -1,8 +1,6 @@ -/* - * include/asm-x86/processor.h - * - * Copyright (C) 1994 Linus Torvalds - */ +/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */ + +/* Portions are: Copyright (c) 1994 Linus Torvalds */ #ifndef __ASM_X86_PROCESSOR_H #define __ASM_X86_PROCESSOR_H @@ -380,63 +378,6 @@ struct tss_struct { u8 __cacheline_filler[23]; } __cacheline_aligned PACKED; -struct trap_bounce { - unsigned long error_code; - unsigned long cr2; - unsigned short flags; /* TBF_ */ - unsigned short cs; - unsigned long eip; -}; - -struct thread_struct { - unsigned long guestos_sp; - unsigned long guestos_ss; - - unsigned long flags; /* TF_ */ - - /* Hardware debugging registers */ - unsigned long debugreg[8]; /* %%db0-7 debug registers */ - - /* floating point info */ - struct i387_state i387; - - /* general user-visible register state */ - execution_context_t user_ctxt; - - void (*schedule_tail) (struct exec_domain *); - - /* - * Return vectors pushed to us by guest OS. - * The stack frame for events is exactly that of an x86 hardware interrupt. - * The stack frame for a failsafe callback is augmented with saved values - * for segment registers %ds, %es, %fs and %gs: - * %ds, %es, %fs, %gs, %eip, %cs, %eflags [, %oldesp, %oldss] - */ - unsigned long event_selector; /* entry CS */ - unsigned long event_address; /* entry EIP */ - - unsigned long failsafe_selector; /* entry CS */ - unsigned long failsafe_address; /* entry EIP */ - - /* Bounce information for propagating an exception to guest OS. */ - struct trap_bounce trap_bounce; - - /* I/O-port access bitmap. */ - u64 io_bitmap_sel; /* Selector to tell us which part of the IO bitmap are - * "interesting" (i.e. have clear bits) */ - u8 *io_bitmap; /* Pointer to task's IO bitmap or NULL */ - - /* Trap info. 
*/ -#ifdef ARCH_HAS_FAST_TRAP - int fast_trap_idx; - struct desc_struct fast_trap_desc; -#endif - trap_info_t traps[256]; -#ifdef CONFIG_VMX - struct arch_vmx_struct arch_vmx; /* Virtual Machine Extensions */ -#endif -} __cacheline_aligned; - #define IDT_ENTRIES 256 extern idt_entry_t idt_table[]; extern idt_entry_t *idt_tables[]; @@ -467,91 +408,18 @@ long set_fast_trap(struct exec_domain *p, int idx); #endif -#define INIT_THREAD { 0 } - extern int gpf_emulate_4gb(struct xen_regs *regs); -struct mm_struct { - /* - * Every domain has a L1 pagetable of its own. Per-domain mappings - * are put in this table (eg. the current GDT is mapped here). - */ - l1_pgentry_t *perdomain_ptes; - pagetable_t pagetable; - - pagetable_t monitor_table; - l2_pgentry_t *vpagetable; /* virtual address of pagetable */ - l2_pgentry_t *shadow_vtable; /* virtual address of shadow_table */ - l2_pgentry_t *guest_pl2e_cache; /* guest page directory cache */ - unsigned long min_pfn; /* min host physical */ - unsigned long max_pfn; /* max host physical */ - - /* Virtual CR2 value. Can be read/written by guest. */ - unsigned long guest_cr2; - - /* shadow mode status and controls */ - unsigned int shadow_mode; /* flags to control shadow table operation */ - pagetable_t shadow_table; - spinlock_t shadow_lock; - unsigned int shadow_max_page_count; // currently unused - - /* shadow hashtable */ - struct shadow_status *shadow_ht; - struct shadow_status *shadow_ht_free; - struct shadow_status *shadow_ht_extras; /* extra allocation units */ - unsigned int shadow_extras_count; - - /* shadow dirty bitmap */ - unsigned long *shadow_dirty_bitmap; - unsigned int shadow_dirty_bitmap_size; /* in pages, bit per page */ - - /* shadow mode stats */ - unsigned int shadow_page_count; - unsigned int shadow_fault_count; - unsigned int shadow_dirty_count; - unsigned int shadow_dirty_net_count; - unsigned int shadow_dirty_block_count; - - /* Current LDT details. */ - unsigned long ldt_base, ldt_ents, shadow_ldt_mapcnt; - /* Next entry is passed to LGDT on domain switch. */ - char gdt[10]; /* NB. 10 bytes needed for x86_64. Use 6 bytes for x86_32. */ -}; - -#define SHM_full_32 (8) /* full virtualization for 32-bit */ - -static inline void write_ptbase(struct mm_struct *mm) -{ - unsigned long pa; - -#ifdef CONFIG_VMX - if ( unlikely(mm->shadow_mode) ) { - if (mm->shadow_mode == SHM_full_32) - pa = pagetable_val(mm->monitor_table); - else - pa = pagetable_val(mm->shadow_table); - } -#else - if ( unlikely(mm->shadow_mode) ) - pa = pagetable_val(mm->shadow_table); -#endif - else - pa = pagetable_val(mm->pagetable); - - write_cr3(pa); -} - -#define IDLE0_MM \ -{ \ - perdomain_ptes: 0, \ - pagetable: mk_pagetable(__pa(idle_pg_table)) \ -} +extern void write_ptbase(struct exec_domain *ed); -/* Convenient accessor for mm.gdt. 
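write_ptbase() changes from an inline on mm_struct to an extern taking the exec_domain; its new body is not part of this section. Given the removed inline above and where the fields moved (shadow_mode to struct arch_domain, the pagetables to struct arch_exec_domain), a plausible reconstruction is:

    /* Sketch only: assumes the out-of-line version mirrors the removed inline. */
    void write_ptbase(struct exec_domain *ed)
    {
        struct domain *d = ed->domain;
        unsigned long pa;

        if ( unlikely(d->arch.shadow_mode) )
            pa = (d->arch.shadow_mode == SHM_full_32) ?   /* CONFIG_VMX case */
                pagetable_val(ed->arch.monitor_table) :
                pagetable_val(ed->arch.shadow_table);
        else
            pa = pagetable_val(ed->arch.pagetable);

        write_cr3(pa);
    }

Where SHM_full_32 itself now lives is not shown here; the shadow.h hunks below test it throughout via d->arch.shadow_mode.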
*/ -#define SET_GDT_ENTRIES(_p, _e) ((*(u16 *)((_p)->mm.gdt + 0)) = (((_e)<<3)-1)) -#define SET_GDT_ADDRESS(_p, _a) ((*(unsigned long *)((_p)->mm.gdt + 2)) = (_a)) -#define GET_GDT_ENTRIES(_p) (((*(u16 *)((_p)->mm.gdt + 0))+1)>>3) -#define GET_GDT_ADDRESS(_p) (*(unsigned long *)((_p)->mm.gdt + 2)) +#define SET_GDT_ENTRIES(_p, _e) \ + ((*(u16 *)((_p)->arch.gdt + 0)) = (((_e)<<3)-1)) +#define SET_GDT_ADDRESS(_p, _a) \ + ((*(unsigned long *)((_p)->arch.gdt + 2)) = (_a)) +#define GET_GDT_ENTRIES(_p) \ + (((*(u16 *)((_p)->arch.gdt + 0))+1)>>3) +#define GET_GDT_ADDRESS(_p) \ + (*(unsigned long *)((_p)->arch.gdt + 2)) void destroy_gdt(struct exec_domain *d); long set_gdt(struct exec_domain *d, diff --git a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h index b43b11e583..c95a4de693 100644 --- a/xen/include/asm-x86/shadow.h +++ b/xen/include/asm-x86/shadow.h @@ -1,4 +1,4 @@ -/* -*- Mode:C; c-basic-offset:4; tab-width:4 -*- */ +/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */ #ifndef _XEN_SHADOW_H #define _XEN_SHADOW_H @@ -12,7 +12,7 @@ #define PSH_shadowed (1<<31) /* page has a shadow. PFN points to shadow */ #define PSH_pfn_mask ((1<<21)-1) -/* Shadow PT operation mode : shadowmode variable in mm_struct */ +/* Shadow PT operation mode : shadow-mode variable in arch_domain. */ #define SHM_test (1) /* just run domain on shadow PTs */ #define SHM_logdirty (2) /* log pages that are dirtied */ #define SHM_translate (3) /* lookup machine pages in translation table */ @@ -23,10 +23,10 @@ #define shadow_linear_l2_table ((l2_pgentry_t *)(SH_LINEAR_PT_VIRT_START + \ (SH_LINEAR_PT_VIRT_START >> (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT)))) -#define shadow_mode(_d) ((_d)->mm.shadow_mode) -#define shadow_lock_init(_d) spin_lock_init(&(_d)->mm.shadow_lock) -#define shadow_lock(_m) spin_lock(&(_m)->shadow_lock) -#define shadow_unlock(_m) spin_unlock(&(_m)->shadow_lock) +#define shadow_mode(_d) ((_d)->arch.shadow_mode) +#define shadow_lock_init(_d) spin_lock_init(&(_d)->arch.shadow_lock) +#define shadow_lock(_d) spin_lock(&(_d)->arch.shadow_lock) +#define shadow_unlock(_d) spin_unlock(&(_d)->arch.shadow_lock) extern void shadow_mode_init(void); extern int shadow_mode_control(struct domain *p, dom0_shadow_control_t *sc); @@ -39,18 +39,18 @@ extern void unshadow_table(unsigned long gpfn, unsigned int type); extern int shadow_mode_enable(struct domain *p, unsigned int mode); #ifdef CONFIG_VMX -extern void vmx_shadow_clear_state(struct mm_struct *); -extern void vmx_shadow_invlpg(struct mm_struct *, unsigned long); +extern void vmx_shadow_clear_state(struct domain *); +extern void vmx_shadow_invlpg(struct domain *, unsigned long); #endif -#define __get_machine_to_phys(m, guest_gpfn, gpfn) \ - if ((m)->shadow_mode == SHM_full_32) \ +#define __get_machine_to_phys(_d, guest_gpfn, gpfn) \ + if ((_d)->arch.shadow_mode == SHM_full_32) \ (guest_gpfn) = machine_to_phys_mapping[(gpfn)]; \ else \ (guest_gpfn) = (gpfn); -#define __get_phys_to_machine(m, host_gpfn, gpfn) \ - if ((m)->shadow_mode == SHM_full_32) \ +#define __get_phys_to_machine(_d, host_gpfn, gpfn) \ + if ((_d)->arch.shadow_mode == SHM_full_32) \ (host_gpfn) = phys_to_machine_mapping[(gpfn)]; \ else \ (host_gpfn) = (gpfn); @@ -58,21 +58,21 @@ extern void vmx_shadow_invlpg(struct mm_struct *, unsigned long); extern void __shadow_mode_disable(struct domain *d); static inline void shadow_mode_disable(struct domain *d) { - if ( shadow_mode(d->exec_domain[0]) ) + if ( shadow_mode(d) ) __shadow_mode_disable(d); } extern unsigned 
long shadow_l2_table( - struct mm_struct *m, unsigned long gpfn); + struct domain *d, unsigned long gpfn); -static inline void shadow_invalidate(struct mm_struct *m) { - if (m->shadow_mode != SHM_full_32) +static inline void shadow_invalidate(struct exec_domain *ed) { + if ( ed->domain->arch.shadow_mode != SHM_full_32 ) BUG(); - memset(m->shadow_vtable, 0, PAGE_SIZE); + memset(ed->arch.shadow_vtable, 0, PAGE_SIZE); } -#define SHADOW_DEBUG 0 -#define SHADOW_HASH_DEBUG 0 +#define SHADOW_DEBUG 1 +#define SHADOW_HASH_DEBUG 1 struct shadow_status { unsigned long pfn; /* Guest pfn. */ @@ -94,7 +94,7 @@ printk("DOM%u: (file=shadow.c, line=%d) " _f "\n", \ #if SHADOW_DEBUG #define SH_VLOG(_f, _a...) \ printk("DOM%u: (file=shadow.c, line=%d) " _f "\n", \ - current->id , __LINE__ , ## _a ) + current->domain->id , __LINE__ , ## _a ) #else #define SH_VLOG(_f, _a...) #endif @@ -102,67 +102,64 @@ printk("DOM%u: (file=shadow.c, line=%d) " _f "\n", \ #if 0 #define SH_VVLOG(_f, _a...) \ printk("DOM%u: (file=shadow.c, line=%d) " _f "\n", \ - current->id , __LINE__ , ## _a ) + current->domain->id , __LINE__ , ## _a ) #else #define SH_VVLOG(_f, _a...) #endif -static inline void __shadow_get_pl2e(struct mm_struct *m, - unsigned long va, unsigned long *sl2e) +static inline void __shadow_get_pl2e( + struct exec_domain *ed, unsigned long va, unsigned long *sl2e) { - if (m->shadow_mode == SHM_full_32) { - *sl2e = l2_pgentry_val(m->shadow_vtable[va >> L2_PAGETABLE_SHIFT]); - } - else - *sl2e = l2_pgentry_val(linear_l2_table[va >> L2_PAGETABLE_SHIFT]); + *sl2e = (ed->domain->arch.shadow_mode == SHM_full_32) ? + l2_pgentry_val(ed->arch.shadow_vtable[l2_table_offset(va)]) : + l2_pgentry_val(linear_l2_table[l2_table_offset(va)]); } -static inline void __shadow_set_pl2e(struct mm_struct *m, - unsigned long va, unsigned long value) +static inline void __shadow_set_pl2e( + struct exec_domain *ed, unsigned long va, unsigned long value) { - if (m->shadow_mode == SHM_full_32) { - m->shadow_vtable[va >> L2_PAGETABLE_SHIFT] = mk_l2_pgentry(value); - } + if ( ed->domain->arch.shadow_mode == SHM_full_32 ) + ed->arch.shadow_vtable[l2_table_offset(va)] = mk_l2_pgentry(value); else - linear_l2_table[va >> L2_PAGETABLE_SHIFT] = mk_l2_pgentry(value); + linear_l2_table[l2_table_offset(va)] = mk_l2_pgentry(value); } -static inline void __guest_get_pl2e(struct mm_struct *m, - unsigned long va, unsigned long *l2e) +static inline void __guest_get_pl2e( + struct exec_domain *ed, unsigned long va, unsigned long *l2e) { - if (m->shadow_mode == SHM_full_32) { - *l2e = l2_pgentry_val(m->vpagetable[va >> L2_PAGETABLE_SHIFT]); - } - else - *l2e = l2_pgentry_val(linear_l2_table[va >> L2_PAGETABLE_SHIFT]); + *l2e = (ed->domain->arch.shadow_mode == SHM_full_32) ? 
+ l2_pgentry_val(ed->arch.vpagetable[l2_table_offset(va)]) : + l2_pgentry_val(linear_l2_table[l2_table_offset(va)]); } -static inline void __guest_set_pl2e(struct mm_struct *m, - unsigned long va, unsigned long value) +static inline void __guest_set_pl2e( + struct exec_domain *ed, unsigned long va, unsigned long value) { - if (m->shadow_mode == SHM_full_32) { + if ( ed->domain->arch.shadow_mode == SHM_full_32 ) + { unsigned long pfn; pfn = phys_to_machine_mapping[value >> PAGE_SHIFT]; - m->guest_pl2e_cache[va >> L2_PAGETABLE_SHIFT] = - mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR); + ed->arch.guest_pl2e_cache[l2_table_offset(va)] = + mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR); - m->vpagetable[va >> L2_PAGETABLE_SHIFT] = mk_l2_pgentry(value); + ed->arch.vpagetable[l2_table_offset(va)] = mk_l2_pgentry(value); } else - linear_l2_table[va >> L2_PAGETABLE_SHIFT] = mk_l2_pgentry(value); - + { + linear_l2_table[l2_table_offset(va)] = mk_l2_pgentry(value); + } } /************************************************************************/ -static inline int __mark_dirty( struct mm_struct *m, unsigned int mfn) +static inline int __mark_dirty(struct domain *d, unsigned int mfn) { unsigned long pfn; int rc = 0; - ASSERT(spin_is_locked(&m->shadow_lock)); - ASSERT(m->shadow_dirty_bitmap != NULL); + ASSERT(spin_is_locked(&d->arch.shadow_lock)); + ASSERT(d->arch.shadow_dirty_bitmap != NULL); pfn = machine_to_phys_mapping[mfn]; @@ -174,20 +171,20 @@ static inline int __mark_dirty( struct mm_struct *m, unsigned int mfn) if ( unlikely(pfn & 0x80000000UL) ) return rc; - if ( likely(pfn < m->shadow_dirty_bitmap_size) ) + if ( likely(pfn < d->arch.shadow_dirty_bitmap_size) ) { /* N.B. Can use non-atomic TAS because protected by shadow_lock. */ - if ( !__test_and_set_bit(pfn, m->shadow_dirty_bitmap) ) + if ( !__test_and_set_bit(pfn, d->arch.shadow_dirty_bitmap) ) { - m->shadow_dirty_count++; + d->arch.shadow_dirty_count++; rc = 1; } } #ifndef NDEBUG else if ( mfn < max_page ) { - SH_LOG("mark_dirty OOR! mfn=%x pfn=%lx max=%x (mm %p)", - mfn, pfn, m->shadow_dirty_bitmap_size, m ); + SH_LOG("mark_dirty OOR! 
mfn=%x pfn=%lx max=%x (dom %p)", + mfn, pfn, d->arch.shadow_dirty_bitmap_size, d); SH_LOG("dom=%p caf=%08x taf=%08x\n", page_get_owner(&frame_table[mfn]), frame_table[mfn].count_info, @@ -199,12 +196,12 @@ static inline int __mark_dirty( struct mm_struct *m, unsigned int mfn) } -static inline int mark_dirty(struct mm_struct *m, unsigned int mfn) +static inline int mark_dirty(struct domain *d, unsigned int mfn) { int rc; - shadow_lock(m); - rc = __mark_dirty(m, mfn); - shadow_unlock(m); + shadow_lock(d); + rc = __mark_dirty(d, mfn); + shadow_unlock(d); return rc; } @@ -212,7 +209,7 @@ static inline int mark_dirty(struct mm_struct *m, unsigned int mfn) /************************************************************************/ static inline void l1pte_write_fault( - struct mm_struct *m, unsigned long *gpte_p, unsigned long *spte_p) + struct domain *d, unsigned long *gpte_p, unsigned long *spte_p) { unsigned long gpte = *gpte_p; unsigned long spte = *spte_p; @@ -220,7 +217,7 @@ static inline void l1pte_write_fault( ASSERT(gpte & _PAGE_RW); gpte |= _PAGE_DIRTY | _PAGE_ACCESSED; - switch ( m->shadow_mode ) + switch ( d->arch.shadow_mode ) { case SHM_test: spte = gpte | _PAGE_RW; @@ -228,7 +225,7 @@ static inline void l1pte_write_fault( case SHM_logdirty: spte = gpte | _PAGE_RW; - __mark_dirty(m, gpte >> PAGE_SHIFT); + __mark_dirty(d, gpte >> PAGE_SHIFT); case SHM_full_32: { @@ -247,14 +244,14 @@ static inline void l1pte_write_fault( } static inline void l1pte_read_fault( - struct mm_struct *m, unsigned long *gpte_p, unsigned long *spte_p) + struct domain *d, unsigned long *gpte_p, unsigned long *spte_p) { unsigned long gpte = *gpte_p; unsigned long spte = *spte_p; gpte |= _PAGE_ACCESSED; - switch ( m->shadow_mode ) + switch ( d->arch.shadow_mode ) { case SHM_test: spte = (gpte & _PAGE_DIRTY) ? gpte : (gpte & ~_PAGE_RW); @@ -281,12 +278,13 @@ static inline void l1pte_read_fault( } static inline void l1pte_propagate_from_guest( - struct mm_struct *m, unsigned long *gpte_p, unsigned long *spte_p) + struct domain *d, unsigned long *gpte_p, unsigned long *spte_p) { unsigned long gpte = *gpte_p; unsigned long spte = *spte_p; + unsigned long host_pfn, host_gpte; - switch ( m->shadow_mode ) + switch ( d->arch.shadow_mode ) { case SHM_test: spte = 0; @@ -303,11 +301,10 @@ static inline void l1pte_propagate_from_guest( break; case SHM_full_32: - { - unsigned long host_pfn, host_gpte; spte = 0; - if (mmio_space(gpte & 0xFFFFF000)) { + if ( mmio_space(gpte & 0xFFFFF000) ) + { *spte_p = spte; return; } @@ -317,8 +314,9 @@ static inline void l1pte_propagate_from_guest( if ( (host_gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) == (_PAGE_PRESENT|_PAGE_ACCESSED) ) - spte = (host_gpte & _PAGE_DIRTY) ? host_gpte : (host_gpte & ~_PAGE_RW); - } + spte = (host_gpte & _PAGE_DIRTY) ? 
+ host_gpte : (host_gpte & ~_PAGE_RW); + break; } @@ -327,7 +325,7 @@ static inline void l1pte_propagate_from_guest( } static inline void l2pde_general( - struct mm_struct *m, + struct domain *d, unsigned long *gpde_p, unsigned long *spde_p, unsigned long sl1pfn) @@ -347,7 +345,7 @@ static inline void l2pde_general( if ( (frame_table[sl1pfn].u.inuse.type_info & PGT_type_mask) == PGT_l2_page_table ) { - if (m->shadow_mode != SHM_full_32) + if ( d->arch.shadow_mode != SHM_full_32 ) spde = gpde & ~_PAGE_RW; } @@ -360,14 +358,14 @@ static inline void l2pde_general( /*********************************************************************/ #if SHADOW_HASH_DEBUG -static void shadow_audit(struct mm_struct *m, int print) +static void shadow_audit(struct domain *d, int print) { int live = 0, free = 0, j = 0, abs; struct shadow_status *a; for ( j = 0; j < shadow_ht_buckets; j++ ) { - a = &m->shadow_ht[j]; + a = &d->arch.shadow_ht[j]; if ( a->pfn ) { live++; ASSERT(a->spfn_and_flags & PSH_pfn_mask); } ASSERT(a->pfn < 0x00100000UL); a = a->next; @@ -387,7 +385,7 @@ static void shadow_audit(struct mm_struct *m, int print) ASSERT(live < 9999); } - for ( a = m->shadow_ht_free; a != NULL; a = a->next ) + for ( a = d->arch.shadow_ht_free; a != NULL; a = a->next ) free++; if ( print) @@ -406,24 +404,23 @@ static void shadow_audit(struct mm_struct *m, int print) #endif - static inline struct shadow_status *hash_bucket( - struct mm_struct *m, unsigned int gpfn) + struct domain *d, unsigned int gpfn) { - return &m->shadow_ht[gpfn % shadow_ht_buckets]; + return &d->arch.shadow_ht[gpfn % shadow_ht_buckets]; } static inline unsigned long __shadow_status( - struct mm_struct *m, unsigned int gpfn) + struct domain *d, unsigned int gpfn) { struct shadow_status *p, *x, *head; - x = head = hash_bucket(m, gpfn); + x = head = hash_bucket(d, gpfn); p = NULL; SH_VVLOG("lookup gpfn=%08x bucket=%p", gpfn, x); - shadow_audit(m, 0); + shadow_audit(d, 0); do { @@ -461,11 +458,11 @@ static inline unsigned long __shadow_status( * anyway it's probably not worth being too clever. */ static inline unsigned long get_shadow_status( - struct mm_struct *m, unsigned int gpfn ) + struct domain *d, unsigned int gpfn ) { unsigned long res; - ASSERT(m->shadow_mode); + ASSERT(d->arch.shadow_mode); /* * If we get here we know that some sort of update has happened to the @@ -475,37 +472,37 @@ static inline unsigned long get_shadow_status( * N.B. The VA update path doesn't use this and is handled independently. */ - shadow_lock(m); + shadow_lock(d); - if ( m->shadow_mode == SHM_logdirty ) - __mark_dirty( m, gpfn ); + if ( d->arch.shadow_mode == SHM_logdirty ) + __mark_dirty(d, gpfn); - if ( !(res = __shadow_status(m, gpfn)) ) - shadow_unlock(m); + if ( !(res = __shadow_status(d, gpfn)) ) + shadow_unlock(d); return res; } static inline void put_shadow_status( - struct mm_struct *m) + struct domain *d) { - shadow_unlock(m); + shadow_unlock(d); } static inline void delete_shadow_status( - struct mm_struct *m, unsigned int gpfn) + struct domain *d, unsigned int gpfn) { struct shadow_status *p, *x, *n, *head; - ASSERT(spin_is_locked(&m->shadow_lock)); + ASSERT(spin_is_locked(&d->arch.shadow_lock)); ASSERT(gpfn != 0); - head = hash_bucket(m, gpfn); + head = hash_bucket(d, gpfn); SH_VVLOG("delete gpfn=%08x bucket=%p", gpfn, head); - shadow_audit(m, 0); + shadow_audit(d, 0); /* Match on head item? */ if ( head->pfn == gpfn ) @@ -522,8 +519,8 @@ static inline void delete_shadow_status( /* Add deleted node to the free list. 
 
 static inline void delete_shadow_status(
-    struct mm_struct *m, unsigned int gpfn)
+    struct domain *d, unsigned int gpfn)
 {
     struct shadow_status *p, *x, *n, *head;
 
-    ASSERT(spin_is_locked(&m->shadow_lock));
+    ASSERT(spin_is_locked(&d->arch.shadow_lock));
     ASSERT(gpfn != 0);
 
-    head = hash_bucket(m, gpfn);
+    head = hash_bucket(d, gpfn);
 
     SH_VVLOG("delete gpfn=%08x bucket=%p", gpfn, head);
-    shadow_audit(m, 0);
+    shadow_audit(d, 0);
 
     /* Match on head item? */
     if ( head->pfn == gpfn )
@@ -522,8 +519,8 @@ static inline void delete_shadow_status(
             /* Add deleted node to the free list. */
             n->pfn = 0;
             n->spfn_and_flags = 0;
-            n->next = m->shadow_ht_free;
-            m->shadow_ht_free = n;
+            n->next = d->arch.shadow_ht_free;
+            d->arch.shadow_ht_free = n;
         }
         else
         {
@@ -548,8 +545,8 @@ static inline void delete_shadow_status(
             /* Add deleted node to the free list. */
             x->pfn = 0;
             x->spfn_and_flags = 0;
-            x->next = m->shadow_ht_free;
-            m->shadow_ht_free = x;
+            x->next = d->arch.shadow_ht_free;
+            d->arch.shadow_ht_free = x;
 
             goto found;
         }
@@ -563,24 +560,24 @@ static inline void delete_shadow_status(
     BUG();
 
  found:
-    shadow_audit(m, 0);
+    shadow_audit(d, 0);
 }
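Both unlink paths in delete_shadow_status() recycle a node the same way:
scrub it and push it on the per-domain quicklist (now d->arch.shadow_ht_free)
for set_shadow_status() to reuse. Factored out as a sketch, with a stand-in
type for struct shadow_status:

    struct sketch_status {
        unsigned long pfn;            /* guest frame; 0 marks a free slot */
        unsigned long spfn_and_flags; /* shadow frame plus PSH_* flags    */
        struct sketch_status *next;
    };

    static inline void sketch_free_node(struct sketch_status **free_list,
                                        struct sketch_status *n)
    {
        n->pfn            = 0;           /* scrub: audits see it as free */
        n->spfn_and_flags = 0;
        n->next           = *free_list;  /* LIFO push onto the quicklist */
        *free_list        = n;
    }

The head entry of each bucket lives inline in the hash array (see
hash_bucket() above), which is why the head-match branch recycles a successor
node (n) rather than the head slot itself.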
 
 static inline void set_shadow_status(
-    struct mm_struct *m, unsigned int gpfn, unsigned long s)
+    struct domain *d, unsigned int gpfn, unsigned long s)
 {
     struct shadow_status *x, *head, *extra;
     int i;
 
-    ASSERT(spin_is_locked(&m->shadow_lock));
+    ASSERT(spin_is_locked(&d->arch.shadow_lock));
     ASSERT(gpfn != 0);
     ASSERT(s & PSH_shadowed);
 
-    x = head = hash_bucket(m, gpfn);
+    x = head = hash_bucket(d, gpfn);
 
     SH_VVLOG("set gpfn=%08x s=%08lx bucket=%p(%p)", gpfn, s, x, x->next);
-    shadow_audit(m, 0);
+    shadow_audit(d, 0);
 
     /*
      * STEP 1. If page is already in the table, update it in place.
@@ -612,7 +609,7 @@ static inline void set_shadow_status(
     }
 
     /* We need to allocate a new node. Ensure the quicklist is non-empty. */
-    if ( unlikely(m->shadow_ht_free == NULL) )
+    if ( unlikely(d->arch.shadow_ht_free == NULL) )
     {
         SH_LOG("Allocate more shadow hashtable blocks.");
@@ -626,10 +623,10 @@
         memset(extra, 0, sizeof(void *) + (shadow_ht_extra_size * sizeof(*x)));
 
         /* Record the allocation block so it can be correctly freed later. */
-        m->shadow_extras_count++;
+        d->arch.shadow_extras_count++;
         *((struct shadow_status **)&extra[shadow_ht_extra_size]) =
-            m->shadow_ht_extras;
-        m->shadow_ht_extras = &extra[0];
+            d->arch.shadow_ht_extras;
+        d->arch.shadow_ht_extras = &extra[0];
 
         /* Thread a free chain through the newly-allocated nodes. */
         for ( i = 0; i < (shadow_ht_extra_size - 1); i++ )
@@ -637,12 +634,12 @@
         extra[i].next = NULL;
 
         /* Add the new nodes to the free list. */
-        m->shadow_ht_free = &extra[0];
+        d->arch.shadow_ht_free = &extra[0];
     }
 
     /* Allocate a new node from the quicklist. */
-    x = m->shadow_ht_free;
-    m->shadow_ht_free = x->next;
+    x = d->arch.shadow_ht_free;
+    d->arch.shadow_ht_free = x->next;
 
     /* Initialise the new node and insert directly after the head item. */
     x->pfn = gpfn;
@@ -651,50 +648,51 @@
     head->next = x;
 
  done:
-    shadow_audit(m, 0);
+    shadow_audit(d, 0);
 }
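The refill logic in set_shadow_status() allocates nodes a block at a time and
hides one extra pointer past the end of each block, chaining whole blocks
together so they can all be freed at domain teardown. A self-contained sketch
of the same trick (userspace stand-ins: malloc/memset instead of Xen's
allocator, and SKETCH_EXTRA_SIZE instead of shadow_ht_extra_size):

    #include <stdlib.h>
    #include <string.h>

    #define SKETCH_EXTRA_SIZE 128   /* nodes per allocation block */

    struct sketch_status {
        unsigned long pfn, spfn_and_flags;
        struct sketch_status *next;
    };

    static struct sketch_status *
    sketch_refill(struct sketch_status **free_list,
                  struct sketch_status **blocks)
    {
        int i;
        struct sketch_status *extra =
            malloc(sizeof(void *) + SKETCH_EXTRA_SIZE * sizeof(*extra));

        if ( extra == NULL )
            return NULL;
        memset(extra, 0, sizeof(void *) + SKETCH_EXTRA_SIZE * sizeof(*extra));

        /* The hidden pointer after the last node links this block to the
         * previous ones, so teardown can walk and free every block. */
        *((struct sketch_status **)&extra[SKETCH_EXTRA_SIZE]) = *blocks;
        *blocks = extra;

        /* Thread a free chain through the new nodes and publish it. */
        for ( i = 0; i < (SKETCH_EXTRA_SIZE - 1); i++ )
            extra[i].next = &extra[i+1];
        extra[i].next = NULL;
        *free_list = &extra[0];

        return extra;
    }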
 
 #ifdef CONFIG_VMX
 #include <asm/domain_page.h>
 
 static inline void vmx_update_shadow_state(
-    struct mm_struct *mm, unsigned long gpfn, unsigned long spfn)
+    struct exec_domain *ed, unsigned long gpfn, unsigned long spfn)
 {
     l2_pgentry_t *mpl2e = 0;
     l2_pgentry_t *gpl2e, *spl2e;
 
     /* unmap the old mappings */
-    if (mm->shadow_vtable)
-        unmap_domain_mem(mm->shadow_vtable);
-    if (mm->vpagetable)
-        unmap_domain_mem(mm->vpagetable);
+    if ( ed->arch.shadow_vtable )
+        unmap_domain_mem(ed->arch.shadow_vtable);
+    if ( ed->arch.vpagetable )
+        unmap_domain_mem(ed->arch.vpagetable);
 
     /* new mapping */
-    mpl2e = (l2_pgentry_t *)
-        map_domain_mem(pagetable_val(mm->monitor_table));
+    mpl2e = (l2_pgentry_t *)
+        map_domain_mem(pagetable_val(ed->arch.monitor_table));
 
-    mpl2e[SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
+    mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] =
         mk_l2_pgentry((spfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
     __flush_tlb_one(SH_LINEAR_PT_VIRT_START);
 
-    spl2e = (l2_pgentry_t *) map_domain_mem(spfn << PAGE_SHIFT);
-    gpl2e = (l2_pgentry_t *) map_domain_mem(gpfn << PAGE_SHIFT);
+    spl2e = (l2_pgentry_t *)map_domain_mem(spfn << PAGE_SHIFT);
+    gpl2e = (l2_pgentry_t *)map_domain_mem(gpfn << PAGE_SHIFT);
     memset(spl2e, 0, ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));
 
-    mm->shadow_table = mk_pagetable(spfn<<PAGE_SHIFT);
-    mm->shadow_vtable = spl2e;
-    mm->vpagetable = gpl2e; /* expect the guest did clean this up */
+    ed->arch.shadow_table = mk_pagetable(spfn<<PAGE_SHIFT);
+    ed->arch.shadow_vtable = spl2e;
+    ed->arch.vpagetable = gpl2e; /* expect the guest did clean this up */
     unmap_domain_mem(mpl2e);
 }
 
-static inline void __shadow_mk_pagetable( struct mm_struct *mm )
+static inline void __shadow_mk_pagetable(struct exec_domain *ed)
 {
-    unsigned long gpfn = pagetable_val(mm->pagetable) >> PAGE_SHIFT;
+    struct domain *d = ed->domain;
+    unsigned long gpfn = pagetable_val(ed->arch.pagetable) >> PAGE_SHIFT;
     unsigned long spfn;
 
     SH_VLOG("0: __shadow_mk_pagetable(gpfn=%08lx\n", gpfn);
 
-    if (mm->shadow_mode == SHM_full_32)
+    if (d->arch.shadow_mode == SHM_full_32)
     {
         unsigned long guest_gpfn;
         guest_gpfn = machine_to_phys_mapping[gpfn];
@@ -702,59 +700,59 @@ static inline void __shadow_mk_pagetable( struct mm_struct *mm )
         SH_VVLOG("__shadow_mk_pagetable(guest_gpfn=%08lx, gpfn=%08lx\n",
                  guest_gpfn, gpfn);
 
-        spfn = __shadow_status(mm, guest_gpfn) & PSH_pfn_mask;
+        spfn = __shadow_status(d, guest_gpfn) & PSH_pfn_mask;
         if ( unlikely(spfn == 0) )
         {
-            spfn = shadow_l2_table(mm, gpfn);
-            mm->shadow_table = mk_pagetable(spfn<<PAGE_SHIFT);
+            spfn = shadow_l2_table(d, gpfn);
+            ed->arch.shadow_table = mk_pagetable(spfn<<PAGE_SHIFT);
         }
         else
         {
-            vmx_update_shadow_state(mm, gpfn, spfn);
+            vmx_update_shadow_state(ed, gpfn, spfn);
         }
     }
     else
     {
-        spfn = __shadow_status(mm, gpfn) & PSH_pfn_mask;
+        spfn = __shadow_status(d, gpfn) & PSH_pfn_mask;
 
         if ( unlikely(spfn == 0) )
         {
-            spfn = shadow_l2_table(mm, gpfn);
+            spfn = shadow_l2_table(d, gpfn);
         }
-        mm->shadow_table = mk_pagetable(spfn<<PAGE_SHIFT);
+        ed->arch.shadow_table = mk_pagetable(spfn<<PAGE_SHIFT);
     }
 }
 
 #else
 
-static inline void __shadow_mk_pagetable(struct mm_struct *mm)
+static inline void __shadow_mk_pagetable(struct exec_domain *ed)
 {
-    unsigned long gpfn = pagetable_val(mm->pagetable) >> PAGE_SHIFT;
-    unsigned long spfn = __shadow_status(mm, gpfn);
+    unsigned long gpfn = pagetable_val(ed->arch.pagetable) >> PAGE_SHIFT;
+    unsigned long spfn = __shadow_status(ed->domain, gpfn);
 
     if ( unlikely(spfn == 0) )
-        spfn = shadow_l2_table(mm, gpfn);
+        spfn = shadow_l2_table(ed->domain, gpfn);
 
-    mm->shadow_table = mk_pagetable(spfn << PAGE_SHIFT);
+    ed->arch.shadow_table = mk_pagetable(spfn << PAGE_SHIFT);
 }
 #endif /* CONFIG_VMX */
 
-static inline void shadow_mk_pagetable(struct mm_struct *mm)
+static inline void shadow_mk_pagetable(struct exec_domain *ed)
 {
-    if ( unlikely(mm->shadow_mode) )
+    if ( unlikely(ed->domain->arch.shadow_mode) )
     {
         SH_VVLOG("shadow_mk_pagetable( gptbase=%08lx, mode=%d )",
-                 pagetable_val(mm->pagetable), mm->shadow_mode );
-
-        shadow_lock(mm);
-        __shadow_mk_pagetable(mm);
-        shadow_unlock(mm);
-
-        SH_VVLOG("leaving shadow_mk_pagetable:\n");
-
-        SH_VVLOG("( gptbase=%08lx, mode=%d ) sh=%08lx",
-                 pagetable_val(mm->pagetable), mm->shadow_mode,
-                 pagetable_val(mm->shadow_table) );
-
-    }
+                 pagetable_val(ed->arch.pagetable),
+                 ed->domain->arch.shadow_mode);
+
+        shadow_lock(ed->domain);
+        __shadow_mk_pagetable(ed);
+        shadow_unlock(ed->domain);
+
+        SH_VVLOG("leaving shadow_mk_pagetable:\n"
+                 "( gptbase=%08lx, mode=%d ) sh=%08lx",
+                 pagetable_val(ed->arch.pagetable),
+                 ed->domain->arch.shadow_mode,
+                 pagetable_val(ed->arch.shadow_table) );
+    }
 }
 
 #if SHADOW_DEBUG
-extern int check_pagetable(struct mm_struct *m, pagetable_t pt, char *s);
+extern int check_pagetable(struct domain *d, pagetable_t pt, char *s);
 #else
-#define check_pagetable(m, pt, s) ((void)0)
+#define check_pagetable(d, pt, s) ((void)0)
 #endif
 
 #endif /* XEN_SHADOW_H */
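One change in vmx_update_shadow_state() above is purely cosmetic and worth
flagging as such: indexing the monitor table with l2_table_offset() instead of
an open-coded shift. Assuming the usual two-level x86-32 definition, the two
forms are identical:

    /* Illustrative; the real macro lives in the pagetable headers. */
    #define l2_table_offset(a) ((a) >> L2_PAGETABLE_SHIFT)

    /* hence:
     *    mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)]
     * == mpl2e[SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT]
     */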
diff --git a/xen/include/asm-x86/vmx_vmcs.h b/xen/include/asm-x86/vmx_vmcs.h
index 40d315d609..6ee5137e74 100644
--- a/xen/include/asm-x86/vmx_vmcs.h
+++ b/xen/include/asm-x86/vmx_vmcs.h
@@ -1,3 +1,4 @@
+/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
 /*
  * vmx_vmcs.h: VMCS related definitions
  * Copyright (c) 2004, Intel Corporation.
@@ -59,7 +60,7 @@ struct arch_vmx_struct {
 #define vmx_schedule_tail(next) \
     (next)->thread.arch_vmx.arch_vmx_schedule_tail((next))
 
-#define VMX_DOMAIN(d) d->thread.arch_vmx.flags
+#define VMX_DOMAIN(d) d->arch.arch_vmx.flags
 
 #define ARCH_VMX_VMCS_LOADED 0 /* VMCS has been loaded and active */
 #define ARCH_VMX_VMCS_LAUNCH 1 /* Needs VMCS launch */
diff --git a/xen/include/asm-x86/x86_32/current.h b/xen/include/asm-x86/x86_32/current.h
index 42d01ee134..3c254191ba 100644
--- a/xen/include/asm-x86/x86_32/current.h
+++ b/xen/include/asm-x86/x86_32/current.h
@@ -1,3 +1,5 @@
+/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
+
 #ifndef _X86_CURRENT_H
 #define _X86_CURRENT_H
 
@@ -50,6 +52,6 @@ static inline unsigned long get_stack_top(void)
         "movl %0,%%esp; jmp "STR(__fn) \
         : : "r" (get_execution_context()) )
 
-#define schedule_tail(_d) ((_d)->thread.schedule_tail)(_d)
+#define schedule_tail(_ed) ((_ed)->arch.schedule_tail)(_ed)
 
 #endif /* _X86_CURRENT_H */
diff --git a/xen/include/asm-x86/x86_64/current.h b/xen/include/asm-x86/x86_64/current.h
index 7d8904a607..0442f2db0f 100644
--- a/xen/include/asm-x86/x86_64/current.h
+++ b/xen/include/asm-x86/x86_64/current.h
@@ -1,3 +1,5 @@
+/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
+
 #ifndef _X86_64_CURRENT_H
 #define _X86_64_CURRENT_H
 
@@ -44,6 +46,6 @@ static inline unsigned long get_stack_top(void)
         "movq %0,%%rsp; jmp "STR(__fn) \
         : : "r" (get_execution_context()) )
 
-#define schedule_tail(_d) ((_d)->thread.schedule_tail)(_d)
+#define schedule_tail(_ed) ((_ed)->arch.schedule_tail)(_ed)
 
 #endif /* !(_X86_64_CURRENT_H) */
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 5a875589fc..be6470c66f 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -1,3 +1,5 @@
+/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
+
 #ifndef __SCHED_H__
 #define __SCHED_H__
 
@@ -70,12 +72,8 @@ struct exec_domain
 
 #ifdef ARCH_HAS_EXEC_DOMAIN_MM_PTR
     struct mm_struct *mm;
-#else
-    struct mm_struct mm;
 #endif
 
-    struct thread_struct thread;
-
     struct ac_timer timer;   /* one-shot timer for timeout values */
     s_time_t        lastschd; /* time this domain was last scheduled */
@@ -89,8 +87,8 @@ struct exec_domain
     u16 virq_to_evtchn[NR_VIRQS];
 
     atomic_t pausecnt;
 
-    arch_exec_domain_t arch;
+    struct arch_exec_domain arch;
 };
 
 /*
@@ -107,7 +105,8 @@
 #define UNLOCK_BIGLOCK(_d) spin_unlock_recursive(&(_d)->big_lock)
 #endif
 
-struct domain {
+struct domain
+{
     domid_t  id;
     s_time_t create_time;
@@ -116,8 +115,6 @@ struct domain {
 
     spinlock_t big_lock;
 
-    l1_pgentry_t *mm_perdomain_pt;
-
     spinlock_t       page_alloc_lock; /* protects all the following fields  */
     struct list_head page_list;       /* linked list, of size tot_pages     */
     struct list_head xenpage_list;    /* linked list, of size xenheap_pages */
@@ -157,7 +154,8 @@ struct domain {
     atomic_t refcnt;
 
     struct exec_domain *exec_domain[MAX_VIRT_CPUS];
-    arch_domain_t arch;
+
+    struct arch_domain arch;
 };
 
 struct domain_setup_info
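Taken together with the shadow.h changes, the sched.h hunks complete the split
this patch is making: generic, scheduler-visible state stays in struct domain
and struct exec_domain, and everything architecture-specific now hangs off a
single arch member. In outline (fields abridged to those visible in this diff;
the full struct arch_domain / struct arch_exec_domain definitions live in the
per-arch headers):

    struct domain {                       /* per-domain, generic */
        domid_t             id;
        /* ... page lists, locks, event state ... */
        struct exec_domain *exec_domain[MAX_VIRT_CPUS];
        struct arch_domain  arch;         /* shadow_mode, shadow_lock,
                                           * shadow_ht, shadow_ht_free, ... */
    };

    struct exec_domain {                  /* per-vcpu, generic */
        struct domain          *domain;   /* back-pointer, as ed->domain  */
        struct ac_timer         timer;
        /* ... scheduling fields ... */
        struct arch_exec_domain arch;     /* pagetable, shadow_table,
                                           * monitor_table, schedule_tail */
    };

This is also why the shadow helpers above split their parameters: per-domain
shadow bookkeeping takes a struct domain *, while anything touching a vcpu's
active pagetables takes a struct exec_domain *.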