/*
* Copyright (C) 1995 Linus Torvalds
*
* Pentium III FXSR, SSE support
* Gareth Hughes <gareth@valinux.com>, May 2000
*
* Copyright (C) 2005 Intel Co
* Kun Tian (Kevin Tian) <kevin.tian@intel.com>
*
* 05/04/29 Kun Tian (Kevin Tian) <kevin.tian@intel.com> Add VTI domain support
*
* Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
* dom0 vp model support
*/
#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <xen/delay.h>
#include <xen/softirq.h>
#include <xen/mm.h>
#include <xen/iocap.h>
#include <asm/asm-xsi-offsets.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <xen/event.h>
#include <xen/console.h>
#include <xen/version.h>
#include <xen/libelf.h>
#include <asm/pgalloc.h>
#include <asm/offsets.h> /* for IA64_THREAD_INFO_SIZE */
#include <asm/vcpu.h> /* for function declarations */
#include <public/xen.h>
#include <xen/domain.h>
#include <asm/vmx.h>
#include <asm/vmx_vcpu.h>
#include <asm/vmx_vpd.h>
#include <asm/vmx_phy_mode.h>
#include <asm/vmx_vcpu_save.h>
#include <asm/vhpt.h>
#include <asm/vcpu.h>
#include <asm/tlbflush.h>
#include <asm/regionreg.h>
#include <asm/dom_fw.h>
#include <asm/shadow.h>
#include <xen/guest_access.h>
#include <asm/tlb_track.h>
#include <asm/perfmon.h>
#include <asm/sal.h>
#include <public/vcpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <asm/debugger.h>

/* dom0_size: default memory allocation for dom0 (~4GB) */
static unsigned long __initdata dom0_size = 4096UL*1024UL*1024UL;

/* dom0_max_vcpus: maximum number of VCPUs to create for dom0. */
static unsigned int __initdata dom0_max_vcpus = 4;
integer_param("dom0_max_vcpus", dom0_max_vcpus);

extern char dom0_command_line[];

/* forward declaration */
static void init_switch_stack(struct vcpu *v);

/* Address of vpsr.i (in fact evtchn_upcall_mask) of the current vcpu.
   This is a Xen virtual address. */
DEFINE_PER_CPU(uint8_t *, current_psr_i_addr);
DEFINE_PER_CPU(int *, current_psr_ic_addr);
DEFINE_PER_CPU(struct vcpu *, fp_owner);
#include <xen/sched-if.h>
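
/*
 * Track which vcpu last ran on this physical CPU and flush the local
 * VHPT and TLB when the incoming vcpu may find stale translations left
 * there by another vcpu, or by an earlier run of the same vcpu on a
 * different processor.
 */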
static void flush_vtlb_for_context_switch(struct vcpu* prev, struct vcpu* next)
{
    int cpu = smp_processor_id();
    int last_vcpu_id, last_processor;

    if (!is_idle_domain(prev->domain))
        tlbflush_update_time
            (&prev->domain->arch.last_vcpu[cpu].tlbflush_timestamp,
             tlbflush_current_time());

    if (is_idle_domain(next->domain))
        return;

    last_vcpu_id = next->domain->arch.last_vcpu[cpu].vcpu_id;
    last_processor = next->arch.last_processor;
    next->domain->arch.last_vcpu[cpu].vcpu_id = next->vcpu_id;
    next->arch.last_processor = cpu;

    if ((last_vcpu_id != next->vcpu_id &&
         last_vcpu_id != INVALID_VCPU_ID) ||
        (last_vcpu_id == next->vcpu_id &&
         last_processor != cpu &&
         last_processor != INVALID_PROCESSOR)) {
#ifdef CONFIG_XEN_IA64_TLBFLUSH_CLOCK
        u32 last_tlbflush_timestamp =
            next->domain->arch.last_vcpu[cpu].tlbflush_timestamp;
#endif
        int vhpt_is_flushed = 0;

        // If the vTLB implementation is changed,
        // the following must be updated as well.
        if (VMX_DOMAIN(next)) {
            // Currently the vTLB for a VT-i domain is per vcpu,
            // so no flushing is needed.
        } else if (HAS_PERVCPU_VHPT(next->domain)) {
            // nothing to do
        } else {
            if (NEED_FLUSH(__get_cpu_var(vhpt_tlbflush_timestamp),
                           last_tlbflush_timestamp)) {
                local_vhpt_flush();
                vhpt_is_flushed = 1;
            }
        }
        if (vhpt_is_flushed || NEED_FLUSH(__get_cpu_var(tlbflush_time),
                                          last_tlbflush_timestamp)) {
            local_flush_tlb_all();
            perfc_incr(tlbflush_clock_cswitch_purge);
        } else {
            perfc_incr(tlbflush_clock_cswitch_skip);
        }
        perfc_incr(flush_vtlb_for_context_switch);
    }
}
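
/*
 * If this CPU is marked in cpu_cache_coherent_map, flush its caches
 * via PAL_CACHE_FLUSH (cache_type 4) before running the incoming vcpu.
 */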
static void flush_cache_for_context_switch(struct vcpu *next)
{
    extern cpumask_t cpu_cache_coherent_map;
    int cpu = smp_processor_id();

    if (is_idle_vcpu(next) ||
        __test_and_clear_bit(cpu, &next->arch.cache_coherent_map)) {
        if (cpu_test_and_clear(cpu, cpu_cache_coherent_map)) {
            unsigned long flags;
            u64 progress = 0;
            s64 status;

            local_irq_save(flags);
            status = ia64_pal_cache_flush(4, 0, &progress, NULL);
            local_irq_restore(flags);
            if (status != 0)
                panic_domain(NULL, "PAL_CACHE_FLUSH ERROR, "
                             "cache_type=4 status %lx", status);
        }
    }
}
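
/*
 * Cache per-CPU pointers to the vcpu's virtual psr.i (in fact the
 * evtchn_upcall_mask byte) and psr.ic fields in the mapped
 * shared_info/XSI area.
 */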
static void set_current_psr_i_addr(struct vcpu* v)
{
    __ia64_per_cpu_var(current_psr_i_addr) =
        (uint8_t *)(v->domain->arch.shared_info_va +
                    INT_ENABLE_OFFSET(v));
    __ia64_per_cpu_var(current_psr_ic_addr) = (int *)
        (v->domain->arch.shared_info_va + XSI_PSR_IC_OFS);
}
static void clear_current_psr_i_addr(void)
{
    __ia64_per_cpu_var(current_psr_i_addr) = NULL;
    __ia64_per_cpu_var(current_psr_ic_addr) = NULL;
}
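
/*
 * Eagerly save the outgoing vcpu's high FP registers if it modified
 * them (psr.mfh set), then set psr.dfh for the incoming vcpu so its FP
 * state is only restored lazily, on its first FP access.
 */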
static void lazy_fp_switch(struct vcpu *prev, struct vcpu *next)
{
    /*
     * Implement eager save, lazy restore.
     */
    if (!is_idle_vcpu(prev)) {
        if (VMX_DOMAIN(prev)) {
            if (FP_PSR(prev) & IA64_PSR_MFH) {
                __ia64_save_fpu(prev->arch._thread.fph);
                __ia64_per_cpu_var(fp_owner) = prev;
            }
        } else {
            if (PSCB(prev, hpsr_mfh)) {
                __ia64_save_fpu(prev->arch._thread.fph);
                __ia64_per_cpu_var(fp_owner) = prev;
            }
        }
    }

    if (!is_idle_vcpu(next)) {
        if (VMX_DOMAIN(next)) {
            FP_PSR(next) = IA64_PSR_DFH;
            vcpu_regs(next)->cr_ipsr |= IA64_PSR_DFH;
        } else {
            PSCB(next, hpsr_dfh) = 1;
            PSCB(next, hpsr_mfh) = 0;
            vcpu_regs(next)->cr_ipsr |= IA64_PSR_DFH;
        }
    }
}
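
/* Load per-vcpu machine state: region registers, PTA, kernel registers
   and (when in use) protection key registers. */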
static void load_state(struct vcpu *v)
{
    load_region_regs(v);
    ia64_set_pta(vcpu_pta(v));
    vcpu_load_kernel_regs(v);
    if (vcpu_pkr_in_use(v))
        vcpu_pkr_load_regs(v);
    set_current_psr_i_addr(v);
}
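
/*
 * Finish a context switch on the newly resumed vcpu: commit the saved
 * state of prev, restore the VMX or PV state of current, and flush the
 * vTLB if necessary.
 */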
void schedule_tail(struct vcpu *prev)
{
    extern char ia64_ivt;

    context_saved(prev);

    if (VMX_DOMAIN(current))
        vmx_do_resume(current);
    else {
        if (VMX_DOMAIN(prev))
            ia64_set_iva(&ia64_ivt);
        load_state(current);
        migrate_timer(&current->arch.hlt_timer, current->processor);
    }
    flush_vtlb_for_context_switch(prev, current);
}
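
/* Switch from prev to next; runs with interrupts disabled (spsr is
   saved and restored around the switch). */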
void context_switch(struct vcpu *prev, struct vcpu *next)
{
    uint64_t spsr;

    local_irq_save(spsr);

    if (VMX_DOMAIN(prev)) {