/*
* Virtualized CPU functions
*
* Copyright (C) 2004-2005 Hewlett-Packard Co.
* Dan Magenheimer (dan.magenheimer@hp.com)
*
*/
#include <linux/sched.h>
#include <public/xen.h>
#include <xen/mm.h>
#include <asm/ia64_int.h>
#include <asm/vcpu.h>
#include <asm/regionreg.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/vmx_vcpu.h>
#include <asm/vhpt.h>
#include <asm/tlbflush.h>
#include <asm/privop.h>
#include <xen/event.h>
#include <asm/vmx_phy_mode.h>
#include <asm/bundle.h>
#include <asm/privop_stat.h>
#include <asm/uaccess.h>
#include <asm/p2m_entry.h>
#include <asm/tlb_track.h>
/* FIXME: where should these declarations live? */
extern void getreg(unsigned long regnum, unsigned long *val, int *nat,
struct pt_regs *regs);
extern void setreg(unsigned long regnum, unsigned long val, int nat,
struct pt_regs *regs);
extern void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
struct pt_regs *regs);
extern void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
struct pt_regs *regs);
typedef union {
struct ia64_psr ia64_psr;
unsigned long i64;
} PSR;
// this def for vcpu_regs won't work if kernel stack is present
//#define vcpu_regs(vcpu) ((struct pt_regs *) vcpu->arch.regs
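/* Field positions within the architected cr.pta register: pta.size lives
   at bits 2-7, pta.vf (long format VHPT) at bit 8 and pta.base at bits
   15-63. */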
#define IA64_PTA_SZ_BIT 2
#define IA64_PTA_VF_BIT 8
#define IA64_PTA_BASE_BIT 15
#define IA64_PTA_LFMT (1UL << IA64_PTA_VF_BIT)
#define IA64_PTA_SZ(x) (x##UL << IA64_PTA_SZ_BIT)
unsigned long vcpu_verbose = 0;
/**************************************************************************
VCPU general register access routines
**************************************************************************/
#ifdef XEN
u64 vcpu_get_gr(VCPU * vcpu, unsigned long reg)
{
REGS *regs = vcpu_regs(vcpu);
u64 val;
if (!reg)
return 0;
getreg(reg, &val, 0, regs); // FIXME: handle NATs later
return val;
}
IA64FAULT vcpu_get_gr_nat(VCPU * vcpu, unsigned long reg, u64 * val)
{
REGS *regs = vcpu_regs(vcpu);
int nat;
getreg(reg, val, &nat, regs); // FIXME: handle NATs later
if (nat)
return IA64_NAT_CONSUMPTION_VECTOR;
return 0;
}
// returns:
// IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
// IA64_NO_FAULT otherwise
IA64FAULT vcpu_set_gr(VCPU * vcpu, unsigned long reg, u64 value, int nat)
{
REGS *regs = vcpu_regs(vcpu);
long sof = (regs->cr_ifs) & 0x7f; // size of current register frame (cr.ifs.sof)
if (!reg)
return IA64_ILLOP_FAULT;
if (reg >= sof + 32)
return IA64_ILLOP_FAULT;
setreg(reg, value, nat, regs); // FIXME: handle NATs later
return IA64_NO_FAULT;
}
IA64FAULT
vcpu_get_fpreg(VCPU * vcpu, unsigned long reg, struct ia64_fpreg * val)
{
REGS *regs = vcpu_regs(vcpu);
getfpreg(reg, val, regs); // FIXME: handle NATs later
return IA64_NO_FAULT;
}
IA64FAULT
vcpu_set_fpreg(VCPU * vcpu, unsigned long reg, struct ia64_fpreg * val)
{
REGS *regs = vcpu_regs(vcpu);
/* f0 and f1 are architecturally fixed; writes to them are ignored */
if (reg > 1)
setfpreg(reg, val, regs); // FIXME: handle NATs later
return IA64_NO_FAULT;
}
#else
// returns:
// IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
// IA64_NO_FAULT otherwise
IA64FAULT vcpu_set_gr(VCPU * vcpu, unsigned long reg, u64 value)
{
REGS *regs = vcpu_regs(vcpu);
long sof = (regs->cr_ifs) & 0x7f;
if (!reg)
return IA64_ILLOP_FAULT;
if (reg >= sof + 32)
return IA64_ILLOP_FAULT;
setreg(reg, value, 0, regs); // FIXME: handle NATs later
return IA64_NO_FAULT;
}
#endif
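/* Set up the initial register state of a newly created vcpu, for both
   VMX (fully virtualized) and paravirtualized domains. */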
void vcpu_init_regs(struct vcpu *v)
{
struct pt_regs *regs;
regs = vcpu_regs(v);
if (VMX_DOMAIN(v)) {
/* dt/rt/it:1; i/ic:1, si:1, vm/bn:1, ac:1 */
/* should be expanded as a macro */
regs->cr_ipsr = 0x501008826008;
/* lazy fp */
FP_PSR(v) = IA64_PSR_DFH;
regs->cr_ipsr |= IA64_PSR_DFH;
} else {
regs->cr_ipsr = ia64_getreg(_IA64_REG_PSR)
| IA64_PSR_BITS_TO_SET | IA64_PSR_BN;
regs->cr_ipsr &= ~(IA64_PSR_BITS_TO_CLEAR
| IA64_PSR_RI | IA64_PSR_IS);
// domain runs at PL2
regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT;
// lazy fp
PSCB(v, hpsr_dfh) = 1;
PSCB(v, hpsr_mfh) = 0;
regs->cr_ipsr |= IA64_PSR_DFH;
}
regs->cr_ifs = 1UL << 63; /* or clear? */
regs->ar_fpsr = FPSR_DEFAULT;
if (VMX_DOMAIN(v)) {
vmx_init_all_rr(v);
/* Virtual processor context setup */
VCPU(v, vpsr) = IA64_PSR_BN;
VCPU(v, dcr) = 0;
} else {
init_all_rr(v);
regs->ar_rsc |= (2 << 2); /* force PL2/3 */
VCPU(v, banknum) = 1;
VCPU(v, metaphysical_mode) = 1;
VCPU(v, interrupt_mask_addr) =
(unsigned char *)v->domain->arch.shared_info_va +
INT_ENABLE_OFFSET(v);
VCPU(v, itv) = (1 << 16); /* timer vector masked */
/* SAL specification 3.2.4 */
VCPU(v, vpsr) = IA64_PSR_AC | IA64_PSR_IC | IA64_PSR_BN;
v->vcpu_info->evtchn_upcall_pending = 0;
v->vcpu_info->evtchn_upcall_mask = -1;
}
/* pta.size must not be 0. The minimum is 15 (32k) */
VCPU(v, pta) = 15 << 2;
v->arch.domain_itm_last = -1L;
}
/**************************************************************************
VCPU privileged application register access routines
**************************************************************************/
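/* Load the vcpu's shadowed kernel registers ar.k0-ar.k7 into the
   physical kernel registers. */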
void vcpu_load_kernel_regs(VCPU * vcpu)
{
ia64_set_kr(0, VCPU(vcpu, krs[0]));
ia64_set_kr(1, VCPU(vcpu, krs[1]));
ia64_set_kr(2, VCPU(vcpu, krs[2]));
ia64_set_kr(3, VCPU(vcpu, krs[3]));
ia64_set_kr(4, VCPU(vcpu, krs[4]));
ia64_set_kr(5, VCPU(vcpu, krs[5]));
ia64_set_kr(6, VCPU(vcpu, krs[6]));
ia64_set_kr(7, VCPU(vcpu, krs[7]));
}
/* GCC 4.0.2 seems unable to suppress this call! */
#define ia64_setreg_unknown_kr() return IA64_ILLOP_FAULT
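/* Only ar.k0-ar.k7 (0-7), ar.eflag (24) and ar.itc (44) are handled here;
   ar.itc is virtualized, ar.eflag is ignored (no IA-32 support) and any
   other application register raises an illegal operation fault. */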
IA64FAULT vcpu_set_ar(VCPU * vcpu, u64 reg, u64 val)
{
if (reg == 44)
return vcpu_set_itc(vcpu, val);
else if (reg == 27)
return IA64_ILLOP_FAULT;
else if (reg == 24)
printk("warning: setting ar.eflg is a no-op; no IA-32 "
"support\n");
else if (reg > 7)
return IA64_ILLOP_FAULT;
else {
PSCB(vcpu, krs[reg]) = val;
ia64_set_kr(reg, val);
}
return IA64_NO_FAULT;
}
IA64FAULT vcpu_get_ar(VCPU * vcpu, u64 reg, u64 * val)
{
if (reg == 24)
printk("warning: getting ar.eflg is a no-op; no IA-32 "
"support\n");
else if (reg > 7)
return IA64_ILLOP_FAULT;
else
*val = PSCB(vcpu, krs[reg]);
return IA64_NO_FAULT;
}
/**************************************************************************
VCPU processor status register access routines
**************************************************************************/
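/* Metaphysical mode emulates guest-physical addressing (psr.dt == 0) by
   installing the special metaphysical rr0; leaving it restores the
   guest's own rr0. */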
void vcpu_set_metaphysical_mode(VCPU * vcpu, BOOLEAN newmode)
{
/* only do something if mode changes */
if (!!newmode ^ !!PSCB(vcpu, metaphysical_mode)) {
PSCB(vcpu, metaphysical_mode) = newmode;
if (newmode)
set_metaphysical_rr0();
else if (PSCB(vcpu, rrs[0]) != -1)
set_one_rr(0, PSCB(vcpu, rrs[0]));
}
}
IA64FAULT vcpu_reset_psr_dt(VCPU * vcpu)
{
vcpu_set_metaphysical_mode(vcpu, TRUE);
return IA64_NO_FAULT;
}
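/* Handler for the rsm (reset system mask) privileged instruction: clear
   the PSR bits given in imm24 and update the shadowed guest state. */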
IA64FAULT vcpu_reset_psr_sm(VCPU * vcpu, u64 imm24)
{
struct ia64_psr psr, imm, *ipsr;
REGS *regs = vcpu_regs(vcpu);
//PRIVOP_COUNT_ADDR(regs,_RSM);
// TODO: All of these bits need to be virtualized
// TODO: Only allowed for current vcpu
__asm__ __volatile("mov %0=psr;;":"=r"(psr)::"memory");
ipsr = (struct ia64_psr *)&regs->cr_ipsr;
imm = *(struct ia64_psr *)&imm24;
// interrupt flag
if (imm.i)
vcpu->vcpu_info->evtchn_upcall_mask = 1;
if (imm.ic)
PSCB(vcpu, interrupt_collection_enabled) = 0;
// interrupt collection flag
//if (imm.ic) PSCB(vcpu,interrupt_delivery_enabled) = 0;
// only the PSR bits listed below may be cleared via rsm; anything else faults
if (imm24 & ~(IA64_PSR_BE | IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP |
IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT |
IA64_PSR_DFL | IA64_PSR_DFH))
return IA64_ILLOP_FAULT;
if (imm.dfh) {
ipsr->dfh = PSCB(vcpu, hpsr_dfh);
PSCB(vcpu, vpsr_dfh) = 0;
}
if (imm.dfl)
ipsr->dfl = 0;
if (imm.pp) {
ipsr->pp = 1;
psr.pp = 1; // priv perf ctrs always enabled
PSCB(vcpu, vpsr_pp) = 0; // but fool the domain if it gets psr
}
if (imm.up) {
ipsr->up = 0;
psr.up = 0;
}
if (imm.sp) {
ipsr->sp = 0;
psr.sp = 0;
}
if (imm.be)
ipsr->be = 0;
if (imm.dt)
vcpu_set_metaphysical_mode(vcpu, TRUE);
__asm__ __volatile(";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
return IA64_NO_FAULT;
}
IA64FAULT vcpu_set_psr_dt(VCPU * vcpu)
{
vcpu_set_metaphysical_mode(vcpu, FALSE);
return IA64_NO_FAULT;
}
IA64FAULT vcpu_set_psr_i(VCPU * vcpu)
{
vcpu->vcpu_info->evtchn_upcall_mask = 0;
PSCB(vcpu, interrupt_collection_enabled) = 1;
return IA64_NO_FAULT;
}
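/* Handler for the ssm (set system mask) privileged instruction: set the
   PSR bits given in imm24 and post a pending interruption if psr.i was
   just enabled while an interrupt is waiting. */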
IA64FAULT vcpu_set_psr_sm(VCPU * vcpu, u64 imm24)
{
struct ia64_psr psr, imm, *ipsr;
REGS *regs = vcpu_regs(vcpu);
u64 mask, enabling_interrupts = 0;
//PRIVOP_COUNT_ADDR(regs,_SSM);
// TODO: All of these bits need to be virtualized
__asm__ __volatile("mov %0=psr;;":"=r"(psr)::"memory");
imm = *(struct ia64_psr *)&imm24;
ipsr = (struct ia64_psr *)&regs->cr_ipsr;
// only the PSR bits in the mask below may be set via ssm; anything else faults
mask =
IA64_PSR_PP | IA64_PSR_SP | IA64_PSR_I | IA64_PSR_IC | IA64_PSR_UM |
IA64_PSR_DT | IA64_PSR_DFL | IA64_PSR_DFH | IA64_PSR_BE;
if (imm24 & ~mask)
return IA64_ILLOP_FAULT;
if (imm.dfh) {
PSCB(vcpu, vpsr_dfh) = 1;
ipsr->dfh = 1;
}
if (imm.dfl)
ipsr->dfl = 1;
if (imm.pp) {
ipsr->pp = 1;
psr.pp = 1;
PSCB(vcpu, vpsr_pp) = 1;
}
if (imm.sp) {
ipsr->sp = 1;
psr.sp = 1;
}
if (imm.i) {
if (vcpu->vcpu_info->evtchn_upcall_mask) {
//printk("vcpu_set_psr_sm: psr.ic 0->1\n");
enabling_interrupts = 1;
}
vcpu->vcpu_info->evtchn_upcall_mask = 0;
}
if (imm.ic)
PSCB(vcpu, interrupt_collection_enabled) = 1;
// TODO: do this faster
if (imm.mfl) {
ipsr->mfl = 1;
psr.mfl = 1;
}
if (imm.mfh) {
ipsr->mfh = 1;
psr.mfh = 1;
}
if (imm.ac) {
ipsr->ac = 1;
psr.ac = 1;
}
if (imm.up) {
ipsr->up = 1;
psr.up = 1;
}
if (imm.be)
ipsr->be = 1;
if (imm.dt)
vcpu_set_metaphysical_mode(vcpu, FALSE);
__asm__ __volatile(";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
if (enabling_interrupts &&
vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
PSCB(vcpu, pending_interruption) = 1;
return IA64_NO_FAULT;
}
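/* Handler for mov psr.l=reg: update the lower 32 bits of the guest PSR
   from val. */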
IA64FAULT vcpu_set_psr_l(VCPU * vcpu, u64 val)
{
struct ia64_psr newpsr, *ipsr;
REGS *regs = vcpu_regs(vcpu);
u64 enabling_interrupts = 0;
newpsr = *(struct ia64_psr *)&val;
ipsr = (struct ia64_psr *)&regs->cr_ipsr;
// just handle psr.up and psr.pp for now
//if (val & ~(IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP))
// return IA64_ILLOP_FAULT;
// however, unlike ssm, attempts to set other bits are not treated as an error here
if (newpsr.dfh) {
ipsr->dfh = 1;
PSCB(vcpu, vpsr_dfh) = 1;
} else {
ipsr->dfh = PSCB(vcpu, hpsr_dfh);
PSCB(vcpu, vpsr_dfh) = 0;
}
if (newpsr.dfl)
ipsr->dfl = 1;
if (newpsr.pp) {
ipsr->pp = 1;
PSCB(vcpu, vpsr_pp) = 1;
} else {
ipsr->pp = 1; // priv perf ctrs stay enabled even when the guest clears pp
PSCB(vcpu, vpsr_pp) = 0;
}
if (newpsr.up)
ipsr->up = 1;
if (newpsr.sp)
ipsr->sp = 1;
if (newpsr.i) {
if (vcpu->vcpu_info->evtchn_upcall_mask)
enabling_interrupts = 1;
vcpu->vcpu_info->evtchn_upcall_mask = 0;
}
if (newpsr.ic)
PSCB(vcpu, interrupt_collection_enabled) = 1;
if (newpsr.mfl)
ipsr->mfl = 1;
if (newpsr.mfh)
ipsr->mfh = 1;
if (newpsr.ac)
ipsr->ac = 1;
if (newpsr.up)
ipsr->up = 1;
if (newpsr.dt && newpsr.rt)
vcpu_set_metaphysical_mode(vcpu, FALSE);
else
vcpu_set_metaphysical_mode(vcpu, TRUE);
if (newpsr.be)
ipsr->be = 1;
if (enabling_interrupts &&
vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
PSCB(vcpu, pending_interruption) = 1;
return IA64_NO_FAULT;
}
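/* Reconstruct the PSR value the guest should see from cr.ipsr and the
   shadowed per-vcpu bits (psr.i, psr.ic, psr.dt, psr.pp, psr.dfh). */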
IA64FAULT vcpu_get_psr(VCPU * vcpu, u64 * pval)
{
REGS *regs = vcpu_regs(vcpu);
struct ia64_psr newpsr;
newpsr = *(struct ia64_psr *)&regs->cr_ipsr;
if (!vcpu->vcpu_info->evtchn_upcall_mask)
newpsr.i = 1;
else
newpsr.i = 0;
if (PSCB(vcpu, interrupt_collection_enabled))
newpsr.ic = 1;
else
newpsr.ic = 0;
if (PSCB(vcpu, metaphysical_mode))
newpsr.dt = 0;
else
newpsr.dt = 1;
if (PSCB(vcpu, vpsr_pp))
newpsr.pp = 1;
else
newpsr.pp = 0;
newpsr.dfh = PSCB(vcpu, vpsr_dfh);
*pval = *(unsigned long *)&newpsr;
*pval &= (MASK(0, 32) | MASK(35, 2));
return IA64_NO_FAULT;
}
BOOLEAN vcpu_get_psr_ic(VCPU * vcpu)
{
return !!PSCB(vcpu, interrupt_collection_enabled);
}
BOOLEAN vcpu_get_psr_i(VCPU * vcpu)
{
return !vcpu->vcpu_info->evtchn_upcall_mask;
}
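/* Derive the interruption-state psr presented to the guest (used when
   reflecting an event) from prevpsr and the shadowed per-vcpu bits. */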
u64 vcpu_get_ipsr_int_state(VCPU * vcpu, u64 prevpsr)
{
u64 dcr = PSCB(vcpu, dcr);
PSR psr;
//printk("*** vcpu_get_ipsr_int_state (0x%016lx)...\n",prevpsr);
psr.i64 = prevpsr;
psr.ia64_psr.pp = 0;
if (dcr & IA64_DCR_PP)
psr.ia64_psr.pp = 1;
psr.ia64_psr.ic = PSCB(vcpu, interrupt_collection_enabled);
psr.ia64_psr.i = !vcpu->vcpu_info->evtchn_upcall_mask;
psr.ia64_psr.bn = PSCB(vcpu, banknum);
psr.ia64_psr.dfh = PSCB(vcpu, vpsr_dfh);
psr.ia64_psr.dt = 1;
psr.ia64_psr.it = 1;
psr.ia64_psr.rt = 1;
if (psr.ia64_psr.cpl == 2)
psr.ia64_psr.cpl = 0; // fool the domain into believing it runs at cpl0
// psr.pk = 1;
//printk("returns 0x%016lx...\n",psr.i64);
return psr.i64;
}
/**************************************************************************
VCPU control register access routines
**************************************************************************/
IA64FAULT vcpu_get_dcr(VCPU * vcpu, u64 * pval)
{
*pval = PSCB(vcpu, dcr);
return IA64_NO_FAULT;
}
IA64FAULT vcpu_get_iva(VCPU * vcpu, u64 * pval)
{
if (VMX_DOMAIN(vcpu))
*pval = PSCB(vcpu, iva) & ~0x7fffL;
else
*pval = PSCBX(vcpu, iva) & ~0x7fffL;
return IA64_NO_FAULT;
}
IA64FAULT vcpu_get_pta(VCPU * vcpu, u64 * pval)
{
*pval = PSCB(vcpu, pta);
return IA64_NO_FAULT;
}
IA64FAULT vcpu_get_ipsr(VCPU * vcpu, u64 * pval)
{
//REGS *regs = vcpu_regs(vcpu);
//*pval = regs->cr_ipsr;
*pval = PSCB(vcpu, ipsr);
return IA64_NO_FAULT;
}
IA64FAULT vcpu_get_isr(VCPU * vcpu, u64 * pval)
{
*pval = PSCB(vcpu, isr);
return IA64_NO_FAULT;
}
IA64FAULT vcpu_get_iip(VCPU * vcpu, u64 * pval)
{
//REGS *regs = vcpu_regs(vcpu);
//*pval = regs->cr_iip;
*pval = PSCB(vcpu, iip);
return IA64_NO_FAULT;
}
IA64FAULT vcpu_get_ifa(VCPU * vcpu, u64 * pval)
{
PRIVOP_COUNT_ADDR(vcpu_regs(vcpu), privop_inst_get_ifa);
*pval = PSCB(vcpu, ifa);
return IA64_NO_FAULT;
}
unsigned long vcpu_get_rr_ps(VCPU * vcpu, u64 vadr)