/*
* Virtualized CPU functions
*
* Copyright (C) 2004-2005 Hewlett-Packard Co.
* Dan Magenheimer (dan.magenheimer@hp.com)
*
*/
#include <linux/sched.h>
#include <public/xen.h>
#include <xen/mm.h>
#include <asm/ia64_int.h>
#include <asm/vcpu.h>
#include <asm/regionreg.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/vmx_vcpu.h>
#include <asm/vhpt.h>
#include <asm/tlbflush.h>
#include <asm/privop.h>
#include <xen/event.h>
#include <asm/vmx_phy_mode.h>
#include <asm/bundle.h>
#include <asm/privop_stat.h>
#include <asm/uaccess.h>
#include <asm/p2m_entry.h>
#include <asm/tlb_track.h>
/* FIXME: where should these declarations live? */
extern void getreg(unsigned long regnum, unsigned long *val, int *nat,
struct pt_regs *regs);
extern void setreg(unsigned long regnum, unsigned long val, int nat,
struct pt_regs *regs);
extern void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
struct pt_regs *regs);
extern void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
struct pt_regs *regs);
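/* Overlay the architected PSR bit-fields on a raw 64-bit value. */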
typedef union {
struct ia64_psr ia64_psr;
unsigned long i64;
} PSR;
// this def for vcpu_regs won't work if kernel stack is present
//#define vcpu_regs(vcpu) ((struct pt_regs *) vcpu->arch.regs)
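/*
 * cr.pta layout (architected): size in bits 7:2, vf (VHPT walker
 * format) in bit 8, base in bits 63:15.
 */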
#define IA64_PTA_SZ_BIT 2
#define IA64_PTA_VF_BIT 8
#define IA64_PTA_BASE_BIT 15
#define IA64_PTA_LFMT (1UL << IA64_PTA_VF_BIT)
#define IA64_PTA_SZ(x) (x##UL << IA64_PTA_SZ_BIT)
#define IA64_PSR_NON_VIRT_BITS \
(IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | \
IA64_PSR_MFL| IA64_PSR_MFH| IA64_PSR_PK | \
IA64_PSR_DFL| IA64_PSR_SP | IA64_PSR_DB | \
IA64_PSR_LP | IA64_PSR_TB | IA64_PSR_ID | \
IA64_PSR_DA | IA64_PSR_DD | IA64_PSR_SS | \
IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)
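/* PSR bits that are not virtualized per-vcpu. */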
unsigned long vcpu_verbose = 0;
/**************************************************************************
VCPU general register access routines
**************************************************************************/
#ifdef XEN
u64 vcpu_get_gr(VCPU * vcpu, unsigned long reg)
{
REGS *regs = vcpu_regs(vcpu);
u64 val;
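	/* r0 always reads as zero */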
if (!reg)
return 0;
getreg(reg, &val, 0, regs); // FIXME: handle NATs later
return val;
}
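// returns:
// IA64_NAT_CONSUMPTION_VECTOR if the register's NaT bit is set
// IA64_NO_FAULT (0) otherwise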
IA64FAULT vcpu_get_gr_nat(VCPU * vcpu, unsigned long reg, u64 * val)
{
REGS *regs = vcpu_regs(vcpu);
int nat;
getreg(reg, val, &nat, regs); // FIXME: handle NATs later
if (nat)
return IA64_NAT_CONSUMPTION_VECTOR;
return 0;
}
// returns:
// IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
// IA64_NO_FAULT otherwise
IA64FAULT vcpu_set_gr(VCPU * vcpu, unsigned long reg, u64 value, int nat)
{
REGS *regs = vcpu_regs(vcpu);
long sof = (regs->cr_ifs) & 0x7f;
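	/* r0 is read-only; registers above the current frame
	 * (r32 .. r31+sof, sof taken from cr.ifs) are inaccessible */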
if (!reg)
return IA64_ILLOP_FAULT;
if (reg >= sof + 32)
return IA64_ILLOP_FAULT;
setreg(reg, value, nat, regs); // FIXME: handle NATs later
return IA64_NO_FAULT;
}
IA64FAULT
vcpu_get_fpreg(VCPU * vcpu, unsigned long reg, struct ia64_fpreg * val)
{
REGS *regs = vcpu_regs(vcpu);
getfpreg(reg, val, regs); // FIXME: handle NATs later
return IA64_NO_FAULT;
}
IA64FAULT
vcpu_set_fpreg(VCPU * vcpu, unsigned long reg, struct ia64_fpreg * val)
{
REGS *regs = vcpu_regs(vcpu);
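	/* f0 and f1 are architecturally fixed (+0.0 and +1.0), so
	 * writes to them are silently dropped */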
if (reg > 1)
setfpreg(reg, val, regs); // FIXME: handle NATs later
return IA64_NO_FAULT;
}
#else
// returns:
// IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
// IA64_NO_FAULT otherwise
IA64FAULT vcpu_set_gr(VCPU * vcpu, unsigned long reg, u64 value)
{
REGS *regs = vcpu_regs(vcpu);
long sof = (regs->cr_ifs) & 0x7f;
if (!reg)
return IA64_ILLOP_FAULT;
if (reg >= sof + 32)
return IA64_ILLOP_FAULT;
setreg(reg, value, 0, regs); // FIXME: handle NATs later
return IA64_NO_FAULT;
}
#endif
void vcpu_init_regs(struct vcpu *v)
{
struct pt_regs *regs;
regs = vcpu_regs(v);
if (VMX_DOMAIN(v)) {
		/* dt/rt/it=1, i/ic=1, si=1, vm/bn=1, ac=1 */
		/* TODO: expand this magic constant into PSR bit macros */
		regs->cr_ipsr = 0x501008826008;
/* lazy fp */
FP_PSR(v) = IA64_PSR_DFH;
regs->cr_ipsr |= IA64_PSR_DFH;
} else {
regs->cr_ipsr = ia64_getreg(_IA64_REG_PSR)
| IA64_PSR_BITS_TO_SET | IA64_PSR_BN;
regs->cr_ipsr &= ~(IA64_PSR_BITS_TO_CLEAR
| IA64_PSR_RI | IA64_PSR_IS);
// domain runs at PL2
		regs->cr_ipsr = vcpu_pl_adjust(regs->cr_ipsr, IA64_PSR_CPL0_BIT);
// lazy fp
PSCB(v, hpsr_dfh) = 1;
PSCB(v, hpsr_mfh) = 0;
regs->cr_ipsr |= IA64_PSR_DFH;
}
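	/* bit 63 of cr.ifs is the valid bit; sol/sof are left zero */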
regs->cr_ifs = 1UL << 63; /* or clear? */
regs->ar_fpsr = FPSR_DEFAULT;
if (VMX_DOMAIN(v)) {
vmx_init_all_rr(v);
/* Virtual processor context setup */
VCPU(v, vpsr) = IA64_PSR_BN;
VCPU(v, dcr) = 0;
} else {
init_all_rr(v);
regs->ar_rsc = vcpu_pl_adjust(regs->ar_rsc, 2);
VCPU(v, banknum) = 1;
VCPU(v, metaphysical_mode) = 1;
VCPU(v, interrupt_mask_addr) =
(unsigned char *)v->domain->arch.shared_info_va +
INT_ENABLE_OFFSET(v);
VCPU(v, itv) = (1 << 16); /* timer vector masked */
v->vcpu_info->evtchn_upcall_pending = 0;
v->vcpu_info->evtchn_upcall_mask = -1;
}
/* pta.size must not be 0. The minimum is 15 (32k) */
	VCPU(v, pta) = IA64_PTA_SZ(15);
v->arch.domain_itm_last = -1L;
}
/**************************************************************************
VCPU privileged application register access routines
**************************************************************************/
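/* Load the guest's kernel registers (ar.k0-ar.k7) into the cpu. */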
void vcpu_load_kernel_regs(VCPU * vcpu)
{
ia64_set_kr(0, VCPU(vcpu, krs[0]));
ia64_set_kr(1, VCPU(vcpu, krs[1]));
ia64_set_kr(2, VCPU(vcpu, krs[2]));
ia64_set_kr(3, VCPU(vcpu, krs[3]));
ia64_set_kr(4, VCPU(vcpu, krs[4]));
ia64_set_kr(5, VCPU(vcpu, krs[5]));
ia64_set_kr(6, VCPU(vcpu, krs[6]));
ia64_set_kr(7, VCPU(vcpu, krs[7]));
}
/* GCC 4.0.2 seems to be unable to suppress this call. */
#define ia64_setreg_unknown_kr() return IA64_ILLOP_FAULT
IA64FAULT vcpu_set_ar(VCPU * vcpu, u64 reg, u64 val)
{
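	/* ar.k0-k7 are ar0-ar7; ar.eflag is ar24, ar.cflg ar27,
	 * ar.itc ar44 */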
if (reg == 44)
return vcpu_set_itc(vcpu, val);
else if (reg == 27)
return IA64_ILLOP_FAULT;
else if (reg == 24)
printk("warning: setting ar.eflg is a no-op; no IA-32 "
"support\n");
else if (reg > 7)
return IA64_ILLOP_FAULT;
else {
PSCB(vcpu, krs[reg]) = val;
ia64_set_kr(reg, val);
}
return IA64_NO_FAULT;
}
IA64FAULT vcpu_get_ar(VCPU * vcpu, u64 reg, u64 * val)
{
if (reg == 24)
printk("warning: getting ar.eflg is a no-op; no IA-32 "
"support\n");
else if (reg > 7)
return IA64_ILLOP_FAULT;
else
*val = PSCB(vcpu, krs[reg]);
return IA64_NO_FAULT;
}
/**************************************************************************
VCPU processor status register access routines
**************************************************************************/
void vcpu_set_metaphysical_mode(VCPU * vcpu, BOOLEAN newmode)
{
/* only do something if mode changes */
if (!!newmode ^ !!PSCB(vcpu, metaphysical_mode)) {
PSCB(vcpu, metaphysical_mode) = newmode;
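		/* rrs[0] == -1 presumably means rr[0] has not been set yet */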
if (newmode)
set_metaphysical_rr0();
else if (PSCB(vcpu, rrs[0]) != -1)
set_one_rr(0, PSCB(vcpu, rrs[0]));
}
}
IA64FAULT vcpu_reset_psr_dt(VCPU * vcpu)
{
vcpu_set_metaphysical_mode(vcpu, TRUE);
return IA64_NO_FAULT;
}
IA64FAULT vcpu_reset_psr_sm(VCPU * vcpu, u64 imm24)
{
struct ia64_psr psr, imm, *ipsr;
REGS *regs = vcpu_regs(vcpu);
//PRIVOP_COUNT_ADDR(regs,_RSM);
// TODO: All of these bits need to be virtualized
// TODO: Only allowed for current vcpu
__asm__ __volatile("mov %0=psr;;":"=r"(psr)::"memory");
	ipsr = (struct ia64_psr *)&regs->cr_ipsr;
imm = *(struct ia64_psr *)&imm24;
	// interrupt flag
	if (imm.i)
		vcpu->vcpu_info->evtchn_upcall_mask = 1;
	// interrupt collection flag
	if (imm.ic)
		PSCB(vcpu, interrupt_collection_enabled) = 0;
	// fault on attempts to clear any bits not virtualized below
if (imm24 & ~(IA64_PSR_BE | IA64_PSR_PP | IA64_PSR_UP | IA64_PSR_SP |
IA64_PSR_I | IA64_PSR_IC | IA64_PSR_DT |
IA64_PSR_DFL | IA64_PSR_DFH))
return IA64_ILLOP_FAULT;
if (imm.dfh) {
ipsr->dfh = PSCB(vcpu, hpsr_dfh);
PSCB(vcpu, vpsr_dfh) = 0;
}
if (imm.dfl)
ipsr->dfl = 0;
if (imm.pp) {
ipsr->pp = 1;
psr.pp = 1; // priv perf ctrs always enabled
PSCB(vcpu, vpsr_pp) = 0; // but fool the domain if it gets psr
}
if (imm.up) {
ipsr->up = 0;
psr.up = 0;
}
if (imm.sp) {
ipsr->sp = 0;
psr.sp = 0;
}
if (imm.be)
ipsr->be = 0;
if (imm.dt)
vcpu_set_metaphysical_mode(vcpu, TRUE);
__asm__ __volatile(";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
return IA64_NO_FAULT;
}
IA64FAULT vcpu_set_psr_dt(VCPU * vcpu)
{
vcpu_set_metaphysical_mode(vcpu, FALSE);
return IA64_NO_FAULT;
}
IA64FAULT vcpu_set_psr_i(VCPU * vcpu)
{
vcpu->vcpu_info->evtchn_upcall_mask = 0;
PSCB(vcpu, interrupt_collection_enabled) = 1;
return IA64_NO_FAULT;
}
IA64FAULT vcpu_set_psr_sm(VCPU * vcpu, u64 imm24)
{
struct ia64_psr psr, imm, *ipsr;
REGS *regs = vcpu_regs(vcpu);
u64 mask, enabling_interrupts = 0;
//PRIVOP_COUNT_ADDR(regs,_SSM);
// TODO: All of these bits need to be virtualized
__asm__ __volatile("mov %0=psr;;":"=r"(psr)::"memory");
imm = *(struct ia64_psr *)&imm24;