/*
* Copyright (C) 1995 Linus Torvalds
*
* Pentium III FXSR, SSE support
* Gareth Hughes <gareth@valinux.com>, May 2000
*
 * Copyright (C) 2005 Intel Corp.
* Kun Tian (Kevin Tian) <kevin.tian@intel.com>
*
* 05/04/29 Kun Tian (Kevin Tian) <kevin.tian@intel.com> Add VTI domain support
*/
#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <xen/delay.h>
#include <xen/softirq.h>
#include <xen/mm.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/hw_irq.h>
//#include <asm/mpspec.h>
#include <xen/irq.h>
#include <xen/event.h>
//#include <xen/shadow.h>
#include <xen/console.h>
#include <xen/compile.h>
#include <xen/elf.h>
//#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/dma.h> /* for MAX_DMA_ADDRESS */
#include <asm/asm-offsets.h> /* for IA64_THREAD_INFO_SIZE */
#include <asm/vcpu.h> /* for function declarations */
#include <public/arch-ia64.h>
#include <asm/vmx.h>
#include <asm/vmx_vcpu.h>
#include <asm/vmx_vpd.h>
#include <asm/pal.h>
#include <public/io/ioreq.h>
#define CONFIG_DOMAIN0_CONTIGUOUS
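
/* Physical placement, size and alignment of domain 0's memory allocation. */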
unsigned long dom0_start = -1L;
unsigned long dom0_size = 512*1024*1024;
unsigned long dom0_align = 64*1024*1024;
// initialized by arch/ia64/setup.c:find_initrd()
unsigned long initrd_start = 0, initrd_end = 0;
extern unsigned long running_on_sim;
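
/* Does address 'a' fall within domain 'd's Xen virtual address range? */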
#define IS_XEN_ADDRESS(d,a) (((a) >= (d)->xen_vastart) && ((a) <= (d)->xen_vaend))
//extern int loadelfimage(char *);
extern int readelfimage_base_and_size(char *, unsigned long,
unsigned long *, unsigned long *, unsigned long *);
unsigned long map_domain_page0(struct domain *);
extern unsigned long dom_fw_setup(struct domain *, char *, int);
/* this belongs in include/asm, but there doesn't seem to be a suitable place */
void free_perdomain_pt(struct domain *d)
{
printf("free_perdomain_pt: not implemented\n");
//free_page((unsigned long)d->mm.perdomain_pt);
}
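
/*
 * Halt this CPU until the next interrupt, unless a softirq is already
 * pending; the pending check is done with interrupts disabled.
 */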
static void default_idle(void)
{
int cpu = smp_processor_id();
local_irq_disable();
	if ( !softirq_pending(cpu) )
safe_halt();
local_irq_enable();
}
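
/* Per-CPU idle loop: wait for softirqs and process them, never returning. */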
static void continue_cpu_idle_loop(void)
{
int cpu = smp_processor_id();
for ( ; ; )
{
printf ("idle%dD\n", cpu);
#ifdef IA64
// __IRQ_STAT(cpu, idle_timestamp) = jiffies
#else
irq_stat[cpu].idle_timestamp = jiffies;
#endif
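		/* Idle until a softirq is raised for this CPU. */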
while ( !softirq_pending(cpu) )
default_idle();
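		/* Make sure the scheduler runs, then process all pending softirqs. */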
add_preempt_count(SOFTIRQ_OFFSET);
raise_softirq(SCHEDULE_SOFTIRQ);
do_softirq();
sub_preempt_count(SOFTIRQ_OFFSET);
}
}
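
/*
 * Entry point for each CPU's idle vcpu: kick the scheduler once and
 * fall into the idle loop.  Never returns.
 */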
void startup_cpu_idle_loop(void)
{
int cpu = smp_processor_id ();
/* Just some sanity to ensure that the scheduler is set up okay. */
	ASSERT(current->domain->domain_id == IDLE_DOMAIN_ID);
printf ("idle%dA\n", cpu);
raise_softirq(SCHEDULE_SOFTIRQ);
#if 0 /* All this work is done within continue_cpu_idle_loop */
printf ("idle%dB\n", cpu);
asm volatile ("mov ar.k2=r0");
do_softirq();
printf ("idle%dC\n", cpu);
/*
* Declares CPU setup done to the boot processor.
* Therefore memory barrier to ensure state is visible.
*/
smp_mb();
#endif
#if 0
	// Do we have to ensure the idle task has a shared page so that, for example,
	// region registers can be loaded from it?  Apparently not...
idle0_task.shared_info = (void *)alloc_xenheap_page();
memset(idle0_task.shared_info, 0, PAGE_SIZE);
/* pin mapping */
// FIXME: Does this belong here? Or do only at domain switch time?
{
/* WARNING: following must be inlined to avoid nested fault */
unsigned long psr = ia64_clear_ic();
ia64_itr(0x2, IA64_TR_SHARED_INFO, SHAREDINFO_ADDR,
pte_val(pfn_pte(ia64_tpa(idle0_task.shared_info) >> PAGE_SHIFT, PAGE_KERNEL)),
PAGE_SHIFT);
ia64_set_psr(psr);
ia64_srlz_i();
}
#endif
continue_cpu_idle_loop();
}
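
/*
 * Allocate a vcpu together with its kernel stack (the structure lives at
 * the base of a KERNEL_STACK_SIZE_ORDER allocation), seed its arch state
 * from the idle vcpu, and allocate the per-vcpu privileged register area.
 */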
struct vcpu *alloc_vcpu_struct(struct domain *d, unsigned int vcpu_id)
{
struct vcpu *v;
if ((v = alloc_xenheap_pages(KERNEL_STACK_SIZE_ORDER)) == NULL)
return NULL;
memset(v, 0, sizeof(*v));
memcpy(&v->arch, &idle0_vcpu.arch, sizeof(v->arch));
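	/* Privileged register area (mapped_regs_t), rounded up to whole pages. */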
	v->arch.privregs =
		alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
	if (v->arch.privregs == NULL) {
		free_xenheap_pages(v, KERNEL_STACK_SIZE_ORDER);
		return NULL;
	}
	printf("privregs=%p\n", v->arch.privregs);
	memset(v->arch.privregs, 0, PAGE_SIZE);
return v;
}
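
/*
 * Release the combined vcpu/kernel-stack allocation.  Note that
 * v->arch.privregs allocated in alloc_vcpu_struct() is not freed here.
 */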
void free_vcpu_struct(struct vcpu *v)
{
free_xenheap_pages(v, KERNEL_STACK_SIZE_ORDER);
}
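
/*
 * Lay out the initial kernel stack frame for a new vcpu: a zeroed pt_regs
 * and switch_stack at the top of the kernel stack, with b0 set so that the
 * first switch to this vcpu resumes at ia64_ret_from_clone.
 */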
static void init_switch_stack(struct vcpu *v)
{
struct pt_regs *regs = (struct pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
struct switch_stack *sw = (struct switch_stack *) regs - 1;
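	/*
	 * pt_regs sits at the very top of the vcpu's kernel stack
	 * (v + IA64_STK_OFFSET) and the switch_stack immediately below it;
	 * the register backing store grows upwards from v + IA64_RBS_OFFSET.
	 */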
extern void ia64_ret_from_clone;
memset(sw, 0, sizeof(struct switch_stack) + sizeof(struct pt_regs));
sw->ar_bspstore = (unsigned long)v + IA64_RBS_OFFSET;
sw->b0 = (unsigned long) &ia64_ret_from_clone;
sw->ar_fpsr = FPSR_DEFAULT;
v->arch._thread.ksp = (unsigned long) sw - 16;
// stay on kernel stack because may get interrupts!
// ia64_ret_from_clone (which b0 gets in new_thread) switches