-rw-r--r--  extras/mini-os/arch/ia64/Makefile                         |  17
-rw-r--r--  extras/mini-os/arch/ia64/arch.mk                          |  15
-rw-r--r--  extras/mini-os/arch/ia64/common.c                         |  11
-rw-r--r--  extras/mini-os/arch/ia64/fw.S                             |  21
-rw-r--r--  extras/mini-os/arch/ia64/gen_off.c                        |  62
-rw-r--r--  extras/mini-os/arch/ia64/mm.c                             |   2
-rw-r--r--  extras/mini-os/arch/ia64/xencomm.c                        |  20
-rw-r--r--  extras/mini-os/include/ia64/hypercall-ia64.h              |   2
-rw-r--r--  extras/mini-os/include/ia64/os.h                          |   2
-rw-r--r--  linux-2.6-xen-sparse/arch/ia64/kernel/time.c              | 437
-rw-r--r--  linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c           |  21
-rw-r--r--  linux-2.6-xen-sparse/arch/ia64/xen/xcom_hcall.c           |  18
-rw-r--r--  linux-2.6-xen-sparse/include/asm-ia64/hypercall.h         |   7
-rw-r--r--  linux-2.6-xen-sparse/include/asm-ia64/xen/xcom_hcall.h    |   2
-rw-r--r--  unmodified_drivers/linux-2.6/platform-pci/xen_support.c   |   8
-rw-r--r--  xen/arch/ia64/linux-xen/entry.S                           |   2
-rw-r--r--  xen/arch/ia64/linux-xen/sn/kernel/io_init.c               |  32
-rw-r--r--  xen/arch/ia64/linux-xen/sn/kernel/irq.c                   |  11
-rw-r--r--  xen/arch/ia64/vmx/viosapic.c                              |   4
-rw-r--r--  xen/arch/ia64/vmx/vmmu.c                                  |  25
-rw-r--r--  xen/arch/ia64/vmx/vmx_entry.S                             |  10
-rw-r--r--  xen/arch/ia64/vmx/vmx_process.c                           |   5
-rw-r--r--  xen/arch/ia64/vmx/vtlb.c                                  |  37
-rw-r--r--  xen/arch/ia64/xen/dom0_ops.c                              |   6
-rw-r--r--  xen/arch/ia64/xen/dom_fw.c                                |  23
-rw-r--r--  xen/arch/ia64/xen/domain.c                                |  43
-rw-r--r--  xen/arch/ia64/xen/fw_emul.c                               |  75
-rw-r--r--  xen/arch/ia64/xen/mm.c                                    |  77
-rw-r--r--  xen/arch/ia64/xen/vcpu.c                                  |  30
-rw-r--r--  xen/include/asm-ia64/hypercall.h                          |   3
-rw-r--r--  xen/include/asm-ia64/linux/asm/sn/README.origin           |   3
-rw-r--r--  xen/include/asm-ia64/linux/asm/sn/sn_sal.h                |  10
-rw-r--r--  xen/include/asm-ia64/mm.h                                 |  26
-rw-r--r--  xen/include/asm-ia64/vmmu.h                               |   2
34 files changed, 918 insertions(+), 151 deletions(-)
diff --git a/extras/mini-os/arch/ia64/Makefile b/extras/mini-os/arch/ia64/Makefile
index 58f52caff1..2c9a627ec4 100644
--- a/extras/mini-os/arch/ia64/Makefile
+++ b/extras/mini-os/arch/ia64/Makefile
@@ -2,6 +2,9 @@
# Special makefile for ia64.
#
+XEN_ROOT = ../../../..
+include $(XEN_ROOT)/Config.mk
+
include arch.mk
include ../../minios.mk
@@ -36,21 +39,21 @@ ARCH_OBJS += __udivdi3.o
ARCH_OBJS += __divdi3.o
GEN_OFF_SRC := gen_off.c
-GEN_OFF_BIN := gen_off
-GEN_OFF_H := $(ARCH_SPEC_INC)/offsets.h
+GEN_OFF_ASM := gen_off.s
+GEN_OFF_H := $(ARCH_INC)/offsets.h
all: $(ARCH_LIB)
-$(GEN_OFF_BIN): $(GEN_OFF_SRC)
- $(CC) -o $@ $(CPPFLAGS) $<
+$(GEN_OFF_ASM): $(GEN_OFF_SRC)
+ $(CC) -S -o $@ $(CPPFLAGS) $<
-$(GEN_OFF_H): $(GEN_OFF_BIN)
- ./$(GEN_OFF_BIN) > $(GEN_OFF_H)
+$(GEN_OFF_H): $(GEN_OFF_ASM)
+ sed -ne "/^->/ {s/->/#define /; p}" < $< > $@
$(ARCH_LIB): $(GEN_OFF_H) $(ARCH_OBJS) $(HEAD_ARCH_OBJ)
$(AR) rv $(ARCH_LIB) $(ARCH_OBJS)
clean:
rm -f $(ARCH_LIB) $(ARCH_OBJS) $(HEAD_ARCH_OBJ)
- rm -f $(GEN_OFF_BIN)
+ rm -f $(GEN_OFF_ASM)
rm -f $(GEN_OFF_H)
diff --git a/extras/mini-os/arch/ia64/arch.mk b/extras/mini-os/arch/ia64/arch.mk
index 167e3a1f50..12168713ee 100644
--- a/extras/mini-os/arch/ia64/arch.mk
+++ b/extras/mini-os/arch/ia64/arch.mk
@@ -1,5 +1,20 @@
+# Build for Big Endian?
+BIGENDIAN ?= n
+
ARCH_CFLAGS := -mfixed-range=f2-f5,f12-f15,f32-f127 -mconstant-gp
ARCH_CFLAGS += -O2
ARCH_ASFLAGS := -x assembler-with-cpp
ARCH_ASFLAGS += -mfixed-range=f2-f5,f12-f15,f32-f127 -fomit-frame-pointer
ARCH_ASFLAGS += -fno-builtin -fno-common -fno-strict-aliasing -mconstant-gp
+
+ARCH_LDFLAGS = -warn-common
+
+# Next lines are for big endian code !
+ifeq ($(BIGENDIAN),y)
+ARCH_CFLAGS += -mbig-endian -Wa,-mbe -Wa,-mlp64
+ARCH_CFLAGS += -DBIG_ENDIAN
+ARCH_ASFLAGS += -Wa,-mbe
+ARCH_ASFLAGS += -DBIG_ENDIAN
+ARCH_LDFLAGS = -EB -d
+endif
+
diff --git a/extras/mini-os/arch/ia64/common.c b/extras/mini-os/arch/ia64/common.c
index ee496338a2..13416d9542 100644
--- a/extras/mini-os/arch/ia64/common.c
+++ b/extras/mini-os/arch/ia64/common.c
@@ -59,6 +59,9 @@ shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)XSI_BASE;
struct machine_fw machineFwG;
+/* This pointer is initialized in ia64.S with the address of the boot param
+ * area passed by the bootloader. */
+struct xen_ia64_boot_param* ia64_boot_paramP;
struct xen_ia64_boot_param ia64BootParamG;
char boot_cmd_line[COMMAND_LINE_SIZE+1];
@@ -104,6 +107,7 @@ map_pal_code(void)
xen_set_virtual_psr_ic(1);
}
+/* In ivt.S */
extern char hypervisor_callback;
static void
@@ -139,7 +143,6 @@ init_start_info(start_info_t* xen_start_info)
static void
init_boot_params(void)
{
- /* ia64_boot_paramP is initialised in ia64.S! */
ia64BootParamG.command_line = SWAP(ia64_boot_paramP->command_line);
ia64BootParamG.efi_systab = SWAP(ia64_boot_paramP->efi_systab);
ia64BootParamG.efi_memmap = SWAP(ia64_boot_paramP->efi_memmap);
@@ -222,6 +225,7 @@ arch_print_info(void)
major = minor >> 16;
minor &= ~0xffffffff;
printk("Running on Xen version: %d.%d\n", major, minor);
+#if 0
printk("machine addr of shared_info_t : 0x%lx\n",
start_info.shared_info);
printk("machine page number of shared page: 0x%lx\n",
@@ -232,5 +236,8 @@ arch_print_info(void)
start_info.console.domU.mfn);
printk("evtchn for console messages : %d\n",
start_info.console.domU.evtchn);
- printk("xen_guest_cmdline : %s\n", boot_cmd_line);
+#endif
+ if(strlen(boot_cmd_line) > 0)
+ printk("xen_guest_cmdline : %s\n", boot_cmd_line);
}
+
diff --git a/extras/mini-os/arch/ia64/fw.S b/extras/mini-os/arch/ia64/fw.S
index db89dfa934..48bb851e5d 100644
--- a/extras/mini-os/arch/ia64/fw.S
+++ b/extras/mini-os/arch/ia64/fw.S
@@ -33,6 +33,7 @@
#include "ia64_cpu.h"
#include "ia64_fpu.h"
#include "offsets.h"
+#include "xen/xen.h"
/*
@@ -517,3 +518,23 @@ ENTRY(__hypercall)
br.ret.sptk.many b0
;;
END(__hypercall)
+
+/*
+ * Stub for suspend.
+ * Just force the stacked registers to be written to memory.
+ */
+ENTRY(xencomm_arch_hypercall_suspend)
+ ;;
+ alloc r20=ar.pfs,0,0,6,0
+ mov r2=__HYPERVISOR_sched_op
+ ;;
+ /* We don't want to deal with RSE. */
+ flushrs
+ mov r33=r32
+ mov r32=2 // SCHEDOP_shutdown
+ ;;
+ break 0x1000
+ ;;
+ br.ret.sptk.many b0
+END(xencomm_arch_hypercall_suspend)
+
diff --git a/extras/mini-os/arch/ia64/gen_off.c b/extras/mini-os/arch/ia64/gen_off.c
index b5d2f15c1e..2c67a8c352 100644
--- a/extras/mini-os/arch/ia64/gen_off.c
+++ b/extras/mini-os/arch/ia64/gen_off.c
@@ -25,19 +25,25 @@
* SUCH DAMAGE.
*
*/
-
-#include <stdio.h>
-#include <stddef.h>
-#include <string.h>
#include "types.h"
#include "sched.h"
#include "xen/xen.h"
#include "xen/arch-ia64.h"
+#define DEFINE(sym, val) \
+ asm volatile("\n->" sym " %0 /* " #val " */": : "i" (val))
+#define DEFINE_STR2(sym, pfx, val) \
+ asm volatile("\n->" sym " " pfx "%0" : : "i"(val));
+
#define SZ(st,e) sizeof(((st *)0)->e)
-#define OFF(st,e,d,o) print_define(fp, #d, offsetof(st, e) + o, SZ(st, e))
+#define OFF(st,e,d,o) \
+ DEFINE(#d, offsetof(st, e) + o); \
+ DEFINE(#d "_sz", SZ(st,e )); \
+ DEFINE_STR2(#d "_ld", "ld", SZ(st, e)); \
+ DEFINE_STR2(#d "_st", "st", SZ(st, e));
+
#define TFOFF(e,d) OFF(trap_frame_t, e, d, 0)
-#define SIZE(st,d) fprintf(fp, "#define %-30s\t0x%016lx\n", #d, sizeof(st))
+#define SIZE(st,d) DEFINE(#d, sizeof(st))
#define SWOFF(e,d) OFF(struct thread, e, d, 0)
@@ -46,51 +52,9 @@
/* mapped_regs_t from xen/arch-ia64.h */
#define MR_OFF(e, d) OFF(mapped_regs_t, e, d, XMAPPEDREGS_OFS)
-void
-print_define(FILE *fp, char *name, uint64_t val, int size)
-{
- char ld_name[64];
- char st_name[64];
- char sz_name[64];
-
- strcpy(ld_name, name);
- strcat(ld_name, "_ld");
- strcpy(st_name, name);
- strcat(st_name, "_st");
- strcpy(sz_name, name);
- strcat(sz_name, "_sz");
- fprintf(fp, "#define %-30s\t0x%016lx\n", name, val);
- fprintf(fp, "#define %-30s\t%u\n", sz_name, size);
- switch (size) {
- case 1:
- fprintf(fp, "#define %-30s\tld1\n", ld_name);
- fprintf(fp, "#define %-30s\tst1\n", st_name);
- break;
- case 2:
- fprintf(fp, "#define %-30s\tld2\n", ld_name);
- fprintf(fp, "#define %-30s\tst2\n", st_name);
- break;
- case 4:
- fprintf(fp, "#define %-30s\tld4\n", ld_name);
- fprintf(fp, "#define %-30s\tst4\n", st_name);
- break;
- case 8:
- fprintf(fp, "#define %-30s\tld8\n", ld_name);
- fprintf(fp, "#define %-30s\tst8\n", st_name);
- break;
- default: ;
- }
- return;
-}
-
-
int
main(int argc, char ** argv)
{
- FILE *fp;
-
- fp = stdout;
-
TFOFF(cfm, TF_CFM);
TFOFF(pfs, TF_PFS);
TFOFF(bsp, TF_BSP);
@@ -173,5 +137,5 @@ main(int argc, char ** argv)
MR_OFF(bank1_regs[0], XSI_BANK1_R16_OFS);
MR_OFF(precover_ifs, XSI_PRECOVER_IFS_OFS);
- return(0);
+ return 0;
}
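
The point of this rework is that gen_off.c no longer has to be built and run on the build host: the Makefile change above only compiles it to assembly (cc -S), and the "->NAME value" markers emitted by the asm statements are extracted into #define lines by the sed rule, the same trick Linux uses to generate asm-offsets.h, which avoids executing a helper binary at build time (useful for cross-compiling). Below is a minimal stand-alone sketch of the mechanism; it is not part of the patch, and the structure and symbol name are made up for illustration.

/*
 * Illustration only (not part of the patch): how the "->" marker
 * macro turns into an offsets.h line.  The struct is hypothetical.
 */
#include <stddef.h>

struct trap_frame {
	unsigned long cfm;
	unsigned long pfs;
};

#define DEFINE(sym, val) \
	asm volatile("\n->" sym " %0 /* " #val " */" : : "i" (val))

void gen(void)
{
	DEFINE("TF_PFS", offsetof(struct trap_frame, pfs));
	/*
	 * "cc -S" leaves a marker of roughly this shape in the .s
	 * file (the exact immediate syntax depends on the target):
	 *     ->TF_PFS 8
	 * and the Makefile rule
	 *     sed -ne "/^->/ {s/->/#define /; p}"
	 * rewrites it to
	 *     #define TF_PFS 8
	 * without running anything on the build host.
	 */
}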
diff --git a/extras/mini-os/arch/ia64/mm.c b/extras/mini-os/arch/ia64/mm.c
index dd1fb9dc78..cb1699a9ab 100644
--- a/extras/mini-os/arch/ia64/mm.c
+++ b/extras/mini-os/arch/ia64/mm.c
@@ -127,7 +127,7 @@ void*
map_frames(unsigned long* frames, unsigned long n)
{
n = n;
- return (void*) __va(frames[0] << PAGE_SHIFT);
+ return (void*) __va(SWAP(frames[0]) << PAGE_SHIFT);
}
void arch_init_p2m(unsigned long max_pfn)
diff --git a/extras/mini-os/arch/ia64/xencomm.c b/extras/mini-os/arch/ia64/xencomm.c
index 587576a0b4..03d163cb94 100644
--- a/extras/mini-os/arch/ia64/xencomm.c
+++ b/extras/mini-os/arch/ia64/xencomm.c
@@ -171,13 +171,15 @@ xencommize_mini_grant_table_op(struct xencomm_mini *xc_area, int *nbr_area,
return -EINVAL;
rc = xencomm_create_mini
(xc_area, nbr_area,
- xen_guest_handle(setup->frame_list),
- setup->nr_frames
+ (void*)SWAP((uint64_t)
+ xen_guest_handle(setup->frame_list)),
+ SWAP(setup->nr_frames)
* sizeof(*xen_guest_handle(setup->frame_list)),
&desc1);
if (rc)
return rc;
- set_xen_guest_handle(setup->frame_list, (void *)desc1);
+ set_xen_guest_handle(setup->frame_list,
+ (void *)SWAP((uint64_t)desc1));
break;
}
case GNTTABOP_dump_table:
@@ -254,3 +256,15 @@ HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
return xencomm_mini_hypercall_grant_table_op(cmd, uop, count);
}
+ /* In fw.S */
+extern int xencomm_arch_hypercall_suspend(struct xencomm_handle *arg);
+int
+HYPERVISOR_suspend(unsigned long srec)
+{
+ struct sched_shutdown arg;
+
+ arg.reason = (uint32_t)SWAP((uint32_t)SHUTDOWN_suspend);
+
+ return xencomm_arch_hypercall_suspend(xencomm_create_inline(&arg));
+}
+
diff --git a/extras/mini-os/include/ia64/hypercall-ia64.h b/extras/mini-os/include/ia64/hypercall-ia64.h
index 7b9b3edcc2..2a4415879d 100644
--- a/extras/mini-os/include/ia64/hypercall-ia64.h
+++ b/extras/mini-os/include/ia64/hypercall-ia64.h
@@ -138,7 +138,7 @@ xencomm_arch_event_channel_op(int cmd, void *arg)
if (unlikely(rc == -ENOSYS)) {
struct evtchn_op op;
- op.cmd = cmd;
+ op.cmd = SWAP(cmd);
memcpy(&op.u, arg, sizeof(op.u));
rc = _hypercall1(int, event_channel_op_compat, &op);
}
diff --git a/extras/mini-os/include/ia64/os.h b/extras/mini-os/include/ia64/os.h
index 6c928f2103..4e9f595f38 100644
--- a/extras/mini-os/include/ia64/os.h
+++ b/extras/mini-os/include/ia64/os.h
@@ -52,7 +52,7 @@ void arch_print_info(void); /* in common.c */
/* Size of xen_ia64_boot_param.command_line */
#define COMMAND_LINE_SIZE 512
-struct xen_ia64_boot_param* ia64_boot_paramP;
+extern struct xen_ia64_boot_param* ia64_boot_paramP;
extern struct xen_ia64_boot_param ia64BootParamG;
extern char boot_cmd_line[];
extern efi_system_table_t* efiSysTableP;
diff --git a/linux-2.6-xen-sparse/arch/ia64/kernel/time.c b/linux-2.6-xen-sparse/arch/ia64/kernel/time.c
new file mode 100644
index 0000000000..e9d72d89a3
--- /dev/null
+++ b/linux-2.6-xen-sparse/arch/ia64/kernel/time.c
@@ -0,0 +1,437 @@
+/*
+ * linux/arch/ia64/kernel/time.c
+ *
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ * Stephane Eranian <eranian@hpl.hp.com>
+ * David Mosberger <davidm@hpl.hp.com>
+ * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
+ * Copyright (C) 1999-2000 VA Linux Systems
+ * Copyright (C) 1999-2000 Walt Drummond <drummond@valinux.com>
+ */
+
+#include <linux/cpu.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/profile.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+#include <linux/efi.h>
+#include <linux/profile.h>
+#include <linux/timex.h>
+
+#include <asm/machvec.h>
+#include <asm/delay.h>
+#include <asm/hw_irq.h>
+#include <asm/ptrace.h>
+#include <asm/sal.h>
+#include <asm/sections.h>
+#include <asm/system.h>
+
+#ifdef CONFIG_XEN
+#include <linux/kernel_stat.h>
+#include <linux/posix-timers.h>
+#include <xen/interface/vcpu.h>
+#include <asm/percpu.h>
+#endif
+
+extern unsigned long wall_jiffies;
+
+volatile int time_keeper_id = 0; /* smp_processor_id() of time-keeper */
+
+#ifdef CONFIG_IA64_DEBUG_IRQ
+
+unsigned long last_cli_ip;
+EXPORT_SYMBOL(last_cli_ip);
+
+#endif
+
+#ifdef CONFIG_XEN
+DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
+DEFINE_PER_CPU(unsigned long, processed_stolen_time);
+DEFINE_PER_CPU(unsigned long, processed_blocked_time);
+#define NS_PER_TICK (1000000000LL/HZ)
+#endif
+
+static struct time_interpolator itc_interpolator = {
+ .shift = 16,
+ .mask = 0xffffffffffffffffLL,
+ .source = TIME_SOURCE_CPU
+};
+
+#ifdef CONFIG_XEN
+static unsigned long
+consider_steal_time(unsigned long new_itm, struct pt_regs *regs)
+{
+ unsigned long stolen, blocked, sched_time;
+ unsigned long delta_itm = 0, stolentick = 0;
+ int i, cpu = smp_processor_id();
+ struct vcpu_runstate_info *runstate;
+ struct task_struct *p = current;
+
+ runstate = &per_cpu(runstate, smp_processor_id());
+
+ do {
+ sched_time = runstate->state_entry_time;
+ mb();
+ stolen = runstate->time[RUNSTATE_runnable] +
+ runstate->time[RUNSTATE_offline] -
+ per_cpu(processed_stolen_time, cpu);
+ blocked = runstate->time[RUNSTATE_blocked] -
+ per_cpu(processed_blocked_time, cpu);
+ mb();
+ } while (sched_time != runstate->state_entry_time);
+
+ /*
+ * Check for the effect of VCPU migration: after a migration the
+ * ITC value can appear to go backwards, which would produce a huge
+ * bogus stolen-time value.  Detect and reject that case here.
+ */
+ if (!time_after_eq(runstate->time[RUNSTATE_blocked],
+ per_cpu(processed_blocked_time, cpu)))
+ blocked = 0;
+
+ if (!time_after_eq(runstate->time[RUNSTATE_runnable] +
+ runstate->time[RUNSTATE_offline],
+ per_cpu(processed_stolen_time, cpu)))
+ stolen = 0;
+
+ if (!time_after(delta_itm + new_itm, ia64_get_itc()))
+ stolentick = ia64_get_itc() - delta_itm - new_itm;
+
+ do_div(stolentick, NS_PER_TICK);
+ stolentick++;
+
+ do_div(stolen, NS_PER_TICK);
+
+ if (stolen > stolentick)
+ stolen = stolentick;
+
+ stolentick -= stolen;
+ do_div(blocked, NS_PER_TICK);
+
+ if (blocked > stolentick)
+ blocked = stolentick;
+
+ if (stolen > 0 || blocked > 0) {
+ account_steal_time(NULL, jiffies_to_cputime(stolen));
+ account_steal_time(idle_task(cpu), jiffies_to_cputime(blocked));
+ run_local_timers();
+
+ if (rcu_pending(cpu))
+ rcu_check_callbacks(cpu, user_mode(regs));
+
+ scheduler_tick();
+ run_posix_cpu_timers(p);
+ delta_itm += local_cpu_data->itm_delta * (stolen + blocked);
+
+ if (cpu == time_keeper_id) {
+ write_seqlock(&xtime_lock);
+ for(i = 0; i < stolen + blocked; i++)
+ do_timer(regs);
+ local_cpu_data->itm_next = delta_itm + new_itm;
+ write_sequnlock(&xtime_lock);
+ } else {
+ local_cpu_data->itm_next = delta_itm + new_itm;
+ }
+ per_cpu(processed_stolen_time,cpu) += NS_PER_TICK * stolen;
+ per_cpu(processed_blocked_time,cpu) += NS_PER_TICK * blocked;
+ }
+ return delta_itm;
+}
+#else
+#define consider_steal_time(new_itm, regs) (0)
+#endif
+
+static irqreturn_t
+timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
+{
+ unsigned long new_itm;
+ unsigned long delta_itm; /* XEN */
+
+ if (unlikely(cpu_is_offline(smp_processor_id()))) {
+ return IRQ_HANDLED;
+ }
+
+ platform_timer_interrupt(irq, dev_id, regs);
+
+ new_itm = local_cpu_data->itm_next;
+
+ if (!time_after(ia64_get_itc(), new_itm))
+ printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
+ ia64_get_itc(), new_itm);
+
+ profile_tick(CPU_PROFILING, regs);
+
+ if (is_running_on_xen()) {
+ delta_itm = consider_steal_time(new_itm, regs);
+ new_itm += delta_itm;
+ if (time_after(new_itm, ia64_get_itc()) && delta_itm)
+ goto skip_process_time_accounting;
+ }
+
+ while (1) {
+ update_process_times(user_mode(regs));
+
+ new_itm += local_cpu_data->itm_delta;
+
+ if (smp_processor_id() == time_keeper_id) {
+ /*
+ * Here we are in the timer irq handler. We have irqs locally
+ * disabled, but we don't know if the timer_bh is running on
+ * another CPU. We need to avoid an SMP race by acquiring the
+ * xtime_lock.
+ */
+ write_seqlock(&xtime_lock);
+ do_timer(regs);
+ local_cpu_data->itm_next = new_itm;
+ write_sequnlock(&xtime_lock);
+ } else
+ local_cpu_data->itm_next = new_itm;
+
+ if (time_after(new_itm, ia64_get_itc()))
+ break;
+ }
+
+skip_process_time_accounting: /* XEN */
+
+ do {
+ /*
+ * If we're too close to the next clock tick for
+ * comfort, we increase the safety margin by
+ * intentionally dropping the next tick(s). We do NOT
+ * update itm.next because that would force us to call
+ * do_timer() which in turn would let our clock run
+ * too fast (with the potentially devastating effect
+ * of losing monotonicity of time).
+ */
+ while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
+ new_itm += local_cpu_data->itm_delta;
+ ia64_set_itm(new_itm);
+ /* double check, in case we got hit by a (slow) PMI: */
+ } while (time_after_eq(ia64_get_itc(), new_itm));
+ return IRQ_HANDLED;
+}
+
+/*
+ * Encapsulate access to the itm structure for SMP.
+ */
+void
+ia64_cpu_local_tick (void)
+{
+ int cpu = smp_processor_id();
+ unsigned long shift = 0, delta;
+
+ /* arrange for the cycle counter to generate a timer interrupt: */
+ ia64_set_itv(IA64_TIMER_VECTOR);
+
+ delta = local_cpu_data->itm_delta;
+ /*
+ * Stagger the timer tick for each CPU so they don't occur all at (almost) the
+ * same time:
+ */
+ if (cpu) {
+ unsigned long hi = 1UL << ia64_fls(cpu);
+ shift = (2*(cpu - hi) + 1) * delta/hi/2;
+ }
+ local_cpu_data->itm_next = ia64_get_itc() + delta + shift;
+ ia64_set_itm(local_cpu_data->itm_next);
+}
+
+static int nojitter;
+
+static int __init nojitter_setup(char *str)
+{
+ nojitter = 1;
+ printk("Jitter checking for ITC timers disabled\n");
+ return 1;
+}
+
+__setup("nojitter", nojitter_setup);
+
+#ifdef CONFIG_XEN
+/* taken from i386/kernel/time-xen.c */
+static void init_missing_ticks_accounting(int cpu)
+{
+ struct vcpu_register_runstate_memory_area area;
+ struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
+
+ memset(runstate, 0, sizeof(*runstate));
+
+ area.addr.v = runstate;
+ HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, cpu, &area);
+
+ per_cpu(processed_blocked_time, cpu) = runstate->time[RUNSTATE_blocked];
+ per_cpu(processed_stolen_time, cpu) = runstate->time[RUNSTATE_runnable]
+ + runstate->time[RUNSTATE_offline];
+}
+#else
+#define init_missing_ticks_accounting(cpu) do {} while (0)
+#endif
+
+void __devinit
+ia64_init_itm (void)
+{
+ unsigned long platform_base_freq, itc_freq;
+ struct pal_freq_ratio itc_ratio, proc_ratio;
+ long status, platform_base_drift, itc_drift;
+
+ /*
+ * According to SAL v2.6, we need to use a SAL call to determine the platform base
+ * frequency and then a PAL call to determine the frequency ratio between the ITC
+ * and the base frequency.
+ */
+ status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
+ &platform_base_freq, &platform_base_drift);
+ if (status != 0) {
+ printk(KERN_ERR "SAL_FREQ_BASE_PLATFORM failed: %s\n", ia64_sal_strerror(status));
+ } else {
+ status = ia64_pal_freq_ratios(&proc_ratio, NULL, &itc_ratio);
+ if (status != 0)
+ printk(KERN_ERR "PAL_FREQ_RATIOS failed with status=%ld\n", status);
+ }
+ if (status != 0) {
+ /* invent "random" values */
+ printk(KERN_ERR
+ "SAL/PAL failed to obtain frequency info---inventing reasonable values\n");
+ platform_base_freq = 100000000;
+ platform_base_drift = -1; /* no drift info */
+ itc_ratio.num = 3;
+ itc_ratio.den = 1;
+ }
+ if (platform_base_freq < 40000000) {
+ printk(KERN_ERR "Platform base frequency %lu bogus---resetting to 75MHz!\n",
+ platform_base_freq);
+ platform_base_freq = 75000000;
+ platform_base_drift = -1;
+ }
+ if (!proc_ratio.den)
+ proc_ratio.den = 1; /* avoid division by zero */
+ if (!itc_ratio.den)
+ itc_ratio.den = 1; /* avoid division by zero */
+
+ itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den;
+
+ local_cpu_data->itm_delta = (itc_freq + HZ/2) / HZ;
+ printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%u/%u, "
+ "ITC freq=%lu.%03luMHz", smp_processor_id(),
+ platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000,
+ itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000);
+
+ if (platform_base_drift != -1) {
+ itc_drift = platform_base_drift*itc_ratio.num/itc_ratio.den;
+ printk("+/-%ldppm\n", itc_drift);
+ } else {
+ itc_drift = -1;
+ printk("\n");
+ }
+
+ local_cpu_data->proc_freq = (platform_base_freq*proc_ratio.num)/proc_ratio.den;
+ local_cpu_data->itc_freq = itc_freq;
+ local_cpu_data->cyc_per_usec = (itc_freq + USEC_PER_SEC/2) / USEC_PER_SEC;
+ local_cpu_data->nsec_per_cyc = ((NSEC_PER_SEC<<IA64_NSEC_PER_CYC_SHIFT)
+ + itc_freq/2)/itc_freq;
+
+ if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
+ itc_interpolator.frequency = local_cpu_data->itc_freq;
+ itc_interpolator.drift = itc_drift;
+#ifdef CONFIG_SMP
+ /* On IA64 in an SMP configuration ITCs are never accurately synchronized.
+ * Jitter compensation requires a cmpxchg which may limit
+ * the scalability of the syscalls for retrieving time.
+ * The ITC synchronization is usually successful to within a few
+ * ITC ticks but this is not a sure thing. If you need to improve
+ * timer performance in SMP situations then boot the kernel with the
+ * "nojitter" option. However, doing so may result in time fluctuating (maybe
+ * even going backward) if the ITC offsets between the individual CPUs
+ * are too large.
+ */
+ if (!nojitter) itc_interpolator.jitter = 1;
+#endif
+ register_time_interpolator(&itc_interpolator);
+ }
+
+ if (is_running_on_xen())
+ init_missing_ticks_accounting(smp_processor_id());
+
+ /* Setup the CPU local timer tick */
+ ia64_cpu_local_tick();
+}
+
+static struct irqaction timer_irqaction = {
+ .handler = timer_interrupt,
+ .flags = IRQF_DISABLED,
+ .name = "timer"
+};
+
+void __devinit ia64_disable_timer(void)
+{
+ ia64_set_itv(1 << 16);
+}
+
+void __init
+time_init (void)
+{
+ register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
+ efi_gettimeofday(&xtime);
+ ia64_init_itm();
+
+ /*
+ * Initialize wall_to_monotonic such that adding it to xtime will yield zero, the
+ * tv_nsec field must be normalized (i.e., 0 <= nsec < NSEC_PER_SEC).
+ */
+ set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
+}
+
+/*
+ * Generic udelay assumes that if preemption is allowed and the thread
+ * migrates to another CPU, that the ITC values are synchronized across
+ * all CPUs.
+ */
+static void
+ia64_itc_udelay (unsigned long usecs)
+{
+ unsigned long start = ia64_get_itc();
+ unsigned long end = start + usecs*local_cpu_data->cyc_per_usec;
+
+ while (time_before(ia64_get_itc(), end))
+ cpu_relax();
+}
+
+void (*ia64_udelay)(unsigned long usecs) = &ia64_itc_udelay;
+
+void
+udelay (unsigned long usecs)
+{
+ (*ia64_udelay)(usecs);
+}
+EXPORT_SYMBOL(udelay);
+
+static unsigned long long ia64_itc_printk_clock(void)
+{
+ if (ia64_get_kr(IA64_KR_PER_CPU_DATA))
+ return sched_clock();
+ return 0;
+}
+
+static unsigned long long ia64_default_printk_clock(void)
+{
+ return (unsigned long long)(jiffies_64 - INITIAL_JIFFIES) *
+ (1000000000/HZ);
+}
+
+unsigned long long (*ia64_printk_clock)(void) = &ia64_default_printk_clock;
+
+unsigned long long printk_clock(void)
+{
+ return ia64_printk_clock();
+}
+
+void __init
+ia64_setup_printk_clock(void)
+{
+ if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT))
+ ia64_printk_clock = ia64_itc_printk_clock;
+}
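
The stolen and blocked times that Xen reports through the runstate area are nanosecond counters, and consider_steal_time() above converts the deltas to timer ticks before accounting them and advancing itm_next. The stand-alone sketch below shows just that conversion step; it is not part of the patch, it assumes HZ=1024 (the usual ia64 value), and it omits the clamping against the elapsed ITC (stolentick) that the real code performs.

/*
 * Illustration only (not part of the patch): nanosecond runstate
 * deltas to missed timer ticks, assuming HZ=1024.  The kernel code
 * uses do_div() and feeds the results to account_steal_time().
 */
#include <stdio.h>

#define HZ          1024UL
#define NS_PER_TICK (1000000000UL / HZ)		/* ~976562 ns */

int main(void)
{
	/* Hypothetical deltas read from the Xen runstate area:
	 * runnable+offline time counts as stolen, blocked as idle. */
	unsigned long stolen_ns  = 2100000;	/* 2.1 ms */
	unsigned long blocked_ns =  500000;	/* 0.5 ms */

	unsigned long stolen  = stolen_ns  / NS_PER_TICK;	/* -> 2 ticks */
	unsigned long blocked = blocked_ns / NS_PER_TICK;	/* -> 0 ticks */

	/* The timekeeper CPU then calls do_timer() once per missed
	 * tick and pushes itm_next forward by itm_delta per tick. */
	printf("account %lu stolen, %lu blocked ticks\n", stolen, blocked);
	return 0;
}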
diff --git a/linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c b/linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c
index 695dc3bce5..c9b542b98b 100644
--- a/linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c
+++ b/linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c
@@ -205,6 +205,18 @@ static void contiguous_bitmap_clear(
#define MAX_CONTIG_ORDER 7
static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
+/* Width of DMA addresses. 30 bits is a b44 limitation. */
+#define DEFAULT_DMA_BITS 30
+static unsigned int xen_ia64_dma_bits = DEFAULT_DMA_BITS;
+
+static int __init
+setup_dma_bits(char *str)
+{
+ xen_ia64_dma_bits = simple_strtoul(str, NULL, 0);
+ return 0;
+}
+__setup("xen_ia64_dma_bits=", setup_dma_bits);
+
/* Ensure multi-page extents are contiguous in machine memory. */
int
__xen_create_contiguous_region(unsigned long vstart,
@@ -234,6 +246,15 @@ __xen_create_contiguous_region(unsigned long vstart,
.nr_exchanged = 0
};
+ /*
+ * XXX xen/ia64 VMM bug workaround:
+ * c/s 13366:ed73ff8440d8 of xen-unstable.hg revealed that
+ * XENMEM_exchange is broken on Xen/ia64.
+ * Work around it here until a proper fix is in place.
+ */
+ if (address_bits < xen_ia64_dma_bits)
+ return -ENOSYS;
+
if (unlikely(order > MAX_CONTIG_ORDER))
return -ENOMEM;
diff --git a/linux-2.6-xen-sparse/arch/ia64/xen/xcom_hcall.c b/linux-2.6-xen-sparse/arch/ia64/xen/xcom_hcall.c
index 95588e7304..b14db04ee6 100644
--- a/linux-2.6-xen-sparse/arch/ia64/xen/xcom_hcall.c
+++ b/linux-2.6-xen-sparse/arch/ia64/xen/xcom_hcall.c
@@ -33,6 +33,7 @@
#include <xen/interface/acm_ops.h>
#include <xen/interface/hvm/params.h>
#include <xen/interface/xenoprof.h>
+#include <xen/interface/vcpu.h>
#include <asm/hypercall.h>
#include <asm/page.h>
#include <asm/uaccess.h>
@@ -363,3 +364,20 @@ xencomm_hypercall_perfmon_op(unsigned long cmd, void* arg, unsigned long count)
xencomm_create_inline(arg),
count);
}
+
+long
+xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg)
+{
+ switch (cmd) {
+ case VCPUOP_register_runstate_memory_area:
+ xencommize_memory_reservation((xen_memory_reservation_t *)arg);
+ break;
+
+ default:
+ printk("%s: unknown vcpu op %d\n", __func__, cmd);
+ return -ENOSYS;
+ }
+
+ return xencomm_arch_hypercall_vcpu_op(cmd, cpu,
+ xencomm_create_inline(arg));
+}
diff --git a/linux-2.6-xen-sparse/include/asm-ia64/hypercall.h b/linux-2.6-xen-sparse/include/asm-ia64/hypercall.h
index ee4d0ce604..b8f393a8c8 100644
--- a/linux-2.6-xen-sparse/include/asm-ia64/hypercall.h
+++ b/linux-2.6-xen-sparse/include/asm-ia64/hypercall.h
@@ -205,6 +205,12 @@ xencomm_arch_hypercall_hvm_op(int cmd, void *arg)
return _hypercall2(unsigned long, hvm_op, cmd, arg);
}
+static inline long
+xencomm_arch_hypercall_vcpu_op(int cmd, int cpu, void *arg)
+{
+ return _hypercall3(long, vcpu_op, cmd, cpu, arg);
+}
+
static inline int
HYPERVISOR_physdev_op(int cmd, void *arg)
{
@@ -405,5 +411,6 @@ xencomm_arch_hypercall_perfmon_op(unsigned long cmd,
#endif
#define HYPERVISOR_suspend xencomm_hypercall_suspend
+#define HYPERVISOR_vcpu_op xencomm_hypercall_vcpu_op
#endif /* __HYPERCALL_H__ */
diff --git a/linux-2.6-xen-sparse/include/asm-ia64/xen/xcom_hcall.h b/linux-2.6-xen-sparse/include/asm-ia64/xen/xcom_hcall.h
index 79e790d75b..913f2761ee 100644
--- a/linux-2.6-xen-sparse/include/asm-ia64/xen/xcom_hcall.h
+++ b/linux-2.6-xen-sparse/include/asm-ia64/xen/xcom_hcall.h
@@ -51,6 +51,8 @@ extern int xencomm_hypercall_xenoprof_op(int op, void *arg);
extern int xencomm_hypercall_perfmon_op(unsigned long cmd, void* arg,
unsigned long count);
+extern long xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg);
+
/* Using mini xencomm. */
extern int xencomm_mini_hypercall_console_io(int cmd, int count, char *str);
diff --git a/unmodified_drivers/linux-2.6/platform-pci/xen_support.c b/unmodified_drivers/linux-2.6/platform-pci/xen_support.c
index 589d8365ec..431115a8cc 100644
--- a/unmodified_drivers/linux-2.6/platform-pci/xen_support.c
+++ b/unmodified_drivers/linux-2.6/platform-pci/xen_support.c
@@ -45,7 +45,13 @@ unsigned long __hypercall(unsigned long a1, unsigned long a2,
return __res;
}
EXPORT_SYMBOL(__hypercall);
-#endif
+
+int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
+{
+ return xencomm_mini_hypercall_grant_table_op(cmd, uop, count);
+}
+EXPORT_SYMBOL(HYPERVISOR_grant_table_op);
+#endif /* __ia64__ */
void xen_machphys_update(unsigned long mfn, unsigned long pfn)
{
diff --git a/xen/arch/ia64/linux-xen/entry.S b/xen/arch/ia64/linux-xen/entry.S
index 0f6aaf80d7..9f008c6841 100644
--- a/xen/arch/ia64/linux-xen/entry.S
+++ b/xen/arch/ia64/linux-xen/entry.S
@@ -1509,7 +1509,7 @@ ia64_hypercall_table:
data8 do_ni_hypercall /* do_vm_assist */
data8 do_ni_hypercall /* do_update_va_mapping_othe */
data8 do_ni_hypercall /* (x86 only) */
- data8 do_ni_hypercall /* do_vcpu_op */
+ data8 do_vcpu_op /* do_vcpu_op */
data8 do_ni_hypercall /* (x86_64 only) */ /* 25 */
data8 do_ni_hypercall /* do_mmuext_op */
data8 do_ni_hypercall /* do_acm_op */
diff --git a/xen/arch/ia64/linux-xen/sn/kernel/io_init.c b/xen/arch/ia64/linux-xen/sn/kernel/io_init.c
index 72a3b18f85..ca0b66d0d8 100644
--- a/xen/arch/ia64/linux-xen/sn/kernel/io_init.c
+++ b/xen/arch/ia64/linux-xen/sn/kernel/io_init.c
@@ -20,9 +20,7 @@
#include <asm/sn/module.h>
#include <asm/sn/pcibr_provider.h>
#include <asm/sn/pcibus_provider_defs.h>
-#ifndef XEN
#include <asm/sn/pcidev.h>
-#endif
#include <asm/sn/simulator.h>
#include <asm/sn/sn_sal.h>
#ifndef XEN
@@ -41,6 +39,7 @@
extern void sn_init_cpei_timer(void);
extern void register_sn_procfs(void);
#ifdef XEN
+#define pci_dev_get(dev) do{}while(0)
extern void sn_irq_lh_init(void);
#endif
@@ -65,7 +64,6 @@ int sn_ioif_inited; /* SN I/O infrastructure initialized? */
struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */
-#ifndef XEN
static int max_segment_number; /* Default highest segment number */
static int max_pcibus_number = 255; /* Default highest pci bus number */
@@ -97,7 +95,6 @@ static struct sn_pcibus_provider sn_pci_default_provider = {
.dma_unmap = sn_default_pci_unmap,
.bus_fixup = sn_default_pci_bus_fixup,
};
-#endif
/*
* Retrieve the DMA Flush List given nasid, widget, and device.
@@ -148,7 +145,6 @@ static inline u64 sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
return ret_stuff.v0;
}
-#ifndef XEN
/*
* Retrieve the pci device information given the bus and device|function number.
*/
@@ -168,6 +164,7 @@ sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
return ret_stuff.v0;
}
+#ifndef XEN
/*
* sn_pcidev_info_get() - Retrieve the pcidev_info struct for the specified
* device.
@@ -372,7 +369,9 @@ void sn_pci_unfixup_slot(struct pci_dev *dev)
pci_dev_put(host_pci_dev);
pci_dev_put(dev);
}
+#endif
+#ifndef XEN
/*
* sn_pci_fixup_slot() - This routine sets up a slot's resources
* consistent with the Linux PCI abstraction layer. Resources acquired
@@ -437,10 +436,12 @@ void sn_pci_fixup_slot(struct pci_dev *dev)
addr = ((addr << 4) >> 4) | __IA64_UNCACHED_OFFSET;
dev->resource[idx].start = addr;
dev->resource[idx].end = addr + size;
+#ifndef XEN
if (dev->resource[idx].flags & IORESOURCE_IO)
dev->resource[idx].parent = &ioport_resource;
else
dev->resource[idx].parent = &iomem_resource;
+#endif
}
/* Create a pci_window in the pci_controller struct for
* each device resource.
@@ -480,6 +481,7 @@ void sn_pci_fixup_slot(struct pci_dev *dev)
kfree(sn_irq_info);
}
}
+#endif
/*
* sn_pci_controller_fixup() - This routine sets up a bus's resources
@@ -512,6 +514,7 @@ void sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus)
controller = &sn_controller->pci_controller;
controller->segment = segment;
+#ifndef XEN
if (bus == NULL) {
bus = pci_scan_bus(busnum, &pci_root_ops, controller);
if (bus == NULL)
@@ -533,6 +536,7 @@ void sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus)
if (prom_bussoft_ptr->bs_asic_type == PCIIO_ASIC_TYPE_PPB)
goto error_return; /* no further fixup necessary */
+#endif
provider = sn_pci_provider[prom_bussoft_ptr->bs_asic_type];
if (provider == NULL)
goto error_return; /* no provider registered for this asic */
@@ -562,14 +566,18 @@ void sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus)
controller->window[0].resource.start = prom_bussoft_ptr->bs_legacy_io;
controller->window[0].resource.end =
controller->window[0].resource.start + 0xffff;
+#ifndef XEN
controller->window[0].resource.parent = &ioport_resource;
+#endif
controller->window[1].offset = prom_bussoft_ptr->bs_legacy_mem;
controller->window[1].resource.name = "legacy_mem";
controller->window[1].resource.flags = IORESOURCE_MEM;
controller->window[1].resource.start = prom_bussoft_ptr->bs_legacy_mem;
controller->window[1].resource.end =
controller->window[1].resource.start + (1024 * 1024) - 1;
+#ifndef XEN
controller->window[1].resource.parent = &iomem_resource;
+#endif
controller->windows = 2;
/*
@@ -608,6 +616,7 @@ error_return:
return;
}
+#ifndef XEN
void sn_bus_store_sysdata(struct pci_dev *dev)
{
struct sysdata_el *element;
@@ -644,17 +653,16 @@ void sn_bus_free_sysdata(void)
#define PCI_BUSES_TO_SCAN 256
-static int __init sn_pci_init(void)
+static int __init sn_io_early_init(void)
{
-#ifndef XEN
int i, j;
+#ifndef XEN
struct pci_dev *pci_dev = NULL;
#endif
if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM())
return 0;
-#ifndef XEN
/*
* prime sn_pci_provider[]. Individual provider init routines will
* override their respective default entries.
@@ -663,6 +671,7 @@ static int __init sn_pci_init(void)
for (i = 0; i < PCIIO_ASIC_MAX_TYPES; i++)
sn_pci_provider[i] = &sn_pci_default_provider;
+#ifndef XEN
pcibr_init_provider();
tioca_init_provider();
tioce_init_provider();
@@ -683,7 +692,7 @@ static int __init sn_pci_init(void)
#ifdef CONFIG_PROC_FS
register_sn_procfs();
#endif
-
+#endif
/* busses are not known yet ... */
for (i = 0; i <= max_segment_number; i++)
for (j = 0; j <= max_pcibus_number; j++)
@@ -695,6 +704,7 @@ static int __init sn_pci_init(void)
* information.
*/
+#ifndef XEN
while ((pci_dev =
pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pci_dev)) != NULL)
sn_pci_fixup_slot(pci_dev);
@@ -769,9 +779,9 @@ void sn_generate_path(struct pci_bus *pci_bus, char *address)
#endif
#ifdef XEN
-__initcall(sn_pci_init);
+__initcall(sn_io_early_init);
#else
-subsys_initcall(sn_pci_init);
+subsys_initcall(sn_io_early_init);
#endif
#ifndef XEN
EXPORT_SYMBOL(sn_pci_fixup_slot);
diff --git a/xen/arch/ia64/linux-xen/sn/kernel/irq.c b/xen/arch/ia64/linux-xen/sn/kernel/irq.c
index cc126d71ee..12fcf15d87 100644
--- a/xen/arch/ia64/linux-xen/sn/kernel/irq.c
+++ b/xen/arch/ia64/linux-xen/sn/kernel/irq.c
@@ -223,6 +223,8 @@ static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
struct hw_interrupt_type irq_type_sn = {
#ifndef XEN
.name = "SN hub",
+#else
+ .typename = "SN hub",
#endif
.startup = sn_startup_irq,
.shutdown = sn_shutdown_irq,
@@ -242,19 +244,24 @@ unsigned int sn_local_vector_to_irq(u8 vector)
void sn_irq_init(void)
{
-#ifndef XEN
int i;
irq_desc_t *base_desc = irq_desc;
+#ifndef XEN
ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR;
ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR;
+#endif
for (i = 0; i < NR_IRQS; i++) {
+#ifdef XEN
+ if (base_desc[i].handler == &no_irq_type) {
+ base_desc[i].handler = &irq_type_sn;
+#else
if (base_desc[i].chip == &no_irq_type) {
base_desc[i].chip = &irq_type_sn;
+#endif
}
}
-#endif
}
#ifndef XEN
diff --git a/xen/arch/ia64/vmx/viosapic.c b/xen/arch/ia64/vmx/viosapic.c
index f52a1f6590..bed4e2bec6 100644
--- a/xen/arch/ia64/vmx/viosapic.c
+++ b/xen/arch/ia64/vmx/viosapic.c
@@ -104,11 +104,11 @@ static void service_iosapic(struct viosapic *viosapic)
while ( (irq = iosapic_get_highest_irq(viosapic)) != -1 )
{
- viosapic_deliver(viosapic, irq);
-
if ( viosapic->redirtbl[irq].trig_mode == SAPIC_LEVEL )
viosapic->isr |= (1UL << irq);
+ viosapic_deliver(viosapic, irq);
+
viosapic->irr &= ~(1UL << irq);
}
}
diff --git a/xen/arch/ia64/vmx/vmmu.c b/xen/arch/ia64/vmx/vmmu.c
index 901c07bed4..917f274bf9 100644
--- a/xen/arch/ia64/vmx/vmmu.c
+++ b/xen/arch/ia64/vmx/vmmu.c
@@ -161,6 +161,7 @@ static void free_domain_vhpt(struct vcpu *v)
if (v->arch.vhpt.hash) {
page = virt_to_page(v->arch.vhpt.hash);
free_domheap_pages(page, VCPU_VHPT_ORDER);
+ v->arch.vhpt.hash = 0;
}
return;
@@ -590,6 +591,7 @@ IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu, u64 va, u64 ps)
struct domain *d = vcpu->domain;
struct vcpu *v;
struct ptc_ga_args args;
+ int proc;
args.vadr = va;
vcpu_get_rr(vcpu, va, &args.rid);
@@ -599,20 +601,21 @@ IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu, u64 va, u64 ps)
continue;
args.vcpu = v;
- if (v->processor != vcpu->processor) {
- int proc;
+again: /* Try again if VCPU has migrated. */
+ proc = v->processor;
+ if (proc != vcpu->processor) {
/* Flush VHPT on remote processors. */
- do {
- proc = v->processor;
- smp_call_function_single(v->processor,
- &ptc_ga_remote_func, &args, 0, 1);
- /* Try again if VCPU has migrated. */
- } while (proc != v->processor);
- }
- else if(v == vcpu)
+ smp_call_function_single(v->processor,
+ &ptc_ga_remote_func, &args, 0, 1);
+ if (proc != v->processor)
+ goto again;
+ } else if (v == vcpu) {
vmx_vcpu_ptc_l(v, va, ps);
- else
+ } else {
ptc_ga_remote_func(&args);
+ if (proc != v->processor)
+ goto again;
+ }
}
return IA64_NO_FAULT;
}
diff --git a/xen/arch/ia64/vmx/vmx_entry.S b/xen/arch/ia64/vmx/vmx_entry.S
index 74bf2ca4d3..ef400c4f53 100644
--- a/xen/arch/ia64/vmx/vmx_entry.S
+++ b/xen/arch/ia64/vmx/vmx_entry.S
@@ -477,6 +477,11 @@ GLOBAL_ENTRY(ia64_leave_hypercall)
* resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
* needs to be redone.
*/
+ ;;
+ adds r16=PT(R8)+16,r12
+ ;;
+ st8 [r16]=r8
+ ;;
(pUStk) rsm psr.i
cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall
(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
@@ -484,6 +489,11 @@ GLOBAL_ENTRY(ia64_leave_hypercall)
br.call.sptk.many b0=leave_hypervisor_tail
.work_processed_syscall:
//clean up bank 1 registers
+ ;;
+ adds r16=PT(R8)+16,r12
+ ;;
+ ld8 r8=[r16]
+ ;;
mov r16=r0
mov r17=r0
mov r18=r0
diff --git a/xen/arch/ia64/vmx/vmx_process.c b/xen/arch/ia64/vmx/vmx_process.c
index 5e40330476..4ee7d4e805 100644
--- a/xen/arch/ia64/vmx/vmx_process.c
+++ b/xen/arch/ia64/vmx/vmx_process.c
@@ -92,6 +92,11 @@ void vmx_reflect_interruption(u64 ifa, u64 isr, u64 iim,
switch (vec) {
+ case 22: // IA64_INST_ACCESS_RIGHTS_VECTOR
+ if (vhpt_access_rights_fixup(vcpu, ifa, 0))
+ return;
+ break;
+
case 25: // IA64_DISABLED_FPREG_VECTOR
if (FP_PSR(vcpu) & IA64_PSR_DFH) {
diff --git a/xen/arch/ia64/vmx/vtlb.c b/xen/arch/ia64/vmx/vtlb.c
index 7ed628ba66..9f01627602 100644
--- a/xen/arch/ia64/vmx/vtlb.c
+++ b/xen/arch/ia64/vmx/vtlb.c
@@ -196,6 +196,37 @@ void thash_vhpt_insert(VCPU *v, u64 pte, u64 itir, u64 va, int type)
ia64_srlz_i();
}
}
+
+int vhpt_access_rights_fixup(VCPU *v, u64 ifa, int is_data)
+{
+ thash_data_t *trp, *data;
+ u64 ps, tag, mask;
+
+ trp = __vtr_lookup(v, ifa, is_data);
+ if (trp) {
+ ps = _REGION_PAGE_SIZE(ia64_get_rr(ifa));
+ if (trp->ps < ps)
+ return 0;
+ ifa = PAGEALIGN(ifa, ps);
+ data = (thash_data_t *)ia64_thash(ifa);
+ tag = ia64_ttag(ifa);
+ do {
+ if (data->etag == tag) {
+ mask = trp->page_flags & PAGE_FLAGS_AR_PL_MASK;
+ if (mask != (data->page_flags & PAGE_FLAGS_AR_PL_MASK)) {
+ data->page_flags &= ~PAGE_FLAGS_AR_PL_MASK;
+ data->page_flags |= mask;
+ machine_tlb_purge(ifa, ps);
+ return 1;
+ }
+ return 0;
+ }
+ data = data->next;
+ } while(data);
+ }
+ return 0;
+}
+
/*
* vhpt lookup
*/
@@ -642,7 +673,7 @@ void thash_init(thash_cb_t *hcb, u64 sz)
}while(num);
hcb->cch_freelist = p = hcb->cch_buf;
- num = (hcb->cch_sz/sizeof(thash_data_t))-1;
+ num = hcb->cch_sz / sizeof(thash_data_t);
do{
p->page_flags = 0;
p->itir = 0;
@@ -650,6 +681,6 @@ void thash_init(thash_cb_t *hcb, u64 sz)
p++;
num--;
}while(num);
- p->itir = 0;
- p->next = NULL;
+
+ (p - 1)->next = NULL;
}
diff --git a/xen/arch/ia64/xen/dom0_ops.c b/xen/arch/ia64/xen/dom0_ops.c
index b23d222e84..3cd3b0ea5d 100644
--- a/xen/arch/ia64/xen/dom0_ops.c
+++ b/xen/arch/ia64/xen/dom0_ops.c
@@ -339,13 +339,17 @@ dom0vp_ioremap(struct domain *d, unsigned long mpaddr, unsigned long size)
if (size == 0)
size = PAGE_SIZE;
+ if (size == 0)
+ printk(XENLOG_WARNING "ioremap(): Trying to map %lx, size 0\n", mpaddr);
+
end = PAGE_ALIGN(mpaddr + size);
if (!iomem_access_permitted(d, mpaddr >> PAGE_SHIFT,
(end >> PAGE_SHIFT) - 1))
return -EPERM;
- return assign_domain_mmio_page(d, mpaddr, size);
+ return assign_domain_mmio_page(d, mpaddr, mpaddr, size,
+ ASSIGN_writable | ASSIGN_nocache);
}
unsigned long
diff --git a/xen/arch/ia64/xen/dom_fw.c b/xen/arch/ia64/xen/dom_fw.c
index c69ab89c80..7402d3f77b 100644
--- a/xen/arch/ia64/xen/dom_fw.c
+++ b/xen/arch/ia64/xen/dom_fw.c
@@ -534,6 +534,7 @@ complete_dom0_memmap(struct domain *d,
u64 start = md->phys_addr;
u64 size = md->num_pages << EFI_PAGE_SHIFT;
u64 end = start + size;
+ u64 mpaddr;
unsigned long flags;
switch (md->type) {
@@ -566,10 +567,22 @@ complete_dom0_memmap(struct domain *d,
break;
case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
+ flags = ASSIGN_writable; /* dummy - zero */
+ if (md->attribute & EFI_MEMORY_UC)
+ flags |= ASSIGN_nocache;
+
+ if (start > 0x1ffffffff0000000UL) {
+ mpaddr = 0x4000000000000UL - size;
+ printk(XENLOG_INFO "Remapping IO ports from "
+ "%lx to %lx\n", start, mpaddr);
+ } else
+ mpaddr = start;
+
/* Map into dom0. */
- assign_domain_mmio_page(d, start, size);
+ assign_domain_mmio_page(d, mpaddr, start, size, flags);
/* Copy descriptor. */
*dom_md = *md;
+ dom_md->phys_addr = mpaddr;
dom_md->virt_addr = 0;
num_mds++;
break;
@@ -652,8 +665,12 @@ complete_dom0_memmap(struct domain *d,
if (domain_page_mapped(d, addr))
continue;
- if (efi_mmio(addr, PAGE_SIZE))
- assign_domain_mmio_page(d, addr, PAGE_SIZE);
+ if (efi_mmio(addr, PAGE_SIZE)) {
+ unsigned long flags;
+ flags = ASSIGN_writable | ASSIGN_nocache;
+ assign_domain_mmio_page(d, addr, addr,
+ PAGE_SIZE, flags);
+ }
}
return num_mds;
}
diff --git a/xen/arch/ia64/xen/domain.c b/xen/arch/ia64/xen/domain.c
index 2bb2ef8172..ba58064151 100644
--- a/xen/arch/ia64/xen/domain.c
+++ b/xen/arch/ia64/xen/domain.c
@@ -50,6 +50,7 @@
#include <xen/guest_access.h>
#include <asm/tlb_track.h>
#include <asm/perfmon.h>
+#include <public/vcpu.h>
unsigned long dom0_size = 512*1024*1024;
@@ -262,6 +263,9 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
vcpu_info[current->vcpu_id].evtchn_upcall_mask;
__ia64_per_cpu_var(current_psr_ic_addr) =
(int *)(nd->arch.shared_info_va + XSI_PSR_IC_OFS);
+ /* steal time accounting */
+ if (!guest_handle_is_null(runstate_guest(current)))
+ __copy_to_guest(runstate_guest(current), &current->runstate, 1);
} else {
/* When switching to idle domain, only need to disable vhpt
* walker. Then all accesses happen within idle context will
@@ -1261,6 +1265,45 @@ void sync_vcpu_execstate(struct vcpu *v)
// FIXME SMP: Anything else needed here for SMP?
}
+/* This function is taken from xen/arch/x86/domain.c */
+long
+arch_do_vcpu_op(int cmd, struct vcpu *v, XEN_GUEST_HANDLE(void) arg)
+{
+ long rc = 0;
+
+ switch (cmd) {
+ case VCPUOP_register_runstate_memory_area:
+ {
+ struct vcpu_register_runstate_memory_area area;
+ struct vcpu_runstate_info runstate;
+
+ rc = -EFAULT;
+ if (copy_from_guest(&area, arg, 1))
+ break;
+
+ if (!guest_handle_okay(area.addr.h, 1))
+ break;
+
+ rc = 0;
+ runstate_guest(v) = area.addr.h;
+
+ if (v == current) {
+ __copy_to_guest(runstate_guest(v), &v->runstate, 1);
+ } else {
+ vcpu_runstate_get(v, &runstate);
+ __copy_to_guest(runstate_guest(v), &runstate, 1);
+ }
+
+ break;
+ }
+ default:
+ rc = -ENOSYS;
+ break;
+ }
+
+ return rc;
+}
+
static void parse_dom0_mem(char *s)
{
dom0_size = parse_size_and_unit(s, NULL);
diff --git a/xen/arch/ia64/xen/fw_emul.c b/xen/arch/ia64/xen/fw_emul.c
index 8a4897deaf..bacbfae510 100644
--- a/xen/arch/ia64/xen/fw_emul.c
+++ b/xen/arch/ia64/xen/fw_emul.c
@@ -22,6 +22,7 @@
#include <linux/efi.h>
#include <asm/pal.h>
#include <asm/sal.h>
+#include <asm/sn/sn_sal.h>
#include <asm/xenmca.h>
#include <public/sched.h>
@@ -131,6 +132,7 @@ sal_emulator (long index, unsigned long in1, unsigned long in2,
unsigned long in3, unsigned long in4, unsigned long in5,
unsigned long in6, unsigned long in7)
{
+ struct ia64_sal_retval ret_stuff;
unsigned long r9 = 0;
unsigned long r10 = 0;
long r11 = 0;
@@ -375,8 +377,69 @@ sal_emulator (long index, unsigned long in1, unsigned long in2,
if (!test_and_set_bit(_VCPUF_down, &current->vcpu_flags))
vcpu_sleep_nosync(current);
break;
+ case SN_SAL_GET_MASTER_NASID:
+ status = -1;
+ if (current->domain == dom0) {
+ printk("*** Emulating SN_SAL_GET_MASTER_NASID ***\n");
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_MASTER_NASID,
+ 0, 0, 0, 0, 0, 0, 0);
+ status = ret_stuff.status;
+ r9 = ret_stuff.v0;
+ r10 = ret_stuff.v1;
+ r11 = ret_stuff.v2;
+ }
+ break;
+ case SN_SAL_GET_KLCONFIG_ADDR:
+ status = -1;
+ if (current->domain == dom0) {
+ printk("*** Emulating SN_SAL_GET_KLCONFIG_ADDR ***\n");
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_KLCONFIG_ADDR,
+ in1, 0, 0, 0, 0, 0, 0);
+ status = ret_stuff.status;
+ r9 = ret_stuff.v0;
+ r10 = ret_stuff.v1;
+ r11 = ret_stuff.v2;
+ }
+ break;
+ case SN_SAL_GET_SAPIC_INFO:
+ status = -1;
+ if (current->domain == dom0) {
+ printk("*** Emulating SN_SAL_GET_SAPIC_INFO ***\n");
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_SAPIC_INFO, in1,
+ 0, 0, 0, 0, 0, 0);
+ status = ret_stuff.status;
+ r9 = ret_stuff.v0;
+ r10 = ret_stuff.v1;
+ r11 = ret_stuff.v2;
+ }
+ break;
+ case SN_SAL_GET_SN_INFO:
+ status = -1;
+ if (current->domain == dom0) {
+ printk("*** Emulating SN_SAL_GET_SN_INFO ***\n");
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_SN_INFO, in1,
+ 0, 0, 0, 0, 0, 0);
+ status = ret_stuff.status;
+ r9 = ret_stuff.v0;
+ r10 = ret_stuff.v1;
+ r11 = ret_stuff.v2;
+ }
+ break;
+ case SN_SAL_IOIF_GET_HUBDEV_INFO:
+ status = -1;
+ if (current->domain == dom0) {
+ printk("*** Emulating SN_SAL_IOIF_GET_HUBDEV_INFO ***\n");
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_IOIF_GET_HUBDEV_INFO,
+ in1, in2, 0, 0, 0, 0, 0);
+ status = ret_stuff.status;
+ r9 = ret_stuff.v0;
+ r10 = ret_stuff.v1;
+ r11 = ret_stuff.v2;
+ }
+ break;
default:
- printk("*** CALLED SAL_ WITH UNKNOWN INDEX. IGNORED...\n");
+ printk("*** CALLED SAL_ WITH UNKNOWN INDEX (%lx). "
+ "IGNORED...\n", index);
status = -1;
break;
}
@@ -412,6 +475,8 @@ xen_pal_emulator(unsigned long index, u64 in1, u64 in2, u64 in3)
unsigned long r10 = 0;
unsigned long r11 = 0;
long status = PAL_STATUS_UNIMPLEMENTED;
+ unsigned long flags;
+ int processor;
if (running_on_sim)
return pal_emulator_static(index);
@@ -594,18 +659,20 @@ xen_pal_emulator(unsigned long index, u64 in1, u64 in2, u64 in3)
* Clear psr.ic when call PAL_CACHE_FLUSH
*/
r10 = in3;
+ local_irq_save(flags);
+ processor = current->processor;
status = ia64_pal_cache_flush(in1, in2, &r10, &r9);
+ local_irq_restore(flags);
if (status != 0)
panic_domain(NULL, "PAL_CACHE_FLUSH ERROR, "
"status %lx", status);
if (in1 == PAL_CACHE_TYPE_COHERENT) {
- int cpu = current->processor;
cpus_setall(current->arch.cache_coherent_map);
- cpu_clear(cpu, current->arch.cache_coherent_map);
+ cpu_clear(processor, current->arch.cache_coherent_map);
cpus_setall(cpu_cache_coherent_map);
- cpu_clear(cpu, cpu_cache_coherent_map);
+ cpu_clear(processor, cpu_cache_coherent_map);
}
break;
case PAL_PERF_MON_INFO:
diff --git a/xen/arch/ia64/xen/mm.c b/xen/arch/ia64/xen/mm.c
index 543ffa1e4d..f40b58c9f2 100644
--- a/xen/arch/ia64/xen/mm.c
+++ b/xen/arch/ia64/xen/mm.c
@@ -1048,9 +1048,13 @@ efi_mmio(unsigned long physaddr, unsigned long size)
}
unsigned long
-assign_domain_mmio_page(struct domain *d,
- unsigned long mpaddr, unsigned long size)
+assign_domain_mmio_page(struct domain *d, unsigned long mpaddr,
+ unsigned long phys_addr, unsigned long size,
+ unsigned long flags)
{
+ unsigned long addr = mpaddr & PAGE_MASK;
+ unsigned long end = PAGE_ALIGN(mpaddr + size);
+
if (size == 0) {
gdprintk(XENLOG_INFO, "%s: domain %p mpaddr 0x%lx size = 0x%lx\n",
__func__, d, mpaddr, size);
@@ -1062,7 +1066,12 @@ assign_domain_mmio_page(struct domain *d,
#endif
return -EINVAL;
}
- assign_domain_same_page(d, mpaddr, size, ASSIGN_writable | ASSIGN_nocache);
+
+ for (phys_addr &= PAGE_MASK; addr < end;
+ addr += PAGE_SIZE, phys_addr += PAGE_SIZE) {
+ __assign_domain_page(d, addr, phys_addr, flags);
+ }
+
return mpaddr;
}
@@ -1894,9 +1903,20 @@ void __free_pages(struct page_info *page, unsigned int order)
free_xenheap_page(page);
}
+static int opt_p2m_xenheap;
+boolean_param("p2m_xenheap", opt_p2m_xenheap);
+
void *pgtable_quicklist_alloc(void)
{
void *p;
+ if (!opt_p2m_xenheap) {
+ struct page_info *page = alloc_domheap_page(NULL);
+ if (page == NULL)
+ return NULL;
+ p = page_to_virt(page);
+ clear_page(p);
+ return p;
+ }
p = alloc_xenheap_pages(0);
if (p)
clear_page(p);
@@ -1905,12 +1925,15 @@ void *pgtable_quicklist_alloc(void)
void pgtable_quicklist_free(void *pgtable_entry)
{
- free_xenheap_page(pgtable_entry);
+ if (!opt_p2m_xenheap)
+ free_domheap_page(virt_to_page(pgtable_entry));
+ else
+ free_xenheap_page(pgtable_entry);
}
void put_page_type(struct page_info *page)
{
- u32 nx, x, y = page->u.inuse.type_info;
+ u64 nx, x, y = page->u.inuse.type_info;
again:
do {
@@ -1958,7 +1981,7 @@ void put_page_type(struct page_info *page)
int get_page_type(struct page_info *page, u32 type)
{
- u32 nx, x, y = page->u.inuse.type_info;
+ u64 nx, x, y = page->u.inuse.type_info;
ASSERT(!(type & ~PGT_type_mask));
@@ -2004,7 +2027,7 @@ int get_page_type(struct page_info *page, u32 type)
{
if ( ((x & PGT_type_mask) != PGT_l2_page_table) ||
(type != PGT_l1_page_table) )
- MEM_LOG("Bad type (saw %08x != exp %08x) "
+ MEM_LOG("Bad type (saw %08lx != exp %08x) "
"for mfn %016lx (pfn %016lx)",
x, type, page_to_mfn(page),
get_gpfn_from_mfn(page_to_mfn(page)));
@@ -2081,18 +2104,33 @@ arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
break;
case XENMAPSPACE_grant_table:
spin_lock(&d->grant_table->lock);
+
+ if ((xatp.idx >= nr_grant_frames(d->grant_table)) &&
+ (xatp.idx < max_nr_grant_frames))
+ gnttab_grow_table(d, xatp.idx + 1);
+
if (xatp.idx < nr_grant_frames(d->grant_table))
- mfn = virt_to_mfn(d->grant_table->shared) + xatp.idx;
+ mfn = virt_to_mfn(d->grant_table->shared[xatp.idx]);
+
spin_unlock(&d->grant_table->lock);
break;
default:
break;
}
+ if (mfn == 0) {
+ put_domain(d);
+ return -EINVAL;
+ }
+
LOCK_BIGLOCK(d);
- /* Remove previously mapped page if it was present. */
+ /* Check remapping necessity */
prev_mfn = gmfn_to_mfn(d, xatp.gpfn);
+ if (mfn == prev_mfn)
+ goto out;
+
+ /* Remove previously mapped page if it was present. */
if (prev_mfn && mfn_valid(prev_mfn)) {
if (IS_XEN_HEAP_FRAME(mfn_to_page(prev_mfn)))
/* Xen heap frames are simply unhooked from this phys slot. */
@@ -2104,12 +2142,31 @@ arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
/* Unmap from old location, if any. */
gpfn = get_gpfn_from_mfn(mfn);
- if (gpfn != INVALID_M2P_ENTRY)
+ if (gpfn != INVALID_M2P_ENTRY) {
+ /*
+ * guest_physmap_remove_page() (on IPF) decrements the page
+ * counter and clears the PGC_allocated flag, so pre-increment
+ * the counter here and re-set the flag afterwards.
+ */
+ /* pre-increment page counter */
+ if (!get_page(mfn_to_page(mfn), d))
+ goto out;
+
guest_physmap_remove_page(d, gpfn, mfn);
+ /* post-set PGC_allocated flag */
+ if ((mfn_to_page(mfn)->count_info & PGC_count_mask) != 1) {
+ /* no one but us is using this page */
+ put_page(mfn_to_page(mfn));
+ goto out;
+ }
+ set_bit(_PGC_allocated, &mfn_to_page(mfn)->count_info);
+ }
+
/* Map at new location. */
guest_physmap_add_page(d, xatp.gpfn, mfn);
+ out:
UNLOCK_BIGLOCK(d);
put_domain(d);
diff --git a/xen/arch/ia64/xen/vcpu.c b/xen/arch/ia64/xen/vcpu.c
index b5bf169914..5a5b63f363 100644
--- a/xen/arch/ia64/xen/vcpu.c
+++ b/xen/arch/ia64/xen/vcpu.c
@@ -383,12 +383,10 @@ IA64FAULT vcpu_set_psr_sm(VCPU * vcpu, u64 imm24)
IA64FAULT vcpu_set_psr_l(VCPU * vcpu, u64 val)
{
- struct ia64_psr psr, newpsr, *ipsr;
+ struct ia64_psr newpsr, *ipsr;
REGS *regs = vcpu_regs(vcpu);
u64 enabling_interrupts = 0;
- // TODO: All of these bits need to be virtualized
- __asm__ __volatile("mov %0=psr;;":"=r"(psr)::"memory");
newpsr = *(struct ia64_psr *)&val;
ipsr = (struct ia64_psr *)&regs->cr_ipsr;
// just handle psr.up and psr.pp for now
@@ -406,21 +404,15 @@ IA64FAULT vcpu_set_psr_l(VCPU * vcpu, u64 val)
ipsr->dfl = 1;
if (newpsr.pp) {
ipsr->pp = 1;
- psr.pp = 1;
PSCB(vcpu, vpsr_pp) = 1;
} else {
ipsr->pp = 1;
- psr.pp = 1;
PSCB(vcpu, vpsr_pp) = 0;
}
- if (newpsr.up) {
+ if (newpsr.up)
ipsr->up = 1;
- psr.up = 1;
- }
- if (newpsr.sp) {
+ if (newpsr.sp)
ipsr->sp = 1;
- psr.sp = 1;
- }
if (newpsr.i) {
if (vcpu->vcpu_info->evtchn_upcall_mask)
enabling_interrupts = 1;
@@ -428,22 +420,14 @@ IA64FAULT vcpu_set_psr_l(VCPU * vcpu, u64 val)
}
if (newpsr.ic)
PSCB(vcpu, interrupt_collection_enabled) = 1;
- if (newpsr.mfl) {
+ if (newpsr.mfl)
ipsr->mfl = 1;
- psr.mfl = 1;
- }
- if (newpsr.mfh) {
+ if (newpsr.mfh)
ipsr->mfh = 1;
- psr.mfh = 1;
- }
- if (newpsr.ac) {
+ if (newpsr.ac)
ipsr->ac = 1;
- psr.ac = 1;
- }
- if (newpsr.up) {
+ if (newpsr.up)
ipsr->up = 1;
- psr.up = 1;
- }
if (newpsr.dt && newpsr.rt)
vcpu_set_metaphysical_mode(vcpu, FALSE);
else
diff --git a/xen/include/asm-ia64/hypercall.h b/xen/include/asm-ia64/hypercall.h
index ed0af4678d..0af7ebcee5 100644
--- a/xen/include/asm-ia64/hypercall.h
+++ b/xen/include/asm-ia64/hypercall.h
@@ -22,6 +22,7 @@ vmx_do_mmu_update(
u64 *pdone,
u64 foreigndom);
-#define arch_do_vcpu_op(cmd, vcpu, arg) (-ENOSYS)
+extern long
+arch_do_vcpu_op(int cmd, struct vcpu *v, XEN_GUEST_HANDLE(void) arg);
#endif /* __ASM_IA64_HYPERCALL_H__ */
diff --git a/xen/include/asm-ia64/linux/asm/sn/README.origin b/xen/include/asm-ia64/linux/asm/sn/README.origin
index 5d8d7cb671..7a4f1c645d 100644
--- a/xen/include/asm-ia64/linux/asm/sn/README.origin
+++ b/xen/include/asm-ia64/linux/asm/sn/README.origin
@@ -18,7 +18,8 @@ shubio.h -> linux/include/asm-ia64/sn/shubio.h
simulator.h -> linux/include/asm-ia64/sn/simulator.h
sn_cpuid.h -> linux/include/asm-ia64/sn/sn_cpuid.h
sn_feature_sets.h -> linux/include/asm-ia64/sn/sn_feature_sets.h
-sn_sal.h -> linux/include/asm-ia64/sn/sn_sal.h
tiocp.h -> linux/include/asm-ia64/sn/tiocp.h
xbow.h -> linux/arch/ia64/sn/include/xtalk/xbow.h
xwidgetdev.h -> linux/arch/ia64/sn/include/xtalk/xwidgetdev.h
+# from 2.6.20
+sn_sal.h -> linux/include/asm-ia64/sn/sn_sal.h
diff --git a/xen/include/asm-ia64/linux/asm/sn/sn_sal.h b/xen/include/asm-ia64/linux/asm/sn/sn_sal.h
index ba826b3f75..2c4004eb5a 100644
--- a/xen/include/asm-ia64/linux/asm/sn/sn_sal.h
+++ b/xen/include/asm-ia64/linux/asm/sn/sn_sal.h
@@ -77,6 +77,7 @@
#define SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST 0x02000058 // deprecated
#define SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST 0x0200005a
+#define SN_SAL_IOIF_INIT 0x0200005f
#define SN_SAL_HUB_ERROR_INTERRUPT 0x02000060
#define SN_SAL_BTE_RECOVER 0x02000061
#define SN_SAL_RESERVED_DO_NOT_USE 0x02000062
@@ -87,6 +88,8 @@
#define SN_SAL_INJECT_ERROR 0x02000067
#define SN_SAL_SET_CPU_NUMBER 0x02000068
+#define SN_SAL_KERNEL_LAUNCH_EVENT 0x02000069
+
/*
* Service-specific constants
*/
@@ -1154,4 +1157,11 @@ ia64_sn_set_cpu_number(int cpu)
SAL_CALL_NOLOCK(rv, SN_SAL_SET_CPU_NUMBER, cpu, 0, 0, 0, 0, 0, 0);
return rv.status;
}
+static inline int
+ia64_sn_kernel_launch_event(void)
+{
+ struct ia64_sal_retval rv;
+ SAL_CALL_NOLOCK(rv, SN_SAL_KERNEL_LAUNCH_EVENT, 0, 0, 0, 0, 0, 0, 0);
+ return rv.status;
+}
#endif /* _ASM_IA64_SN_SN_SAL_H */
diff --git a/xen/include/asm-ia64/mm.h b/xen/include/asm-ia64/mm.h
index 057f841c16..e992f148ab 100644
--- a/xen/include/asm-ia64/mm.h
+++ b/xen/include/asm-ia64/mm.h
@@ -87,33 +87,33 @@ struct page_info
* IA-64 should make it a definition same as x86_64.
*/
/* The following page types are MUTUALLY EXCLUSIVE. */
-#define PGT_none (0<<29) /* no special uses of this page */
-#define PGT_l1_page_table (1<<29) /* using this page as an L1 page table? */
-#define PGT_l2_page_table (2<<29) /* using this page as an L2 page table? */
-#define PGT_l3_page_table (3<<29) /* using this page as an L3 page table? */
-#define PGT_l4_page_table (4<<29) /* using this page as an L4 page table? */
+#define PGT_none (0UL<<29) /* no special uses of this page */
+#define PGT_l1_page_table (1UL<<29) /* using this page as an L1 page table? */
+#define PGT_l2_page_table (2UL<<29) /* using this page as an L2 page table? */
+#define PGT_l3_page_table (3UL<<29) /* using this page as an L3 page table? */
+#define PGT_l4_page_table (4UL<<29) /* using this page as an L4 page table? */
/* Value 5 reserved. See asm-x86/mm.h */
/* Value 6 reserved. See asm-x86/mm.h */
-#define PGT_writable_page (7<<29) /* has writable mappings of this page? */
-#define PGT_type_mask (7<<29) /* Bits 29-31. */
+#define PGT_writable_page (7UL<<29) /* has writable mappings of this page? */
+#define PGT_type_mask (7UL<<29) /* Bits 29-31. */
/* Has this page been validated for use as its current type? */
#define _PGT_validated 28
-#define PGT_validated (1<<_PGT_validated)
+#define PGT_validated (1UL<<_PGT_validated)
/* Owning guest has pinned this page to its current type? */
#define _PGT_pinned 27
-#define PGT_pinned (1U<<_PGT_pinned)
+#define PGT_pinned (1UL<<_PGT_pinned)
/* 16-bit count of uses of this frame as its current type. */
-#define PGT_count_mask ((1U<<16)-1)
+#define PGT_count_mask ((1UL<<16)-1)
/* Cleared when the owning guest 'frees' this page. */
#define _PGC_allocated 31
-#define PGC_allocated (1U<<_PGC_allocated)
+#define PGC_allocated (1UL<<_PGC_allocated)
/* Bit 30 reserved. See asm-x86/mm.h */
/* Bit 29 reserved. See asm-x86/mm.h */
/* 29-bit count of references to this frame. */
-#define PGC_count_mask ((1U<<29)-1)
+#define PGC_count_mask ((1UL<<29)-1)
#define IS_XEN_HEAP_FRAME(_pfn) ((page_to_maddr(_pfn) < xenheap_phys_end) \
&& (page_to_maddr(_pfn) >= xen_pstart))
@@ -433,7 +433,7 @@ struct p2m_entry;
extern unsigned long lookup_domain_mpa(struct domain *d, unsigned long mpaddr, struct p2m_entry* entry);
extern void *domain_mpa_to_imva(struct domain *d, unsigned long mpaddr);
extern volatile pte_t *lookup_noalloc_domain_pte(struct domain* d, unsigned long mpaddr);
-extern unsigned long assign_domain_mmio_page(struct domain *d, unsigned long mpaddr, unsigned long size);
+extern unsigned long assign_domain_mmio_page(struct domain *d, unsigned long mpaddr, unsigned long phys_addr, unsigned long size, unsigned long flags);
extern unsigned long assign_domain_mach_page(struct domain *d, unsigned long mpaddr, unsigned long size, unsigned long flags);
int domain_page_mapped(struct domain *d, unsigned long mpaddr);
int efi_mmio(unsigned long physaddr, unsigned long size);
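
These PGT_*/PGC_* constants are widened to unsigned long because type_info and count_info are now handled as 64-bit values (see the u64 change to put_page_type()/get_page_type() in xen/arch/ia64/xen/mm.c earlier in this patch): a 32-bit int mask with bit 31 set sign-extends when it meets a u64 operand, and its complement only covers the low 32 bits. The stand-alone demonstration below is not part of the patch.

/*
 * Illustration only (not part of the patch): what goes wrong when
 * 32-bit mask constants are mixed with a 64-bit type_info field.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t type_info = ~(uint64_t)0;	/* pretend all bits are set */

	/* Old style: 7<<29 has bit 31 set, so as an int it is negative
	 * and sign-extends to 0xffffffffe0000000 (the shift formally
	 * overflows a signed int, which is what made it fragile). */
	printf("old mask : %016llx\n",
	       (unsigned long long)(type_info & (7 << 29)));
	/* New style keeps only the intended three bits: 00000000e0000000. */
	printf("new mask : %016llx\n",
	       (unsigned long long)(type_info & (7UL << 29)));

	/* Clearing bits is just as bad: ~(7U<<29) is only 32 bits wide,
	 * so it also wipes the upper half of the field. */
	printf("old clear: %016llx\n",
	       (unsigned long long)(type_info & ~(7U << 29)));	/* 000000001fffffff */
	printf("new clear: %016llx\n",
	       (unsigned long long)(type_info & ~(7UL << 29)));	/* ffffffff1fffffff */
	return 0;
}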
diff --git a/xen/include/asm-ia64/vmmu.h b/xen/include/asm-ia64/vmmu.h
index 905d122f68..84c23c2dfd 100644
--- a/xen/include/asm-ia64/vmmu.h
+++ b/xen/include/asm-ia64/vmmu.h
@@ -291,6 +291,7 @@ extern int thash_lock_tc(thash_cb_t *hcb, u64 va, u64 size, int rid, char cl, in
#define ITIR_RV_MASK (((1UL<<32)-1)<<32 | 0x3)
#define PAGE_FLAGS_RV_MASK (0x2 | (0x3UL<<50)|(((1UL<<11)-1)<<53))
+#define PAGE_FLAGS_AR_PL_MASK ((0x7UL<<9)|(0x3UL<<7))
extern u64 machine_ttag(PTA pta, u64 va);
extern u64 machine_thash(PTA pta, u64 va);
extern void purge_machine_tc_by_domid(domid_t domid);
@@ -309,6 +310,7 @@ extern u64 translate_phy_pte(struct vcpu *v, u64 *pte, u64 itir, u64 va);
extern void thash_vhpt_insert(struct vcpu *v, u64 pte, u64 itir, u64 ifa,
int type);
extern u64 guest_vhpt_lookup(u64 iha, u64 *pte);
+extern int vhpt_access_rights_fixup(struct vcpu *v, u64 ifa, int is_data);
static inline void vmx_vcpu_set_tr (thash_data_t *trp, u64 pte, u64 itir, u64 va, u64 rid)
{