aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChristian Limpach <Christian.Limpach@xensource.com>2007-01-19 14:48:57 +0000
committerChristian Limpach <Christian.Limpach@xensource.com>2007-01-19 14:48:57 +0000
commitc6a0a466fab92f08e73fe2bdaed40ef73f524c9b (patch)
tree4946ac5fcbb06016b2c3ab8a73cb22d73e7c00e6
parent361f8004cbabe9cdb5386cf2c838b5c732feb53a (diff)
parentbeb6098f214a6c227f0dce614f48cd0fc391da2d (diff)
downloadxen-c6a0a466fab92f08e73fe2bdaed40ef73f524c9b.tar.gz
xen-c6a0a466fab92f08e73fe2bdaed40ef73f524c9b.tar.bz2
xen-c6a0a466fab92f08e73fe2bdaed40ef73f524c9b.zip
[merge] with xen-unstable
Signed-off-by: Christian Limpach <Christian.Limpach@xensource.com>
-rw-r--r--Config.mk5
-rw-r--r--buildconfigs/linux-defconfig_xen0_ia643
-rw-r--r--buildconfigs/linux-defconfig_xen0_x86_326
-rw-r--r--buildconfigs/linux-defconfig_xen0_x86_646
-rw-r--r--buildconfigs/linux-defconfig_xenU_ia643
-rw-r--r--buildconfigs/linux-defconfig_xenU_x86_322
-rw-r--r--buildconfigs/linux-defconfig_xenU_x86_642
-rw-r--r--buildconfigs/linux-defconfig_xen_ia643
-rw-r--r--buildconfigs/linux-defconfig_xen_x86_322
-rw-r--r--buildconfigs/linux-defconfig_xen_x86_642
-rw-r--r--buildconfigs/mk.linux-2.6-xen4
-rw-r--r--extras/mini-os/Makefile2
-rw-r--r--extras/mini-os/include/x86/x86_32/hypercall-x86_32.h14
-rw-r--r--extras/mini-os/include/x86/x86_64/hypercall-x86_64.h14
-rw-r--r--linux-2.6-xen-sparse/arch/i386/kernel/sysenter.c4
-rw-r--r--linux-2.6-xen-sparse/arch/ia64/Kconfig6
-rw-r--r--linux-2.6-xen-sparse/arch/ia64/kernel/asm-offsets.c1
-rw-r--r--linux-2.6-xen-sparse/arch/ia64/kernel/fsys.S925
-rw-r--r--linux-2.6-xen-sparse/arch/ia64/kernel/gate.S24
-rw-r--r--linux-2.6-xen-sparse/arch/ia64/kernel/gate.lds.S8
-rw-r--r--linux-2.6-xen-sparse/arch/ia64/kernel/patch.c4
-rw-r--r--linux-2.6-xen-sparse/arch/ia64/kernel/setup.c10
-rw-r--r--linux-2.6-xen-sparse/arch/ia64/xen/hypercall.S267
-rw-r--r--linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c88
-rw-r--r--linux-2.6-xen-sparse/arch/ia64/xen/xenentry.S45
-rw-r--r--linux-2.6-xen-sparse/arch/ia64/xen/xenivt.S48
-rw-r--r--linux-2.6-xen-sparse/arch/ia64/xen/xenminstate.h11
-rw-r--r--linux-2.6-xen-sparse/arch/ia64/xen/xenpal.S23
-rw-r--r--linux-2.6-xen-sparse/arch/ia64/xen/xensetup.S7
-rw-r--r--linux-2.6-xen-sparse/drivers/xen/Kconfig8
-rw-r--r--linux-2.6-xen-sparse/include/asm-ia64/hypercall.h108
-rw-r--r--linux-2.6-xen-sparse/include/asm-ia64/hypervisor.h3
-rw-r--r--linux-2.6-xen-sparse/include/asm-ia64/maddr.h1
-rw-r--r--linux-2.6-xen-sparse/include/asm-ia64/page.h1
-rw-r--r--linux-2.6-xen-sparse/include/asm-ia64/xen/privop.h18
-rw-r--r--linux-2.6-xen-sparse/include/linux/skbuff.h3
-rw-r--r--linux-2.6-xen-sparse/net/core/skbuff.c37
-rw-r--r--tools/ioemu/hw/cirrus_vga.c52
-rw-r--r--tools/ioemu/hw/ide.c3
-rw-r--r--tools/ioemu/hw/pci.c3
-rw-r--r--tools/ioemu/target-i386-dm/helper2.c15
-rw-r--r--tools/ioemu/vl.c38
-rw-r--r--tools/libxc/Makefile2
-rw-r--r--tools/libxc/ia64/xc_ia64_hvm_build.c44
-rw-r--r--tools/libxc/xc_domain.c44
-rw-r--r--tools/libxc/xc_hvm_restore.c360
-rw-r--r--tools/libxc/xc_hvm_save.c727
-rw-r--r--tools/libxc/xenctrl.h24
-rw-r--r--tools/libxc/xenguest.h20
-rw-r--r--tools/pygrub/src/pygrub6
-rw-r--r--tools/python/setup.py13
-rw-r--r--tools/python/xen/lowlevel/scf/scf.c156
-rw-r--r--tools/python/xen/lowlevel/xc/xc.c22
-rw-r--r--tools/python/xen/util/xmlrpclib2.py1
-rw-r--r--tools/python/xen/web/httpserver.py10
-rw-r--r--tools/python/xen/xend/Vifctl.py4
-rw-r--r--tools/python/xen/xend/XendCheckpoint.py77
-rw-r--r--tools/python/xen/xend/XendConfig.py45
-rw-r--r--tools/python/xen/xend/XendDomain.py12
-rw-r--r--tools/python/xen/xend/XendDomainInfo.py64
-rw-r--r--tools/python/xen/xend/XendNode.py4
-rw-r--r--tools/python/xen/xend/XendOptions.py (renamed from tools/python/xen/xend/XendRoot.py)243
-rw-r--r--tools/python/xen/xend/XendProtocol.py6
-rw-r--r--tools/python/xen/xend/balloon.py6
-rw-r--r--tools/python/xen/xend/image.py28
-rw-r--r--tools/python/xen/xend/osdep.py10
-rw-r--r--tools/python/xen/xend/server/DevController.py8
-rw-r--r--tools/python/xen/xend/server/SrvRoot.py2
-rw-r--r--tools/python/xen/xend/server/SrvServer.py58
-rw-r--r--tools/python/xen/xend/server/XMLRPCServer.py15
-rw-r--r--tools/python/xen/xend/server/netif.py13
-rw-r--r--tools/python/xen/xend/server/relocate.py14
-rw-r--r--tools/python/xen/xend/server/tests/test_controllers.py10
-rw-r--r--tools/python/xen/xend/server/tpmif.py8
-rw-r--r--tools/python/xen/xend/server/vfbif.py4
-rw-r--r--tools/python/xen/xm/create.py6
-rw-r--r--tools/python/xen/xm/main.py13
-rw-r--r--tools/python/xen/xm/opts.py8
-rw-r--r--tools/python/xen/xm/tests/test_create.py4
-rw-r--r--tools/xcutils/xc_restore.c19
-rw-r--r--tools/xcutils/xc_save.c5
-rw-r--r--unmodified_drivers/linux-2.6/platform-pci/platform-pci.c9
-rw-r--r--xen/arch/ia64/asm-offsets.c2
-rwxr-xr-xxen/arch/ia64/asm-xsi-offsets.c2
-rw-r--r--xen/arch/ia64/linux-xen/Makefile3
-rw-r--r--xen/arch/ia64/linux-xen/README.origin1
-rw-r--r--xen/arch/ia64/linux-xen/cmdline.c (renamed from xen/arch/ia64/linux/cmdline.c)10
-rw-r--r--xen/arch/ia64/linux-xen/entry.S120
-rw-r--r--xen/arch/ia64/linux-xen/sn/Makefile1
-rw-r--r--xen/arch/ia64/linux-xen/sn/kernel/Makefile5
-rw-r--r--xen/arch/ia64/linux-xen/sn/kernel/README.origin12
-rw-r--r--xen/arch/ia64/linux-xen/sn/kernel/io_init.c783
-rw-r--r--xen/arch/ia64/linux-xen/sn/kernel/iomv.c82
-rw-r--r--xen/arch/ia64/linux-xen/sn/kernel/irq.c542
-rw-r--r--xen/arch/ia64/linux-xen/sn/kernel/setup.c808
-rw-r--r--xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c548
-rw-r--r--xen/arch/ia64/linux/Makefile6
-rw-r--r--xen/arch/ia64/linux/README.origin4
-rw-r--r--xen/arch/ia64/linux/dig/Makefile1
-rw-r--r--xen/arch/ia64/linux/dig/README.origin7
-rw-r--r--xen/arch/ia64/linux/dig/machvec.c3
-rw-r--r--xen/arch/ia64/linux/hp/Makefile1
-rw-r--r--xen/arch/ia64/linux/hp/zx1/Makefile1
-rw-r--r--xen/arch/ia64/linux/hp/zx1/README.origin7
-rw-r--r--xen/arch/ia64/linux/hp/zx1/hpzx1_machvec.c3
-rw-r--r--xen/arch/ia64/linux/io.c164
-rw-r--r--xen/arch/ia64/linux/sn/Makefile2
-rw-r--r--xen/arch/ia64/linux/sn/kernel/Makefile3
-rw-r--r--xen/arch/ia64/linux/sn/kernel/README.origin9
-rw-r--r--xen/arch/ia64/linux/sn/kernel/machvec.c11
-rw-r--r--xen/arch/ia64/linux/sn/kernel/pio_phys.S71
-rw-r--r--xen/arch/ia64/linux/sn/kernel/ptc_deadlock.S92
-rw-r--r--xen/arch/ia64/linux/sn/pci/Makefile1
-rw-r--r--xen/arch/ia64/linux/sn/pci/pcibr/Makefile1
-rw-r--r--xen/arch/ia64/linux/sn/pci/pcibr/README.origin7
-rw-r--r--xen/arch/ia64/linux/sn/pci/pcibr/pcibr_reg.c285
-rw-r--r--xen/arch/ia64/vmx/mmio.c8
-rw-r--r--xen/arch/ia64/vmx/vmx_process.c23
-rw-r--r--xen/arch/ia64/vmx/vmx_vcpu.c45
-rw-r--r--xen/arch/ia64/vmx/vmx_virt.c2
-rw-r--r--xen/arch/ia64/xen/dom0_ops.c11
-rw-r--r--xen/arch/ia64/xen/dom_fw.c101
-rw-r--r--xen/arch/ia64/xen/domain.c149
-rw-r--r--xen/arch/ia64/xen/faults.c33
-rw-r--r--xen/arch/ia64/xen/hypercall.c170
-rw-r--r--xen/arch/ia64/xen/hyperprivop.S153
-rw-r--r--xen/arch/ia64/xen/irq.c7
-rw-r--r--xen/arch/ia64/xen/ivt.S416
-rw-r--r--xen/arch/ia64/xen/mm.c19
-rw-r--r--xen/arch/ia64/xen/tlb_track.c25
-rw-r--r--xen/arch/ia64/xen/vcpu.c75
-rw-r--r--xen/arch/ia64/xen/vhpt.c13
-rw-r--r--xen/arch/ia64/xen/xenasm.S22
-rw-r--r--xen/arch/ia64/xen/xencomm.c4
-rw-r--r--xen/arch/ia64/xen/xensetup.c6
-rw-r--r--xen/arch/x86/hvm/hpet.c24
-rw-r--r--xen/arch/x86/hvm/hvm.c11
-rw-r--r--xen/arch/x86/hvm/i8254.c153
-rw-r--r--xen/arch/x86/hvm/intercept.c303
-rw-r--r--xen/arch/x86/hvm/rtc.c2
-rw-r--r--xen/arch/x86/hvm/vioapic.c132
-rw-r--r--xen/arch/x86/hvm/vlapic.c74
-rw-r--r--xen/arch/x86/hvm/vmx/vmx.c296
-rw-r--r--xen/arch/x86/hvm/vpic.c83
-rw-r--r--xen/arch/x86/hvm/vpt.c10
-rw-r--r--xen/arch/x86/mm/shadow/common.c7
-rw-r--r--xen/arch/x86/mm/shadow/multi.c2
-rw-r--r--xen/common/domain.c1
-rw-r--r--xen/common/domctl.c73
-rw-r--r--xen/include/asm-ia64/config.h29
-rw-r--r--xen/include/asm-ia64/domain.h4
-rw-r--r--xen/include/asm-ia64/hypercall.h2
-rw-r--r--xen/include/asm-ia64/linux-null/asm/nmi.h (renamed from xen/include/asm-ia64/linux-null/linux/ioport.h)0
-rw-r--r--xen/include/asm-ia64/linux-null/linux/dmapool.h1
-rw-r--r--xen/include/asm-ia64/linux-null/linux/rwsem.h1
-rw-r--r--xen/include/asm-ia64/linux-xen/asm/README.origin7
-rw-r--r--xen/include/asm-ia64/linux-xen/asm/machvec.h (renamed from xen/include/asm-ia64/linux/asm/machvec.h)108
-rw-r--r--xen/include/asm-ia64/linux-xen/asm/machvec_dig.h46
-rw-r--r--xen/include/asm-ia64/linux-xen/asm/machvec_hpzx1.h66
-rw-r--r--xen/include/asm-ia64/linux-xen/asm/machvec_sn2.h166
-rw-r--r--xen/include/asm-ia64/linux-xen/asm/page.h10
-rw-r--r--xen/include/asm-ia64/linux-xen/asm/pci.h (renamed from xen/include/asm-ia64/linux/asm/pci.h)24
-rw-r--r--xen/include/asm-ia64/linux-xen/asm/sn/README.origin16
-rw-r--r--xen/include/asm-ia64/linux-xen/asm/sn/addrs.h299
-rw-r--r--xen/include/asm-ia64/linux-xen/asm/sn/arch.h92
-rw-r--r--xen/include/asm-ia64/linux-xen/asm/sn/hubdev.h95
-rw-r--r--xen/include/asm-ia64/linux-xen/asm/sn/intr.h73
-rw-r--r--xen/include/asm-ia64/linux-xen/asm/sn/io.h281
-rw-r--r--xen/include/asm-ia64/linux-xen/asm/sn/nodepda.h87
-rw-r--r--xen/include/asm-ia64/linux-xen/asm/sn/pcibr_provider.h153
-rw-r--r--xen/include/asm-ia64/linux-xen/asm/sn/rw_mmr.h32
-rw-r--r--xen/include/asm-ia64/linux-xen/asm/sn/types.h28
-rw-r--r--xen/include/asm-ia64/linux-xen/asm/system.h1
-rw-r--r--xen/include/asm-ia64/linux-xen/asm/types.h8
-rw-r--r--xen/include/asm-ia64/linux-xen/linux/README.origin5
-rw-r--r--xen/include/asm-ia64/linux-xen/linux/device.h489
-rw-r--r--xen/include/asm-ia64/linux-xen/linux/kobject.h286
-rw-r--r--xen/include/asm-ia64/linux-xen/linux/pci.h820
-rw-r--r--xen/include/asm-ia64/linux/README.origin12
-rw-r--r--xen/include/asm-ia64/linux/asm/README.origin4
-rw-r--r--xen/include/asm-ia64/linux/asm/machvec_init.h32
-rw-r--r--xen/include/asm-ia64/linux/asm/sn/README.origin24
-rw-r--r--xen/include/asm-ia64/linux/asm/sn/geo.h132
-rw-r--r--xen/include/asm-ia64/linux/asm/sn/klconfig.h246
-rw-r--r--xen/include/asm-ia64/linux/asm/sn/l1.h51
-rw-r--r--xen/include/asm-ia64/linux/asm/sn/leds.h33
-rw-r--r--xen/include/asm-ia64/linux/asm/sn/module.h127
-rw-r--r--xen/include/asm-ia64/linux/asm/sn/pcibus_provider_defs.h68
-rw-r--r--xen/include/asm-ia64/linux/asm/sn/pcidev.h83
-rw-r--r--xen/include/asm-ia64/linux/asm/sn/pda.h69
-rw-r--r--xen/include/asm-ia64/linux/asm/sn/pic.h261
-rw-r--r--xen/include/asm-ia64/linux/asm/sn/shub_mmr.h502
-rw-r--r--xen/include/asm-ia64/linux/asm/sn/shubio.h3358
-rw-r--r--xen/include/asm-ia64/linux/asm/sn/simulator.h20
-rw-r--r--xen/include/asm-ia64/linux/asm/sn/sn_cpuid.h132
-rw-r--r--xen/include/asm-ia64/linux/asm/sn/sn_feature_sets.h51
-rw-r--r--xen/include/asm-ia64/linux/asm/sn/sn_sal.h (renamed from xen/include/asm-ia64/linux-xen/asm/sn/sn_sal.h)377
-rw-r--r--xen/include/asm-ia64/linux/asm/sn/tiocp.h257
-rw-r--r--xen/include/asm-ia64/linux/asm/sn/xbow.h301
-rw-r--r--xen/include/asm-ia64/linux/asm/sn/xwidgetdev.h70
-rw-r--r--xen/include/asm-ia64/linux/completion.h57
-rw-r--r--xen/include/asm-ia64/linux/ioport.h136
-rw-r--r--xen/include/asm-ia64/linux/klist.h61
-rw-r--r--xen/include/asm-ia64/linux/kref.h32
-rw-r--r--xen/include/asm-ia64/linux/mod_devicetable.h323
-rw-r--r--xen/include/asm-ia64/linux/pci_ids.h2356
-rw-r--r--xen/include/asm-ia64/linux/pci_regs.h488
-rw-r--r--xen/include/asm-ia64/linux/pm.h279
-rw-r--r--xen/include/asm-ia64/linux/sysfs.h206
-rw-r--r--xen/include/asm-ia64/multicall.h12
-rw-r--r--xen/include/asm-ia64/tlbflush.h2
-rw-r--r--xen/include/asm-ia64/vcpu.h1
-rw-r--r--xen/include/asm-ia64/vmx_vcpu.h2
-rw-r--r--xen/include/asm-ia64/xensystem.h1
-rw-r--r--xen/include/asm-ia64/xentypes.h19
-rw-r--r--xen/include/asm-x86/hvm/domain.h17
-rw-r--r--xen/include/asm-x86/hvm/hvm.h38
-rw-r--r--xen/include/asm-x86/hvm/support.h127
-rw-r--r--xen/include/asm-x86/hvm/vpt.h2
-rw-r--r--xen/include/public/arch-ia64.h64
-rw-r--r--xen/include/public/arch-x86/xen.h64
-rw-r--r--xen/include/public/domctl.h16
-rw-r--r--xen/include/xlat.lst2
223 files changed, 22145 insertions, 1603 deletions
diff --git a/Config.mk b/Config.mk
index 6a7ed679ac..7474ce19c5 100644
--- a/Config.mk
+++ b/Config.mk
@@ -6,9 +6,12 @@ debug ?= n
XEN_COMPILE_ARCH ?= $(shell uname -m | sed -e s/i.86/x86_32/ \
-e s/ppc/powerpc/ -e s/i86pc/x86_32/)
XEN_TARGET_ARCH ?= $(XEN_COMPILE_ARCH)
-XEN_TARGET_X86_PAE ?= n
XEN_OS ?= $(shell uname -s)
+ifeq ($(XEN_TARGET_ARCH),x86_32)
+XEN_TARGET_X86_PAE ?= y
+endif
+
CONFIG_$(XEN_OS) := y
SHELL ?= /bin/sh
diff --git a/buildconfigs/linux-defconfig_xen0_ia64 b/buildconfigs/linux-defconfig_xen0_ia64
index 612bcfd42d..dc97753420 100644
--- a/buildconfigs/linux-defconfig_xen0_ia64
+++ b/buildconfigs/linux-defconfig_xen0_ia64
@@ -1512,10 +1512,7 @@ CONFIG_CRYPTO_DES=y
# Hardware crypto devices
#
# CONFIG_XEN_UTIL is not set
-CONFIG_HAVE_ARCH_ALLOC_SKB=y
-CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
CONFIG_XEN_BALLOON=y
-CONFIG_XEN_SKBUFF=y
# CONFIG_XEN_DEVMEM is not set
CONFIG_XEN_REBOOT=y
# CONFIG_XEN_SMPBOOT is not set
diff --git a/buildconfigs/linux-defconfig_xen0_x86_32 b/buildconfigs/linux-defconfig_xen0_x86_32
index 76b2840685..27fc2f666a 100644
--- a/buildconfigs/linux-defconfig_xen0_x86_32
+++ b/buildconfigs/linux-defconfig_xen0_x86_32
@@ -569,7 +569,7 @@ CONFIG_MEGARAID_NEWGEN=y
# CONFIG_MEGARAID_LEGACY is not set
# CONFIG_MEGARAID_SAS is not set
CONFIG_SCSI_SATA=y
-# CONFIG_SCSI_SATA_AHCI is not set
+CONFIG_SCSI_SATA_AHCI=y
# CONFIG_SCSI_SATA_SVW is not set
CONFIG_SCSI_ATA_PIIX=y
# CONFIG_SCSI_SATA_MV is not set
@@ -734,7 +734,7 @@ CONFIG_E1000=y
CONFIG_SK98LIN=y
# CONFIG_VIA_VELOCITY is not set
CONFIG_TIGON3=y
-# CONFIG_BNX2 is not set
+CONFIG_BNX2=y
#
# Ethernet (10000 Mbit)
@@ -1413,8 +1413,6 @@ CONFIG_XEN_SYSFS=y
CONFIG_XEN_COMPAT_030002_AND_LATER=y
# CONFIG_XEN_COMPAT_LATEST_ONLY is not set
CONFIG_XEN_COMPAT_030002=y
-CONFIG_HAVE_ARCH_ALLOC_SKB=y
-CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
CONFIG_NO_IDLE_HZ=y
CONFIG_XEN_UTIL=y
diff --git a/buildconfigs/linux-defconfig_xen0_x86_64 b/buildconfigs/linux-defconfig_xen0_x86_64
index 50191f868c..d93d10d1cb 100644
--- a/buildconfigs/linux-defconfig_xen0_x86_64
+++ b/buildconfigs/linux-defconfig_xen0_x86_64
@@ -517,7 +517,7 @@ CONFIG_MEGARAID_NEWGEN=y
# CONFIG_MEGARAID_LEGACY is not set
# CONFIG_MEGARAID_SAS is not set
CONFIG_SCSI_SATA=y
-# CONFIG_SCSI_SATA_AHCI is not set
+CONFIG_SCSI_SATA_AHCI=y
# CONFIG_SCSI_SATA_SVW is not set
CONFIG_SCSI_ATA_PIIX=y
# CONFIG_SCSI_SATA_MV is not set
@@ -683,7 +683,7 @@ CONFIG_E1000=y
CONFIG_SK98LIN=y
# CONFIG_VIA_VELOCITY is not set
CONFIG_TIGON3=y
-# CONFIG_BNX2 is not set
+CONFIG_BNX2=y
#
# Ethernet (10000 Mbit)
@@ -1363,8 +1363,6 @@ CONFIG_XEN_SYSFS=y
CONFIG_XEN_COMPAT_030002_AND_LATER=y
# CONFIG_XEN_COMPAT_LATEST_ONLY is not set
CONFIG_XEN_COMPAT_030002=y
-CONFIG_HAVE_ARCH_ALLOC_SKB=y
-CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
CONFIG_NO_IDLE_HZ=y
CONFIG_XEN_UTIL=y
diff --git a/buildconfigs/linux-defconfig_xenU_ia64 b/buildconfigs/linux-defconfig_xenU_ia64
index a7e15190ee..537fd311a9 100644
--- a/buildconfigs/linux-defconfig_xenU_ia64
+++ b/buildconfigs/linux-defconfig_xenU_ia64
@@ -1386,10 +1386,7 @@ CONFIG_CRYPTO_DES=y
# Hardware crypto devices
#
# CONFIG_XEN_UTIL is not set
-CONFIG_HAVE_ARCH_ALLOC_SKB=y
-CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
CONFIG_XEN_BALLOON=y
-CONFIG_XEN_SKBUFF=y
# CONFIG_XEN_DEVMEM is not set
CONFIG_XEN_REBOOT=y
# CONFIG_XEN_SMPBOOT is not set
diff --git a/buildconfigs/linux-defconfig_xenU_x86_32 b/buildconfigs/linux-defconfig_xenU_x86_32
index 67650c73d5..590d23841c 100644
--- a/buildconfigs/linux-defconfig_xenU_x86_32
+++ b/buildconfigs/linux-defconfig_xenU_x86_32
@@ -922,8 +922,6 @@ CONFIG_XEN_SYSFS=y
CONFIG_XEN_COMPAT_030002_AND_LATER=y
# CONFIG_XEN_COMPAT_LATEST_ONLY is not set
CONFIG_XEN_COMPAT_030002=y
-CONFIG_HAVE_ARCH_ALLOC_SKB=y
-CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
CONFIG_NO_IDLE_HZ=y
CONFIG_XEN_UTIL=y
diff --git a/buildconfigs/linux-defconfig_xenU_x86_64 b/buildconfigs/linux-defconfig_xenU_x86_64
index 1cb9be1236..1d7a46dba7 100644
--- a/buildconfigs/linux-defconfig_xenU_x86_64
+++ b/buildconfigs/linux-defconfig_xenU_x86_64
@@ -1218,8 +1218,6 @@ CONFIG_XEN_SYSFS=y
CONFIG_XEN_COMPAT_030002_AND_LATER=y
# CONFIG_XEN_COMPAT_LATEST_ONLY is not set
CONFIG_XEN_COMPAT_030002=y
-CONFIG_HAVE_ARCH_ALLOC_SKB=y
-CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
CONFIG_NO_IDLE_HZ=y
CONFIG_XEN_UTIL=y
diff --git a/buildconfigs/linux-defconfig_xen_ia64 b/buildconfigs/linux-defconfig_xen_ia64
index e04b1db69f..2e41eb5892 100644
--- a/buildconfigs/linux-defconfig_xen_ia64
+++ b/buildconfigs/linux-defconfig_xen_ia64
@@ -1518,10 +1518,7 @@ CONFIG_CRYPTO_DES=y
# Hardware crypto devices
#
# CONFIG_XEN_UTIL is not set
-CONFIG_HAVE_ARCH_ALLOC_SKB=y
-CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
CONFIG_XEN_BALLOON=y
-CONFIG_XEN_SKBUFF=y
# CONFIG_XEN_DEVMEM is not set
CONFIG_XEN_REBOOT=y
# CONFIG_XEN_SMPBOOT is not set
diff --git a/buildconfigs/linux-defconfig_xen_x86_32 b/buildconfigs/linux-defconfig_xen_x86_32
index 4928f8d32e..fedfa15cce 100644
--- a/buildconfigs/linux-defconfig_xen_x86_32
+++ b/buildconfigs/linux-defconfig_xen_x86_32
@@ -3272,8 +3272,6 @@ CONFIG_XEN_SYSFS=y
CONFIG_XEN_COMPAT_030002_AND_LATER=y
# CONFIG_XEN_COMPAT_LATEST_ONLY is not set
CONFIG_XEN_COMPAT_030002=y
-CONFIG_HAVE_ARCH_ALLOC_SKB=y
-CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
CONFIG_NO_IDLE_HZ=y
CONFIG_XEN_UTIL=y
diff --git a/buildconfigs/linux-defconfig_xen_x86_64 b/buildconfigs/linux-defconfig_xen_x86_64
index 7672bd1fc6..a7269fbf88 100644
--- a/buildconfigs/linux-defconfig_xen_x86_64
+++ b/buildconfigs/linux-defconfig_xen_x86_64
@@ -3103,8 +3103,6 @@ CONFIG_XEN_SYSFS=y
CONFIG_XEN_COMPAT_030002_AND_LATER=y
# CONFIG_XEN_COMPAT_LATEST_ONLY is not set
CONFIG_XEN_COMPAT_030002=y
-CONFIG_HAVE_ARCH_ALLOC_SKB=y
-CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
CONFIG_HAVE_IRQ_IGNORE_UNHANDLED=y
CONFIG_NO_IDLE_HZ=y
CONFIG_XEN_UTIL=y
diff --git a/buildconfigs/mk.linux-2.6-xen b/buildconfigs/mk.linux-2.6-xen
index 729381d7d0..bdc5e738ad 100644
--- a/buildconfigs/mk.linux-2.6-xen
+++ b/buildconfigs/mk.linux-2.6-xen
@@ -9,6 +9,10 @@ LINUX_DIR = build-linux-$(LINUX_VER)-$(EXTRAVERSION)_$(XEN_TARGET_ARCH)
IMAGE_TARGET ?= vmlinuz
INSTALL_BOOT_PATH ?= $(DESTDIR)
+ifeq ($(XEN_TARGET_ARCH),ia64)
+INSTALL_BOOT_PATH := $(DESTDIR)/boot
+endif
+
LINUX_VER3 := $(LINUX_SERIES).$(word 3, $(subst ., ,$(LINUX_VER)))
include buildconfigs/Rules.mk
diff --git a/extras/mini-os/Makefile b/extras/mini-os/Makefile
index ad4ff8d910..df313e48d5 100644
--- a/extras/mini-os/Makefile
+++ b/extras/mini-os/Makefile
@@ -9,7 +9,7 @@ pae ?= n
XEN_ROOT = ../..
include $(XEN_ROOT)/Config.mk
-XEN_INTERFACE_VERSION := 0x00030204
+XEN_INTERFACE_VERSION := 0x00030205
export XEN_INTERFACE_VERSION
# Set TARGET_ARCH
diff --git a/extras/mini-os/include/x86/x86_32/hypercall-x86_32.h b/extras/mini-os/include/x86/x86_32/hypercall-x86_32.h
index 9c41c946b2..fbaa7ba16e 100644
--- a/extras/mini-os/include/x86/x86_32/hypercall-x86_32.h
+++ b/extras/mini-os/include/x86/x86_32/hypercall-x86_32.h
@@ -305,6 +305,20 @@ HYPERVISOR_nmi_op(
return _hypercall2(int, nmi_op, op, arg);
}
+static inline int
+HYPERVISOR_sysctl(
+ unsigned long op)
+{
+ return _hypercall1(int, sysctl, op);
+}
+
+static inline int
+HYPERVISOR_domctl(
+ unsigned long op)
+{
+ return _hypercall1(int, domctl, op);
+}
+
#endif /* __HYPERCALL_X86_32_H__ */
/*
diff --git a/extras/mini-os/include/x86/x86_64/hypercall-x86_64.h b/extras/mini-os/include/x86/x86_64/hypercall-x86_64.h
index 3a4aa049e8..179b776ed5 100644
--- a/extras/mini-os/include/x86/x86_64/hypercall-x86_64.h
+++ b/extras/mini-os/include/x86/x86_64/hypercall-x86_64.h
@@ -305,6 +305,20 @@ HYPERVISOR_nmi_op(
return _hypercall2(int, nmi_op, op, arg);
}
+static inline int
+HYPERVISOR_sysctl(
+ unsigned long op)
+{
+ return _hypercall1(int, sysctl, op);
+}
+
+static inline int
+HYPERVISOR_domctl(
+ unsigned long op)
+{
+ return _hypercall1(int, domctl, op);
+}
+
#endif /* __HYPERCALL_X86_64_H__ */
/*
diff --git a/linux-2.6-xen-sparse/arch/i386/kernel/sysenter.c b/linux-2.6-xen-sparse/arch/i386/kernel/sysenter.c
index 4dd50f81b8..e72b58ecca 100644
--- a/linux-2.6-xen-sparse/arch/i386/kernel/sysenter.c
+++ b/linux-2.6-xen-sparse/arch/i386/kernel/sysenter.c
@@ -76,7 +76,9 @@ static void *syscall_page;
int __init sysenter_setup(void)
{
- syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
+ void *page = (void *)get_zeroed_page(GFP_ATOMIC);
+
+ syscall_page = page;
#ifdef CONFIG_XEN
if (boot_cpu_has(X86_FEATURE_SEP)) {
diff --git a/linux-2.6-xen-sparse/arch/ia64/Kconfig b/linux-2.6-xen-sparse/arch/ia64/Kconfig
index 09038d2f45..e4f4a6431a 100644
--- a/linux-2.6-xen-sparse/arch/ia64/Kconfig
+++ b/linux-2.6-xen-sparse/arch/ia64/Kconfig
@@ -579,12 +579,6 @@ if XEN
config XEN_UTIL
default n
-config HAVE_ARCH_ALLOC_SKB
- default y
-
-config HAVE_ARCH_DEV_ALLOC_SKB
- default y
-
config XEN_BALLOON
default y
diff --git a/linux-2.6-xen-sparse/arch/ia64/kernel/asm-offsets.c b/linux-2.6-xen-sparse/arch/ia64/kernel/asm-offsets.c
index 21c97ad745..905d0f7267 100644
--- a/linux-2.6-xen-sparse/arch/ia64/kernel/asm-offsets.c
+++ b/linux-2.6-xen-sparse/arch/ia64/kernel/asm-offsets.c
@@ -287,7 +287,6 @@ void foo(void)
DEFINE_MAPPED_REG_OFS(XSI_IHA_OFS, iha);
DEFINE_MAPPED_REG_OFS(XSI_ITIR_OFS, itir);
DEFINE_MAPPED_REG_OFS(XSI_PSR_IC_OFS, interrupt_collection_enabled);
- DEFINE_MAPPED_REG_OFS(XSI_INCOMPL_REGFR_OFS, incomplete_regframe);
DEFINE_MAPPED_REG_OFS(XSI_BANKNUM_OFS, banknum);
DEFINE_MAPPED_REG_OFS(XSI_BANK0_R16_OFS, bank0_regs[0]);
DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]);
diff --git a/linux-2.6-xen-sparse/arch/ia64/kernel/fsys.S b/linux-2.6-xen-sparse/arch/ia64/kernel/fsys.S
new file mode 100644
index 0000000000..98b5d15855
--- /dev/null
+++ b/linux-2.6-xen-sparse/arch/ia64/kernel/fsys.S
@@ -0,0 +1,925 @@
+/*
+ * This file contains the light-weight system call handlers (fsyscall-handlers).
+ *
+ * Copyright (C) 2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * 25-Sep-03 davidm Implement fsys_rt_sigprocmask().
+ * 18-Feb-03 louisk Implement fsys_gettimeofday().
+ * 28-Feb-03 davidm Fixed several bugs in fsys_gettimeofday(). Tuned it some more,
+ * probably broke it along the way... ;-)
+ * 13-Jul-04 clameter Implement fsys_clock_gettime and revise fsys_gettimeofday to make
+ * it capable of using memory based clocks without falling back to C code.
+ */
+
+#include <asm/asmmacro.h>
+#include <asm/errno.h>
+#include <asm/asm-offsets.h>
+#include <asm/percpu.h>
+#include <asm/thread_info.h>
+#include <asm/sal.h>
+#include <asm/signal.h>
+#include <asm/system.h>
+#include <asm/unistd.h>
+
+#include "entry.h"
+
+/*
+ * See Documentation/ia64/fsys.txt for details on fsyscalls.
+ *
+ * On entry to an fsyscall handler:
+ * r10 = 0 (i.e., defaults to "successful syscall return")
+ * r11 = saved ar.pfs (a user-level value)
+ * r15 = system call number
+ * r16 = "current" task pointer (in normal kernel-mode, this is in r13)
+ * r32-r39 = system call arguments
+ * b6 = return address (a user-level value)
+ * ar.pfs = previous frame-state (a user-level value)
+ * PSR.be = cleared to zero (i.e., little-endian byte order is in effect)
+ * all other registers may contain values passed in from user-mode
+ *
+ * On return from an fsyscall handler:
+ * r11 = saved ar.pfs (as passed into the fsyscall handler)
+ * r15 = system call number (as passed into the fsyscall handler)
+ * r32-r39 = system call arguments (as passed into the fsyscall handler)
+ * b6 = return address (as passed into the fsyscall handler)
+ * ar.pfs = previous frame-state (as passed into the fsyscall handler)
+ */
+
+ENTRY(fsys_ni_syscall)
+ .prologue
+ .altrp b6
+ .body
+ mov r8=ENOSYS
+ mov r10=-1
+ FSYS_RETURN
+END(fsys_ni_syscall)
+
+ENTRY(fsys_getpid)
+ .prologue
+ .altrp b6
+ .body
+ add r9=TI_FLAGS+IA64_TASK_SIZE,r16
+ ;;
+ ld4 r9=[r9]
+ add r8=IA64_TASK_TGID_OFFSET,r16
+ ;;
+ and r9=TIF_ALLWORK_MASK,r9
+ ld4 r8=[r8] // r8 = current->tgid
+ ;;
+ cmp.ne p8,p0=0,r9
+(p8) br.spnt.many fsys_fallback_syscall
+ FSYS_RETURN
+END(fsys_getpid)
+
+ENTRY(fsys_getppid)
+ .prologue
+ .altrp b6
+ .body
+ add r17=IA64_TASK_GROUP_LEADER_OFFSET,r16
+ ;;
+ ld8 r17=[r17] // r17 = current->group_leader
+ add r9=TI_FLAGS+IA64_TASK_SIZE,r16
+ ;;
+
+ ld4 r9=[r9]
+ add r17=IA64_TASK_REAL_PARENT_OFFSET,r17 // r17 = &current->group_leader->real_parent
+ ;;
+ and r9=TIF_ALLWORK_MASK,r9
+
+1: ld8 r18=[r17] // r18 = current->group_leader->real_parent
+ ;;
+ cmp.ne p8,p0=0,r9
+ add r8=IA64_TASK_TGID_OFFSET,r18 // r8 = &current->group_leader->real_parent->tgid
+ ;;
+
+ /*
+ * The .acq is needed to ensure that the read of tgid has returned its data before
+ * we re-check "real_parent".
+ */
+ ld4.acq r8=[r8] // r8 = current->group_leader->real_parent->tgid
+#ifdef CONFIG_SMP
+ /*
+ * Re-read current->group_leader->real_parent.
+ */
+ ld8 r19=[r17] // r19 = current->group_leader->real_parent
+(p8) br.spnt.many fsys_fallback_syscall
+ ;;
+ cmp.ne p6,p0=r18,r19 // did real_parent change?
+ mov r19=0 // i must not leak kernel bits...
+(p6) br.cond.spnt.few 1b // yes -> redo the read of tgid and the check
+ ;;
+ mov r17=0 // i must not leak kernel bits...
+ mov r18=0 // i must not leak kernel bits...
+#else
+ mov r17=0 // i must not leak kernel bits...
+ mov r18=0 // i must not leak kernel bits...
+ mov r19=0 // i must not leak kernel bits...
+#endif
+ FSYS_RETURN
+END(fsys_getppid)
+
+ENTRY(fsys_set_tid_address)
+ .prologue
+ .altrp b6
+ .body
+ add r9=TI_FLAGS+IA64_TASK_SIZE,r16
+ ;;
+ ld4 r9=[r9]
+ tnat.z p6,p7=r32 // check argument register for being NaT
+ ;;
+ and r9=TIF_ALLWORK_MASK,r9
+ add r8=IA64_TASK_PID_OFFSET,r16
+ add r18=IA64_TASK_CLEAR_CHILD_TID_OFFSET,r16
+ ;;
+ ld4 r8=[r8]
+ cmp.ne p8,p0=0,r9
+ mov r17=-1
+ ;;
+(p6) st8 [r18]=r32
+(p7) st8 [r18]=r17
+(p8) br.spnt.many fsys_fallback_syscall
+ ;;
+ mov r17=0 // i must not leak kernel bits...
+ mov r18=0 // i must not leak kernel bits...
+ FSYS_RETURN
+END(fsys_set_tid_address)
+
+/*
+ * Ensure that the time interpolator structure is compatible with the asm code
+ */
+#if IA64_TIME_INTERPOLATOR_SOURCE_OFFSET !=0 || IA64_TIME_INTERPOLATOR_SHIFT_OFFSET != 2 \
+ || IA64_TIME_INTERPOLATOR_JITTER_OFFSET != 3 || IA64_TIME_INTERPOLATOR_NSEC_OFFSET != 4
+#error fsys_gettimeofday incompatible with changes to struct time_interpolator
+#endif
+#define CLOCK_REALTIME 0
+#define CLOCK_MONOTONIC 1
+#define CLOCK_DIVIDE_BY_1000 0x4000
+#define CLOCK_ADD_MONOTONIC 0x8000
+
+ENTRY(fsys_gettimeofday)
+ .prologue
+ .altrp b6
+ .body
+ mov r31 = r32
+ tnat.nz p6,p0 = r33 // guard against NaT argument
+(p6) br.cond.spnt.few .fail_einval
+ mov r30 = CLOCK_DIVIDE_BY_1000
+ ;;
+.gettime:
+ // Register map
+ // Incoming r31 = pointer to address where to place result
+ // r30 = flags determining how time is processed
+ // r2,r3 = temp r4-r7 preserved
+ // r8 = result nanoseconds
+ // r9 = result seconds
+ // r10 = temporary storage for clock difference
+ // r11 = preserved: saved ar.pfs
+ // r12 = preserved: memory stack
+ // r13 = preserved: thread pointer
+ // r14 = address of mask / mask
+ // r15 = preserved: system call number
+ // r16 = preserved: current task pointer
+ // r17 = wall to monotonic use
+ // r18 = time_interpolator->offset
+ // r19 = address of wall_to_monotonic
+ // r20 = pointer to struct time_interpolator / pointer to time_interpolator->address
+ // r21 = shift factor
+ // r22 = address of time interpolator->last_counter
+ // r23 = address of time_interpolator->last_cycle
+ // r24 = adress of time_interpolator->offset
+ // r25 = last_cycle value
+ // r26 = last_counter value
+ // r27 = pointer to xtime
+ // r28 = sequence number at the beginning of critcal section
+ // r29 = address of seqlock
+ // r30 = time processing flags / memory address
+ // r31 = pointer to result
+ // Predicates
+ // p6,p7 short term use
+ // p8 = timesource ar.itc
+ // p9 = timesource mmio64
+ // p10 = timesource mmio32
+ // p11 = timesource not to be handled by asm code
+ // p12 = memory time source ( = p9 | p10)
+ // p13 = do cmpxchg with time_interpolator_last_cycle
+ // p14 = Divide by 1000
+ // p15 = Add monotonic
+ //
+ // Note that instructions are optimized for McKinley. McKinley can process two
+ // bundles simultaneously and therefore we continuously try to feed the CPU
+ // two bundles and then a stop.
+ tnat.nz p6,p0 = r31 // branch deferred since it does not fit into bundle structure
+ mov pr = r30,0xc000 // Set predicates according to function
+ add r2 = TI_FLAGS+IA64_TASK_SIZE,r16
+ movl r20 = time_interpolator
+ ;;
+ ld8 r20 = [r20] // get pointer to time_interpolator structure
+ movl r29 = xtime_lock
+ ld4 r2 = [r2] // process work pending flags
+ movl r27 = xtime
+ ;; // only one bundle here
+ ld8 r21 = [r20] // first quad with control information
+ and r2 = TIF_ALLWORK_MASK,r2
+(p6) br.cond.spnt.few .fail_einval // deferred branch
+ ;;
+ add r10 = IA64_TIME_INTERPOLATOR_ADDRESS_OFFSET,r20
+ extr r3 = r21,32,32 // time_interpolator->nsec_per_cyc
+ extr r8 = r21,0,16 // time_interpolator->source
+ cmp.ne p6, p0 = 0, r2 // Fallback if work is scheduled
+(p6) br.cond.spnt.many fsys_fallback_syscall
+ ;;
+ cmp.eq p8,p12 = 0,r8 // Check for cpu timer
+ cmp.eq p9,p0 = 1,r8 // MMIO64 ?
+ extr r2 = r21,24,8 // time_interpolator->jitter
+ cmp.eq p10,p0 = 2,r8 // MMIO32 ?
+ cmp.ltu p11,p0 = 2,r8 // function or other clock
+(p11) br.cond.spnt.many fsys_fallback_syscall
+ ;;
+ setf.sig f7 = r3 // Setup for scaling of counter
+(p15) movl r19 = wall_to_monotonic
+(p12) ld8 r30 = [r10]
+ cmp.ne p13,p0 = r2,r0 // need jitter compensation?
+ extr r21 = r21,16,8 // shift factor
+ ;;
+.time_redo:
+ .pred.rel.mutex p8,p9,p10
+ ld4.acq r28 = [r29] // xtime_lock.sequence. Must come first for locking purposes
+(p8) mov r2 = ar.itc // CPU_TIMER. 36 clocks latency!!!
+ add r22 = IA64_TIME_INTERPOLATOR_LAST_COUNTER_OFFSET,r20
+(p9) ld8 r2 = [r30] // readq(ti->address). Could also have latency issues..
+(p10) ld4 r2 = [r30] // readw(ti->address)
+(p13) add r23 = IA64_TIME_INTERPOLATOR_LAST_CYCLE_OFFSET,r20
+ ;; // could be removed by moving the last add upward
+ ld8 r26 = [r22] // time_interpolator->last_counter
+(p13) ld8 r25 = [r23] // time interpolator->last_cycle
+ add r24 = IA64_TIME_INTERPOLATOR_OFFSET_OFFSET,r20
+(p15) ld8 r17 = [r19],IA64_TIMESPEC_TV_NSEC_OFFSET
+ ld8 r9 = [r27],IA64_TIMESPEC_TV_NSEC_OFFSET
+ add r14 = IA64_TIME_INTERPOLATOR_MASK_OFFSET, r20
+ ;;
+ ld8 r18 = [r24] // time_interpolator->offset
+ ld8 r8 = [r27],-IA64_TIMESPEC_TV_NSEC_OFFSET // xtime.tv_nsec
+(p13) sub r3 = r25,r2 // Diff needed before comparison (thanks davidm)
+ ;;
+ ld8 r14 = [r14] // time_interpolator->mask
+(p13) cmp.gt.unc p6,p7 = r3,r0 // check if it is less than last. p6,p7 cleared
+ sub r10 = r2,r26 // current_counter - last_counter
+ ;;
+(p6) sub r10 = r25,r26 // time we got was less than last_cycle
+(p7) mov ar.ccv = r25 // more than last_cycle. Prep for cmpxchg
+ ;;
+ and r10 = r10,r14 // Apply mask
+ ;;
+ setf.sig f8 = r10
+ nop.i 123
+ ;;
+(p7) cmpxchg8.rel r3 = [r23],r2,ar.ccv
+EX(.fail_efault, probe.w.fault r31, 3) // This takes 5 cycles and we have spare time
+ xmpy.l f8 = f8,f7 // nsec_per_cyc*(counter-last_counter)
+(p15) add r9 = r9,r17 // Add wall to monotonic.secs to result secs
+ ;;
+(p15) ld8 r17 = [r19],-IA64_TIMESPEC_TV_NSEC_OFFSET
+(p7) cmp.ne p7,p0 = r25,r3 // if cmpxchg not successful redo
+ // simulate tbit.nz.or p7,p0 = r28,0
+ and r28 = ~1,r28 // Make sequence even to force retry if odd
+ getf.sig r2 = f8
+ mf
+ add r8 = r8,r18 // Add time interpolator offset
+ ;;
+ ld4 r10 = [r29] // xtime_lock.sequence
+(p15) add r8 = r8, r17 // Add monotonic.nsecs to nsecs
+ shr.u r2 = r2,r21
+ ;; // overloaded 3 bundles!
+ // End critical section.
+ add r8 = r8,r2 // Add xtime.nsecs
+ cmp4.ne.or p7,p0 = r28,r10
+(p7) br.cond.dpnt.few .time_redo // sequence number changed ?
+ // Now r8=tv->tv_nsec and r9=tv->tv_sec
+ mov r10 = r0
+ movl r2 = 1000000000
+ add r23 = IA64_TIMESPEC_TV_NSEC_OFFSET, r31
+(p14) movl r3 = 2361183241434822607 // Prep for / 1000 hack
+ ;;
+.time_normalize:
+ mov r21 = r8
+ cmp.ge p6,p0 = r8,r2
+(p14) shr.u r20 = r8, 3 // We can repeat this if necessary just wasting some time
+ ;;
+(p14) setf.sig f8 = r20
+(p6) sub r8 = r8,r2
+(p6) add r9 = 1,r9 // two nops before the branch.
+(p14) setf.sig f7 = r3 // Chances for repeats are 1 in 10000 for gettod
+(p6) br.cond.dpnt.few .time_normalize
+ ;;
+ // Divided by 8 though shift. Now divide by 125
+ // The compiler was able to do that with a multiply
+ // and a shift and we do the same
+EX(.fail_efault, probe.w.fault r23, 3) // This also costs 5 cycles
+(p14) xmpy.hu f8 = f8, f7 // xmpy has 5 cycles latency so use it...
+ ;;
+ mov r8 = r0
+(p14) getf.sig r2 = f8
+ ;;
+(p14) shr.u r21 = r2, 4
+ ;;
+EX(.fail_efault, st8 [r31] = r9)
+EX(.fail_efault, st8 [r23] = r21)
+ FSYS_RETURN
+.fail_einval:
+ mov r8 = EINVAL
+ mov r10 = -1
+ FSYS_RETURN
+.fail_efault:
+ mov r8 = EFAULT
+ mov r10 = -1
+ FSYS_RETURN
+END(fsys_gettimeofday)
+
+ENTRY(fsys_clock_gettime)
+ .prologue
+ .altrp b6
+ .body
+ cmp4.ltu p6, p0 = CLOCK_MONOTONIC, r32
+ // Fallback if this is not CLOCK_REALTIME or CLOCK_MONOTONIC
+(p6) br.spnt.few fsys_fallback_syscall
+ mov r31 = r33
+ shl r30 = r32,15
+ br.many .gettime
+END(fsys_clock_gettime)
+
+/*
+ * long fsys_rt_sigprocmask (int how, sigset_t *set, sigset_t *oset, size_t sigsetsize).
+ */
+#if _NSIG_WORDS != 1
+# error Sorry, fsys_rt_sigprocmask() needs to be updated for _NSIG_WORDS != 1.
+#endif
+ENTRY(fsys_rt_sigprocmask)
+ .prologue
+ .altrp b6
+ .body
+
+ add r2=IA64_TASK_BLOCKED_OFFSET,r16
+ add r9=TI_FLAGS+IA64_TASK_SIZE,r16
+ cmp4.ltu p6,p0=SIG_SETMASK,r32
+
+ cmp.ne p15,p0=r0,r34 // oset != NULL?
+ tnat.nz p8,p0=r34
+ add r31=IA64_TASK_SIGHAND_OFFSET,r16
+ ;;
+ ld8 r3=[r2] // read/prefetch current->blocked
+ ld4 r9=[r9]
+ tnat.nz.or p6,p0=r35
+
+ cmp.ne.or p6,p0=_NSIG_WORDS*8,r35
+ tnat.nz.or p6,p0=r32
+(p6) br.spnt.few .fail_einval // fail with EINVAL
+ ;;
+#ifdef CONFIG_SMP
+ ld8 r31=[r31] // r31 <- current->sighand
+#endif
+ and r9=TIF_ALLWORK_MASK,r9
+ tnat.nz.or p8,p0=r33
+ ;;
+ cmp.ne p7,p0=0,r9
+ cmp.eq p6,p0=r0,r33 // set == NULL?
+ add r31=IA64_SIGHAND_SIGLOCK_OFFSET,r31 // r31 <- current->sighand->siglock
+(p8) br.spnt.few .fail_efault // fail with EFAULT
+(p7) br.spnt.many fsys_fallback_syscall // got pending kernel work...
+(p6) br.dpnt.many .store_mask // -> short-circuit to just reading the signal mask
+
+ /* Argh, we actually have to do some work and _update_ the signal mask: */
+
+EX(.fail_efault, probe.r.fault r33, 3) // verify user has read-access to *set
+EX(.fail_efault, ld8 r14=[r33]) // r14 <- *set
+ mov r17=(1 << (SIGKILL - 1)) | (1 << (SIGSTOP - 1))
+ ;;
+
+ rsm psr.i // mask interrupt delivery
+ mov ar.ccv=0
+ andcm r14=r14,r17 // filter out SIGKILL & SIGSTOP
+
+#ifdef CONFIG_SMP
+ mov r17=1
+ ;;
+ cmpxchg4.acq r18=[r31],r17,ar.ccv // try to acquire the lock
+ mov r8=EINVAL // default to EINVAL
+ ;;
+ ld8 r3=[r2] // re-read current->blocked now that we hold the lock
+ cmp4.ne p6,p0=r18,r0
+(p6) br.cond.spnt.many .lock_contention
+ ;;
+#else
+ ld8 r3=[r2] // re-read current->blocked now that we hold the lock
+ mov r8=EINVAL // default to EINVAL
+#endif
+ add r18=IA64_TASK_PENDING_OFFSET+IA64_SIGPENDING_SIGNAL_OFFSET,r16
+ add r19=IA64_TASK_SIGNAL_OFFSET,r16
+ cmp4.eq p6,p0=SIG_BLOCK,r32
+ ;;
+ ld8 r19=[r19] // r19 <- current->signal
+ cmp4.eq p7,p0=SIG_UNBLOCK,r32
+ cmp4.eq p8,p0=SIG_SETMASK,r32
+ ;;
+ ld8 r18=[r18] // r18 <- current->pending.signal
+ .pred.rel.mutex p6,p7,p8
+(p6) or r14=r3,r14 // SIG_BLOCK
+(p7) andcm r14=r3,r14 // SIG_UNBLOCK
+
+(p8) mov r14=r14 // SIG_SETMASK
+(p6) mov r8=0 // clear error code
+ // recalc_sigpending()
+ add r17=IA64_SIGNAL_GROUP_STOP_COUNT_OFFSET,r19
+
+ add r19=IA64_SIGNAL_SHARED_PENDING_OFFSET+IA64_SIGPENDING_SIGNAL_OFFSET,r19
+ ;;
+ ld4 r17=[r17] // r17 <- current->signal->group_stop_count
+(p7) mov r8=0 // clear error code
+
+ ld8 r19=[r19] // r19 <- current->signal->shared_pending
+ ;;
+ cmp4.gt p6,p7=r17,r0 // p6/p7 <- (current->signal->group_stop_count > 0)?
+(p8) mov r8=0 // clear error code
+
+ or r18=r18,r19 // r18 <- current->pending | current->signal->shared_pending
+ ;;
+ // r18 <- (current->pending | current->signal->shared_pending) & ~current->blocked:
+ andcm r18=r18,r14
+ add r9=TI_FLAGS+IA64_TASK_SIZE,r16
+ ;;
+
+(p7) cmp.ne.or.andcm p6,p7=r18,r0 // p6/p7 <- signal pending
+ mov r19=0 // i must not leak kernel bits...
+(p6) br.cond.dpnt.many .sig_pending
+ ;;
+
+1: ld4 r17=[r9] // r17 <- current->thread_info->flags
+ ;;
+ mov ar.ccv=r17
+ and r18=~_TIF_SIGPENDING,r17 // r18 <- r17 & ~(1 << TIF_SIGPENDING)
+ ;;
+
+ st8 [r2]=r14 // update current->blocked with new mask
+ cmpxchg4.acq r8=[r9],r18,ar.ccv // current->thread_info->flags <- r18
+ ;;
+ cmp.ne p6,p0=r17,r8 // update failed?
+(p6) br.cond.spnt.few 1b // yes -> retry
+
+#ifdef CONFIG_SMP
+ st4.rel [r31]=r0 // release the lock
+#endif
+ ssm psr.i
+ ;;
+
+ srlz.d // ensure psr.i is set again
+ mov r18=0 // i must not leak kernel bits...
+
+.store_mask:
+EX(.fail_efault, (p15) probe.w.fault r34, 3) // verify user has write-access to *oset
+EX(.fail_efault, (p15) st8 [r34]=r3)
+ mov r2=0 // i must not leak kernel bits...
+ mov r3=0 // i must not leak kernel bits...
+ mov r8=0 // return 0
+ mov r9=0 // i must not leak kernel bits...
+ mov r14=0 // i must not leak kernel bits...
+ mov r17=0 // i must not leak kernel bits...
+ mov r31=0 // i must not leak kernel bits...
+ FSYS_RETURN
+
+.sig_pending:
+#ifdef CONFIG_SMP
+ st4.rel [r31]=r0 // release the lock
+#endif
+ ssm psr.i
+ ;;
+ srlz.d
+ br.sptk.many fsys_fallback_syscall // with signal pending, do the heavy-weight syscall
+
+#ifdef CONFIG_SMP
+.lock_contention:
+ /* Rather than spinning here, fall back on doing a heavy-weight syscall. */
+ ssm psr.i
+ ;;
+ srlz.d
+ br.sptk.many fsys_fallback_syscall
+#endif
+END(fsys_rt_sigprocmask)
+
+ENTRY(fsys_fallback_syscall)
+ .prologue
+ .altrp b6
+ .body
+ /*
+ * We only get here from light-weight syscall handlers. Thus, we already
+ * know that r15 contains a valid syscall number. No need to re-check.
+ */
+ adds r17=-1024,r15
+ movl r14=sys_call_table
+ ;;
+#ifdef CONFIG_XEN
+ movl r18=running_on_xen;;
+ ld4 r18=[r18];;
+ // p14 = running_on_xen
+ // p15 = !running_on_xen
+ cmp.ne p14,p15=r0,r18
+ ;;
+(p14) movl r18=XSI_PSR_I_ADDR;;
+(p14) ld8 r18=[r18]
+(p14) mov r29=1;;
+(p14) st1 [r18]=r29
+(p15) rsm psr.i
+#else
+ rsm psr.i
+#endif
+ shladd r18=r17,3,r14
+ ;;
+ ld8 r18=[r18] // load normal (heavy-weight) syscall entry-point
+#ifdef CONFIG_XEN
+(p14) mov r27=r8
+(p14) XEN_HYPER_GET_PSR
+ ;;
+(p14) mov r29=r8
+(p14) mov r8=r27
+(p15) mov r29=psr // read psr (12 cyc load latency)
+#else
+ mov r29=psr // read psr (12 cyc load latency)
+#endif
+ mov r27=ar.rsc
+ mov r21=ar.fpsr
+ mov r26=ar.pfs
+END(fsys_fallback_syscall)
+ /* FALL THROUGH */
+GLOBAL_ENTRY(fsys_bubble_down)
+ .prologue
+ .altrp b6
+ .body
+ /*
+ * We get here for syscalls that don't have a lightweight
+ * handler. For those, we need to bubble down into the kernel
+ * and that requires setting up a minimal pt_regs structure,
+ * and initializing the CPU state more or less as if an
+ * interruption had occurred. To make syscall-restarts work,
+ * we setup pt_regs such that cr_iip points to the second
+ * instruction in syscall_via_break. Decrementing the IP
+ * hence will restart the syscall via break and not
+ * decrementing IP will return us to the caller, as usual.
+ * Note that we preserve the value of psr.pp rather than
+ * initializing it from dcr.pp. This makes it possible to
+ * distinguish fsyscall execution from other privileged
+ * execution.
+ *
+ * On entry:
+ * - normal fsyscall handler register usage, except
+ * that we also have:
+ * - r18: address of syscall entry point
+ * - r21: ar.fpsr
+ * - r26: ar.pfs
+ * - r27: ar.rsc
+ * - r29: psr
+ *
+ * We used to clear some PSR bits here but that requires slow
+ * serialization. Fortuntely, that isn't really necessary.
+ * The rationale is as follows: we used to clear bits
+ * ~PSR_PRESERVED_BITS in PSR.L. Since
+ * PSR_PRESERVED_BITS==PSR.{UP,MFL,MFH,PK,DT,PP,SP,RT,IC}, we
+ * ended up clearing PSR.{BE,AC,I,DFL,DFH,DI,DB,SI,TB}.
+ * However,
+ *
+ * PSR.BE : already is turned off in __kernel_syscall_via_epc()
+ * PSR.AC : don't care (kernel normally turns PSR.AC on)
+ * PSR.I : already turned off by the time fsys_bubble_down gets
+ * invoked
+ * PSR.DFL: always 0 (kernel never turns it on)
+ * PSR.DFH: don't care --- kernel never touches f32-f127 on its own
+ * initiative
+ * PSR.DI : always 0 (kernel never turns it on)
+ * PSR.SI : always 0 (kernel never turns it on)
+ * PSR.DB : don't care --- kernel never enables kernel-level
+ * breakpoints
+ * PSR.TB : must be 0 already; if it wasn't zero on entry to
+ * __kernel_syscall_via_epc, the branch to fsys_bubble_down
+ * will trigger a taken branch; the taken-trap-handler then
+ * converts the syscall into a break-based system-call.
+ */
+ /*
+ * Reading psr.l gives us only bits 0-31, psr.it, and psr.mc.
+ * The rest we have to synthesize.
+ */
+# define PSR_ONE_BITS ((3 << IA64_PSR_CPL0_BIT) \
+ | (0x1 << IA64_PSR_RI_BIT) \
+ | IA64_PSR_BN | IA64_PSR_I)
+
+ invala // M0|1
+ movl r14=ia64_ret_from_syscall // X
+
+ nop.m 0
+ movl r28=__kernel_syscall_via_break // X create cr.iip
+ ;;
+
+ mov r2=r16 // A get task addr to addl-addressable register
+ adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 // A
+ mov r31=pr // I0 save pr (2 cyc)
+ ;;
+ st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag
+ addl r22=IA64_RBS_OFFSET,r2 // A compute base of RBS
+ add r3=TI_FLAGS+IA64_TASK_SIZE,r2 // A
+ ;;
+ ld4 r3=[r3] // M0|1 r3 = current_thread_info()->flags
+ lfetch.fault.excl.nt1 [r22] // M0|1 prefetch register backing-store
+ nop.i 0
+ ;;
+ mov ar.rsc=0 // M2 set enforced lazy mode, pl 0, LE, loadrs=0
+ nop.m 0
+ nop.i 0
+ ;;
+ mov r23=ar.bspstore // M2 (12 cyc) save ar.bspstore
+ mov.m r24=ar.rnat // M2 (5 cyc) read ar.rnat (dual-issues!)
+ nop.i 0
+ ;;
+ mov ar.bspstore=r22 // M2 (6 cyc) switch to kernel RBS
+ movl r8=PSR_ONE_BITS // X
+ ;;
+ mov r25=ar.unat // M2 (5 cyc) save ar.unat
+ mov r19=b6 // I0 save b6 (2 cyc)
+ mov r20=r1 // A save caller's gp in r20
+ ;;
+ or r29=r8,r29 // A construct cr.ipsr value to save
+ mov b6=r18 // I0 copy syscall entry-point to b6 (7 cyc)
+ addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2 // A compute base of memory stack
+
+ mov r18=ar.bsp // M2 save (kernel) ar.bsp (12 cyc)
+ cmp.ne pKStk,pUStk=r0,r0 // A set pKStk <- 0, pUStk <- 1
+ br.call.sptk.many b7=ia64_syscall_setup // B
+ ;;
+ mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0
+ mov rp=r14 // I0 set the real return addr
+ and r3=_TIF_SYSCALL_TRACEAUDIT,r3 // A
+ ;;
+#ifdef CONFIG_XEN
+ movl r14=running_on_xen;;
+ ld4 r14=[r14];;
+ // p14 = running_on_xen
+ // p15 = !running_on_xen
+ cmp.ne p14,p15=r0,r14
+ ;;
+(p14) movl r28=XSI_PSR_I_ADDR;;
+(p14) ld8 r28=[r28];;
+(p14) adds r28=-1,r28;; // event_pending
+(p14) ld1 r14=[r28];;
+(p14) cmp.ne.unc p13,p14=r14,r0;;
+(p13) XEN_HYPER_SSM_I
+(p14) adds r28=1,r28;; // event_mask
+(p14) st1 [r28]=r0;;
+(p15) ssm psr.i
+#else
+ ssm psr.i // M2 we're on kernel stacks now, reenable irqs
+#endif
+ cmp.eq p8,p0=r3,r0 // A
+(p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT
+
+ nop.m 0
+(p8) br.call.sptk.many b6=b6 // B (ignore return address)
+ br.cond.spnt ia64_trace_syscall // B
+END(fsys_bubble_down)
+
+ .rodata
+ .align 8
+ .globl fsyscall_table
+
+ data8 fsys_bubble_down
+fsyscall_table:
+ data8 fsys_ni_syscall
+ data8 0 // exit // 1025
+ data8 0 // read
+ data8 0 // write
+ data8 0 // open
+ data8 0 // close
+ data8 0 // creat // 1030
+ data8 0 // link
+ data8 0 // unlink
+ data8 0 // execve
+ data8 0 // chdir
+ data8 0 // fchdir // 1035
+ data8 0 // utimes
+ data8 0 // mknod
+ data8 0 // chmod
+ data8 0 // chown
+ data8 0 // lseek // 1040
+ data8 fsys_getpid // getpid
+ data8 fsys_getppid // getppid
+ data8 0 // mount
+ data8 0 // umount
+ data8 0 // setuid // 1045
+ data8 0 // getuid
+ data8 0 // geteuid
+ data8 0 // ptrace
+ data8 0 // access
+ data8 0 // sync // 1050
+ data8 0 // fsync
+ data8 0 // fdatasync
+ data8 0 // kill
+ data8 0 // rename
+ data8 0 // mkdir // 1055
+ data8 0 // rmdir
+ data8 0 // dup
+ data8 0 // pipe
+ data8 0 // times
+ data8 0 // brk // 1060
+ data8 0 // setgid
+ data8 0 // getgid
+ data8 0 // getegid
+ data8 0 // acct
+ data8 0 // ioctl // 1065
+ data8 0 // fcntl
+ data8 0 // umask
+ data8 0 // chroot
+ data8 0 // ustat
+ data8 0 // dup2 // 1070
+ data8 0 // setreuid
+ data8 0 // setregid
+ data8 0 // getresuid
+ data8 0 // setresuid
+ data8 0 // getresgid // 1075
+ data8 0 // setresgid
+ data8 0 // getgroups
+ data8 0 // setgroups
+ data8 0 // getpgid
+ data8 0 // setpgid // 1080
+ data8 0 // setsid
+ data8 0 // getsid
+ data8 0 // sethostname
+ data8 0 // setrlimit
+ data8 0 // getrlimit // 1085
+ data8 0 // getrusage
+ data8 fsys_gettimeofday // gettimeofday
+ data8 0 // settimeofday
+ data8 0 // select
+ data8 0 // poll // 1090
+ data8 0 // symlink
+ data8 0 // readlink
+ data8 0 // uselib
+ data8 0 // swapon
+ data8 0 // swapoff // 1095
+ data8 0 // reboot
+ data8 0 // truncate
+ data8 0 // ftruncate
+ data8 0 // fchmod
+ data8 0 // fchown // 1100
+ data8 0 // getpriority
+ data8 0 // setpriority
+ data8 0 // statfs
+ data8 0 // fstatfs
+ data8 0 // gettid // 1105
+ data8 0 // semget
+ data8 0 // semop
+ data8 0 // semctl
+ data8 0 // msgget
+ data8 0 // msgsnd // 1110
+ data8 0 // msgrcv
+ data8 0 // msgctl
+ data8 0 // shmget
+ data8 0 // shmat
+ data8 0 // shmdt // 1115
+ data8 0 // shmctl
+ data8 0 // syslog
+ data8 0 // setitimer
+ data8 0 // getitimer
+ data8 0 // 1120
+ data8 0
+ data8 0
+ data8 0 // vhangup
+ data8 0 // lchown
+ data8 0 // remap_file_pages // 1125
+ data8 0 // wait4
+ data8 0 // sysinfo
+ data8 0 // clone
+ data8 0 // setdomainname
+ data8 0 // newuname // 1130
+ data8 0 // adjtimex
+ data8 0
+ data8 0 // init_module
+ data8 0 // delete_module
+ data8 0 // 1135
+ data8 0
+ data8 0 // quotactl
+ data8 0 // bdflush
+ data8 0 // sysfs
+ data8 0 // personality // 1140
+ data8 0 // afs_syscall
+ data8 0 // setfsuid
+ data8 0 // setfsgid
+ data8 0 // getdents
+ data8 0 // flock // 1145
+ data8 0 // readv
+ data8 0 // writev
+ data8 0 // pread64
+ data8 0 // pwrite64
+ data8 0 // sysctl // 1150
+ data8 0 // mmap
+ data8 0 // munmap
+ data8 0 // mlock
+ data8 0 // mlockall
+ data8 0 // mprotect // 1155
+ data8 0 // mremap
+ data8 0 // msync
+ data8 0 // munlock
+ data8 0 // munlockall
+ data8 0 // sched_getparam // 1160
+ data8 0 // sched_setparam
+ data8 0 // sched_getscheduler
+ data8 0 // sched_setscheduler
+ data8 0 // sched_yield
+ data8 0 // sched_get_priority_max // 1165
+ data8 0 // sched_get_priority_min
+ data8 0 // sched_rr_get_interval
+ data8 0 // nanosleep
+ data8 0 // nfsservctl
+ data8 0 // prctl // 1170
+ data8 0 // getpagesize
+ data8 0 // mmap2
+ data8 0 // pciconfig_read
+ data8 0 // pciconfig_write
+ data8 0 // perfmonctl // 1175
+ data8 0 // sigaltstack
+ data8 0 // rt_sigaction
+ data8 0 // rt_sigpending
+ data8 fsys_rt_sigprocmask // rt_sigprocmask
+ data8 0 // rt_sigqueueinfo // 1180
+ data8 0 // rt_sigreturn
+ data8 0 // rt_sigsuspend
+ data8 0 // rt_sigtimedwait
+ data8 0 // getcwd
+ data8 0 // capget // 1185
+ data8 0 // capset
+ data8 0 // sendfile
+ data8 0
+ data8 0
+ data8 0 // socket // 1190
+ data8 0 // bind
+ data8 0 // connect
+ data8 0 // listen
+ data8 0 // accept
+ data8 0 // getsockname // 1195
+ data8 0 // getpeername
+ data8 0 // socketpair
+ data8 0 // send
+ data8 0 // sendto
+ data8 0 // recv // 1200
+ data8 0 // recvfrom
+ data8 0 // shutdown
+ data8 0 // setsockopt
+ data8 0 // getsockopt
+ data8 0 // sendmsg // 1205
+ data8 0 // recvmsg
+ data8 0 // pivot_root
+ data8 0 // mincore
+ data8 0 // madvise
+ data8 0 // newstat // 1210
+ data8 0 // newlstat
+ data8 0 // newfstat
+ data8 0 // clone2
+ data8 0 // getdents64
+ data8 0 // getunwind // 1215
+ data8 0 // readahead
+ data8 0 // setxattr
+ data8 0 // lsetxattr
+ data8 0 // fsetxattr
+ data8 0 // getxattr // 1220
+ data8 0 // lgetxattr
+ data8 0 // fgetxattr
+ data8 0 // listxattr
+ data8 0 // llistxattr
+ data8 0 // flistxattr // 1225
+ data8 0 // removexattr
+ data8 0 // lremovexattr
+ data8 0 // fremovexattr
+ data8 0 // tkill
+ data8 0 // futex // 1230
+ data8 0 // sched_setaffinity
+ data8 0 // sched_getaffinity
+ data8 fsys_set_tid_address // set_tid_address
+ data8 0 // fadvise64_64
+ data8 0 // tgkill // 1235
+ data8 0 // exit_group
+ data8 0 // lookup_dcookie
+ data8 0 // io_setup
+ data8 0 // io_destroy
+ data8 0 // io_getevents // 1240
+ data8 0 // io_submit
+ data8 0 // io_cancel
+ data8 0 // epoll_create
+ data8 0 // epoll_ctl
+ data8 0 // epoll_wait // 1245
+ data8 0 // restart_syscall
+ data8 0 // semtimedop
+ data8 0 // timer_create
+ data8 0 // timer_settime
+ data8 0 // timer_gettime // 1250
+ data8 0 // timer_getoverrun
+ data8 0 // timer_delete
+ data8 0 // clock_settime
+ data8 fsys_clock_gettime // clock_gettime
+
+ // fill in zeros for the remaining entries
+ .zero:
+ .space fsyscall_table + 8*NR_syscalls - .zero, 0
diff --git a/linux-2.6-xen-sparse/arch/ia64/kernel/gate.S b/linux-2.6-xen-sparse/arch/ia64/kernel/gate.S
index f91ba279ed..e242e36b04 100644
--- a/linux-2.6-xen-sparse/arch/ia64/kernel/gate.S
+++ b/linux-2.6-xen-sparse/arch/ia64/kernel/gate.S
@@ -56,18 +56,6 @@
[1:] movl reg=0; \
.xdata4 ".data.patch.running_on_xen", 1b-.
- .section ".data.patch.brl_xen_rsm_be_i", "a"
- .previous
-#define BRL_COND_XEN_RSM_BE_I(pr) \
-[1:](pr)brl.cond.sptk 0; \
- .xdata4 ".data.patch.brl_xen_rsm_be_i", 1b-.
-
- .section ".data.patch.brl_xen_get_psr", "a"
- .previous
-#define BRL_COND_XEN_GET_PSR(pr) \
-[1:](pr)brl.cond.sptk 0; \
- .xdata4 ".data.patch.brl_xen_get_psr", 1b-.
-
.section ".data.patch.brl_xen_ssm_i_0", "a"
.previous
#define BRL_COND_XEN_SSM_I_0(pr) \
@@ -155,9 +143,9 @@ GLOBAL_ENTRY(__kernel_syscall_via_epc)
cmp.ne isXen,isRaw=r0,r30
;;
(isRaw) rsm psr.be | psr.i
- BRL_COND_XEN_RSM_BE_I(isXen)
- .global .vdso_rsm_be_i_ret
-.vdso_rsm_be_i_ret:
+(isXen) st1 [r22]=r20
+(isXen) rum psr.be
+ ;;
#else
rsm psr.be | psr.i // M2 (5 cyc to srlz.d)
#endif
@@ -170,9 +158,9 @@ GLOBAL_ENTRY(__kernel_syscall_via_epc)
lfetch [r18] // M0|1
#ifdef CONFIG_XEN_IA64_VDSO_PARAVIRT
(isRaw) mov r29=psr
- BRL_COND_XEN_GET_PSR(isXen)
- .global .vdso_get_psr_ret
-.vdso_get_psr_ret:
+(isXen) XEN_HYPER_GET_PSR
+ ;;
+(isXen) mov r29=r8
#else
mov r29=psr // M2 (12 cyc)
#endif
diff --git a/linux-2.6-xen-sparse/arch/ia64/kernel/gate.lds.S b/linux-2.6-xen-sparse/arch/ia64/kernel/gate.lds.S
index 983719c326..58582ccdfe 100644
--- a/linux-2.6-xen-sparse/arch/ia64/kernel/gate.lds.S
+++ b/linux-2.6-xen-sparse/arch/ia64/kernel/gate.lds.S
@@ -49,14 +49,6 @@ SECTIONS
*(.data.patch.running_on_xen)
__end_gate_running_on_xen_patchlist = .;
- __start_gate_brl_xen_rsm_be_i_patchlist = .;
- *(.data.patch.brl_xen_rsm_be_i)
- __end_gate_brl_xen_rsm_be_i_patchlist = .;
-
- __start_gate_brl_xen_get_psr_patchlist = .;
- *(.data.patch.brl_xen_get_psr)
- __end_gate_brl_xen_get_psr_patchlist = .;
-
__start_gate_brl_xen_ssm_i_0_patchlist = .;
*(.data.patch.brl_xen_ssm_i_0)
__end_gate_brl_xen_ssm_i_0_patchlist = .;
diff --git a/linux-2.6-xen-sparse/arch/ia64/kernel/patch.c b/linux-2.6-xen-sparse/arch/ia64/kernel/patch.c
index ff943fafb7..73597d2866 100644
--- a/linux-2.6-xen-sparse/arch/ia64/kernel/patch.c
+++ b/linux-2.6-xen-sparse/arch/ia64/kernel/patch.c
@@ -236,13 +236,9 @@ patch_brl_symaddr(unsigned long start, unsigned long end,
static void
patch_brl_in_vdso(void)
{
- EXTERN_PATCHLIST(xen_rsm_be_i);
- EXTERN_PATCHLIST(xen_get_psr);
EXTERN_PATCHLIST(xen_ssm_i_0);
EXTERN_PATCHLIST(xen_ssm_i_1);
- PATCH_BRL_SYMADDR(xen_rsm_be_i);
- PATCH_BRL_SYMADDR(xen_get_psr);
PATCH_BRL_SYMADDR(xen_ssm_i_0);
PATCH_BRL_SYMADDR(xen_ssm_i_1);
}
diff --git a/linux-2.6-xen-sparse/arch/ia64/kernel/setup.c b/linux-2.6-xen-sparse/arch/ia64/kernel/setup.c
index ccfc9ca069..c2699f43e1 100644
--- a/linux-2.6-xen-sparse/arch/ia64/kernel/setup.c
+++ b/linux-2.6-xen-sparse/arch/ia64/kernel/setup.c
@@ -76,10 +76,18 @@ EXPORT_SYMBOL(__per_cpu_offset);
#endif
#ifdef CONFIG_XEN
+static void
+xen_panic_hypercall(struct unw_frame_info *info, void *arg)
+{
+ current->thread.ksp = (__u64)info->sw - 16;
+ HYPERVISOR_shutdown(SHUTDOWN_crash);
+ /* we're never actually going to get here... */
+}
+
static int
xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
- HYPERVISOR_shutdown(SHUTDOWN_crash);
+ unw_init_running(xen_panic_hypercall, NULL);
/* we're never actually going to get here... */
return NOTIFY_DONE;
}
diff --git a/linux-2.6-xen-sparse/arch/ia64/xen/hypercall.S b/linux-2.6-xen-sparse/arch/ia64/xen/hypercall.S
index 1aff9c6564..0e2b1cf9d0 100644
--- a/linux-2.6-xen-sparse/arch/ia64/xen/hypercall.S
+++ b/linux-2.6-xen-sparse/arch/ia64/xen/hypercall.S
@@ -8,336 +8,110 @@
#include <asm/processor.h>
#include <asm/asmmacro.h>
-/* To clear vpsr.ic, vpsr.i needs to be cleared first */
-#define XEN_CLEAR_PSR_IC \
- mov r14=1; \
- movl r15=XSI_PSR_I_ADDR; \
- movl r2=XSI_PSR_IC; \
- ;; \
- ld8 r15=[r15]; \
- ld4 r3=[r2]; \
- ;; \
- ld1 r16=[r15]; \
- ;; \
- st1 [r15]=r14; \
- st4 [r2]=r0; \
- ;;
-
-/* First restore vpsr.ic, and then vpsr.i */
-#define XEN_RESTORE_PSR_IC \
- st4 [r2]=r3; \
- st1 [r15]=r16; \
- ;;
+GLOBAL_ENTRY(xen_get_psr)
+ XEN_HYPER_GET_PSR
+ br.ret.sptk.many rp
+ ;;
+END(xen_get_psr)
GLOBAL_ENTRY(xen_get_ivr)
- movl r8=running_on_xen;;
- ld4 r8=[r8];;
- cmp.eq p7,p0=r8,r0;;
-(p7) mov r8=cr.ivr;;
-(p7) br.ret.sptk.many rp
- ;;
- XEN_CLEAR_PSR_IC
- ;;
XEN_HYPER_GET_IVR
- ;;
- XEN_RESTORE_PSR_IC
- ;;
br.ret.sptk.many rp
;;
END(xen_get_ivr)
GLOBAL_ENTRY(xen_get_tpr)
- movl r8=running_on_xen;;
- ld4 r8=[r8];;
- cmp.eq p7,p0=r8,r0;;
-(p7) mov r8=cr.tpr;;
-(p7) br.ret.sptk.many rp
- ;;
- XEN_CLEAR_PSR_IC
- ;;
XEN_HYPER_GET_TPR
- ;;
- XEN_RESTORE_PSR_IC
- ;;
br.ret.sptk.many rp
;;
END(xen_get_tpr)
GLOBAL_ENTRY(xen_set_tpr)
- movl r8=running_on_xen;;
- ld4 r8=[r8];;
- cmp.eq p7,p0=r8,r0;;
-(p7) mov cr.tpr=r32;;
-(p7) br.ret.sptk.many rp
- ;;
mov r8=r32
- ;;
- XEN_CLEAR_PSR_IC
- ;;
XEN_HYPER_SET_TPR
- ;;
- XEN_RESTORE_PSR_IC
- ;;
br.ret.sptk.many rp
;;
END(xen_set_tpr)
GLOBAL_ENTRY(xen_eoi)
- movl r8=running_on_xen;;
- ld4 r8=[r8];;
- cmp.eq p7,p0=r8,r0;;
-(p7) mov cr.eoi=r0;;
-(p7) br.ret.sptk.many rp
- ;;
mov r8=r32
- ;;
- XEN_CLEAR_PSR_IC
- ;;
XEN_HYPER_EOI
- ;;
- XEN_RESTORE_PSR_IC
- ;;
br.ret.sptk.many rp
;;
END(xen_eoi)
GLOBAL_ENTRY(xen_thash)
- movl r8=running_on_xen;;
- ld4 r8=[r8];;
- cmp.eq p7,p0=r8,r0;;
-(p7) thash r8=r32;;
-(p7) br.ret.sptk.many rp
- ;;
mov r8=r32
- ;;
- XEN_CLEAR_PSR_IC
- ;;
XEN_HYPER_THASH
- ;;
- XEN_RESTORE_PSR_IC
- ;;
br.ret.sptk.many rp
;;
END(xen_thash)
GLOBAL_ENTRY(xen_set_itm)
- movl r8=running_on_xen;;
- ld4 r8=[r8];;
- cmp.eq p7,p0=r8,r0;;
-(p7) mov cr.itm=r32;;
-(p7) br.ret.sptk.many rp
- ;;
mov r8=r32
- ;;
- XEN_CLEAR_PSR_IC
- ;;
XEN_HYPER_SET_ITM
- ;;
- XEN_RESTORE_PSR_IC
- ;;
br.ret.sptk.many rp
;;
END(xen_set_itm)
GLOBAL_ENTRY(xen_ptcga)
- movl r8=running_on_xen;;
- ld4 r8=[r8];;
- cmp.eq p7,p0=r8,r0;;
-(p7) ptc.ga r32,r33;;
-(p7) br.ret.sptk.many rp
- ;;
mov r8=r32
mov r9=r33
- ;;
- XEN_CLEAR_PSR_IC
- ;;
XEN_HYPER_PTC_GA
- ;;
- XEN_RESTORE_PSR_IC
- ;;
br.ret.sptk.many rp
;;
END(xen_ptcga)
GLOBAL_ENTRY(xen_get_rr)
- movl r8=running_on_xen;;
- ld4 r8=[r8];;
- cmp.eq p7,p0=r8,r0;;
-(p7) mov r8=rr[r32];;
-(p7) br.ret.sptk.many rp
- ;;
mov r8=r32
- ;;
- XEN_CLEAR_PSR_IC
- ;;
XEN_HYPER_GET_RR
- ;;
- XEN_RESTORE_PSR_IC
- ;;
br.ret.sptk.many rp
;;
END(xen_get_rr)
GLOBAL_ENTRY(xen_set_rr)
- movl r8=running_on_xen;;
- ld4 r8=[r8];;
- cmp.eq p7,p0=r8,r0;;
-(p7) mov rr[r32]=r33;;
-(p7) br.ret.sptk.many rp
- ;;
mov r8=r32
mov r9=r33
- ;;
- XEN_CLEAR_PSR_IC
- ;;
XEN_HYPER_SET_RR
- ;;
- XEN_RESTORE_PSR_IC
- ;;
br.ret.sptk.many rp
;;
END(xen_set_rr)
GLOBAL_ENTRY(xen_set_kr)
- movl r8=running_on_xen;;
- ld4 r8=[r8];;
- cmp.ne p7,p0=r8,r0;;
-(p7) br.cond.spnt.few 1f;
- ;;
- cmp.eq p7,p0=r8,r0
- adds r8=-1,r8;;
-(p7) mov ar0=r9
-(p7) br.ret.sptk.many rp;;
- cmp.eq p7,p0=r8,r0
- adds r8=-1,r8;;
-(p7) mov ar1=r9
-(p7) br.ret.sptk.many rp;;
- cmp.eq p7,p0=r8,r0
- adds r8=-1,r8;;
-(p7) mov ar2=r9
-(p7) br.ret.sptk.many rp;;
- cmp.eq p7,p0=r8,r0
- adds r8=-1,r8;;
-(p7) mov ar3=r9
-(p7) br.ret.sptk.many rp;;
- cmp.eq p7,p0=r8,r0
- adds r8=-1,r8;;
-(p7) mov ar4=r9
-(p7) br.ret.sptk.many rp;;
- cmp.eq p7,p0=r8,r0
- adds r8=-1,r8;;
-(p7) mov ar5=r9
-(p7) br.ret.sptk.many rp;;
- cmp.eq p7,p0=r8,r0
- adds r8=-1,r8;;
-(p7) mov ar6=r9
-(p7) br.ret.sptk.many rp;;
- cmp.eq p7,p0=r8,r0
- adds r8=-1,r8;;
-(p7) mov ar7=r9
-(p7) br.ret.sptk.many rp;;
-
-1: mov r8=r32
+ mov r8=r32
mov r9=r33
- ;;
- XEN_CLEAR_PSR_IC
- ;;
XEN_HYPER_SET_KR
- ;;
- XEN_RESTORE_PSR_IC
- ;;
br.ret.sptk.many rp
END(xen_set_kr)
GLOBAL_ENTRY(xen_fc)
- movl r8=running_on_xen;;
- ld4 r8=[r8];;
- cmp.eq p7,p0=r8,r0;;
-(p7) fc r32;;
-(p7) br.ret.sptk.many rp
- ;;
mov r8=r32
- ;;
- XEN_CLEAR_PSR_IC
- ;;
XEN_HYPER_FC
- ;;
- XEN_RESTORE_PSR_IC
- ;;
br.ret.sptk.many rp
END(xen_fc)
GLOBAL_ENTRY(xen_get_cpuid)
- movl r8=running_on_xen;;
- ld4 r8=[r8];;
- cmp.eq p7,p0=r8,r0;;
-(p7) mov r8=cpuid[r32];;
-(p7) br.ret.sptk.many rp
- ;;
mov r8=r32
- ;;
- XEN_CLEAR_PSR_IC
- ;;
XEN_HYPER_GET_CPUID
- ;;
- XEN_RESTORE_PSR_IC
- ;;
br.ret.sptk.many rp
END(xen_get_cpuid)
GLOBAL_ENTRY(xen_get_pmd)
- movl r8=running_on_xen;;
- ld4 r8=[r8];;
- cmp.eq p7,p0=r8,r0;;
-(p7) mov r8=pmd[r32];;
-(p7) br.ret.sptk.many rp
- ;;
mov r8=r32
- ;;
- XEN_CLEAR_PSR_IC
- ;;
XEN_HYPER_GET_PMD
- ;;
- XEN_RESTORE_PSR_IC
- ;;
br.ret.sptk.many rp
END(xen_get_pmd)
#ifdef CONFIG_IA32_SUPPORT
GLOBAL_ENTRY(xen_get_eflag)
- movl r8=running_on_xen;;
- ld4 r8=[r8];;
- cmp.eq p7,p0=r8,r0;;
-(p7) mov r8=ar24;;
-(p7) br.ret.sptk.many rp
- ;;
- mov r8=r32
- ;;
- XEN_CLEAR_PSR_IC
- ;;
XEN_HYPER_GET_EFLAG
- ;;
- XEN_RESTORE_PSR_IC
- ;;
br.ret.sptk.many rp
END(xen_get_eflag)
// some bits aren't set if pl!=0, see SDM vol1 3.1.8
GLOBAL_ENTRY(xen_set_eflag)
- movl r8=running_on_xen;;
- ld4 r8=[r8];;
- cmp.eq p7,p0=r8,r0;;
-(p7) mov ar24=r32
-(p7) br.ret.sptk.many rp
- ;;
mov r8=r32
- ;;
- XEN_CLEAR_PSR_IC
- ;;
XEN_HYPER_SET_EFLAG
- ;;
- XEN_RESTORE_PSR_IC
- ;;
br.ret.sptk.many rp
END(xen_set_eflag)
#endif
@@ -355,27 +129,6 @@ END(xen_send_ipi)
#ifdef CONFIG_XEN_IA64_VDSO_PARAVIRT
// Those are vdso specialized.
// In fsys mode, call, ret can't be used.
-GLOBAL_ENTRY(xen_rsm_be_i)
- st1 [r22]=r20
- st4 [r23]=r0
- XEN_HYPER_RSM_BE
- st4 [r23]=r20
- brl.cond.sptk .vdso_rsm_be_i_ret
- ;;
-END(xen_rsm_be_i)
-
-GLOBAL_ENTRY(xen_get_psr)
- mov r31=r8
- mov r25=IA64_PSR_IC
- st4 [r23]=r0
- XEN_HYPER_GET_PSR
- ;;
- st4 [r23]=r20
- or r29=r8,r25 // vpsr.ic was cleared for hyperprivop
- mov r8=r31
- brl.cond.sptk .vdso_get_psr_ret
- ;;
-END(xen_get_psr)
// see xen_ssm_i() in privop.h
// r22 = &vcpu->vcpu_info->evtchn_upcall_mask
@@ -395,7 +148,6 @@ END(xen_get_psr)
(p14) cmp.ne.unc p11,p0=r0,r25; \
;; \
(p11) st1 [r22]=r20; \
-(p11) st4 [r23]=r0; \
(p11) XEN_HYPER_SSM_I;
GLOBAL_ENTRY(xen_ssm_i_0)
@@ -409,4 +161,11 @@ GLOBAL_ENTRY(xen_ssm_i_1)
brl.cond.sptk .vdso_ssm_i_1_ret
;;
END(xen_ssm_i_1)
+
+GLOBAL_ENTRY(__hypercall)
+ mov r2=r37
+ break 0x1000
+ br.ret.sptk.many b0
+ ;;
+END(__hypercall)
#endif
diff --git a/linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c b/linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c
index 9bd507a6bf..f65cf94ee1 100644
--- a/linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c
+++ b/linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c
@@ -25,7 +25,10 @@
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
+#include <linux/efi.h>
#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/meminit.h>
#include <asm/hypervisor.h>
#include <asm/hypercall.h>
#include <xen/interface/memory.h>
@@ -46,6 +49,8 @@ static int p2m_expose_init(void);
#define p2m_expose_init() (-ENOSYS)
#endif
+EXPORT_SYMBOL(__hypercall);
+
//XXX same as i386, x86_64 contiguous_bitmap_set(), contiguous_bitmap_clear()
// move those to lib/contiguous_bitmap?
//XXX discontigmem/sparsemem
@@ -56,13 +61,90 @@ static int p2m_expose_init(void);
*/
unsigned long *contiguous_bitmap;
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+/* Following logic is stolen from create_mem_map_table() for virtual memmap */
+static int
+create_contiguous_bitmap(u64 start, u64 end, void *arg)
+{
+ unsigned long address, start_page, end_page;
+ unsigned long bitmap_start, bitmap_end;
+ unsigned char *bitmap;
+ int node;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ bitmap_start = (unsigned long)contiguous_bitmap +
+ ((__pa(start) >> PAGE_SHIFT) >> 3);
+ bitmap_end = (unsigned long)contiguous_bitmap +
+ (((__pa(end) >> PAGE_SHIFT) + 2 * BITS_PER_LONG) >> 3);
+
+ start_page = bitmap_start & PAGE_MASK;
+ end_page = PAGE_ALIGN(bitmap_end);
+ node = paddr_to_nid(__pa(start));
+
+ bitmap = alloc_bootmem_pages_node(NODE_DATA(node),
+ end_page - start_page);
+ BUG_ON(!bitmap);
+ memset(bitmap, 0, end_page - start_page);
+
+ for (address = start_page; address < end_page; address += PAGE_SIZE) {
+ pgd = pgd_offset_k(address);
+ if (pgd_none(*pgd))
+ pgd_populate(&init_mm, pgd,
+ alloc_bootmem_pages_node(NODE_DATA(node),
+ PAGE_SIZE));
+ pud = pud_offset(pgd, address);
+
+ if (pud_none(*pud))
+ pud_populate(&init_mm, pud,
+ alloc_bootmem_pages_node(NODE_DATA(node),
+ PAGE_SIZE));
+ pmd = pmd_offset(pud, address);
+
+ if (pmd_none(*pmd))
+ pmd_populate_kernel(&init_mm, pmd,
+ alloc_bootmem_pages_node
+ (NODE_DATA(node), PAGE_SIZE));
+ pte = pte_offset_kernel(pmd, address);
+
+ if (pte_none(*pte))
+ set_pte(pte,
+ pfn_pte(__pa(bitmap + (address - start_page))
+ >> PAGE_SHIFT, PAGE_KERNEL));
+ }
+ return 0;
+}
+#endif
+
+static void
+__contiguous_bitmap_init(unsigned long size)
+{
+ contiguous_bitmap = alloc_bootmem_pages(size);
+ BUG_ON(!contiguous_bitmap);
+ memset(contiguous_bitmap, 0, size);
+}
+
void
contiguous_bitmap_init(unsigned long end_pfn)
{
unsigned long size = (end_pfn + 2 * BITS_PER_LONG) >> 3;
- contiguous_bitmap = alloc_bootmem_low_pages(size);
- BUG_ON(!contiguous_bitmap);
- memset(contiguous_bitmap, 0, size);
+#ifndef CONFIG_VIRTUAL_MEM_MAP
+ __contiguous_bitmap_init(size);
+#else
+ unsigned long max_gap = 0;
+
+ efi_memmap_walk(find_largest_hole, (u64*)&max_gap);
+ if (max_gap < LARGE_GAP) {
+ __contiguous_bitmap_init(size);
+ } else {
+ unsigned long map_size = PAGE_ALIGN(size);
+ vmalloc_end -= map_size;
+ contiguous_bitmap = (unsigned long*)vmalloc_end;
+ efi_memmap_walk(create_contiguous_bitmap, NULL);
+ }
+#endif
}
#if 0
diff --git a/linux-2.6-xen-sparse/arch/ia64/xen/xenentry.S b/linux-2.6-xen-sparse/arch/ia64/xen/xenentry.S
index aad0aac631..f0a75468fa 100644
--- a/linux-2.6-xen-sparse/arch/ia64/xen/xenentry.S
+++ b/linux-2.6-xen-sparse/arch/ia64/xen/xenentry.S
@@ -59,12 +59,6 @@ GLOBAL_ENTRY(ia64_switch_to)
shr.u r26=r20,IA64_GRANULE_SHIFT
cmp.eq p7,p6=r25,in0
;;
-#ifdef CONFIG_XEN
- movl r8=XSI_PSR_IC
- ;;
- st4 [r8]=r0 // force psr.ic off for hyperprivop(s)
- ;;
-#endif
/*
* If we've already mapped this task's page, we can skip doing it again.
*/
@@ -72,19 +66,13 @@ GLOBAL_ENTRY(ia64_switch_to)
(p6) br.cond.dpnt .map
;;
.done:
+ ld8 sp=[r21] // load kernel stack pointer of new task
#ifdef CONFIG_XEN
- // psr.ic already off
// update "current" application register
mov r8=IA64_KR_CURRENT
mov r9=in0;;
XEN_HYPER_SET_KR
- ld8 sp=[r21] // load kernel stack pointer of new task
- movl r27=XSI_PSR_IC
- mov r8=1
- ;;
- st4 [r27]=r8 // psr.ic back on
#else
- ld8 sp=[r21] // load kernel stack pointer of new task
mov IA64_KR(CURRENT)=in0 // update "current" application register
#endif
mov r8=r13 // return pointer to previously running task
@@ -99,7 +87,10 @@ GLOBAL_ENTRY(ia64_switch_to)
.map:
#ifdef CONFIG_XEN
- // psr.ic already off
+ movl r25=XSI_PSR_IC // clear psr.ic
+ ;;
+ st4 [r25]=r0
+ ;;
#else
rsm psr.ic // interrupts (psr.i) are already disabled here
#endif
@@ -132,7 +123,13 @@ GLOBAL_ENTRY(ia64_switch_to)
#endif
;;
itr.d dtr[r25]=r23 // wire in new mapping...
-#ifndef CONFIG_XEN
+#ifdef CONFIG_XEN
+ mov r9=1
+ movl r8=XSI_PSR_IC
+ ;;
+ st4 [r8]=r9
+ ;;
+#else
ssm psr.ic // reenable the psr.ic bit
;;
srlz.d
@@ -415,7 +412,16 @@ ENTRY(ia64_leave_syscall)
(pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
;;
ld8 r26=[r2],PT(B0)-PT(AR_PFS) // M0|1 load ar.pfs
+#ifdef CONFIG_XEN
+(pKStk) mov r21=r8
+(pKStk) XEN_HYPER_GET_PSR
+ ;;
+(pKStk) mov r22=r8
+(pKStk) mov r8=r21
+ ;;
+#else
(pKStk) mov r22=psr // M2 read PSR now that interrupts are disabled
+#endif
nop 0
;;
ld8 r21=[r2],PT(AR_RNAT)-PT(B0) // M0|1 load b0
@@ -645,7 +651,16 @@ GLOBAL_ENTRY(ia64_leave_kernel)
adds r16=PT(CR_IPSR)+16,r12
adds r17=PT(CR_IIP)+16,r12
+#ifdef CONFIG_XEN
+(pKStk) mov r29=r8
+(pKStk) XEN_HYPER_GET_PSR
+ ;;
+(pKStk) mov r22=r8
+(pKStk) mov r8=r29
+ ;;
+#else
(pKStk) mov r22=psr // M2 read PSR now that interrupts are disabled
+#endif
nop.i 0
nop.i 0
;;
diff --git a/linux-2.6-xen-sparse/arch/ia64/xen/xenivt.S b/linux-2.6-xen-sparse/arch/ia64/xen/xenivt.S
index 6217a395ff..c7648e0bcd 100644
--- a/linux-2.6-xen-sparse/arch/ia64/xen/xenivt.S
+++ b/linux-2.6-xen-sparse/arch/ia64/xen/xenivt.S
@@ -709,11 +709,23 @@ ENTRY(page_fault)
;;
#endif
#ifdef CONFIG_XEN
- br.cond.sptk.many xen_page_fault
+
+#define MASK_TO_PEND_OFS (-1)
+
+(p15) movl r14=XSI_PSR_I_ADDR
;;
-done_xen_page_fault:
-#endif
+(p15) ld8 r14=[r14]
+ ;;
+(p15) st1 [r14]=r0,MASK_TO_PEND_OFS // if (p15) vpsr.i = 1
+ ;; // if (p15) (vcpu->vcpu_info->evtchn_upcall_mask)=0
+(p15) ld1 r14=[r14] // if (vcpu->vcpu_info->evtchn_upcall_pending)
+ ;;
+(p15) cmp.ne p15,p0=r14,r0
+ ;;
+(p15) XEN_HYPER_SSM_I
+#else
(p15) ssm psr.i // restore psr.i
+#endif
movl r14=ia64_leave_kernel
;;
SAVE_REST
@@ -729,25 +741,6 @@ END(page_fault)
ENTRY(dkey_miss)
DBG_FAULT(7)
FAULT(7)
-#ifdef CONFIG_XEN
- // Leaving this code inline above results in an IVT section overflow
- // There is no particular reason for this code to be here...
-xen_page_fault:
-(p15) movl r3=XSI_PSR_I_ADDR
- ;;
-(p15) ld8 r3=[r3]
- ;;
-(p15) st1 [r3]=r0,-1 // if (p15) vpsr.i = 1
- mov r14=r0
- ;;
-(p15) ld1 r14=[r3] // if (pending_events)
- adds r3=8,r2 // re-set up second base pointer
- ;;
-(p15) cmp.ne p15,p0=r14,r0
- ;;
- br.cond.sptk.many done_xen_page_fault
- ;;
-#endif
END(dkey_miss)
.org ia64_ivt+0x2000
@@ -1170,14 +1163,13 @@ ENTRY(break_fault)
#ifdef CONFIG_XEN
(p15) ld8 r16=[r16] // vpsr.i
;;
-(p15) st1 [r16]=r0,-1 // if (p15) vpsr.i = 1
- mov r2=r0
- ;;
-(p15) ld1 r2=[r16] // if (pending_events)
+(p15) st1 [r16]=r0,MASK_TO_PEND_OFS // if (p15) vpsr.i = 1
+ ;; // if (p15) (vcpu->vcpu_info->evtchn_upcall_mask)=0
+(p15) ld1 r2=[r16] // if (vcpu->vcpu_info->evtchn_upcall_pending)
;;
- cmp.ne p6,p0=r2,r0
+(p15) cmp.ne.unc p6,p0=r2,r0
;;
-(p6) ssm psr.i // do a real ssm psr.i
+(p6) XEN_HYPER_SSM_I // do a real ssm psr.i
#else
(p15) ssm psr.i // M2 restore psr.i
#endif
diff --git a/linux-2.6-xen-sparse/arch/ia64/xen/xenminstate.h b/linux-2.6-xen-sparse/arch/ia64/xen/xenminstate.h
index b3902a6a80..75ec49f886 100644
--- a/linux-2.6-xen-sparse/arch/ia64/xen/xenminstate.h
+++ b/linux-2.6-xen-sparse/arch/ia64/xen/xenminstate.h
@@ -151,16 +151,7 @@
.mem.offset 8,0; st8.spill [r17]=r11,24; \
;; \
/* xen special handling for possibly lazy cover */ \
- movl r8=XSI_INCOMPL_REGFR; \
- ;; \
- ld4 r30=[r8]; \
- ;; \
- /* set XSI_INCOMPL_REGFR 0 */ \
- st4 [r8]=r0; \
- cmp.eq p6,p7=r30,r0; \
- ;; /* not sure if this stop bit is necessary */ \
-(p6) adds r8=XSI_PRECOVER_IFS-XSI_INCOMPL_REGFR,r8; \
-(p7) adds r8=XSI_IFS-XSI_INCOMPL_REGFR,r8; \
+ movl r8=XSI_PRECOVER_IFS; \
;; \
ld8 r30=[r8]; \
;; \
diff --git a/linux-2.6-xen-sparse/arch/ia64/xen/xenpal.S b/linux-2.6-xen-sparse/arch/ia64/xen/xenpal.S
index 5f9681e49a..d8ebfb994a 100644
--- a/linux-2.6-xen-sparse/arch/ia64/xen/xenpal.S
+++ b/linux-2.6-xen-sparse/arch/ia64/xen/xenpal.S
@@ -33,7 +33,16 @@ GLOBAL_ENTRY(xen_pal_call_static)
mov loc4=ar.rsc // save RSE configuration
;;
mov ar.rsc=0 // put RSE in enforced lazy, LE mode
+#ifdef CONFIG_XEN
+ mov r9 = r8
+ XEN_HYPER_GET_PSR
+ ;;
+ mov loc3 = r8
+ mov r8 = r9
+ ;;
+#else
mov loc3 = psr
+#endif
mov loc0 = rp
.body
mov r30 = in2
@@ -41,16 +50,16 @@ GLOBAL_ENTRY(xen_pal_call_static)
#ifdef CONFIG_XEN
// this is low priority for paravirtualization, but is called
// from the idle loop so confuses privop counting
- movl r31=XSI_PSR_IC
- ;;
-(p6) st4 [r31]=r0
+ movl r31=XSI_PSR_I_ADDR
;;
-(p7) adds r31=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r31
-(p7) mov r22=1
+ ld8 r31=[r31]
+ mov r22=1
;;
-(p7) ld8 r31=[r31]
+ st1 [r31]=r22
+ ;;
+(p6) movl r31=XSI_PSR_IC
;;
-(p7) st1 [r31]=r22
+(p6) st4.rel [r31]=r0
;;
mov r31 = in3
mov b7 = loc2
diff --git a/linux-2.6-xen-sparse/arch/ia64/xen/xensetup.S b/linux-2.6-xen-sparse/arch/ia64/xen/xensetup.S
index e761278670..c295cc3431 100644
--- a/linux-2.6-xen-sparse/arch/ia64/xen/xensetup.S
+++ b/linux-2.6-xen-sparse/arch/ia64/xen/xensetup.S
@@ -38,17 +38,16 @@ END(early_xen_setup)
/* Stub for suspend.
Just force the stacked registers to be written in memory. */
GLOBAL_ENTRY(xencomm_arch_hypercall_suspend)
- mov r15=r32
;;
- alloc r20=ar.pfs,0,0,0,0
+ alloc r20=ar.pfs,0,0,6,0
mov r2=__HYPERVISOR_sched_op
;;
/* We don't want to deal with RSE. */
flushrs
- mov r14=2 // SCHEDOP_shutdown
+ mov r33=r32
+ mov r32=2 // SCHEDOP_shutdown
;;
break 0x1000
;;
- mov ar.pfs=r20
br.ret.sptk.many b0
END(xencomm_arch_hypercall_suspend)
diff --git a/linux-2.6-xen-sparse/drivers/xen/Kconfig b/linux-2.6-xen-sparse/drivers/xen/Kconfig
index 6c4e0caab7..019256771c 100644
--- a/linux-2.6-xen-sparse/drivers/xen/Kconfig
+++ b/linux-2.6-xen-sparse/drivers/xen/Kconfig
@@ -238,14 +238,6 @@ config XEN_COMPAT_030002
endmenu
-config HAVE_ARCH_ALLOC_SKB
- bool
- default y
-
-config HAVE_ARCH_DEV_ALLOC_SKB
- bool
- default y
-
config HAVE_IRQ_IGNORE_UNHANDLED
bool
default y
diff --git a/linux-2.6-xen-sparse/include/asm-ia64/hypercall.h b/linux-2.6-xen-sparse/include/asm-ia64/hypercall.h
index 19be0e6ed3..9ae66fe781 100644
--- a/linux-2.6-xen-sparse/include/asm-ia64/hypercall.h
+++ b/linux-2.6-xen-sparse/include/asm-ia64/hypercall.h
@@ -39,6 +39,9 @@
#include <asm/xen/xcom_hcall.h>
struct xencomm_handle;
+extern unsigned long __hypercall(unsigned long a1, unsigned long a2,
+ unsigned long a3, unsigned long a4,
+ unsigned long a5, unsigned long cmd);
/*
* Assembler stubs for hyper-calls.
@@ -47,115 +50,58 @@ struct xencomm_handle;
#define _hypercall0(type, name) \
({ \
long __res; \
- __asm__ __volatile__ (";;\n" \
- "mov r2=%1\n" \
- "break 0x1000 ;;\n" \
- "mov %0=r8 ;;\n" \
- : "=r" (__res) \
- : "J" (__HYPERVISOR_##name) \
- : "r2","r8", \
- "memory" ); \
+ __res=__hypercall(0, 0, 0, 0, 0, __HYPERVISOR_##name); \
(type)__res; \
})
#define _hypercall1(type, name, a1) \
({ \
long __res; \
- __asm__ __volatile__ (";;\n" \
- "mov r14=%2\n" \
- "mov r2=%1\n" \
- "break 0x1000 ;;\n" \
- "mov %0=r8 ;;\n" \
- : "=r" (__res) \
- : "J" (__HYPERVISOR_##name), \
- "rI" ((unsigned long)(a1)) \
- : "r14","r2","r8", \
- "memory" ); \
+ __res = __hypercall((unsigned long)a1, \
+ 0, 0, 0, 0, __HYPERVISOR_##name); \
(type)__res; \
})
#define _hypercall2(type, name, a1, a2) \
({ \
long __res; \
- __asm__ __volatile__ (";;\n" \
- "mov r14=%2\n" \
- "mov r15=%3\n" \
- "mov r2=%1\n" \
- "break 0x1000 ;;\n" \
- "mov %0=r8 ;;\n" \
- : "=r" (__res) \
- : "J" (__HYPERVISOR_##name), \
- "rI" ((unsigned long)(a1)), \
- "rI" ((unsigned long)(a2)) \
- : "r14","r15","r2","r8", \
- "memory" ); \
+ __res = __hypercall((unsigned long)a1, \
+ (unsigned long)a2, \
+ 0, 0, 0, __HYPERVISOR_##name); \
(type)__res; \
})
#define _hypercall3(type, name, a1, a2, a3) \
({ \
long __res; \
- __asm__ __volatile__ (";;\n" \
- "mov r14=%2\n" \
- "mov r15=%3\n" \
- "mov r16=%4\n" \
- "mov r2=%1\n" \
- "break 0x1000 ;;\n" \
- "mov %0=r8 ;;\n" \
- : "=r" (__res) \
- : "J" (__HYPERVISOR_##name), \
- "rI" ((unsigned long)(a1)), \
- "rI" ((unsigned long)(a2)), \
- "rI" ((unsigned long)(a3)) \
- : "r14","r15","r16","r2","r8", \
- "memory" ); \
- (type)__res; \
+ __res = __hypercall((unsigned long)a1, \
+ (unsigned long)a2, \
+ (unsigned long)a3, \
+ 0, 0, __HYPERVISOR_##name); \
+ (type)__res; \
})
#define _hypercall4(type, name, a1, a2, a3, a4) \
({ \
long __res; \
- __asm__ __volatile__ (";;\n" \
- "mov r14=%2\n" \
- "mov r15=%3\n" \
- "mov r16=%4\n" \
- "mov r17=%5\n" \
- "mov r2=%1\n" \
- "break 0x1000 ;;\n" \
- "mov %0=r8 ;;\n" \
- : "=r" (__res) \
- : "J" (__HYPERVISOR_##name), \
- "rI" ((unsigned long)(a1)), \
- "rI" ((unsigned long)(a2)), \
- "rI" ((unsigned long)(a3)), \
- "rI" ((unsigned long)(a4)) \
- : "r14","r15","r16","r2","r8", \
- "r17","memory" ); \
- (type)__res; \
+ __res = __hypercall((unsigned long)a1, \
+ (unsigned long)a2, \
+ (unsigned long)a3, \
+ (unsigned long)a4, \
+ 0, __HYPERVISOR_##name); \
+ (type)__res; \
})
#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
({ \
long __res; \
- __asm__ __volatile__ (";;\n" \
- "mov r14=%2\n" \
- "mov r15=%3\n" \
- "mov r16=%4\n" \
- "mov r17=%5\n" \
- "mov r18=%6\n" \
- "mov r2=%1\n" \
- "break 0x1000 ;;\n" \
- "mov %0=r8 ;;\n" \
- : "=r" (__res) \
- : "J" (__HYPERVISOR_##name), \
- "rI" ((unsigned long)(a1)), \
- "rI" ((unsigned long)(a2)), \
- "rI" ((unsigned long)(a3)), \
- "rI" ((unsigned long)(a4)), \
- "rI" ((unsigned long)(a5)) \
- : "r14","r15","r16","r2","r8", \
- "r17","r18","memory" ); \
- (type)__res; \
+ __res = __hypercall((unsigned long)a1, \
+ (unsigned long)a2, \
+ (unsigned long)a3, \
+ (unsigned long)a4, \
+ (unsigned long)a5, \
+ __HYPERVISOR_##name); \
+ (type)__res; \
})
diff --git a/linux-2.6-xen-sparse/include/asm-ia64/hypervisor.h b/linux-2.6-xen-sparse/include/asm-ia64/hypervisor.h
index 083884c130..6485dd87f0 100644
--- a/linux-2.6-xen-sparse/include/asm-ia64/hypervisor.h
+++ b/linux-2.6-xen-sparse/include/asm-ia64/hypervisor.h
@@ -215,7 +215,8 @@ asmlinkage int xprintk(const char *fmt, ...);
#endif /* CONFIG_XEN || CONFIG_VMX_GUEST */
#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-#define is_initial_xendomain() (xen_start_info->flags & SIF_INITDOMAIN)
+#define is_initial_xendomain() \
+ (is_running_on_xen() ? xen_start_info->flags & SIF_INITDOMAIN : 0)
#else
#define is_initial_xendomain() 0
#endif
diff --git a/linux-2.6-xen-sparse/include/asm-ia64/maddr.h b/linux-2.6-xen-sparse/include/asm-ia64/maddr.h
index cbdef5a96e..cacad452d8 100644
--- a/linux-2.6-xen-sparse/include/asm-ia64/maddr.h
+++ b/linux-2.6-xen-sparse/include/asm-ia64/maddr.h
@@ -68,7 +68,6 @@ machine_to_phys_for_dma(unsigned long machine)
static inline unsigned long
mfn_to_local_pfn(unsigned long mfn)
{
- extern unsigned long max_mapnr;
unsigned long pfn = mfn_to_pfn_for_dma(mfn);
if (!pfn_valid(pfn))
return INVALID_P2M_ENTRY;
diff --git a/linux-2.6-xen-sparse/include/asm-ia64/page.h b/linux-2.6-xen-sparse/include/asm-ia64/page.h
index fd614c0a94..e8ad9ec4ee 100644
--- a/linux-2.6-xen-sparse/include/asm-ia64/page.h
+++ b/linux-2.6-xen-sparse/include/asm-ia64/page.h
@@ -119,6 +119,7 @@ extern struct page *vmem_map;
#endif
#ifdef CONFIG_FLATMEM
+extern unsigned long max_mapnr;
# define pfn_valid(pfn) (((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
#elif defined(CONFIG_DISCONTIGMEM)
extern unsigned long min_low_pfn;
diff --git a/linux-2.6-xen-sparse/include/asm-ia64/xen/privop.h b/linux-2.6-xen-sparse/include/asm-ia64/xen/privop.h
index b3bd8fb7dc..19624f5ba3 100644
--- a/linux-2.6-xen-sparse/include/asm-ia64/xen/privop.h
+++ b/linux-2.6-xen-sparse/include/asm-ia64/xen/privop.h
@@ -45,12 +45,10 @@
#define XEN_HYPER_GET_PMD break HYPERPRIVOP_GET_PMD
#define XEN_HYPER_GET_EFLAG break HYPERPRIVOP_GET_EFLAG
#define XEN_HYPER_SET_EFLAG break HYPERPRIVOP_SET_EFLAG
-#define XEN_HYPER_RSM_BE break HYPERPRIVOP_RSM_BE
#define XEN_HYPER_GET_PSR break HYPERPRIVOP_GET_PSR
#define XSI_IFS (XSI_BASE + XSI_IFS_OFS)
#define XSI_PRECOVER_IFS (XSI_BASE + XSI_PRECOVER_IFS_OFS)
-#define XSI_INCOMPL_REGFR (XSI_BASE + XSI_INCOMPL_REGFR_OFS)
#define XSI_IFA (XSI_BASE + XSI_IFA_OFS)
#define XSI_ISR (XSI_BASE + XSI_ISR_OFS)
#define XSI_IIM (XSI_BASE + XSI_IIM_OFS)
@@ -123,8 +121,6 @@ extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */
* that we inline it */
#define xen_hyper_ssm_i() \
({ \
- xen_set_virtual_psr_i(0); \
- xen_set_virtual_psr_ic(0); \
XEN_HYPER_SSM_I; \
})
@@ -139,8 +135,12 @@ extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */
#define xen_ssm_i() \
({ \
int old = xen_get_virtual_psr_i(); \
- xen_set_virtual_psr_i(1); \
- if (!old && xen_get_virtual_pend()) xen_hyper_ssm_i(); \
+ if (!old) { \
+ if (xen_get_virtual_pend()) \
+ xen_hyper_ssm_i(); \
+ else \
+ xen_set_virtual_psr_i(1); \
+ } \
})
#define xen_ia64_intrin_local_irq_restore(x) \
@@ -182,6 +182,7 @@ extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */
* be properly handled by Xen, some are frequent enough that we use
* hyperprivops for performance. */
+extern unsigned long xen_get_psr(void);
extern unsigned long xen_get_ivr(void);
extern unsigned long xen_get_tpr(void);
extern void xen_set_itm(unsigned long);
@@ -201,6 +202,11 @@ extern void xen_ptcga(unsigned long addr, unsigned long size);
__u64 ia64_intri_res; \
\
switch(regnum) { \
+ case _IA64_REG_PSR: \
+ ia64_intri_res = (is_running_on_xen()) ? \
+ xen_get_psr() : \
+ __ia64_getreg(regnum); \
+ break; \
case _IA64_REG_CR_IVR: \
ia64_intri_res = (is_running_on_xen()) ? \
xen_get_ivr() : \
diff --git a/linux-2.6-xen-sparse/include/linux/skbuff.h b/linux-2.6-xen-sparse/include/linux/skbuff.h
index 77b9012839..46ad73c420 100644
--- a/linux-2.6-xen-sparse/include/linux/skbuff.h
+++ b/linux-2.6-xen-sparse/include/linux/skbuff.h
@@ -353,8 +353,7 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
unsigned int size,
- gfp_t priority,
- int fclone);
+ gfp_t priority);
extern void kfree_skbmem(struct sk_buff *skb);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
gfp_t priority);
diff --git a/linux-2.6-xen-sparse/net/core/skbuff.c b/linux-2.6-xen-sparse/net/core/skbuff.c
index 02cfeeb09a..e7bbe91b8f 100644
--- a/linux-2.6-xen-sparse/net/core/skbuff.c
+++ b/linux-2.6-xen-sparse/net/core/skbuff.c
@@ -210,18 +210,14 @@ nodata:
*/
struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
unsigned int size,
- gfp_t gfp_mask,
- int fclone)
+ gfp_t gfp_mask)
{
- kmem_cache_t *cache;
- struct skb_shared_info *shinfo;
struct sk_buff *skb;
u8 *data;
- cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;
-
/* Get the HEAD */
- skb = kmem_cache_alloc(cache, gfp_mask & ~__GFP_DMA);
+ skb = kmem_cache_alloc(skbuff_head_cache,
+ gfp_mask & ~__GFP_DMA);
if (!skb)
goto out;
@@ -238,29 +234,18 @@ struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
skb->data = data;
skb->tail = data;
skb->end = data + size;
- /* make sure we initialize shinfo sequentially */
- shinfo = skb_shinfo(skb);
- atomic_set(&shinfo->dataref, 1);
- shinfo->nr_frags = 0;
- shinfo->gso_size = 0;
- shinfo->gso_segs = 0;
- shinfo->gso_type = 0;
- shinfo->ip6_frag_id = 0;
- shinfo->frag_list = NULL;
-
- if (fclone) {
- struct sk_buff *child = skb + 1;
- atomic_t *fclone_ref = (atomic_t *) (child + 1);
-
- skb->fclone = SKB_FCLONE_ORIG;
- atomic_set(fclone_ref, 1);
- child->fclone = SKB_FCLONE_UNAVAILABLE;
- }
+ atomic_set(&(skb_shinfo(skb)->dataref), 1);
+ skb_shinfo(skb)->nr_frags = 0;
+ skb_shinfo(skb)->gso_size = 0;
+ skb_shinfo(skb)->gso_segs = 0;
+ skb_shinfo(skb)->gso_type = 0;
+ skb_shinfo(skb)->ip6_frag_id = 0;
+ skb_shinfo(skb)->frag_list = NULL;
out:
return skb;
nodata:
- kmem_cache_free(cache, skb);
+ kmem_cache_free(skbuff_head_cache, skb);
skb = NULL;
goto out;
}
diff --git a/tools/ioemu/hw/cirrus_vga.c b/tools/ioemu/hw/cirrus_vga.c
index 4a5631195c..8467b403a9 100644
--- a/tools/ioemu/hw/cirrus_vga.c
+++ b/tools/ioemu/hw/cirrus_vga.c
@@ -3010,11 +3010,44 @@ static CPUWriteMemoryFunc *cirrus_mmio_write[3] = {
cirrus_mmio_writel,
};
+void cirrus_stop_acc(CirrusVGAState *s)
+{
+ if (s->map_addr){
+ int error;
+ s->map_addr = 0;
+ error = unset_vram_mapping(s->cirrus_lfb_addr,
+ s->cirrus_lfb_end);
+ fprintf(stderr, "cirrus_stop_acc:unset_vram_mapping.\n");
+
+ munmap(s->vram_ptr, VGA_RAM_SIZE);
+ }
+}
+
+void cirrus_restart_acc(CirrusVGAState *s)
+{
+ if (s->cirrus_lfb_addr && s->cirrus_lfb_end) {
+ void *vram_pointer, *old_vram;
+ fprintf(stderr, "cirrus_vga_load:re-enable vga acc.lfb_addr=0x%lx, lfb_end=0x%lx.\n",
+ s->cirrus_lfb_addr, s->cirrus_lfb_end);
+ vram_pointer = set_vram_mapping(s->cirrus_lfb_addr ,s->cirrus_lfb_end);
+ if (!vram_pointer){
+ fprintf(stderr, "cirrus_vga_load:NULL vram_pointer\n");
+ } else {
+ old_vram = vga_update_vram((VGAState *)s, vram_pointer,
+ VGA_RAM_SIZE);
+ qemu_free(old_vram);
+ s->map_addr = s->cirrus_lfb_addr;
+ s->map_end = s->cirrus_lfb_end;
+ }
+ }
+}
+
/* load/save state */
static void cirrus_vga_save(QEMUFile *f, void *opaque)
{
CirrusVGAState *s = opaque;
+ uint8_t vga_acc;
qemu_put_be32s(f, &s->latch);
qemu_put_8s(f, &s->sr_index);
@@ -3049,11 +3082,20 @@ static void cirrus_vga_save(QEMUFile *f, void *opaque)
qemu_put_be32s(f, &s->hw_cursor_y);
/* XXX: we do not save the bitblt state - we assume we do not save
the state when the blitter is active */
+
+ vga_acc = (!!s->map_addr);
+ qemu_put_8s(f, &vga_acc);
+ qemu_put_be64s(f, (uint64_t*)&s->cirrus_lfb_addr);
+ qemu_put_be64s(f, (uint64_t*)&s->cirrus_lfb_end);
+ qemu_put_buffer(f, s->vram_ptr, VGA_RAM_SIZE);
+ if (vga_acc)
+ cirrus_stop_acc(s);
}
static int cirrus_vga_load(QEMUFile *f, void *opaque, int version_id)
{
CirrusVGAState *s = opaque;
+ uint8_t vga_acc = 0;
if (version_id != 1)
return -EINVAL;
@@ -3092,6 +3134,14 @@ static int cirrus_vga_load(QEMUFile *f, void *opaque, int version_id)
qemu_get_be32s(f, &s->hw_cursor_x);
qemu_get_be32s(f, &s->hw_cursor_y);
+ qemu_get_8s(f, &vga_acc);
+ qemu_get_be64s(f, (uint64_t*)&s->cirrus_lfb_addr);
+ qemu_get_be64s(f, (uint64_t*)&s->cirrus_lfb_end);
+ qemu_get_buffer(f, s->vram_ptr, VGA_RAM_SIZE);
+ if (vga_acc){
+ cirrus_restart_acc(s);
+ }
+
/* force refresh */
s->graphic_mode = -1;
cirrus_update_bank_ptr(s, 0);
@@ -3297,6 +3347,8 @@ void pci_cirrus_vga_init(PCIBus *bus, DisplayState *ds, uint8_t *vga_ram_base,
ds, vga_ram_base, vga_ram_offset, vga_ram_size);
cirrus_init_common(s, device_id, 1);
+ register_savevm("cirrus_vga_pci", 0, 1, generic_pci_save, generic_pci_load, d);
+
/* setup memory space */
/* memory #0 LFB */
/* memory #1 memory-mapped I/O */
diff --git a/tools/ioemu/hw/ide.c b/tools/ioemu/hw/ide.c
index 532afbbffd..ef203a2f8a 100644
--- a/tools/ioemu/hw/ide.c
+++ b/tools/ioemu/hw/ide.c
@@ -2512,6 +2512,9 @@ void pci_piix3_ide_init(PCIBus *bus, BlockDriverState **hd_table, int devfn)
pic_set_irq_new, isa_pic, 15);
ide_init_ioport(&d->ide_if[0], 0x1f0, 0x3f6);
ide_init_ioport(&d->ide_if[2], 0x170, 0x376);
+
+ register_savevm("ide_pci", 0, 1, generic_pci_save, generic_pci_load, d);
+
#ifdef DMA_MULTI_THREAD
dma_create_thread();
#endif //DMA_MULTI_THREAD
diff --git a/tools/ioemu/hw/pci.c b/tools/ioemu/hw/pci.c
index 77737c8615..fd2dcc9d93 100644
--- a/tools/ioemu/hw/pci.c
+++ b/tools/ioemu/hw/pci.c
@@ -40,6 +40,8 @@ target_phys_addr_t pci_mem_base;
static int pci_irq_index;
static PCIBus *first_bus;
+static void pci_update_mappings(PCIDevice *d);
+
PCIBus *pci_register_bus(pci_set_irq_fn set_irq, void *pic, int devfn_min)
{
PCIBus *bus;
@@ -71,6 +73,7 @@ int generic_pci_load(QEMUFile* f, void *opaque, int version_id)
return -EINVAL;
qemu_get_buffer(f, s->config, 256);
+ pci_update_mappings(s);
return 0;
}
diff --git a/tools/ioemu/target-i386-dm/helper2.c b/tools/ioemu/target-i386-dm/helper2.c
index ae84b9ed23..dc3394d5b7 100644
--- a/tools/ioemu/target-i386-dm/helper2.c
+++ b/tools/ioemu/target-i386-dm/helper2.c
@@ -546,6 +546,7 @@ int main_loop(void)
{
extern int vm_running;
extern int shutdown_requested;
+ extern int suspend_requested;
CPUState *env = cpu_single_env;
int evtchn_fd = xc_evtchn_fd(xce_handle);
@@ -563,12 +564,24 @@ int main_loop(void)
qemu_system_reset();
reset_requested = 0;
}
+ if (suspend_requested) {
+ fprintf(logfile, "device model received suspend signal!\n");
+ break;
+ }
}
/* Wait up to 10 msec. */
main_loop_wait(10);
}
- destroy_hvm_domain();
+ if (!suspend_requested)
+ destroy_hvm_domain();
+ else {
+ char qemu_file[20];
+ sprintf(qemu_file, "/tmp/xen.qemu-dm.%d", domid);
+ if (qemu_savevm(qemu_file) < 0)
+ fprintf(stderr, "qemu save fail.\n");
+ }
+
return 0;
}
diff --git a/tools/ioemu/vl.c b/tools/ioemu/vl.c
index 573687a08d..f5a79ad7e5 100644
--- a/tools/ioemu/vl.c
+++ b/tools/ioemu/vl.c
@@ -4441,6 +4441,11 @@ int qemu_loadvm(const char *filename)
qemu_fseek(f, cur_pos + record_len, SEEK_SET);
}
fclose(f);
+
+ /* del tmp file */
+ if (unlink(filename) == -1)
+ fprintf(stderr, "delete tmp qemu state file failed.\n");
+
ret = 0;
the_end:
if (saved_vm_running)
@@ -5027,6 +5032,7 @@ typedef struct QEMUResetEntry {
static QEMUResetEntry *first_reset_entry;
int reset_requested;
int shutdown_requested;
+int suspend_requested;
static int powerdown_requested;
void qemu_register_reset(QEMUResetHandler *func, void *opaque)
@@ -5808,6 +5814,14 @@ int set_mm_mapping(int xc_handle, uint32_t domid,
return 0;
}
+void suspend(int sig)
+{
+ fprintf(logfile, "suspend sig handler called with requested=%d!\n", suspend_requested);
+ if (sig != SIGUSR1)
+ fprintf(logfile, "suspend signal dismatch, get sig=%d!\n", sig);
+ suspend_requested = 1;
+}
+
#if defined(__i386__) || defined(__x86_64__)
static struct map_cache *mapcache_entry;
static unsigned long nr_buckets;
@@ -6464,10 +6478,6 @@ int main(int argc, char **argv)
}
#if defined (__ia64__)
- /* ram_size passed from xend has added on GFW memory,
- so we must subtract it here */
- ram_size -= 16 * MEM_M;
-
if (ram_size > MMIO_START)
ram_size += 1 * MEM_G; /* skip 3G-4G MMIO, LEGACY_IO_SPACE etc. */
#endif
@@ -6718,6 +6728,26 @@ int main(int argc, char **argv)
vm_start();
}
}
+
+ /* register signal for the suspend request when save */
+ {
+ struct sigaction act;
+ sigset_t set;
+ act.sa_handler = suspend;
+ act.sa_flags = SA_RESTART;
+ sigemptyset(&act.sa_mask);
+
+ sigaction(SIGUSR1, &act, NULL);
+
+ /* control panel mask some signals when spawn qemu, need unmask here*/
+ sigemptyset(&set);
+ sigaddset(&set, SIGUSR1);
+ sigaddset(&set, SIGTERM);
+ if (sigprocmask(SIG_UNBLOCK, &set, NULL) == -1)
+ fprintf(stderr, "unblock signal fail, possible issue for HVM save!\n");
+
+ }
+
main_loop();
quit_timers();
return 0;
diff --git a/tools/libxc/Makefile b/tools/libxc/Makefile
index f92cde836d..8a563d00e0 100644
--- a/tools/libxc/Makefile
+++ b/tools/libxc/Makefile
@@ -27,7 +27,7 @@ GUEST_SRCS-y += xg_private.c
GUEST_SRCS-$(CONFIG_X86) += xc_linux_build.c
GUEST_SRCS-$(CONFIG_IA64) += xc_linux_build.c
GUEST_SRCS-$(CONFIG_MIGRATE) += xc_linux_restore.c xc_linux_save.c
-GUEST_SRCS-$(CONFIG_HVM) += xc_hvm_build.c
+GUEST_SRCS-$(CONFIG_HVM) += xc_hvm_build.c xc_hvm_restore.c xc_hvm_save.c
-include $(XEN_TARGET_ARCH)/Makefile
diff --git a/tools/libxc/ia64/xc_ia64_hvm_build.c b/tools/libxc/ia64/xc_ia64_hvm_build.c
index 3bfa274293..270a6f67a2 100644
--- a/tools/libxc/ia64/xc_ia64_hvm_build.c
+++ b/tools/libxc/ia64/xc_ia64_hvm_build.c
@@ -569,18 +569,13 @@ setup_guest(int xc_handle, uint32_t dom, unsigned long memsize,
xen_pfn_t *pfn_list;
shared_iopage_t *sp;
void *ioreq_buffer_page;
- // memsize equal to normal memory size(in configure file) + 16M
- // dom_memsize will pass to xc_ia64_build_hob(), so must be subbed 16M
- unsigned long dom_memsize = ((memsize - 16) << 20);
- unsigned long nr_pages = (unsigned long)memsize << (20 - PAGE_SHIFT);
- unsigned long normal_pages = nr_pages - GFW_PAGES;
+ unsigned long dom_memsize = memsize << 20;
+ unsigned long nr_pages = memsize << (20 - PAGE_SHIFT);
unsigned long vcpus;
int rc;
- long i, j;
+ long i;
DECLARE_DOMCTL;
- // ROM size for guest firmware, ioreq page and xenstore page
- nr_pages += 3;
if ((image_size > 12 * MEM_M) || (image_size & (PAGE_SIZE - 1))) {
PERROR("Guest firmware size is incorrect [%ld]?", image_size);
@@ -598,20 +593,20 @@ setup_guest(int xc_handle, uint32_t dom, unsigned long memsize,
pfn_list[i] = i;
// If normal memory > 3G. Reserve 3G ~ 4G for MMIO, GFW and others.
- for (j = (MMIO_START >> PAGE_SHIFT); j < (dom_memsize >> PAGE_SHIFT); j++)
- pfn_list[j] += ((1 * MEM_G) >> PAGE_SHIFT);
+ for (i = (MMIO_START >> PAGE_SHIFT); i < (dom_memsize >> PAGE_SHIFT); i++)
+ pfn_list[i] += ((1 * MEM_G) >> PAGE_SHIFT);
// Allocate memory for VTI guest, up to VGA hole from 0xA0000-0xC0000.
rc = xc_domain_memory_populate_physmap(xc_handle, dom,
- (normal_pages > VGA_START_PAGE) ?
- VGA_START_PAGE : normal_pages,
+ (nr_pages > VGA_START_PAGE) ?
+ VGA_START_PAGE : nr_pages,
0, 0, &pfn_list[0]);
// We're not likely to attempt to create a domain with less than
// 640k of memory, but test for completeness
if (rc == 0 && nr_pages > VGA_END_PAGE)
rc = xc_domain_memory_populate_physmap(xc_handle, dom,
- normal_pages - VGA_END_PAGE,
+ nr_pages - VGA_END_PAGE,
0, 0, &pfn_list[VGA_END_PAGE]);
if (rc != 0) {
PERROR("Could not allocate normal memory for Vti guest.\n");
@@ -621,24 +616,22 @@ setup_guest(int xc_handle, uint32_t dom, unsigned long memsize,
// We allocate additional pfn for GFW and other three pages, so
// the pfn_list is not contiguous. Due to this we must support
// old interface xc_ia64_get_pfn_list().
- // Here i = (dom_memsize >> PAGE_SHIFT)
- for (j = 0; i < nr_pages - 3; i++, j++)
- pfn_list[i] = (GFW_START >> PAGE_SHIFT) + j;
+ for (i = 0; i < GFW_PAGES; i++)
+ pfn_list[i] = (GFW_START >> PAGE_SHIFT) + i;
rc = xc_domain_memory_populate_physmap(xc_handle, dom, GFW_PAGES,
- 0, 0, &pfn_list[normal_pages]);
+ 0, 0, &pfn_list[0]);
if (rc != 0) {
PERROR("Could not allocate GFW memory for Vti guest.\n");
goto error_out;
}
- // Here i = (dom_memsize >> PAGE_SHIFT) + GFW_PAGES
- pfn_list[i] = IO_PAGE_START >> PAGE_SHIFT;
- pfn_list[i+1] = STORE_PAGE_START >> PAGE_SHIFT;
- pfn_list[i+2] = BUFFER_IO_PAGE_START >> PAGE_SHIFT;
+ pfn_list[0] = IO_PAGE_START >> PAGE_SHIFT;
+ pfn_list[1] = STORE_PAGE_START >> PAGE_SHIFT;
+ pfn_list[2] = BUFFER_IO_PAGE_START >> PAGE_SHIFT;
rc = xc_domain_memory_populate_physmap(xc_handle, dom, 3,
- 0, 0, &pfn_list[nr_pages - 3]);
+ 0, 0, &pfn_list[0]);
if (rc != 0) {
PERROR("Could not allocate IO page or store page or buffer io page.\n");
goto error_out;
@@ -675,13 +668,12 @@ setup_guest(int xc_handle, uint32_t dom, unsigned long memsize,
goto error_out;
}
- xc_set_hvm_param(xc_handle, dom,
- HVM_PARAM_STORE_PFN, pfn_list[nr_pages - 2]);
+ xc_set_hvm_param(xc_handle, dom, HVM_PARAM_STORE_PFN, pfn_list[1]);
// Retrieve special pages like io, xenstore, etc.
sp = (shared_iopage_t *)xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
PROT_READ | PROT_WRITE,
- pfn_list[nr_pages - 3]);
+ pfn_list[0]);
if (sp == 0)
goto error_out;
@@ -689,7 +681,7 @@ setup_guest(int xc_handle, uint32_t dom, unsigned long memsize,
munmap(sp, PAGE_SIZE);
ioreq_buffer_page = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
PROT_READ | PROT_WRITE,
- pfn_list[nr_pages - 1]);
+ pfn_list[2]);
memset(ioreq_buffer_page,0,PAGE_SIZE);
munmap(ioreq_buffer_page, PAGE_SIZE);
free(pfn_list);
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index 82aef99ccc..3c42aba129 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -235,6 +235,50 @@ int xc_domain_getinfolist(int xc_handle,
return ret;
}
+/* get info from hvm guest for save */
+int xc_domain_hvm_getcontext(int xc_handle,
+ uint32_t domid,
+ hvm_domain_context_t *hvm_ctxt)
+{
+ int rc;
+ DECLARE_DOMCTL;
+
+ domctl.cmd = XEN_DOMCTL_gethvmcontext;
+ domctl.domain = (domid_t)domid;
+ set_xen_guest_handle(domctl.u.hvmcontext.ctxt, hvm_ctxt);
+
+ if ( (rc = mlock(hvm_ctxt, sizeof(*hvm_ctxt))) != 0 )
+ return rc;
+
+ rc = do_domctl(xc_handle, &domctl);
+
+ safe_munlock(hvm_ctxt, sizeof(*hvm_ctxt));
+
+ return rc;
+}
+
+/* set info to hvm guest for restore */
+int xc_domain_hvm_setcontext(int xc_handle,
+ uint32_t domid,
+ hvm_domain_context_t *hvm_ctxt)
+{
+ int rc;
+ DECLARE_DOMCTL;
+
+ domctl.cmd = XEN_DOMCTL_sethvmcontext;
+ domctl.domain = domid;
+ set_xen_guest_handle(domctl.u.hvmcontext.ctxt, hvm_ctxt);
+
+ if ( (rc = mlock(hvm_ctxt, sizeof(*hvm_ctxt))) != 0 )
+ return rc;
+
+ rc = do_domctl(xc_handle, &domctl);
+
+ safe_munlock(hvm_ctxt, sizeof(*hvm_ctxt));
+
+ return rc;
+}
+
int xc_vcpu_getcontext(int xc_handle,
uint32_t domid,
uint32_t vcpu,
diff --git a/tools/libxc/xc_hvm_restore.c b/tools/libxc/xc_hvm_restore.c
new file mode 100644
index 0000000000..7092d5cea8
--- /dev/null
+++ b/tools/libxc/xc_hvm_restore.c
@@ -0,0 +1,360 @@
+/******************************************************************************
+ * xc_hvm_restore.c
+ *
+ * Restore the state of a HVM guest.
+ *
+ * Copyright (c) 2003, K A Fraser.
+ * Copyright (c) 2006 Intel Corporation
+ * rewritten for hvm guest by Zhai Edwin <edwin.zhai@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "xg_private.h"
+#include "xg_save_restore.h"
+
+#include <xen/hvm/ioreq.h>
+#include <xen/hvm/params.h>
+#include <xen/hvm/e820.h>
+
+/* max mfn of the whole machine */
+static unsigned long max_mfn;
+
+/* virtual starting address of the hypervisor */
+static unsigned long hvirt_start;
+
+/* #levels of page tables used by the current guest */
+static unsigned int pt_levels;
+
+/* total number of pages used by the current guest */
+static unsigned long max_pfn;
+
+/* A table mapping each PFN to its new MFN. */
+static xen_pfn_t *p2m = NULL;
+
+static ssize_t
+read_exact(int fd, void *buf, size_t count)
+{
+ int r = 0, s;
+ unsigned char *b = buf;
+
+ while (r < count) {
+ s = read(fd, &b[r], count - r);
+ if ((s == -1) && (errno == EINTR))
+ continue;
+ if (s <= 0) {
+ break;
+ }
+ r += s;
+ }
+
+ return (r == count) ? 1 : 0;
+}
+
+int xc_hvm_restore(int xc_handle, int io_fd,
+ uint32_t dom, unsigned long nr_pfns,
+ unsigned int store_evtchn, unsigned long *store_mfn,
+ unsigned int console_evtchn, unsigned long *console_mfn,
+ unsigned int pae, unsigned int apic)
+{
+ DECLARE_DOMCTL;
+
+ /* The new domain's shared-info frame number. */
+ unsigned long shared_info_frame;
+
+ /* A copy of the CPU context of the guest. */
+ vcpu_guest_context_t ctxt;
+
+ char *region_base;
+
+ unsigned long buf[PAGE_SIZE/sizeof(unsigned long)];
+
+ xc_dominfo_t info;
+ unsigned int rc = 1, n, i;
+ uint32_t rec_len, nr_vcpus;
+ hvm_domain_context_t hvm_ctxt;
+ unsigned long long v_end, memsize;
+ unsigned long shared_page_nr;
+
+ unsigned long mfn, pfn;
+ unsigned int prev_pc, this_pc;
+ int verify = 0;
+
+ /* Types of the pfns in the current region */
+ unsigned long region_pfn_type[MAX_BATCH_SIZE];
+
+ /* hvm guest mem size (Mb) */
+ memsize = (unsigned long long)*store_mfn;
+ v_end = memsize << 20;
+
+ DPRINTF("xc_hvm_restore:dom=%d, nr_pfns=0x%lx, store_evtchn=%d, *store_mfn=%ld, console_evtchn=%d, *console_mfn=%ld, pae=%u, apic=%u.\n",
+ dom, nr_pfns, store_evtchn, *store_mfn, console_evtchn, *console_mfn, pae, apic);
+
+ max_pfn = nr_pfns;
+
+ if(!get_platform_info(xc_handle, dom,
+ &max_mfn, &hvirt_start, &pt_levels)) {
+ ERROR("Unable to get platform info.");
+ return 1;
+ }
+
+ DPRINTF("xc_hvm_restore start: max_pfn = %lx, max_mfn = %lx, hvirt_start=%lx, pt_levels=%d\n",
+ max_pfn,
+ max_mfn,
+ hvirt_start,
+ pt_levels);
+
+ if (mlock(&ctxt, sizeof(ctxt))) {
+ /* needed for build dom0 op, but might as well do early */
+ ERROR("Unable to mlock ctxt");
+ return 1;
+ }
+
+
+ p2m = malloc(max_pfn * sizeof(xen_pfn_t));
+
+ if (p2m == NULL) {
+ ERROR("memory alloc failed");
+ errno = ENOMEM;
+ goto out;
+ }
+
+ /* Get the domain's shared-info frame. */
+ domctl.cmd = XEN_DOMCTL_getdomaininfo;
+ domctl.domain = (domid_t)dom;
+ if (xc_domctl(xc_handle, &domctl) < 0) {
+ ERROR("Could not get information on new domain");
+ goto out;
+ }
+ shared_info_frame = domctl.u.getdomaininfo.shared_info_frame;
+
+ if(xc_domain_setmaxmem(xc_handle, dom, PFN_TO_KB(max_pfn)) != 0) {
+ errno = ENOMEM;
+ goto out;
+ }
+
+ for ( i = 0; i < max_pfn; i++ )
+ p2m[i] = i;
+ for ( i = HVM_BELOW_4G_RAM_END >> PAGE_SHIFT; i < max_pfn; i++ )
+ p2m[i] += HVM_BELOW_4G_MMIO_LENGTH >> PAGE_SHIFT;
+
+ /* Allocate memory for HVM guest, skipping VGA hole 0xA0000-0xC0000. */
+ rc = xc_domain_memory_populate_physmap(
+ xc_handle, dom, (max_pfn > 0xa0) ? 0xa0 : max_pfn,
+ 0, 0, &p2m[0x00]);
+ if ( (rc == 0) && (max_pfn > 0xc0) )
+ rc = xc_domain_memory_populate_physmap(
+ xc_handle, dom, max_pfn - 0xc0, 0, 0, &p2m[0xc0]);
+ if ( rc != 0 )
+ {
+ PERROR("Could not allocate memory for HVM guest.\n");
+ goto out;
+ }
+
+
+ /**********XXXXXXXXXXXXXXXX******************/
+ if (xc_domain_getinfo(xc_handle, dom, 1, &info) != 1) {
+ ERROR("Could not get domain info");
+ return 1;
+ }
+
+ domctl.cmd = XEN_DOMCTL_getdomaininfo;
+ domctl.domain = (domid_t)dom;
+ if (xc_domctl(xc_handle, &domctl) < 0) {
+ ERROR("Could not get information on new domain");
+ goto out;
+ }
+
+ for ( i = 0; i < max_pfn; i++)
+ p2m[i] = i;
+
+ prev_pc = 0;
+
+ n = 0;
+ while (1) {
+
+ int j;
+
+ this_pc = (n * 100) / max_pfn;
+ if ( (this_pc - prev_pc) >= 5 )
+ {
+ PPRINTF("\b\b\b\b%3d%%", this_pc);
+ prev_pc = this_pc;
+ }
+
+ if (!read_exact(io_fd, &j, sizeof(int))) {
+ ERROR("HVM restore Error when reading batch size");
+ goto out;
+ }
+
+ PPRINTF("batch %d\n",j);
+
+ if (j == -1) {
+ verify = 1;
+ DPRINTF("Entering page verify mode\n");
+ continue;
+ }
+
+ if (j == 0)
+ break; /* our work here is done */
+
+ if (j > MAX_BATCH_SIZE) {
+ ERROR("Max batch size exceeded. Giving up.");
+ goto out;
+ }
+
+ if (!read_exact(io_fd, region_pfn_type, j*sizeof(unsigned long))) {
+ ERROR("Error when reading region pfn types");
+ goto out;
+ }
+
+ region_base = xc_map_foreign_batch(
+ xc_handle, dom, PROT_WRITE, region_pfn_type, j);
+
+ for ( i = 0; i < j; i++ )
+ {
+ void *page;
+
+ pfn = region_pfn_type[i];
+ if ( pfn > max_pfn )
+ {
+ ERROR("pfn out of range");
+ goto out;
+ }
+
+ if ( pfn >= 0xa0 && pfn < 0xc0) {
+ ERROR("hvm restore:pfn in vga hole");
+ goto out;
+ }
+
+
+ mfn = p2m[pfn];
+
+ /* In verify mode, we use a copy; otherwise we work in place */
+ page = verify ? (void *)buf : (region_base + i*PAGE_SIZE);
+
+ if (!read_exact(io_fd, page, PAGE_SIZE)) {
+ ERROR("Error when reading page (%x)", i);
+ goto out;
+ }
+
+ if (verify) {
+
+ int res = memcmp(buf, (region_base + i*PAGE_SIZE), PAGE_SIZE);
+
+ if (res) {
+
+ int v;
+
+ DPRINTF("************** pfn=%lx mfn=%lx gotcs=%08lx "
+ "actualcs=%08lx\n", pfn, p2m[pfn],
+ csum_page(region_base + i*PAGE_SIZE),
+ csum_page(buf));
+
+ for (v = 0; v < 4; v++) {
+
+ unsigned long *p = (unsigned long *)
+ (region_base + i*PAGE_SIZE);
+ if (buf[v] != p[v])
+ DPRINTF(" %d: %08lx %08lx\n", v, buf[v], p[v]);
+ }
+ }
+ }
+
+ } /* end of 'batch' for loop */
+ munmap(region_base, j*PAGE_SIZE);
+ n+= j; /* crude stats */
+
+ }/*while 1*/
+
+/* xc_set_hvm_param(xc_handle, dom, HVM_PARAM_APIC_ENABLED, apic);*/
+ xc_set_hvm_param(xc_handle, dom, HVM_PARAM_PAE_ENABLED, pae);
+ xc_set_hvm_param(xc_handle, dom, HVM_PARAM_STORE_EVTCHN, store_evtchn);
+
+ if ( v_end > HVM_BELOW_4G_RAM_END )
+ shared_page_nr = (HVM_BELOW_4G_RAM_END >> PAGE_SHIFT) - 1;
+ else
+ shared_page_nr = (v_end >> PAGE_SHIFT) - 1;
+
+ xc_set_hvm_param(xc_handle, dom, HVM_PARAM_STORE_PFN, shared_page_nr-1);
+ xc_set_hvm_param(xc_handle, dom, HVM_PARAM_BUFIOREQ_PFN, shared_page_nr-2);
+ xc_set_hvm_param(xc_handle, dom, HVM_PARAM_IOREQ_PFN, shared_page_nr);
+
+ /* calculate the store_mfn; a wrong value causes a hang during introduceDomain */
+ *store_mfn = (v_end >> PAGE_SHIFT) - 2;
+ DPRINTF("hvm restore:calculate new store_mfn=0x%lx,v_end=0x%llx..\n", *store_mfn, v_end);
+
+ /* restore hvm context including pic/pit/shpage */
+ if (!read_exact(io_fd, &rec_len, sizeof(uint32_t))) {
+ ERROR("error read hvm context size!\n");
+ goto out;
+ }
+ if (rec_len != sizeof(hvm_ctxt)) {
+ ERROR("hvm context size dismatch!\n");
+ goto out;
+ }
+
+ if (!read_exact(io_fd, &hvm_ctxt, sizeof(hvm_ctxt))) {
+ ERROR("error read hvm context!\n");
+ goto out;
+ }
+
+ if (( rc = xc_domain_hvm_setcontext(xc_handle, dom, &hvm_ctxt))) {
+ ERROR("error set hvm context!\n");
+ goto out;
+ }
+
+ if (!read_exact(io_fd, &nr_vcpus, sizeof(uint32_t))) {
+ ERROR("error read nr vcpu !\n");
+ goto out;
+ }
+ DPRINTF("hvm restore:get nr_vcpus=%d.\n", nr_vcpus);
+
+ for (i =0; i < nr_vcpus; i++) {
+ if (!read_exact(io_fd, &rec_len, sizeof(uint32_t))) {
+ ERROR("error read vcpu context size!\n");
+ goto out;
+ }
+ if (rec_len != sizeof(ctxt)) {
+ ERROR("vcpu context size dismatch!\n");
+ goto out;
+ }
+
+ if (!read_exact(io_fd, &(ctxt), sizeof(ctxt))) {
+ ERROR("error read vcpu context.\n");
+ goto out;
+ }
+
+ if ( (rc = xc_vcpu_setcontext(xc_handle, dom, i, &ctxt)) ) {
+ ERROR("Could not set vcpu context, rc=%d", rc);
+ goto out;
+ }
+ }
+
+ rc = 0;
+ goto out;
+
+ out:
+ if ( (rc != 0) && (dom != 0) )
+ xc_domain_destroy(xc_handle, dom);
+ free(p2m);
+
+ DPRINTF("Restore exit with rc=%d\n", rc);
+
+ return rc;
+}
diff --git a/tools/libxc/xc_hvm_save.c b/tools/libxc/xc_hvm_save.c
new file mode 100644
index 0000000000..aa6ad5a930
--- /dev/null
+++ b/tools/libxc/xc_hvm_save.c
@@ -0,0 +1,727 @@
+/******************************************************************************
+ * xc_hvm_save.c
+ *
+ * Save the state of a running HVM guest.
+ *
+ * Copyright (c) 2003, K A Fraser.
+ * Copyright (c) 2006 Intel Corporation
+ * rewritten for hvm guest by Zhai Edwin <edwin.zhai@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+#include <inttypes.h>
+#include <time.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include "xc_private.h"
+#include "xg_private.h"
+#include "xg_save_restore.h"
+
+/*
+** Default values for important tuning parameters. Can override by passing
+** non-zero replacement values to xc_hvm_save().
+**
+** XXX SMH: should consider if want to be able to override MAX_MBIT_RATE too.
+**
+*/
+#define DEF_MAX_ITERS 29 /* limit us to 30 times round loop */
+#define DEF_MAX_FACTOR 3 /* never send more than 3x nr_pfns */
+
+/* max mfn of the whole machine */
+static unsigned long max_mfn;
+
+/* virtual starting address of the hypervisor */
+static unsigned long hvirt_start;
+
+/* #levels of page tables used by the current guest */
+static unsigned int pt_levels;
+
+/* total number of pages used by the current guest */
+static unsigned long max_pfn;
+
+/*
+** During (live) save/migrate, we maintain a number of bitmaps to track
+** which pages we have to send, to fixup, and to skip.
+*/
+
+#define BITS_PER_LONG (sizeof(unsigned long) * 8)
+#define BITMAP_SIZE ((max_pfn + BITS_PER_LONG - 1) / 8)
+
+#define BITMAP_ENTRY(_nr,_bmap) \
+ ((unsigned long *)(_bmap))[(_nr)/BITS_PER_LONG]
+
+#define BITMAP_SHIFT(_nr) ((_nr) % BITS_PER_LONG)
+
+static inline int test_bit (int nr, volatile void * addr)
+{
+ return (BITMAP_ENTRY(nr, addr) >> BITMAP_SHIFT(nr)) & 1;
+}
+
+static inline void clear_bit (int nr, volatile void * addr)
+{
+ BITMAP_ENTRY(nr, addr) &= ~(1UL << BITMAP_SHIFT(nr));
+}
+
+static inline int permute( int i, int nr, int order_nr )
+{
+ /* Need a simple permutation function so that we scan pages in a
+ pseudo random order, enabling us to get a better estimate of
+ the domain's page dirtying rate as we go (there are often
+ contiguous ranges of pfns that have similar behaviour, and we
+ want to mix them up. */
+
+ /* e.g. nr->order 15->4 16->4 17->5 */
+ /* 512MB domain, 128k pages, order 17 */
+
+ /*
+ QPONMLKJIHGFEDCBA
+ QPONMLKJIH
+ GFEDCBA
+ */
+
+ /*
+ QPONMLKJIHGFEDCBA
+ EDCBA
+ QPONM
+ LKJIHGF
+ */
+
+ do { i = ((i>>(order_nr-10)) | ( i<<10 ) ) & ((1<<order_nr)-1); }
+ while ( i >= nr ); /* this won't ever loop if nr is a power of 2 */
+
+ return i;
+}
+
+static uint64_t tv_to_us(struct timeval *new)
+{
+ return (new->tv_sec * 1000000) + new->tv_usec;
+}
+
+static uint64_t llgettimeofday(void)
+{
+ struct timeval now;
+ gettimeofday(&now, NULL);
+ return tv_to_us(&now);
+}
+
+static uint64_t tv_delta(struct timeval *new, struct timeval *old)
+{
+ return ((new->tv_sec - old->tv_sec)*1000000 ) +
+ (new->tv_usec - old->tv_usec);
+}
+
+
+#define RATE_IS_MAX() (0)
+#define ratewrite(_io_fd, _buf, _n) write((_io_fd), (_buf), (_n))
+#define initialize_mbit_rate()
+
+static inline ssize_t write_exact(int fd, void *buf, size_t count)
+{
+ if(write(fd, buf, count) != count)
+ return 0;
+ return 1;
+}
+
+static int print_stats(int xc_handle, uint32_t domid, int pages_sent,
+ xc_shadow_op_stats_t *stats, int print)
+{
+ static struct timeval wall_last;
+ static long long d0_cpu_last;
+ static long long d1_cpu_last;
+
+ struct timeval wall_now;
+ long long wall_delta;
+ long long d0_cpu_now, d0_cpu_delta;
+ long long d1_cpu_now, d1_cpu_delta;
+
+ gettimeofday(&wall_now, NULL);
+
+ d0_cpu_now = xc_domain_get_cpu_usage(xc_handle, 0, /* FIXME */ 0)/1000;
+ d1_cpu_now = xc_domain_get_cpu_usage(xc_handle, domid, /* FIXME */ 0)/1000;
+
+ if ( (d0_cpu_now == -1) || (d1_cpu_now == -1) )
+ DPRINTF("ARRHHH!!\n");
+
+ wall_delta = tv_delta(&wall_now,&wall_last)/1000;
+
+ if (wall_delta == 0) wall_delta = 1;
+
+ d0_cpu_delta = (d0_cpu_now - d0_cpu_last)/1000;
+ d1_cpu_delta = (d1_cpu_now - d1_cpu_last)/1000;
+
+ if (print)
+ DPRINTF(
+ "delta %lldms, dom0 %d%%, target %d%%, sent %dMb/s, "
+ "dirtied %dMb/s %" PRId32 " pages\n",
+ wall_delta,
+ (int)((d0_cpu_delta*100)/wall_delta),
+ (int)((d1_cpu_delta*100)/wall_delta),
+ (int)((pages_sent*PAGE_SIZE)/(wall_delta*(1000/8))),
+ (int)((stats->dirty_count*PAGE_SIZE)/(wall_delta*(1000/8))),
+ stats->dirty_count);
+
+ d0_cpu_last = d0_cpu_now;
+ d1_cpu_last = d1_cpu_now;
+ wall_last = wall_now;
+
+ return 0;
+}
+
+static int analysis_phase(int xc_handle, uint32_t domid, int max_pfn,
+ unsigned long *arr, int runs)
+{
+ long long start, now;
+ xc_shadow_op_stats_t stats;
+ int j;
+
+ start = llgettimeofday();
+
+ for (j = 0; j < runs; j++) {
+ int i;
+
+ xc_shadow_control(xc_handle, domid, XEN_DOMCTL_SHADOW_OP_CLEAN,
+ arr, max_pfn, NULL, 0, NULL);
+ DPRINTF("#Flush\n");
+ for ( i = 0; i < 40; i++ ) {
+ usleep(50000);
+ now = llgettimeofday();
+ xc_shadow_control(xc_handle, domid, XEN_DOMCTL_SHADOW_OP_PEEK,
+ NULL, 0, NULL, 0, &stats);
+
+ DPRINTF("now= %lld faults= %"PRId32" dirty= %"PRId32"\n",
+ ((now-start)+500)/1000,
+ stats.fault_count, stats.dirty_count);
+ }
+ }
+
+ return -1;
+}
+
+static int suspend_and_state(int (*suspend)(int), int xc_handle, int io_fd,
+ int dom, xc_dominfo_t *info,
+ vcpu_guest_context_t *ctxt)
+{
+ int i = 0;
+
+ if (!(*suspend)(dom)) {
+ ERROR("Suspend request failed");
+ return -1;
+ }
+
+ retry:
+
+ if (xc_domain_getinfo(xc_handle, dom, 1, info) != 1) {
+ ERROR("Could not get domain info");
+ return -1;
+ }
+
+ if ( xc_vcpu_getcontext(xc_handle, dom, 0 /* XXX */, ctxt))
+ ERROR("Could not get vcpu context");
+
+
+ if (info->shutdown && info->shutdown_reason == SHUTDOWN_suspend)
+ return 0; // success
+
+ if (info->paused) {
+ // try unpausing domain, wait, and retest
+ xc_domain_unpause( xc_handle, dom );
+
+ ERROR("Domain was paused. Wait and re-test.");
+ usleep(10000); // 10ms
+
+ goto retry;
+ }
+
+
+ if( ++i < 100 ) {
+ ERROR("Retry suspend domain.");
+ usleep(10000); // 10ms
+ goto retry;
+ }
+
+ ERROR("Unable to suspend domain.");
+
+ return -1;
+}
+
+int xc_hvm_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
+ uint32_t max_factor, uint32_t flags, int (*suspend)(int))
+{
+ xc_dominfo_t info;
+
+ int rc = 1, i, last_iter, iter = 0;
+ int live = (flags & XCFLAGS_LIVE);
+ int debug = (flags & XCFLAGS_DEBUG);
+ int sent_last_iter, skip_this_iter;
+
+ /* The new domain's shared-info frame number. */
+ unsigned long shared_info_frame;
+
+ /* A copy of the CPU context of the guest. */
+ vcpu_guest_context_t ctxt;
+
+ /* A table containing the type of each PFN (/not/ MFN!). */
+ unsigned long *pfn_type = NULL;
+ unsigned long *pfn_batch = NULL;
+
+ /* A copy of hvm domain context */
+ hvm_domain_context_t hvm_ctxt;
+
+ /* Live mapping of shared info structure */
+ shared_info_t *live_shinfo = NULL;
+
+ /* base of the region in which domain memory is mapped */
+ unsigned char *region_base = NULL;
+
+ uint32_t nr_pfns, rec_size, nr_vcpus;
+ unsigned long *page_array = NULL;
+
+ /* power of 2 order of max_pfn */
+ int order_nr;
+
+ /* bitmap of pages:
+ - that should be sent this iteration (unless later marked as skip);
+ - to skip this iteration because already dirty; */
+ unsigned long *to_send = NULL, *to_skip = NULL;
+
+ xc_shadow_op_stats_t stats;
+
+ unsigned long total_sent = 0;
+
+ DPRINTF("xc_hvm_save:dom=%d, max_iters=%d, max_factor=%d, flags=0x%x, live=%d, debug=%d.\n",
+ dom, max_iters, max_factor, flags,
+ live, debug);
+
+ /* If no explicit control parameters given, use defaults */
+ if(!max_iters)
+ max_iters = DEF_MAX_ITERS;
+ if(!max_factor)
+ max_factor = DEF_MAX_FACTOR;
+
+ initialize_mbit_rate();
+
+ if(!get_platform_info(xc_handle, dom,
+ &max_mfn, &hvirt_start, &pt_levels)) {
+ ERROR("HVM:Unable to get platform info.");
+ return 1;
+ }
+
+ if (xc_domain_getinfo(xc_handle, dom, 1, &info) != 1) {
+ ERROR("HVM:Could not get domain info");
+ return 1;
+ }
+ nr_vcpus = info.nr_online_vcpus;
+
+ if (mlock(&ctxt, sizeof(ctxt))) {
+ ERROR("HVM:Unable to mlock ctxt");
+ return 1;
+ }
+
+ /* Only have to worry about vcpu 0 even for SMP */
+ if (xc_vcpu_getcontext(xc_handle, dom, 0, &ctxt)) {
+ ERROR("HVM:Could not get vcpu context");
+ goto out;
+ }
+ shared_info_frame = info.shared_info_frame;
+
+ /* A cheesy test to see whether the domain contains valid state. */
+ if (ctxt.ctrlreg[3] == 0)
+ {
+ ERROR("Domain is not in a valid HVM guest state");
+ goto out;
+ }
+
+ /* cheesy sanity check */
+ if ((info.max_memkb >> (PAGE_SHIFT - 10)) > max_mfn) {
+ ERROR("Invalid HVM state record -- pfn count out of range: %lu",
+ (info.max_memkb >> (PAGE_SHIFT - 10)));
+ goto out;
+ }
+
+ /* Map the shared info frame */
+ if(!(live_shinfo = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
+ PROT_READ, shared_info_frame))) {
+ ERROR("HVM:Couldn't map live_shinfo");
+ goto out;
+ }
+
+ max_pfn = live_shinfo->arch.max_pfn;
+
+ DPRINTF("saved hvm domain info:max_memkb=0x%lx, max_mfn=0x%lx, nr_pages=0x%lx\n", info.max_memkb, max_mfn, info.nr_pages);
+
+ /* nr_pfns: total pages excluding vga acc mem
+ * max_pfn: nr_pfns + 0x20 vga hole(0xa0~0xc0)
+ * getdomaininfo.tot_pages: all the allocated pages for this domain
+ */
+ if (live) {
+ ERROR("hvm domain doesn't support live migration now.\n");
+ goto out;
+
+ if (xc_shadow_control(xc_handle, dom,
+ XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY,
+ NULL, 0, NULL, 0, NULL) < 0) {
+ ERROR("Couldn't enable shadow mode");
+ goto out;
+ }
+
+ /* excludes vga acc mem */
+ nr_pfns = info.nr_pages - 0x800;
+
+ last_iter = 0;
+ DPRINTF("hvm domain live migration debug start: logdirty enable.\n");
+ } else {
+ /* This is a non-live suspend. Issue the call back to get the
+ domain suspended */
+
+ last_iter = 1;
+
+ /* suspend hvm domain */
+ if (suspend_and_state(suspend, xc_handle, io_fd, dom, &info, &ctxt)) {
+ ERROR("HVM Domain appears not to have suspended");
+ goto out;
+ }
+ nr_pfns = info.nr_pages;
+ DPRINTF("after suspend hvm domain nr_pages=0x%x.\n", nr_pfns);
+ }
+
+ DPRINTF("after 1st handle hvm domain nr_pfns=0x%x, nr_pages=0x%lx, max_memkb=0x%lx, live=%d.\n",
+ nr_pfns,
+ info.nr_pages,
+ info.max_memkb,
+ live);
+
+ nr_pfns = info.nr_pages;
+
+ /*XXX: calculate the VGA hole*/
+ max_pfn = nr_pfns + 0x20;
+
+ skip_this_iter = 0;/*XXX*/
+ /* pretend we sent all the pages last iteration */
+ sent_last_iter = max_pfn;
+
+ /* calculate the power of 2 order of max_pfn, e.g.
+ 15->4 16->4 17->5 */
+ for (i = max_pfn-1, order_nr = 0; i ; i >>= 1, order_nr++)
+ continue;
+
+ /* Setup to_send / to_fix and to_skip bitmaps */
+ to_send = malloc(BITMAP_SIZE);
+ to_skip = malloc(BITMAP_SIZE);
+
+ if (!to_send ||!to_skip) {
+ ERROR("Couldn't allocate to_send array");
+ goto out;
+ }
+
+ memset(to_send, 0xff, BITMAP_SIZE);
+
+ if (lock_pages(to_send, BITMAP_SIZE)) {
+ ERROR("Unable to lock to_send");
+ return 1;
+ }
+
+ /* (to fix is local only) */
+ if (lock_pages(to_skip, BITMAP_SIZE)) {
+ ERROR("Unable to lock to_skip");
+ return 1;
+ }
+
+ analysis_phase(xc_handle, dom, max_pfn, to_skip, 0);
+
+ /* get all the HVM domain pfns */
+ if ( (page_array = (unsigned long *) malloc (sizeof(unsigned long) * max_pfn)) == NULL) {
+ ERROR("HVM:malloc fail!\n");
+ goto out;
+ }
+
+ for ( i = 0; i < max_pfn; i++)
+ page_array[i] = i;
+
+
+ /* We want zeroed memory so use calloc rather than malloc. */
+ pfn_type = calloc(MAX_BATCH_SIZE, sizeof(*pfn_type));
+ pfn_batch = calloc(MAX_BATCH_SIZE, sizeof(*pfn_batch));
+
+ if ((pfn_type == NULL) || (pfn_batch == NULL)) {
+ ERROR("failed to alloc memory for pfn_type and/or pfn_batch arrays");
+ errno = ENOMEM;
+ goto out;
+ }
+
+ if (lock_pages(pfn_type, MAX_BATCH_SIZE * sizeof(*pfn_type))) {
+ ERROR("Unable to lock");
+ goto out;
+ }
+
+ /* Start writing out the saved-domain record. */
+ if (!write_exact(io_fd, &max_pfn, sizeof(unsigned long))) {
+ ERROR("write: max_pfn");
+ goto out;
+ }
+
+ while(1) {
+
+ unsigned int prev_pc, sent_this_iter, N, batch;
+
+ iter++;
+ sent_this_iter = 0;
+ skip_this_iter = 0;
+ prev_pc = 0;
+ N=0;
+
+ DPRINTF("Saving HVM domain memory pages: iter %d 0%%", iter);
+
+ while( N < max_pfn ){
+
+ unsigned int this_pc = (N * 100) / max_pfn;
+
+ if ((this_pc - prev_pc) >= 5) {
+ DPRINTF("\b\b\b\b%3d%%", this_pc);
+ prev_pc = this_pc;
+ }
+
+ /* slightly wasteful to peek the whole array every time,
+ but this is fast enough for the moment. */
+ if (!last_iter && xc_shadow_control(
+ xc_handle, dom, XEN_DOMCTL_SHADOW_OP_PEEK,
+ to_skip, max_pfn, NULL, 0, NULL) != max_pfn) {
+ ERROR("Error peeking HVM shadow bitmap");
+ goto out;
+ }
+
+
+ /* load pfn_type[] with the mfn of all the pages we're doing in
+ this batch. */
+ for (batch = 0; batch < MAX_BATCH_SIZE && N < max_pfn ; N++) {
+
+ int n = permute(N, max_pfn, order_nr);
+
+ if (debug) {
+ DPRINTF("%d pfn= %08lx mfn= %08lx %d \n",
+ iter, (unsigned long)n, page_array[n],
+ test_bit(n, to_send));
+ }
+
+ if (!last_iter && test_bit(n, to_send)&& test_bit(n, to_skip))
+ skip_this_iter++; /* stats keeping */
+
+ if (!((test_bit(n, to_send) && !test_bit(n, to_skip)) ||
+ (test_bit(n, to_send) && last_iter)))
+ continue;
+
+ if (n >= 0xa0 && n < 0xc0) {
+/* DPRINTF("get a vga hole pfn= %x.\n", n);*/
+ continue;
+ }
+ /*
+ ** we get here if:
+ ** 1. page is marked to_send & hasn't already been re-dirtied
+ ** 2. (ignore to_skip in last iteration)
+ */
+
+ pfn_batch[batch] = n;
+ pfn_type[batch] = page_array[n];
+
+ batch++;
+ }
+
+ if (batch == 0)
+ goto skip; /* vanishingly unlikely... */
+
+ /* map_foreign use pfns now !*/
+ if ((region_base = xc_map_foreign_batch(
+ xc_handle, dom, PROT_READ, pfn_batch, batch)) == 0) {
+ ERROR("map batch failed");
+ goto out;
+ }
+
+ /* write num of pfns */
+ if(!write_exact(io_fd, &batch, sizeof(unsigned int))) {
+ ERROR("Error when writing to state file (2)");
+ goto out;
+ }
+
+ /* write all the pfns */
+ if(!write_exact(io_fd, pfn_batch, sizeof(unsigned long)*batch)) {
+ ERROR("Error when writing to state file (3)");
+ goto out;
+ }
+
+ if (ratewrite(io_fd, region_base, PAGE_SIZE * batch) != PAGE_SIZE * batch) {
+ ERROR("ERROR when writting to state file (4)");
+ goto out;
+ }
+
+
+ sent_this_iter += batch;
+
+ munmap(region_base, batch*PAGE_SIZE);
+
+ } /* end of this while loop for this iteration */
+
+ skip:
+
+ total_sent += sent_this_iter;
+
+ DPRINTF("\r %d: sent %d, skipped %d, ",
+ iter, sent_this_iter, skip_this_iter );
+
+ if (last_iter) {
+ print_stats( xc_handle, dom, sent_this_iter, &stats, 1);
+
+ DPRINTF("Total pages sent= %ld (%.2fx)\n",
+ total_sent, ((float)total_sent)/max_pfn );
+ }
+
+ if (last_iter && debug){
+ int minusone = -1;
+ memset(to_send, 0xff, BITMAP_SIZE);
+ debug = 0;
+ DPRINTF("Entering debug resend-all mode\n");
+
+ /* send "-1" to put receiver into debug mode */
+ if(!write_exact(io_fd, &minusone, sizeof(int))) {
+ ERROR("Error when writing to state file (6)");
+ goto out;
+ }
+
+ continue;
+ }
+
+ if (last_iter) break;
+
+ if (live) {
+
+
+ if(
+ ((sent_this_iter > sent_last_iter) && RATE_IS_MAX()) ||
+ (iter >= max_iters) ||
+ (sent_this_iter+skip_this_iter < 50) ||
+ (total_sent > max_pfn*max_factor) ) {
+
+ DPRINTF("Start last iteration for HVM domain\n");
+ last_iter = 1;
+
+ if (suspend_and_state(suspend, xc_handle, io_fd, dom, &info,
+ &ctxt)) {
+ ERROR("Domain appears not to have suspended");
+ goto out;
+ }
+
+ DPRINTF("SUSPEND shinfo %08lx eip %08lx edx %08lx\n",
+ info.shared_info_frame,
+ (unsigned long)ctxt.user_regs.eip,
+ (unsigned long)ctxt.user_regs.edx);
+ }
+
+ if (xc_shadow_control(xc_handle, dom,
+ XEN_DOMCTL_SHADOW_OP_CLEAN, to_send,
+ max_pfn, NULL, 0, &stats) != max_pfn) {
+ ERROR("Error flushing shadow PT");
+ goto out;
+ }
+
+ sent_last_iter = sent_this_iter;
+
+ print_stats(xc_handle, dom, sent_this_iter, &stats, 1);
+
+ }
+
+
+ } /* end of while 1 */
+
+
+ DPRINTF("All HVM memory is saved\n");
+
+ /* Zero terminate */
+ i = 0;
+ if (!write_exact(io_fd, &i, sizeof(int))) {
+ ERROR("Error when writing to state file (6)");
+ goto out;
+ }
+
+ /* save hvm hypervisor state including pic/pit/shpage */
+ if (mlock(&hvm_ctxt, sizeof(hvm_ctxt))) {
+ ERROR("Unable to mlock ctxt");
+ return 1;
+ }
+
+ if (xc_domain_hvm_getcontext(xc_handle, dom, &hvm_ctxt)){
+ ERROR("HVM:Could not get hvm context");
+ goto out;
+ }
+
+ rec_size = sizeof(hvm_ctxt);
+ if (!write_exact(io_fd, &rec_size, sizeof(uint32_t))) {
+ ERROR("error write hvm ctxt size");
+ goto out;
+ }
+
+ if ( !write_exact(io_fd, &hvm_ctxt, sizeof(hvm_ctxt)) ) {
+ ERROR("write HVM info failed!\n");
+ }
+
+ /* save vcpu/vmcs context */
+ if (!write_exact(io_fd, &nr_vcpus, sizeof(uint32_t))) {
+ ERROR("error write nr vcpus");
+ goto out;
+ }
+
+ /*XXX: need a online map to exclude down cpu */
+ for (i = 0; i < nr_vcpus; i++) {
+
+ if (xc_vcpu_getcontext(xc_handle, dom, i, &ctxt)) {
+ ERROR("HVM:Could not get vcpu context");
+ goto out;
+ }
+
+ rec_size = sizeof(ctxt);
+ DPRINTF("write %d vcpucontext of total %d.\n", i, nr_vcpus);
+ if (!write_exact(io_fd, &rec_size, sizeof(uint32_t))) {
+ ERROR("error write vcpu ctxt size");
+ goto out;
+ }
+
+ if (!write_exact(io_fd, &(ctxt), sizeof(ctxt)) ) {
+ ERROR("write vmcs failed!\n");
+ goto out;
+ }
+ }
+
+ /* Success! */
+ rc = 0;
+
+ out:
+
+ if (live) {
+ if(xc_shadow_control(xc_handle, dom,
+ XEN_DOMCTL_SHADOW_OP_OFF,
+ NULL, 0, NULL, 0, NULL) < 0) {
+ DPRINTF("Warning - couldn't disable shadow mode");
+ }
+ }
+
+ free(page_array);
+
+ free(pfn_type);
+ free(pfn_batch);
+ free(to_send);
+ free(to_skip);
+
+ return !!rc;
+}
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index 71d000ec08..a5df681e3e 100644
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -313,6 +313,30 @@ int xc_domain_getinfolist(int xc_handle,
xc_domaininfo_t *info);
/**
+ * This function returns information about the context of a hvm domain
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm domid the domain to get information from
+ * @parm hvm_ctxt a pointer to a structure to store the execution context of the
+ * hvm domain
+ * @return 0 on success, -1 on failure
+ */
+int xc_domain_hvm_getcontext(int xc_handle,
+ uint32_t domid,
+ hvm_domain_context_t *hvm_ctxt);
+
+/**
+ * This function will set the context for hvm domain
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm domid the domain to set the hvm domain context for
+ * @parm hvm_ctxt pointer to the the hvm context with the values to set
+ * @return 0 on success, -1 on failure
+ */
+int xc_domain_hvm_setcontext(int xc_handle,
+ uint32_t domid,
+ hvm_domain_context_t *hvm_ctxt);
+
+/**
* This function returns information about the execution context of a
* particular vcpu of a domain.
*
diff --git a/tools/libxc/xenguest.h b/tools/libxc/xenguest.h
index 6d4576026d..7b5a2af0a4 100644
--- a/tools/libxc/xenguest.h
+++ b/tools/libxc/xenguest.h
@@ -11,6 +11,7 @@
#define XCFLAGS_LIVE 1
#define XCFLAGS_DEBUG 2
+#define XCFLAGS_HVM 4
/**
@@ -25,6 +26,13 @@ int xc_linux_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
uint32_t max_factor, uint32_t flags /* XCFLAGS_xxx */,
int (*suspend)(int domid));
+/**
+ * This function will save a hvm domain running unmodified guest.
+ * @return 0 on success, -1 on failure
+ */
+int xc_hvm_save(int xc_handle, int io_fd, uint32_t dom, uint32_t max_iters,
+ uint32_t max_factor, uint32_t flags /* XCFLAGS_xxx */,
+ int (*suspend)(int domid));
/**
* This function will restore a saved domain running Linux.
@@ -43,6 +51,18 @@ int xc_linux_restore(int xc_handle, int io_fd, uint32_t dom,
unsigned long *console_mfn);
/**
+ * This function will restore a saved hvm domain running unmodified guest.
+ *
+ * @parm store_mfn pass mem size & returned with the mfn of the store page
+ * @return 0 on success, -1 on failure
+ */
+int xc_hvm_restore(int xc_handle, int io_fd, uint32_t dom,
+ unsigned long nr_pfns, unsigned int store_evtchn,
+ unsigned long *store_mfn, unsigned int console_evtchn,
+ unsigned long *console_mfn,
+ unsigned int pae, unsigned int apic);
+
+/**
* This function will create a domain for a paravirtualized Linux
* using file names pointing to kernel and ramdisk
*
diff --git a/tools/pygrub/src/pygrub b/tools/pygrub/src/pygrub
index a0de67df67..e28c19804a 100644
--- a/tools/pygrub/src/pygrub
+++ b/tools/pygrub/src/pygrub
@@ -503,7 +503,7 @@ def run_grub(file, entry, fs):
# If nothing has been specified, look for a Solaris domU. If found, perform the
# necessary tweaks.
def sniff_solaris(fs, cfg):
- if not fs.file_exists("/platform/i86xen/kernel/unix"):
+ if not fs.file_exists("/platform/i86xpv/kernel/unix"):
return cfg
# darned python
@@ -516,10 +516,10 @@ def sniff_solaris(fs, cfg):
longmode = True
if not cfg["kernel"]:
- cfg["kernel"] = "/platform/i86xen/kernel/unix"
+ cfg["kernel"] = "/platform/i86xpv/kernel/unix"
cfg["ramdisk"] = "/platform/i86pc/boot_archive"
if longmode:
- cfg["kernel"] = "/platform/i86xen/kernel/amd64/unix"
+ cfg["kernel"] = "/platform/i86xpv/kernel/amd64/unix"
cfg["ramdisk"] = "/platform/i86pc/amd64/boot_archive"
# Unpleasant. Typically we'll have 'root=foo -k' or 'root=foo /kernel -k',
diff --git a/tools/python/setup.py b/tools/python/setup.py
index 56dd3e4a0b..7b25025d9d 100644
--- a/tools/python/setup.py
+++ b/tools/python/setup.py
@@ -30,6 +30,13 @@ xs = Extension("xs",
libraries = libraries,
sources = [ "xen/lowlevel/xs/xs.c" ])
+scf = Extension("scf",
+ extra_compile_args = extra_compile_args,
+ include_dirs = include_dirs + [ "xen/lowlevel/scf" ],
+ library_dirs = library_dirs,
+ libraries = libraries,
+ sources = [ "xen/lowlevel/scf/scf.c" ])
+
acm = Extension("acm",
extra_compile_args = extra_compile_args,
include_dirs = include_dirs + [ "xen/lowlevel/acm" ],
@@ -37,6 +44,10 @@ acm = Extension("acm",
libraries = libraries,
sources = [ "xen/lowlevel/acm/acm.c" ])
+modules = [ xc, xs, acm ]
+if os.uname()[0] == 'SunOS':
+ modules.append(scf)
+
setup(name = 'xen',
version = '3.0',
description = 'Xen',
@@ -56,7 +67,7 @@ setup(name = 'xen',
'xen.xm.tests'
],
ext_package = "xen.lowlevel",
- ext_modules = [ xc, xs, acm ]
+ ext_modules = modules
)
os.chdir('logging')
diff --git a/tools/python/xen/lowlevel/scf/scf.c b/tools/python/xen/lowlevel/scf/scf.c
new file mode 100644
index 0000000000..a73ac20419
--- /dev/null
+++ b/tools/python/xen/lowlevel/scf/scf.c
@@ -0,0 +1,156 @@
+/*
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <Python.h>
+
+#include <libscf.h>
+#include <stdio.h>
+
+#define XEND_FMRI "svc:/system/xen/xend:default"
+#define XEND_PG "config"
+
+static PyObject *scf_exc;
+
+static void *
+scf_exception(const char *err, const char *value)
+{
+ int scferr = scf_error();
+ const char *scfstrerr = scf_strerror(scferr);
+ PyObject *obj = Py_BuildValue("(isss)", scferr, err, scfstrerr, value);
+ PyErr_SetObject(scf_exc, obj);
+ return (NULL);
+}
+
+static PyObject *
+pyscf_get_bool(PyObject *o, PyObject *args, PyObject *kwargs)
+{
+ static char *kwlist[] = { "name", NULL };
+ scf_simple_prop_t *prop;
+ uint8_t *val;
+ char *name;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s", kwlist, &name))
+ return (NULL);
+
+ prop = scf_simple_prop_get(NULL, XEND_FMRI, XEND_PG, name);
+
+ if (prop == NULL)
+ return (scf_exception("scf_simple_prop_get() failed", name));
+
+ if ((val = scf_simple_prop_next_boolean(prop)) == NULL)
+ return (scf_exception("scf_simple_prop_next_boolean() failed",
+ name));
+
+ if (*val) {
+ scf_simple_prop_free(prop);
+ Py_INCREF(Py_True);
+ return (Py_True);
+ }
+
+ scf_simple_prop_free(prop);
+ Py_INCREF(Py_False);
+ return (Py_False);
+}
+
+static PyObject *
+pyscf_get_int(PyObject *o, PyObject *args, PyObject *kwargs)
+{
+ static char *kwlist[] = { "name", NULL };
+ scf_simple_prop_t *prop;
+ PyObject *obj;
+ int64_t *val;
+ char *name;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s", kwlist, &name))
+ return (NULL);
+
+ prop = scf_simple_prop_get(NULL, XEND_FMRI, XEND_PG, name);
+
+ if (prop == NULL)
+ return (scf_exception("scf_simple_prop_get() failed", name));
+
+ if ((val = scf_simple_prop_next_integer(prop)) == NULL)
+ return (scf_exception("scf_simple_prop_next_integer() failed",
+ name));
+
+ obj = PyInt_FromLong((long)*val);
+ scf_simple_prop_free(prop);
+ return (obj);
+}
+
+static PyObject *
+pyscf_get_string(PyObject *o, PyObject *args, PyObject *kwargs)
+{
+ static char *kwlist[] = { "name", NULL };
+ scf_simple_prop_t *prop;
+ PyObject *obj;
+ char *name;
+ char *str;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s", kwlist, &name))
+ return (NULL);
+
+ prop = scf_simple_prop_get(NULL, XEND_FMRI, XEND_PG, name);
+
+ if (prop == NULL)
+ return (scf_exception("scf_simple_prop_get() failed", name));
+
+ if ((str = scf_simple_prop_next_astring(prop)) == NULL) {
+ scf_simple_prop_free(prop);
+ return (scf_exception("scf_simple_prop_next_astring() failed",
+ name));
+ }
+
+ obj = PyString_FromString(str);
+ scf_simple_prop_free(prop);
+ return (obj);
+}
+
+PyDoc_STRVAR(pyscf_get_bool__doc__,
+ "get_bool(name) - get the value of the named boolean property");
+PyDoc_STRVAR(pyscf_get_int__doc__,
+ "get_int(name) - get the value of the named integer property");
+PyDoc_STRVAR(pyscf_get_string__doc__,
+ "get_string(name) - get the value of the named string property");
+
+static struct PyMethodDef pyscf_module_methods[] = {
+ { "get_bool", (PyCFunction) pyscf_get_bool,
+ METH_VARARGS|METH_KEYWORDS, pyscf_get_bool__doc__ },
+ { "get_int", (PyCFunction) pyscf_get_int,
+ METH_VARARGS|METH_KEYWORDS, pyscf_get_int__doc__ },
+ { "get_string", (PyCFunction) pyscf_get_string,
+ METH_VARARGS|METH_KEYWORDS, pyscf_get_string__doc__ },
+ { NULL, NULL, 0, NULL }
+};
+
+PyMODINIT_FUNC
+initscf(void)
+{
+ PyObject *m;
+ m = Py_InitModule("scf", pyscf_module_methods);
+
+ scf_exc = PyErr_NewException("scf.error", NULL, NULL);
+ Py_INCREF(scf_exc);
+ PyModule_AddObject(m, "error", scf_exc);
+ PyModule_AddIntConstant(m, "SCF_ERROR_NOT_FOUND", SCF_ERROR_NOT_FOUND);
+}
diff --git a/tools/python/xen/lowlevel/xc/xc.c b/tools/python/xen/lowlevel/xc/xc.c
index eaa8fe0846..fb6ee675d9 100644
--- a/tools/python/xen/lowlevel/xc/xc.c
+++ b/tools/python/xen/lowlevel/xc/xc.c
@@ -160,6 +160,20 @@ static PyObject *pyxc_domain_destroy(XcObject *self, PyObject *args)
return dom_op(self, args, xc_domain_destroy);
}
+static PyObject *pyxc_domain_shutdown(XcObject *self, PyObject *args)
+{
+ uint32_t dom, reason;
+
+ if (!PyArg_ParseTuple(args, "ii", &dom, &reason))
+ return NULL;
+
+ if (xc_domain_shutdown(self->xc_handle, dom, reason) != 0)
+ return pyxc_error_to_exception();
+
+ Py_INCREF(zero);
+ return zero;
+}
+
static PyObject *pyxc_vcpu_setaffinity(XcObject *self,
PyObject *args,
@@ -1029,6 +1043,14 @@ static PyMethodDef pyxc_methods[] = {
" dom [int]: Identifier of domain to be destroyed.\n\n"
"Returns: [int] 0 on success; -1 on error.\n" },
+ { "domain_shutdown",
+ (PyCFunction)pyxc_domain_shutdown,
+ METH_VARARGS, "\n"
+ "Shutdown a domain.\n"
+ " dom [int, 0]: Domain identifier to use.\n"
+ " reason [int, 0]: Reason for shutdown.\n"
+ "Returns: [int] 0 on success; -1 on error.\n" },
+
{ "vcpu_setaffinity",
(PyCFunction)pyxc_vcpu_setaffinity,
METH_VARARGS | METH_KEYWORDS, "\n"
diff --git a/tools/python/xen/util/xmlrpclib2.py b/tools/python/xen/util/xmlrpclib2.py
index 3ba1b259b1..ac95d4cf39 100644
--- a/tools/python/xen/util/xmlrpclib2.py
+++ b/tools/python/xen/util/xmlrpclib2.py
@@ -256,6 +256,7 @@ class UnixXMLRPCRequestHandler(XMLRPCRequestHandler):
class UnixXMLRPCServer(TCPXMLRPCServer):
address_family = socket.AF_UNIX
+ allow_address_reuse = True
def __init__(self, addr, allowed, xenapi, logRequests = 1):
mkdir.parents(os.path.dirname(addr), stat.S_IRWXU, True)
diff --git a/tools/python/xen/web/httpserver.py b/tools/python/xen/web/httpserver.py
index 2326a2b875..14965e418f 100644
--- a/tools/python/xen/web/httpserver.py
+++ b/tools/python/xen/web/httpserver.py
@@ -294,8 +294,6 @@ class HttpServer:
backlog = 5
- closed = False
-
def __init__(self, root, interface, port=8080):
self.root = root
self.interface = interface
@@ -303,6 +301,7 @@ class HttpServer:
# ready indicates when we are ready to begin accept connections
# it should be set after a successful bind
self.ready = False
+ self.closed = False
def run(self):
self.bind()
@@ -317,7 +316,6 @@ class HttpServer:
def stop(self):
self.close()
-
def bind(self):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
flags = fcntl.fcntl(self.socket.fileno(), fcntl.F_GETFD)
@@ -334,7 +332,10 @@ class HttpServer:
def close(self):
self.closed = True
+ self.ready = False
try:
+ # shutdown socket explicitly to allow reuse
+ self.socket.shutdown(socket.SHUT_RDWR)
self.socket.close()
except:
pass
@@ -345,6 +346,9 @@ class HttpServer:
def getResource(self, req):
return self.root.getRequestResource(req)
+ def shutdown(self):
+ self.close()
+
class UnixHttpServer(HttpServer):
diff --git a/tools/python/xen/xend/Vifctl.py b/tools/python/xen/xend/Vifctl.py
index 9320930b27..ba2105098c 100644
--- a/tools/python/xen/xend/Vifctl.py
+++ b/tools/python/xen/xend/Vifctl.py
@@ -20,7 +20,7 @@
"""
import os
-import XendRoot
+import XendOptions
def network(op):
@@ -30,7 +30,7 @@ def network(op):
"""
if op not in ['start', 'stop']:
raise ValueError('Invalid operation: ' + op)
- script = XendRoot.instance().get_network_script()
+ script = XendOptions.instance().get_network_script()
if script:
script.insert(1, op)
os.spawnv(os.P_WAIT, script[0], script)
diff --git a/tools/python/xen/xend/XendCheckpoint.py b/tools/python/xen/xend/XendCheckpoint.py
index 106e20fb3d..ae0ffee917 100644
--- a/tools/python/xen/xend/XendCheckpoint.py
+++ b/tools/python/xen/xend/XendCheckpoint.py
@@ -22,11 +22,14 @@ from xen.xend.XendConfig import XendConfig
from xen.xend.XendConstants import *
SIGNATURE = "LinuxGuestRecord"
+QEMU_SIGNATURE = "QemuDeviceModelRecord"
+dm_batch = 512
XC_SAVE = "xc_save"
XC_RESTORE = "xc_restore"
sizeof_int = calcsize("i")
+sizeof_unsigned_int = calcsize("I")
sizeof_unsigned_long = calcsize("L")
@@ -69,6 +72,11 @@ def save(fd, dominfo, network, live, dst):
"could not write guest state file: config len")
write_exact(fd, config, "could not write guest state file: config")
+ image_cfg = dominfo.info.get('image', {})
+ hvm = image_cfg.has_key('hvm')
+
+ if hvm:
+ log.info("save hvm domain")
# xc_save takes three customization parameters: maxit, max_f, and
# flags the last controls whether or not save is 'live', while the
# first two further customize behaviour when 'live' save is
@@ -76,7 +84,7 @@ def save(fd, dominfo, network, live, dst):
# libxenguest; see the comments and/or code in xc_linux_save() for
# more information.
cmd = [xen.util.auxbin.pathTo(XC_SAVE), str(fd),
- str(dominfo.getDomid()), "0", "0", str(int(live)) ]
+ str(dominfo.getDomid()), "0", "0", str(int(live) | (int(hvm) << 2)) ]
log.debug("[xc_save]: %s", string.join(cmd))
def saveInputHandler(line, tochild):
@@ -90,12 +98,29 @@ def save(fd, dominfo, network, live, dst):
log.info("Domain %d suspended.", dominfo.getDomid())
dominfo.migrateDevices(network, dst, DEV_MIGRATE_STEP3,
domain_name)
+ #send signal to device model for save
+ if hvm:
+ log.info("release_devices for hvm domain")
+ dominfo._releaseDevices(True)
tochild.write("done\n")
tochild.flush()
log.debug('Written done')
forkHelper(cmd, fd, saveInputHandler, False)
+ # put qemu device model state
+ if hvm:
+ write_exact(fd, QEMU_SIGNATURE, "could not write qemu signature")
+ qemu_fd = os.open("/tmp/xen.qemu-dm.%d" % dominfo.getDomid(), os.O_RDONLY)
+ while True:
+ buf = os.read(qemu_fd, dm_batch)
+ if len(buf):
+ write_exact(fd, buf, "could not write device model state")
+ else:
+ break
+ os.close(qemu_fd)
+ os.remove("/tmp/xen.qemu-dm.%d" % dominfo.getDomid())
+
dominfo.destroyDomain()
try:
dominfo.setName(domain_name)
@@ -149,19 +174,43 @@ def restore(xd, fd, dominfo = None, paused = False):
nr_pfns = (dominfo.getMemoryTarget() + 3) / 4
+ # if hvm, pass mem size to calculate the store_mfn
+ image_cfg = dominfo.info.get('image', {})
+ is_hvm = image_cfg.has_key('hvm')
+ if is_hvm:
+ hvm = dominfo.info['memory_static_min']
+ apic = dominfo.info['image']['hvm'].get('apic', 0)
+ pae = dominfo.info['image']['hvm'].get('pae', 0)
+ log.info("restore hvm domain %d, mem=%d, apic=%d, pae=%d",
+ dominfo.domid, hvm, apic, pae)
+ else:
+ hvm = 0
+ apic = 0
+ pae = 0
+
try:
l = read_exact(fd, sizeof_unsigned_long,
"not a valid guest state file: pfn count read")
max_pfn = unpack("L", l)[0] # native sizeof long
+
if max_pfn > 16*1024*1024: # XXX
raise XendError(
"not a valid guest state file: pfn count out of range")
- balloon.free(xc.pages_to_kib(nr_pfns))
+ shadow = dominfo.info['shadow_memory']
+ log.debug("restore:shadow=0x%x, _static_max=0x%x, _static_min=0x%x, "
+ "nr_pfns=0x%x.", dominfo.info['shadow_memory'],
+ dominfo.info['memory_static_max'],
+ dominfo.info['memory_static_min'], nr_pfns)
+
+ balloon.free(xc.pages_to_kib(nr_pfns) + shadow * 1024)
+
+ shadow_cur = xc.shadow_mem_control(dominfo.getDomid(), shadow)
+ dominfo.info['shadow_memory'] = shadow_cur
cmd = map(str, [xen.util.auxbin.pathTo(XC_RESTORE),
fd, dominfo.getDomid(), max_pfn,
- store_port, console_port])
+ store_port, console_port, hvm, pae, apic])
log.debug("[xc_restore]: %s", string.join(cmd))
handler = RestoreInputHandler()
@@ -171,10 +220,30 @@ def restore(xd, fd, dominfo = None, paused = False):
if handler.store_mfn is None or handler.console_mfn is None:
raise XendError('Could not read store/console MFN')
- os.read(fd, 1) # Wait for source to close connection
dominfo.waitForDevices() # Wait for backends to set up
if not paused:
dominfo.unpause()
+
+ # get qemu state and create a tmp file for dm restore
+ if is_hvm:
+ qemu_signature = read_exact(fd, len(QEMU_SIGNATURE),
+ "invalid device model signature read")
+ if qemu_signature != QEMU_SIGNATURE:
+ raise XendError("not a valid device model state: found '%s'" %
+ qemu_signature)
+ qemu_fd = os.open("/tmp/xen.qemu-dm.%d" % dominfo.getDomid(),
+ os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
+ while True:
+ buf = os.read(fd, dm_batch)
+ if len(buf):
+ write_exact(qemu_fd, buf,
+ "could not write dm state to tmp file")
+ else:
+ break
+ os.close(qemu_fd)
+
+
+ os.read(fd, 1) # Wait for source to close connection
dominfo.completeRestore(handler.store_mfn, handler.console_mfn)
diff --git a/tools/python/xen/xend/XendConfig.py b/tools/python/xen/xend/XendConfig.py
index fd9afb0b3a..1098417542 100644
--- a/tools/python/xen/xend/XendConfig.py
+++ b/tools/python/xen/xend/XendConfig.py
@@ -78,6 +78,25 @@ def scrub_password(data):
else:
return data
+#
+# CPU fields:
+#
+# vcpus_number -- the maximum number of vcpus that this domain may ever have.
+# aka XendDomainInfo.getVCpuCount().
+# vcpus -- the legacy configuration name for above.
+# max_vcpu_id -- vcpus_number - 1. This is given to us by Xen.
+#
+# cpus -- the list of pCPUs available to each vCPU.
+#
+# vcpu_avail: a bitmap telling the guest domain whether it may use each of
+# its VCPUs. This is translated to
+# <dompath>/cpu/<id>/availability = {online,offline} for use
+# by the guest domain.
+# online_vcpus -- the number of VCPUs currently up, as reported by Xen. This
+# is changed by changing vcpu_avail, and waiting for the
+# domain to respond.
+#
+
# Mapping from XendConfig configuration keys to the old
# legacy configuration keys that map directly.
@@ -185,7 +204,7 @@ LEGACY_CFG_TYPES = {
'uuid': str,
'name': str,
'vcpus': int,
- 'vcpu_avail': int,
+ 'vcpu_avail': long,
'memory': int,
'shadow_memory': int,
'maxmem': int,
@@ -355,9 +374,6 @@ class XendConfig(dict):
'cpu_weight': 256,
'cpu_cap': 0,
'vcpus_number': 1,
- 'online_vcpus': 1,
- 'max_vcpu_id': 0,
- 'vcpu_avail': 1,
'console_refs': [],
'vif_refs': [],
'vbd_refs': [],
@@ -389,7 +405,7 @@ class XendConfig(dict):
event)
def _vcpus_sanity_check(self):
- if self.get('vcpus_number') != None:
+ if 'vcpus_number' in self and 'vcpu_avail' not in self:
self['vcpu_avail'] = (1 << self['vcpus_number']) - 1
def _uuid_sanity_check(self):
@@ -405,7 +421,7 @@ class XendConfig(dict):
def _dominfo_to_xapi(self, dominfo):
self['domid'] = dominfo['domid']
self['online_vcpus'] = dominfo['online_vcpus']
- self['max_vcpu_id'] = dominfo['max_vcpu_id']
+ self['vcpus_number'] = dominfo['max_vcpu_id'] + 1
self['memory_dynamic_min'] = (dominfo['mem_kb'] + 1023)/1024
self['memory_dynamic_max'] = (dominfo['maxmem_kb'] + 1023)/1024
self['cpu_time'] = dominfo['cpu_time']/1e9
@@ -636,9 +652,6 @@ class XendConfig(dict):
self['memory_dynamic_max'] = self['memory_static_max']
self['memory_dynamic_min'] = self['memory_static_min']
- # make sure max_vcpu_id is set correctly
- self['max_vcpu_id'] = self['vcpus_number'] - 1
-
# set device references in the configuration
self['devices'] = cfg.get('devices', {})
@@ -720,13 +733,11 @@ class XendConfig(dict):
_set_cfg_if_exists('on_xend_stop')
_set_cfg_if_exists('on_xend_start')
_set_cfg_if_exists('vcpu_avail')
- _set_cfg_if_exists('max_vcpu_id') # needed for vcpuDomDetails
_set_cfg_if_exists('cpu_weight')
_set_cfg_if_exists('cpu_cap')
# Parse and store runtime configuration
_set_cfg_if_exists('start_time')
- _set_cfg_if_exists('online_vcpus')
_set_cfg_if_exists('cpu_time')
_set_cfg_if_exists('shutdown_reason')
_set_cfg_if_exists('up_time')
@@ -1115,19 +1126,17 @@ class XendConfig(dict):
# configuration
log.debug("update_with_image_sxp(%s)" % scrub_password(image_sxp))
- kernel_args = ""
+ # user-specified args must come last: previous releases did this and
+ # some domU kernels rely upon the ordering.
+ kernel_args = sxp.child_value(image_sxp, 'args', '')
# attempt to extract extra arguments from SXP config
arg_ip = sxp.child_value(image_sxp, 'ip')
if arg_ip and not re.search(r'ip=[^ ]+', kernel_args):
- kernel_args += 'ip=%s ' % arg_ip
+ kernel_args = 'ip=%s ' % arg_ip + kernel_args
arg_root = sxp.child_value(image_sxp, 'root')
if arg_root and not re.search(r'root=', kernel_args):
- kernel_args += 'root=%s ' % arg_root
-
- # user-specified args must come last: previous releases did this and
- # some domU kernels rely upon the ordering.
- kernel_args += sxp.child_value(image_sxp, 'args', '')
+ kernel_args = 'root=%s ' % arg_root + kernel_args
if bootloader:
self['_temp_using_bootloader'] = '1'
diff --git a/tools/python/xen/xend/XendDomain.py b/tools/python/xen/xend/XendDomain.py
index 4257057398..05d669de08 100644
--- a/tools/python/xen/xend/XendDomain.py
+++ b/tools/python/xen/xend/XendDomain.py
@@ -32,7 +32,7 @@ import threading
import xen.lowlevel.xc
-from xen.xend import XendRoot, XendCheckpoint, XendDomainInfo
+from xen.xend import XendOptions, XendCheckpoint, XendDomainInfo
from xen.xend.PrettyPrint import prettyprint
from xen.xend.XendConfig import XendConfig
from xen.xend.XendError import XendError, XendInvalidDomain, VmError
@@ -51,7 +51,7 @@ from xen.util import mkdir, security
from xen.xend import uuid
xc = xen.lowlevel.xc.xc()
-xroot = XendRoot.instance()
+xoptions = XendOptions.instance()
__all__ = [ "XendDomain" ]
@@ -214,7 +214,7 @@ class XendDomain:
@rtype: String
@return: Path.
"""
- dom_path = xroot.get_xend_domains_path()
+ dom_path = xoptions.get_xend_domains_path()
if domuuid:
dom_path = os.path.join(dom_path, domuuid)
return dom_path
@@ -361,7 +361,7 @@ class XendDomain:
def _setDom0CPUCount(self):
"""Sets the number of VCPUs dom0 has. Retreived from the
- Xend configuration, L{XendRoot}.
+ Xend configuration, L{XendOptions}.
@requires: Expects to be protected by domains_lock.
@rtype: None
@@ -369,7 +369,7 @@ class XendDomain:
dom0 = self.privilegedDomain()
# get max number of vcpus to use for dom0 from config
- target = int(xroot.get_dom0_vcpus())
+ target = int(xoptions.get_dom0_vcpus())
log.debug("number of vcpus to use is %d", target)
# target == 0 means use all processors
@@ -1164,7 +1164,7 @@ class XendDomain:
dominfo.checkLiveMigrateMemory()
if port == 0:
- port = xroot.get_xend_relocation_port()
+ port = xoptions.get_xend_relocation_port()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((dst, port))
diff --git a/tools/python/xen/xend/XendDomainInfo.py b/tools/python/xen/xend/XendDomainInfo.py
index f80c3f5be7..27cba0049c 100644
--- a/tools/python/xen/xend/XendDomainInfo.py
+++ b/tools/python/xen/xend/XendDomainInfo.py
@@ -38,7 +38,7 @@ from xen.util.blkif import blkdev_uname_to_file
from xen.util import security
from xen.xend import balloon, sxp, uuid, image, arch, osdep
-from xen.xend import XendRoot, XendNode, XendConfig
+from xen.xend import XendOptions, XendNode, XendConfig
from xen.xend.XendConfig import scrub_password
from xen.xend.XendBootloader import bootloader
@@ -54,31 +54,12 @@ MIGRATE_TIMEOUT = 30.0
BOOTLOADER_LOOPBACK_DEVICE = '/dev/xvdp'
xc = xen.lowlevel.xc.xc()
-xroot = XendRoot.instance()
+xoptions = XendOptions.instance()
log = logging.getLogger("xend.XendDomainInfo")
#log.setLevel(logging.TRACE)
-#
-# There are a number of CPU-related fields:
-#
-# vcpus: the number of virtual CPUs this domain is configured to use.
-# vcpu_avail: a bitmap telling the guest domain whether it may use each of
-# its VCPUs. This is translated to
-# <dompath>/cpu/<id>/availability = {online,offline} for use
-# by the guest domain.
-# cpumap: a list of bitmaps, one for each VCPU, giving the physical
-# CPUs that that VCPU may use.
-# cpu: a configuration setting requesting that VCPU 0 is pinned to
-# the specified physical CPU.
-#
-# vcpus and vcpu_avail settings persist with the VM (i.e. they are persistent
-# across save, restore, migrate, and restart). The other settings are only
-# specific to the domain, so are lost when the VM moves.
-#
-
-
def create(config):
"""Creates and start a VM using the supplied configuration.
@@ -451,6 +432,16 @@ class XendDomainInfo:
self._removeVm('xend/previous_restart_time')
self.storeDom("control/shutdown", reason)
+ ## shutdown hypercall for hvm domain besides xenstore write
+ image_cfg = self.info.get('image', {})
+ hvm = image_cfg.has_key('hvm')
+ if hvm:
+ for code in DOMAIN_SHUTDOWN_REASONS.keys():
+ if DOMAIN_SHUTDOWN_REASONS[code] == reason:
+ break
+ xc.domain_shutdown(self.domid, code)
+
+
def pause(self):
"""Pause domain
@@ -614,7 +605,7 @@ class XendDomainInfo:
['name', self.info['name_label']],
['vcpu_count', self.info['vcpus_number']]]
- for i in range(0, self.info['max_vcpu_id']+1):
+ for i in range(0, self.info['vcpus_number']):
info = xc.vcpu_getinfo(self.domid, i)
sxpr.append(['vcpu',
@@ -739,7 +730,7 @@ class XendDomainInfo:
'domid': str(self.domid),
'vm': self.vmpath,
'name': self.info['name_label'],
- 'console/limit': str(xroot.get_console_limit() * 1024),
+ 'console/limit': str(xoptions.get_console_limit() * 1024),
'memory/target': str(self.info['memory_static_min'] * 1024)
}
@@ -898,8 +889,9 @@ class XendDomainInfo:
self._writeDom(self._vcpuDomDetails())
else:
self.info['vcpus_number'] = vcpus
- self.info['online_vcpus'] = vcpus
xen.xend.XendDomain.instance().managed_config_save(self)
+ log.info("Set VCPU count on domain %s to %d", self.info['name_label'],
+ vcpus)
def getLabel(self):
return security.get_security_info(self.info, 'label')
@@ -976,7 +968,7 @@ class XendDomainInfo:
self.info['name_label'], self.domid)
self._writeVm(LAST_SHUTDOWN_REASON, 'crash')
- if xroot.get_enable_dump():
+ if xoptions.get_enable_dump():
try:
self.dumpCore()
except XendError:
@@ -1228,8 +1220,11 @@ class XendDomainInfo:
if self.image:
self.image.createDeviceModel()
- def _releaseDevices(self):
+ def _releaseDevices(self, suspend = False):
"""Release all domain's devices. Nothrow guarantee."""
+ if suspend and self.image:
+ self.image.destroy(suspend)
+ return
while True:
t = xstransact("%s/device" % self.dompath)
@@ -1381,7 +1376,7 @@ class XendDomainInfo:
# this is done prior to memory allocation to aide in memory
# distribution for NUMA systems.
if self.info['cpus'] is not None and len(self.info['cpus']) > 0:
- for v in range(0, self.info['max_vcpu_id']+1):
+ for v in range(0, self.info['vcpus_number']):
xc.vcpu_setaffinity(self.domid, v, self.info['cpus'])
# Use architecture- and image-specific calculations to determine
@@ -1395,6 +1390,7 @@ class XendDomainInfo:
self.info['shadow_memory'] * 1024,
self.info['memory_static_max'] * 1024)
+ log.debug("_initDomain:shadow_memory=0x%x, memory_static_max=0x%x, memory_static_min=0x%x.", self.info['shadow_memory'], self.info['memory_static_max'], self.info['memory_static_min'],)
# Round shadow up to a multiple of a MiB, as shadow_mem_control
# takes MiB and we must not round down and end up under-providing.
shadow = ((shadow + 1023) / 1024) * 1024
@@ -1494,6 +1490,16 @@ class XendDomainInfo:
self.console_mfn = console_mfn
self._introduceDomain()
+ image_cfg = self.info.get('image', {})
+ hvm = image_cfg.has_key('hvm')
+ if hvm:
+ self.image = image.create(self,
+ self.info,
+ self.info['image'],
+ self.info['devices'])
+ if self.image:
+ self.image.createDeviceModel(True)
+ self.image.register_shutdown_watch()
self._storeDomDetails()
self._registerWatches()
self.refreshShutdown()
@@ -2028,8 +2034,8 @@ class XendDomainInfo:
# TODO: spec says that key is int, however, python does not allow
# non-string keys to dictionaries.
vcpu_util = {}
- if 'max_vcpu_id' in self.info and self.domid != None:
- for i in range(0, self.info['max_vcpu_id']+1):
+ if 'vcpus_number' in self.info and self.domid != None:
+ for i in range(0, self.info['vcpus_number']):
info = xc.vcpu_getinfo(self.domid, i)
vcpu_util[str(i)] = info['cpu_time']/1000000000.0
diff --git a/tools/python/xen/xend/XendNode.py b/tools/python/xen/xend/XendNode.py
index 932c05d207..30953f6a53 100644
--- a/tools/python/xen/xend/XendNode.py
+++ b/tools/python/xen/xend/XendNode.py
@@ -24,7 +24,7 @@ from xen.util import Brctl
from xen.xend import uuid
from xen.xend.XendError import XendError, NetworkAlreadyConnected
-from xen.xend.XendRoot import instance as xendroot
+from xen.xend.XendOptions import instance as xendoptions
from xen.xend.XendStorageRepository import XendStorageRepository
from xen.xend.XendLogging import log
from xen.xend.XendPIF import *
@@ -45,7 +45,7 @@ class XendNode:
"""
self.xc = xen.lowlevel.xc.xc()
- self.state_store = XendStateStore(xendroot().get_xend_state_path())
+ self.state_store = XendStateStore(xendoptions().get_xend_state_path())
# load host state from XML file
saved_host = self.state_store.load_state('host')
diff --git a/tools/python/xen/xend/XendRoot.py b/tools/python/xen/xend/XendOptions.py
index 640c96f5f8..a51ecb670a 100644
--- a/tools/python/xen/xend/XendRoot.py
+++ b/tools/python/xen/xend/XendOptions.py
@@ -20,7 +20,7 @@
Creates the servers and handles configuration.
Other classes get config variables by importing this module,
-using instance() to get a XendRoot instance, and then
+using instance() to get a XendOptions instance, and then
the config functions (e.g. get_xend_port()) to get
configured values.
"""
@@ -33,14 +33,11 @@ import sys
from xen.xend import sxp, osdep, XendLogging
from xen.xend.XendError import XendError
-class XendRoot:
- """Root of the management classes."""
+if os.uname()[0] == 'SunOS':
+ from xen.lowlevel import scf
- """Default path to the config file."""
- config_default = "/etc/xen/xend-config.sxp"
-
- """Environment variable used to override config_default."""
- config_var = "XEND_CONFIG"
+class XendOptions:
+ """Configuration options."""
"""Where network control scripts live."""
network_script_dir = osdep.scripts_dir
@@ -75,10 +72,10 @@ class XendRoot:
xend_relocation_address_default = ''
"""Default port xend serves HTTP at. """
- xend_port_default = '8000'
+ xend_port_default = 8000
"""Default port xend serves relocation at. """
- xend_relocation_port_default = '8002'
+ xend_relocation_port_default = 8002
xend_relocation_hosts_allow_default = ''
@@ -92,9 +89,9 @@ class XendRoot:
"""Default path the unix-domain server listens at."""
xend_unix_path_default = '/var/lib/xend/xend-socket'
- dom0_min_mem_default = '0'
+ dom0_min_mem_default = 0
- dom0_vcpus_default = '0'
+ dom0_vcpus_default = 0
vncpasswd_default = None
@@ -107,16 +104,17 @@ class XendRoot:
"""Default xend management state storage."""
xend_state_path_default = '/var/lib/xend/state'
- components = {}
+ """Default type of backend network interfaces"""
+ netback_type = osdep.netback_type
+
+ """Default script to configure a backend network interface"""
+ vif_script = osdep.vif_script
def __init__(self):
- self.config_path = None
- self.config = None
self.configure()
-
def _logError(self, fmt, *args):
- """Logging function to log to stderr. We use this for XendRoot log
+ """Logging function to log to stderr. We use this for XendOptions log
messages because they may be logged before the logger has been
configured. Other components can safely use the logger.
"""
@@ -125,81 +123,25 @@ class XendRoot:
def configure(self):
self.set_config()
- XendLogging.init(self.get_config_value("logfile",
+ XendLogging.init(self.get_config_string("logfile",
self.logfile_default),
- self.get_config_value("loglevel",
+ self.get_config_string("loglevel",
self.loglevel_default))
-
def set_config(self):
- """If the config file exists, read it. If not, ignore it.
-
- The config file is a sequence of sxp forms.
- """
- self.config_path = os.getenv(self.config_var, self.config_default)
- if os.path.exists(self.config_path):
- try:
- fin = file(self.config_path, 'rb')
- try:
- config = sxp.parse(fin)
- finally:
- fin.close()
- if config is None:
- config = ['xend-config']
- else:
- config.insert(0, 'xend-config')
- self.config = config
- except Exception, ex:
- self._logError('Reading config file %s: %s',
- self.config_path, str(ex))
- raise
- else:
- self._logError('Config file does not exist: %s',
- self.config_path)
- self.config = ['xend-config']
-
- def get_config(self, name=None):
- """Get the configuration element with the given name, or
- the whole configuration if no name is given.
-
- @param name: element name (optional)
- @return: config or none
- """
- if name is None:
- val = self.config
- else:
- val = sxp.child(self.config, name)
- return val
-
- def get_config_value(self, name, val=None):
- """Get the value of an atomic configuration element.
-
- @param name: element name
- @param val: default value (optional, defaults to None)
- @return: value
- """
- return sxp.child_value(self.config, name, val=val)
+ raise NotImplementedError()
def get_config_bool(self, name, val=None):
- v = string.lower(str(self.get_config_value(name, val)))
- if v in ['yes', 'y', '1', 'on', 'true', 't']:
- return True
- if v in ['no', 'n', '0', 'off', 'false', 'f']:
- return False
- raise XendError("invalid xend config %s: expected bool: %s" % (name, v))
-
+ raise NotImplementedError()
+
def get_config_int(self, name, val=None):
- v = self.get_config_value(name, val)
- try:
- return int(v)
- except Exception:
- raise XendError("invalid xend config %s: expected int: %s" % (name, v))
+ raise NotImplementedError()
+
+ def get_config_string(self, name, val=None):
+ raise NotImplementedError()
def get_xen_api_server(self):
- """Get the Xen-API server configuration.
- """
- return self.get_config_value('xen-api-server',
- self.xen_api_server_default)
+ raise NotImplementedError()
def get_xend_http_server(self):
"""Get the flag indicating whether xend should run an http server.
@@ -232,7 +174,7 @@ class XendRoot:
self.xend_relocation_port_default)
def get_xend_relocation_hosts_allow(self):
- return self.get_config_value("xend-relocation-hosts-allow",
+ return self.get_config_string("xend-relocation-hosts-allow",
self.xend_relocation_hosts_allow_default)
def get_xend_address(self):
@@ -241,7 +183,7 @@ class XendRoot:
If this is set to 'localhost' only the localhost will be able to connect
to the HTTP port.
"""
- return self.get_config_value('xend-address', self.xend_address_default)
+ return self.get_config_string('xend-address', self.xend_address_default)
def get_xend_relocation_address(self):
"""Get the address xend listens at for its relocation server port.
@@ -249,7 +191,7 @@ class XendRoot:
If this is set to 'localhost' only the localhost will be able to connect
to the relocation port.
"""
- return self.get_config_value('xend-relocation-address', self.xend_relocation_address_default)
+ return self.get_config_string('xend-relocation-address', self.xend_relocation_address_default)
def get_xend_unix_server(self):
"""Get the flag indicating whether xend should run a unix-domain server.
@@ -259,23 +201,23 @@ class XendRoot:
def get_xend_unix_path(self):
"""Get the path the xend unix-domain server listens at.
"""
- return self.get_config_value("xend-unix-path", self.xend_unix_path_default)
+ return self.get_config_string("xend-unix-path", self.xend_unix_path_default)
def get_xend_domains_path(self):
""" Get the path for persistent domain configuration storage
"""
- return self.get_config_value("xend-domains-path", self.xend_domains_path_default)
+ return self.get_config_string("xend-domains-path", self.xend_domains_path_default)
def get_xend_state_path(self):
""" Get the path for persistent domain configuration storage
"""
- return self.get_config_value("xend-state-path", self.xend_state_path_default)
+ return self.get_config_string("xend-state-path", self.xend_state_path_default)
def get_network_script(self):
"""@return the script used to alter the network configuration when
Xend starts and stops, or None if no such script is specified."""
- s = self.get_config_value('network-script')
+ s = self.get_config_string('network-script')
if s:
result = s.split(" ")
@@ -286,13 +228,13 @@ class XendRoot:
def get_external_migration_tool(self):
"""@return the name of the tool to handle virtual TPM migration."""
- return self.get_config_value('external-migration-tool', self.external_migration_tool_default)
+ return self.get_config_string('external-migration-tool', self.external_migration_tool_default)
def get_enable_dump(self):
return self.get_config_bool('enable-dump', 'no')
def get_vif_script(self):
- return self.get_config_value('vif-script', 'vif-bridge')
+ return self.get_config_string('vif-script', self.vif_script)
def get_dom0_min_mem(self):
return self.get_config_int('dom0-min-mem', self.dom0_min_mem_default)
@@ -304,19 +246,128 @@ class XendRoot:
return self.get_config_int('console-limit', 1024)
def get_vnclisten_address(self):
- return self.get_config_value('vnc-listen', self.xend_vnc_listen_default)
+ return self.get_config_string('vnc-listen', self.xend_vnc_listen_default)
def get_vncpasswd_default(self):
- return self.get_config_value('vncpasswd',
+ return self.get_config_string('vncpasswd',
self.vncpasswd_default)
+class XendOptionsFile(XendOptions):
+
+ """Default path to the config file."""
+ config_default = "/etc/xen/xend-config.sxp"
+
+ """Environment variable used to override config_default."""
+ config_var = "XEND_CONFIG"
+
+ def set_config(self):
+ """If the config file exists, read it. If not, ignore it.
+
+ The config file is a sequence of sxp forms.
+ """
+ self.config_path = os.getenv(self.config_var, self.config_default)
+ if os.path.exists(self.config_path):
+ try:
+ fin = file(self.config_path, 'rb')
+ try:
+ config = sxp.parse(fin)
+ finally:
+ fin.close()
+ if config is None:
+ config = ['xend-config']
+ else:
+ config.insert(0, 'xend-config')
+ self.config = config
+ except Exception, ex:
+ self._logError('Reading config file %s: %s',
+ self.config_path, str(ex))
+ raise
+ else:
+ self._logError('Config file does not exist: %s',
+ self.config_path)
+ self.config = ['xend-config']
+
+ def get_config_value(self, name, val=None):
+ """Get the value of an atomic configuration element.
+
+ @param name: element name
+ @param val: default value (optional, defaults to None)
+ @return: value
+ """
+ return sxp.child_value(self.config, name, val=val)
+
+ def get_config_bool(self, name, val=None):
+ v = string.lower(str(self.get_config_value(name, val)))
+ if v in ['yes', 'y', '1', 'on', 'true', 't']:
+ return True
+ if v in ['no', 'n', '0', 'off', 'false', 'f']:
+ return False
+ raise XendError("invalid xend config %s: expected bool: %s" % (name, v))
+
+ def get_config_int(self, name, val=None):
+ v = self.get_config_value(name, val)
+ try:
+ return int(v)
+ except Exception:
+ raise XendError("invalid xend config %s: expected int: %s" % (name, v))
+
+ def get_config_string(self, name, val=None):
+ return self.get_config_value(name, val)
+
+ def get_xen_api_server(self):
+ """Get the Xen-API server configuration.
+ """
+ return self.get_config_value('xen-api-server',
+ self.xen_api_server_default)
+
+if os.uname()[0] == 'SunOS':
+ class XendOptionsSMF(XendOptions):
+
+ def set_config(self):
+ pass
+
+ def get_config_bool(self, name, val=None):
+ try:
+ return scf.get_bool(name)
+ except scf.error, e:
+ if e[0] == scf.SCF_ERROR_NOT_FOUND:
+ return val
+ else:
+ raise XendError("option %s: %s:%s" % (name, e[1], e[2]))
+
+ def get_config_int(self, name, val=None):
+ try:
+ return scf.get_int(name)
+ except scf.error, e:
+ if e[0] == scf.SCF_ERROR_NOT_FOUND:
+ return val
+ else:
+ raise XendError("option %s: %s:%s" % (name, e[1], e[2]))
+
+ def get_config_string(self, name, val=None):
+ try:
+ return scf.get_string(name)
+ except scf.error, e:
+ if e[0] == scf.SCF_ERROR_NOT_FOUND:
+ return val
+ else:
+ raise XendError("option %s: %s:%s" % (name, e[1], e[2]))
+
+ def get_xen_api_server(self):
+ # When the new server is a supported configuration, we should
+ # expand this.
+ return [["unix"]]
+
def instance():
- """Get an instance of XendRoot.
+ """Get an instance of XendOptions.
Use this instead of the constructor.
"""
global inst
try:
inst
except:
- inst = XendRoot()
+ if os.uname()[0] == 'SunOS':
+ inst = XendOptionsSMF()
+ else:
+ inst = XendOptionsFile()
return inst
diff --git a/tools/python/xen/xend/XendProtocol.py b/tools/python/xen/xend/XendProtocol.py
index f09f6762d3..34af4a4b2b 100644
--- a/tools/python/xen/xend/XendProtocol.py
+++ b/tools/python/xen/xend/XendProtocol.py
@@ -24,7 +24,7 @@ import types
from encode import *
from xen.xend import sxp
-from xen.xend import XendRoot
+from xen.xend import XendOptions
DEBUG = 0
@@ -34,7 +34,7 @@ HTTP_ACCEPTED = 202
HTTP_NO_CONTENT = 204
-xroot = XendRoot.instance()
+xoptions = XendOptions.instance()
class XendError(RuntimeError):
@@ -218,7 +218,7 @@ class UnixXendClientProtocol(HttpXendClientProtocol):
def __init__(self, path=None):
if path is None:
- path = xroot.get_xend_unix_path()
+ path = xoptions.get_xend_unix_path()
self.path = path
def makeConnection(self, _):
diff --git a/tools/python/xen/xend/balloon.py b/tools/python/xen/xend/balloon.py
index 3c167f3b27..e7c2693ab3 100644
--- a/tools/python/xen/xend/balloon.py
+++ b/tools/python/xen/xend/balloon.py
@@ -22,7 +22,7 @@ import time
import xen.lowlevel.xc
import XendDomain
-import XendRoot
+import XendOptions
from XendLogging import log
from XendError import VmError
@@ -107,11 +107,11 @@ def free(need_mem):
# usage, so we recheck the required alloc each time around the loop, but
# track the last used value so that we don't trigger too many watches.
- xroot = XendRoot.instance()
+ xoptions = XendOptions.instance()
xc = xen.lowlevel.xc.xc()
try:
- dom0_min_mem = xroot.get_dom0_min_mem() * 1024
+ dom0_min_mem = xoptions.get_dom0_min_mem() * 1024
retries = 0
sleep_time = SLEEP_TIME_GROWTH
diff --git a/tools/python/xen/xend/image.py b/tools/python/xen/xend/image.py
index 650c449415..5076d03080 100644
--- a/tools/python/xen/xend/image.py
+++ b/tools/python/xen/xend/image.py
@@ -173,7 +173,7 @@ class ImageHandler:
"""Build the domain. Define in subclass."""
raise NotImplementedError()
- def createDeviceModel(self):
+ def createDeviceModel(self, restore = False):
"""Create device model for the domain (define in subclass if needed)."""
pass
@@ -377,11 +377,12 @@ class HVMImageHandler(ImageHandler):
# xm config file
def parseDeviceModelArgs(self, imageConfig, deviceConfig):
dmargs = [ 'boot', 'fda', 'fdb', 'soundhw',
- 'localtime', 'serial', 'stdvga', 'isa', 'vcpus',
+ 'localtime', 'serial', 'stdvga', 'isa',
'acpi', 'usb', 'usbdevice', 'keymap' ]
- ret = []
hvmDeviceConfig = imageConfig['hvm']['devices']
-
+
+ ret = ['-vcpus', str(self.vm.getVCpuCount())]
+
for a in dmargs:
v = hvmDeviceConfig.get(a)
@@ -461,14 +462,14 @@ class HVMImageHandler(ImageHandler):
vnclisten = imageConfig.get('vnclisten')
if not(vnclisten):
- vnclisten = (xen.xend.XendRoot.instance().
+ vnclisten = (xen.xend.XendOptions.instance().
get_vnclisten_address())
if vnclisten:
ret += ['-vnclisten', vnclisten]
vncpasswd = vncpasswd_vmconfig
if vncpasswd is None:
- vncpasswd = (xen.xend.XendRoot.instance().
+ vncpasswd = (xen.xend.XendOptions.instance().
get_vncpasswd_default())
if vncpasswd is None:
raise VmError('vncpasswd is not set up in ' +
@@ -478,7 +479,7 @@ class HVMImageHandler(ImageHandler):
return ret
- def createDeviceModel(self):
+ def createDeviceModel(self, restore = False):
if self.pid:
return
# Execute device model.
@@ -487,6 +488,8 @@ class HVMImageHandler(ImageHandler):
args = args + ([ "-d", "%d" % self.vm.getDomid(),
"-m", "%s" % (self.getRequiredInitialReservation() / 1024)])
args = args + self.dmargs
+ if restore:
+ args = args + ([ "-loadvm", "/tmp/xen.qemu-dm.%d" % self.vm.getDomid() ])
env = dict(os.environ)
if self.display:
env['DISPLAY'] = self.display
@@ -505,12 +508,16 @@ class HVMImageHandler(ImageHandler):
self.register_reboot_feature_watch()
self.pid = self.vm.gatherDom(('image/device-model-pid', int))
- def destroy(self):
+ def destroy(self, suspend = False):
self.unregister_shutdown_watch()
self.unregister_reboot_feature_watch();
if self.pid:
try:
- os.kill(self.pid, signal.SIGKILL)
+ sig = signal.SIGKILL
+ if suspend:
+ log.info("use sigusr1 to signal qemu %d", self.pid)
+ sig = signal.SIGUSR1
+ os.kill(self.pid, sig)
except OSError, exn:
log.exception(exn)
try:
@@ -599,6 +606,9 @@ class IA64_HVM_ImageHandler(HVMImageHandler):
extra_pages = 1024 + 3
return mem_kb + extra_pages * page_kb
+ def getRequiredInitialReservation(self):
+ return self.vm.getMemoryTarget()
+
def getRequiredShadowMemory(self, shadow_mem_kb, maxmem_kb):
# Explicit shadow memory is not a concept
return 0
diff --git a/tools/python/xen/xend/osdep.py b/tools/python/xen/xend/osdep.py
index 1dd3c7c1dd..2136465ea7 100644
--- a/tools/python/xen/xend/osdep.py
+++ b/tools/python/xen/xend/osdep.py
@@ -33,9 +33,19 @@ _pygrub_path = {
"SunOS": "/usr/lib/xen/bin/pygrub"
}
+_netback_type = {
+ "SunOS": "SUNW_mac"
+}
+
+_vif_script = {
+ "SunOS": "vif-vnic"
+}
+
def _get(var, default=None):
return var.get(os.uname()[0], default)
scripts_dir = _get(_scripts_dir, "/etc/xen/scripts")
xend_autorestart = _get(_xend_autorestart)
pygrub_path = _get(_pygrub_path, "/usr/bin/pygrub")
+netback_type = _get(_netback_type, "netfront")
+vif_script = _get(_vif_script, "vif-bridge")
diff --git a/tools/python/xen/xend/server/DevController.py b/tools/python/xen/xend/server/DevController.py
index 6ac7885036..7e411a8027 100644
--- a/tools/python/xen/xend/server/DevController.py
+++ b/tools/python/xen/xend/server/DevController.py
@@ -19,7 +19,7 @@
from threading import Event
import types
-from xen.xend import sxp, XendRoot
+from xen.xend import sxp, XendOptions
from xen.xend.XendError import VmError
from xen.xend.XendLogging import log
@@ -50,7 +50,7 @@ xenbusState = {
'Closed' : 6,
}
-xroot = XendRoot.instance()
+xoptions = XendOptions.instance()
xenbusState.update(dict(zip(xenbusState.values(), xenbusState.keys())))
@@ -324,7 +324,7 @@ class DevController:
Make sure that the migration has finished and only
then return from the call.
"""
- tool = xroot.get_external_migration_tool()
+ tool = xoptions.get_external_migration_tool()
if tool:
log.info("Calling external migration tool for step %d" % step)
fd = os.popen("%s -type %s -step %d -host %s -domname %s" %
@@ -341,7 +341,7 @@ class DevController:
""" Recover from device migration. The given step was the
last one that was successfully executed.
"""
- tool = xroot.get_external_migration_tool()
+ tool = xoptions.get_external_migration_tool()
if tool:
log.info("Calling external migration tool")
fd = os.popen("%s -type %s -step %d -host %s -domname %s -recover" %
diff --git a/tools/python/xen/xend/server/SrvRoot.py b/tools/python/xen/xend/server/SrvRoot.py
index 51b4c4126f..bd194f3a38 100644
--- a/tools/python/xen/xend/server/SrvRoot.py
+++ b/tools/python/xen/xend/server/SrvRoot.py
@@ -25,7 +25,7 @@ class SrvRoot(SrvDir):
"""Server sub-components. Each entry is (name, class), where
'name' is the entry name and 'class' is the name of its class.
"""
- #todo Get this list from the XendRoot config.
+ #todo Get this list from the XendOptions config.
subdirs = [
('node', 'SrvNode' ),
('domain', 'SrvDomainDir' ),
diff --git a/tools/python/xen/xend/server/SrvServer.py b/tools/python/xen/xend/server/SrvServer.py
index 82df82e0f8..d4aae088e6 100644
--- a/tools/python/xen/xend/server/SrvServer.py
+++ b/tools/python/xen/xend/server/SrvServer.py
@@ -48,7 +48,7 @@ from threading import Thread
from xen.web.httpserver import HttpServer, UnixHttpServer
-from xen.xend import XendNode, XendRoot, XendAPI
+from xen.xend import XendNode, XendOptions, XendAPI
from xen.xend import Vifctl
from xen.xend.XendLogging import log
from xen.xend.XendClient import XEN_API_SOCKET
@@ -57,7 +57,7 @@ from xen.web.SrvDir import SrvDir
from SrvRoot import SrvRoot
from XMLRPCServer import XMLRPCServer
-xroot = XendRoot.instance()
+xoptions = XendOptions.instance()
class XendServers:
@@ -65,6 +65,7 @@ class XendServers:
def __init__(self, root):
self.servers = []
self.root = root
+ self.running = False
self.cleaningUp = False
self.reloadingConfig = False
@@ -79,6 +80,7 @@ class XendServers:
server.shutdown()
except:
pass
+ self.running = False
def reloadConfig(self, signum = 0, frame = None):
log.debug("SrvServer.reloadConfig()")
@@ -107,13 +109,12 @@ class XendServers:
if server.ready:
continue
- thread = Thread(target=server.run, name=server.__class__.__name__)
- if isinstance(server, HttpServer):
- thread.setDaemon(True)
+ thread = Thread(target=server.run,
+ name=server.__class__.__name__)
+ thread.setDaemon(True)
thread.start()
threads.append(thread)
-
# check for when all threads have initialized themselves and then
# close the status pipe
@@ -143,47 +144,32 @@ class XendServers:
status.close()
status = None
- # Interruptible Thread.join - Python Bug #1167930
- # Replaces: for t in threads: t.join()
- # Reason: The above will cause python signal handlers to be
- # blocked so we're not able to catch SIGTERM in any
- # way for cleanup
- runningThreads = threads
- while len(runningThreads) > 0:
- try:
- for t in threads:
- t.join(1.0)
- runningThreads = [t for t in threads
- if t.isAlive() and not t.isDaemon()]
- if self.cleaningUp and len(runningThreads) > 0:
- log.debug("Waiting for %s." %
- [x.getName() for x in runningThreads])
- except:
- pass
-
+ # loop to keep main thread alive until it receives a SIGTERM
+ self.running = True
+ while self.running:
+ time.sleep(100000000)
+
if self.reloadingConfig:
log.info("Restarting all XML-RPC and Xen-API servers...")
self.cleaningUp = False
self.reloadingConfig = False
- xroot.set_config()
- new_servers = [x for x in self.servers
- if isinstance(x, HttpServer)]
- self.servers = new_servers
+ xoptions.set_config()
+ self.servers = []
_loadConfig(self, self.root, True)
else:
break
def _loadConfig(servers, root, reload):
- if not reload and xroot.get_xend_http_server():
+ if xoptions.get_xend_http_server():
servers.add(HttpServer(root,
- xroot.get_xend_address(),
- xroot.get_xend_port()))
- if not reload and xroot.get_xend_unix_server():
- path = xroot.get_xend_unix_path()
+ xoptions.get_xend_address(),
+ xoptions.get_xend_port()))
+ if xoptions.get_xend_unix_server():
+ path = xoptions.get_xend_unix_path()
log.info('unix path=' + path)
servers.add(UnixHttpServer(root, path))
- api_cfg = xroot.get_xen_api_server()
+ api_cfg = xoptions.get_xen_api_server()
if api_cfg:
try:
addrs = [(str(x[0]).split(':'),
@@ -218,10 +204,10 @@ def _loadConfig(servers, root, reload):
except TypeError, exn:
log.error('Xen-API server configuration %s is invalid.', api_cfg)
- if xroot.get_xend_tcp_xmlrpc_server():
+ if xoptions.get_xend_tcp_xmlrpc_server():
servers.add(XMLRPCServer(XendAPI.AUTH_PAM, False, True))
- if xroot.get_xend_unix_xmlrpc_server():
+ if xoptions.get_xend_unix_xmlrpc_server():
servers.add(XMLRPCServer(XendAPI.AUTH_PAM, False))
diff --git a/tools/python/xen/xend/server/XMLRPCServer.py b/tools/python/xen/xend/server/XMLRPCServer.py
index 05646fcea6..130da9aea8 100644
--- a/tools/python/xen/xend/server/XMLRPCServer.py
+++ b/tools/python/xen/xend/server/XMLRPCServer.py
@@ -179,21 +179,24 @@ class XMLRPCServer:
# Custom runloop so we can cleanup when exiting.
# -----------------------------------------------------------------
try:
- self.server.socket.settimeout(1.0)
while self.running:
self.server.handle_request()
finally:
- self.cleanup()
+ self.shutdown()
def cleanup(self):
- log.debug("XMLRPCServer.cleanup()")
+ log.debug('XMLRPCServer.cleanup()')
try:
- self.server.socket.close()
+ if hasattr(self, 'server'):
+ # shutdown socket explicitly to allow reuse
+ self.server.socket.shutdown(socket.SHUT_RDWR)
+ self.server.socket.close()
except Exception, exn:
log.exception(exn)
pass
def shutdown(self):
self.running = False
- self.ready = False
-
+ if self.ready:
+ self.ready = False
+ self.cleanup()
diff --git a/tools/python/xen/xend/server/netif.py b/tools/python/xen/xend/server/netif.py
index c229ed7b20..bc755e9c9d 100644
--- a/tools/python/xen/xend/server/netif.py
+++ b/tools/python/xen/xend/server/netif.py
@@ -24,10 +24,10 @@ import os
import random
import re
-from xen.xend import XendRoot
+from xen.xend import XendOptions
from xen.xend.server.DevController import DevController
-xroot = XendRoot.instance()
+xoptions = XendOptions.instance()
def randomMAC():
"""Generate a random MAC address.
@@ -138,8 +138,8 @@ class NetifController(DevController):
def getDeviceDetails(self, config):
"""@see DevController.getDeviceDetails"""
- script = os.path.join(xroot.network_script_dir,
- config.get('script', xroot.get_vif_script()))
+ script = os.path.join(xoptions.network_script_dir,
+ config.get('script', xoptions.get_vif_script()))
typ = config.get('type')
bridge = config.get('bridge')
mac = config.get('mac')
@@ -150,9 +150,8 @@ class NetifController(DevController):
devid = self.allocateDeviceID()
- # The default type is 'netfront'.
if not typ:
- typ = 'netfront'
+ typ = xoptions.netback_type
if not mac:
mac = randomMAC()
@@ -190,7 +189,7 @@ class NetifController(DevController):
(script, ip, bridge, mac, typ, vifname, rate, uuid) = devinfo
if script:
- network_script_dir = xroot.network_script_dir + os.sep
+ network_script_dir = xoptions.network_script_dir + os.sep
result['script'] = script.replace(network_script_dir, "")
if ip:
result['ip'] = ip
diff --git a/tools/python/xen/xend/server/relocate.py b/tools/python/xen/xend/server/relocate.py
index 1b7ed157e9..3cdd43a5eb 100644
--- a/tools/python/xen/xend/server/relocate.py
+++ b/tools/python/xen/xend/server/relocate.py
@@ -24,7 +24,7 @@ from xen.web import protocol, tcp, unix
from xen.xend import sxp
from xen.xend import XendDomain
-from xen.xend import XendRoot
+from xen.xend import XendOptions
from xen.xend.XendError import XendError
from xen.xend.XendLogging import log
@@ -114,15 +114,15 @@ class RelocationProtocol(protocol.Protocol):
def listenRelocation():
- xroot = XendRoot.instance()
- if xroot.get_xend_unix_server():
+ xoptions = XendOptions.instance()
+ if xoptions.get_xend_unix_server():
path = '/var/lib/xend/relocation-socket'
unix.UnixListener(path, RelocationProtocol)
- if xroot.get_xend_relocation_server():
- port = xroot.get_xend_relocation_port()
- interface = xroot.get_xend_relocation_address()
+ if xoptions.get_xend_relocation_server():
+ port = xoptions.get_xend_relocation_port()
+ interface = xoptions.get_xend_relocation_address()
- hosts_allow = xroot.get_xend_relocation_hosts_allow()
+ hosts_allow = xoptions.get_xend_relocation_hosts_allow()
if hosts_allow == '':
hosts_allow = None
else:
diff --git a/tools/python/xen/xend/server/tests/test_controllers.py b/tools/python/xen/xend/server/tests/test_controllers.py
index aa57f6321c..dd2677500d 100644
--- a/tools/python/xen/xend/server/tests/test_controllers.py
+++ b/tools/python/xen/xend/server/tests/test_controllers.py
@@ -2,9 +2,9 @@ import os
import re
import unittest
-import xen.xend.XendRoot
+import xen.xend.XendOptions
-xen.xend.XendRoot.XendRoot.config_default = '/dev/null'
+xen.xend.XendOptions.XendOptions.config_default = '/dev/null'
from xen.xend.server import netif
@@ -13,7 +13,7 @@ FAKE_DOMID = 42
FAKE_DEVID = 63
-xroot = xen.xend.XendRoot.instance()
+xoptions = xen.xend.XendOptions.instance()
class test_controllers(unittest.TestCase):
@@ -36,8 +36,8 @@ class test_controllers(unittest.TestCase):
self.assertEqual(backdets['handle'], str(FAKE_DEVID))
self.assertEqual(backdets['script'],
- os.path.join(xroot.network_script_dir,
- xroot.get_vif_script()))
+ os.path.join(xoptions.network_script_dir,
+ xoptions.get_vif_script()))
self.assertValidMac(backdets['mac'], expectedMac)
self.assertEqual(frontdets['handle'], str(FAKE_DEVID))
diff --git a/tools/python/xen/xend/server/tpmif.py b/tools/python/xen/xend/server/tpmif.py
index c2a5f1f64b..b977110a43 100644
--- a/tools/python/xen/xend/server/tpmif.py
+++ b/tools/python/xen/xend/server/tpmif.py
@@ -20,7 +20,7 @@
"""Support for virtual TPM interfaces."""
-from xen.xend import XendRoot
+from xen.xend import XendOptions
from xen.xend.XendLogging import log
from xen.xend.XendError import XendError
from xen.xend.XendConstants import DEV_MIGRATE_TEST, VTPM_DELETE_SCRIPT
@@ -29,7 +29,7 @@ from xen.xend.server.DevController import DevController
import os
import re
-xroot = XendRoot.instance()
+xoptions = XendOptions.instance()
def destroy_vtpmstate(name):
if os.path.exists(VTPM_DELETE_SCRIPT):
@@ -88,7 +88,7 @@ class TPMifController(DevController):
def migrate(self, deviceConfig, network, dst, step, domName):
"""@see DevContoller.migrate"""
if network:
- tool = xroot.get_external_migration_tool()
+ tool = xoptions.get_external_migration_tool()
if tool != '':
log.info("Request to network-migrate device to %s. step=%d.",
dst, step)
@@ -116,7 +116,7 @@ class TPMifController(DevController):
def recover_migrate(self, deviceConfig, network, dst, step, domName):
"""@see DevContoller.recover_migrate"""
if network:
- tool = xroot.get_external_migration_tool()
+ tool = xoptions.get_external_migration_tool()
if tool != '':
log.info("Request to recover network-migrated device. last good step=%d.",
step)
diff --git a/tools/python/xen/xend/server/vfbif.py b/tools/python/xen/xend/server/vfbif.py
index 816dcd5a6f..dfa94c2989 100644
--- a/tools/python/xen/xend/server/vfbif.py
+++ b/tools/python/xen/xend/server/vfbif.py
@@ -52,7 +52,7 @@ class VfbifController(DevController):
if config.has_key("vncpasswd"):
passwd = config["vncpasswd"]
else:
- passwd = xen.xend.XendRoot.instance().get_vncpasswd_default()
+ passwd = xen.xend.XendOptions.instance().get_vncpasswd_default()
if passwd:
self.vm.storeVm("vncpasswd", passwd)
log.debug("Stored a VNC password for vfb access")
@@ -66,7 +66,7 @@ class VfbifController(DevController):
elif config.has_key("vncdisplay"):
args += ["--vncport", "%d" % (5900 + int(config["vncdisplay"]))]
vnclisten = config.get("vnclisten",
- xen.xend.XendRoot.instance().get_vnclisten_address())
+ xen.xend.XendOptions.instance().get_vnclisten_address())
args += [ "--listen", vnclisten ]
spawn_detached(args[0], args + std_args, os.environ)
elif t == "sdl":
diff --git a/tools/python/xen/xm/create.py b/tools/python/xen/xm/create.py
index e0fe857885..fb42f44c64 100644
--- a/tools/python/xen/xm/create.py
+++ b/tools/python/xen/xm/create.py
@@ -190,6 +190,10 @@ gopts.var('vcpus', val='VCPUS',
fn=set_int, default=1,
use="# of Virtual CPUS in domain.")
+gopts.var('vcpu_avail', val='VCPUS',
+ fn=set_long, default=None,
+ use="Bitmask for virtual CPUs to make available immediately.")
+
gopts.var('cpu_cap', val='CAP',
fn=set_int, default=None,
use="""Set the maximum amount of cpu.
@@ -740,7 +744,7 @@ def make_config(vals):
map(add_conf, ['name', 'memory', 'maxmem', 'shadow_memory',
'restart', 'on_poweroff',
- 'on_reboot', 'on_crash', 'vcpus', 'features',
+ 'on_reboot', 'on_crash', 'vcpus', 'vcpu_avail', 'features',
'on_xend_start', 'on_xend_stop'])
if vals.uuid is not None:
diff --git a/tools/python/xen/xm/main.py b/tools/python/xen/xm/main.py
index 87a0602e2d..95990f8f62 100644
--- a/tools/python/xen/xm/main.py
+++ b/tools/python/xen/xm/main.py
@@ -693,12 +693,15 @@ def parse_doms_info(info):
up_time = time.time() - start_time
return {
- 'domid' : get_info('domid', str, ''),
- 'name' : get_info('name', str, '??'),
+ 'domid' : get_info('domid', str, ''),
+ 'name' : get_info('name', str, '??'),
'mem' : get_info('memory_dynamic_min', int, 0),
- 'vcpus' : get_info('online_vcpus', int, 0),
- 'state' : get_info('state', str, ''),
- 'cpu_time' : get_info('cpu_time', float, 0),
+ 'state' : get_info('state', str, ''),
+ 'cpu_time' : get_info('cpu_time', float, 0.0),
+ # VCPUs is the number online when the VM is up, or the number
+ # configured otherwise.
+ 'vcpus' : get_info('online_vcpus', int,
+ get_info('vcpus', int, 0)),
'up_time' : up_time,
'seclabel' : security.get_security_printlabel(info),
}
diff --git a/tools/python/xen/xm/opts.py b/tools/python/xen/xm/opts.py
index 7dd28bfa4e..1b80ceeff8 100644
--- a/tools/python/xen/xm/opts.py
+++ b/tools/python/xen/xm/opts.py
@@ -571,6 +571,14 @@ def set_int(opt, k, v):
opt.opts.err('Invalid value: ' + str(v))
opt.set(v)
+def set_long(opt, k, v):
+ """Set an option to a long integer value."""
+ try:
+ v = long(v)
+ except:
+ opt.opts.err('Invalid value: ' + str(v))
+ opt.set(v)
+
def set_float(opt, k, v):
"""Set an option to a float value."""
try:
diff --git a/tools/python/xen/xm/tests/test_create.py b/tools/python/xen/xm/tests/test_create.py
index ce8e192413..a25ebb1fb0 100644
--- a/tools/python/xen/xm/tests/test_create.py
+++ b/tools/python/xen/xm/tests/test_create.py
@@ -3,9 +3,9 @@ import os.path
import tempfile
import unittest
-import xen.xend.XendRoot
+import xen.xend.XendOptions
-xen.xend.XendRoot.XendRoot.config_default = '/dev/null'
+xen.xend.XendOptions.XendOptions.config_default = '/dev/null'
import xen.xm.create
diff --git a/tools/xcutils/xc_restore.c b/tools/xcutils/xc_restore.c
index c8b2b89331..8e26d9bb70 100644
--- a/tools/xcutils/xc_restore.c
+++ b/tools/xcutils/xc_restore.c
@@ -19,12 +19,13 @@ int
main(int argc, char **argv)
{
unsigned int xc_fd, io_fd, domid, nr_pfns, store_evtchn, console_evtchn;
+ unsigned int hvm, pae, apic;
int ret;
unsigned long store_mfn, console_mfn;
- if (argc != 6)
+ if (argc != 9)
errx(1,
- "usage: %s iofd domid nr_pfns store_evtchn console_evtchn",
+ "usage: %s iofd domid nr_pfns store_evtchn console_evtchn hvm pae apic",
argv[0]);
xc_fd = xc_interface_open();
@@ -36,9 +37,19 @@ main(int argc, char **argv)
nr_pfns = atoi(argv[3]);
store_evtchn = atoi(argv[4]);
console_evtchn = atoi(argv[5]);
+ hvm = atoi(argv[6]);
+ pae = atoi(argv[7]);
+ apic = atoi(argv[8]);
+
+ if (hvm) {
+ /* pass the memsize to xc_hvm_restore to find the store_mfn */
+ store_mfn = hvm;
+ ret = xc_hvm_restore(xc_fd, io_fd, domid, nr_pfns, store_evtchn,
+ &store_mfn, console_evtchn, &console_mfn, pae, apic);
+ } else
+ ret = xc_linux_restore(xc_fd, io_fd, domid, nr_pfns, store_evtchn,
+ &store_mfn, console_evtchn, &console_mfn);
- ret = xc_linux_restore(xc_fd, io_fd, domid, nr_pfns, store_evtchn,
- &store_mfn, console_evtchn, &console_mfn);
if (ret == 0) {
printf("store-mfn %li\n", store_mfn);
printf("console-mfn %li\n", console_mfn);
diff --git a/tools/xcutils/xc_save.c b/tools/xcutils/xc_save.c
index c9be3454d4..0c9530da4d 100644
--- a/tools/xcutils/xc_save.c
+++ b/tools/xcutils/xc_save.c
@@ -51,7 +51,10 @@ main(int argc, char **argv)
max_f = atoi(argv[4]);
flags = atoi(argv[5]);
- ret = xc_linux_save(xc_fd, io_fd, domid, maxit, max_f, flags, &suspend);
+ if (flags & XCFLAGS_HVM)
+ ret = xc_hvm_save(xc_fd, io_fd, domid, maxit, max_f, flags, &suspend);
+ else
+ ret = xc_linux_save(xc_fd, io_fd, domid, maxit, max_f, flags, &suspend);
xc_interface_close(xc_fd);
diff --git a/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c b/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c
index 409783c0c9..321a166d05 100644
--- a/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c
+++ b/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c
@@ -182,12 +182,17 @@ static int get_hypercall_stubs(void)
static uint64_t get_callback_via(struct pci_dev *pdev)
{
#ifdef __ia64__
- int irq;
+ int irq, rid;
for (irq = 0; irq < 16; irq++) {
if (isa_irq_to_vector(irq) == pdev->irq)
return irq;
}
- return 0;
+ /* use Requester-ID as callback_irq */
+ /* RID: '<#bus(8)><#dev(5)><#func(3)>' (cf. PCI-Express spec) */
+ rid = ((pdev->bus->number & 0xff) << 8) | pdev->devfn;
+ printk(KERN_INFO DRV_NAME ":use Requester-ID(%04x) as callback irq\n",
+ rid);
+ return rid | IA64_CALLBACK_IRQ_RID;
#else /* !__ia64__ */
if (pdev->irq < 16)
return pdev->irq; /* ISA IRQ */
diff --git a/xen/arch/ia64/asm-offsets.c b/xen/arch/ia64/asm-offsets.c
index e0f502f662..571f396e96 100644
--- a/xen/arch/ia64/asm-offsets.c
+++ b/xen/arch/ia64/asm-offsets.c
@@ -56,10 +56,12 @@ void foo(void)
DEFINE(IA64_TASK_THREAD_ON_USTACK_OFFSET, offsetof (struct vcpu, arch._thread.on_ustack));
DEFINE(IA64_VCPU_DOMAIN_OFFSET, offsetof (struct vcpu, domain));
+ DEFINE(IA64_VCPU_HYPERCALL_CONTINUATION_OFS, offsetof (struct vcpu, arch.hypercall_continuation));
DEFINE(IA64_VCPU_META_RR0_OFFSET, offsetof (struct vcpu, arch.metaphysical_rr0));
DEFINE(IA64_VCPU_META_SAVED_RR0_OFFSET, offsetof (struct vcpu, arch.metaphysical_saved_rr0));
DEFINE(IA64_VCPU_BREAKIMM_OFFSET, offsetof (struct vcpu, arch.breakimm));
DEFINE(IA64_VCPU_IVA_OFFSET, offsetof (struct vcpu, arch.iva));
+ DEFINE(IA64_VCPU_EVENT_CALLBACK_IP_OFFSET, offsetof (struct vcpu, arch.event_callback_ip));
DEFINE(IA64_VCPU_IRR0_OFFSET, offsetof (struct vcpu, arch.irr[0]));
DEFINE(IA64_VCPU_IRR3_OFFSET, offsetof (struct vcpu, arch.irr[3]));
DEFINE(IA64_VCPU_INSVC3_OFFSET, offsetof (struct vcpu, arch.insvc[3]));
diff --git a/xen/arch/ia64/asm-xsi-offsets.c b/xen/arch/ia64/asm-xsi-offsets.c
index c4bfdf2a3c..e3696909da 100755
--- a/xen/arch/ia64/asm-xsi-offsets.c
+++ b/xen/arch/ia64/asm-xsi-offsets.c
@@ -62,7 +62,7 @@ void foo(void)
DEFINE_MAPPED_REG_OFS(XSI_ITV_OFS, itv);
DEFINE_MAPPED_REG_OFS(XSI_PTA_OFS, pta);
DEFINE_MAPPED_REG_OFS(XSI_PSR_IC_OFS, interrupt_collection_enabled);
- DEFINE_MAPPED_REG_OFS(XSI_INCOMPL_REGFR_OFS, incomplete_regframe);
+ DEFINE_MAPPED_REG_OFS(XSI_VPSR_PP_OFS, vpsr_pp);
DEFINE_MAPPED_REG_OFS(XSI_METAPHYS_OFS, metaphysical_mode);
DEFINE_MAPPED_REG_OFS(XSI_BANKNUM_OFS, banknum);
DEFINE_MAPPED_REG_OFS(XSI_BANK0_R16_OFS, bank0_regs[0]);
diff --git a/xen/arch/ia64/linux-xen/Makefile b/xen/arch/ia64/linux-xen/Makefile
index 980ea8e746..f4ee8c396b 100644
--- a/xen/arch/ia64/linux-xen/Makefile
+++ b/xen/arch/ia64/linux-xen/Makefile
@@ -1,3 +1,6 @@
+subdir-y += sn
+
+obj-y += cmdline.o
obj-y += efi.o
obj-y += entry.o
obj-y += irq_ia64.o
diff --git a/xen/arch/ia64/linux-xen/README.origin b/xen/arch/ia64/linux-xen/README.origin
index e667b05603..bfc3981e15 100644
--- a/xen/arch/ia64/linux-xen/README.origin
+++ b/xen/arch/ia64/linux-xen/README.origin
@@ -5,6 +5,7 @@
# (e.g. with #ifdef XEN or XEN in a comment) so that they can be
# easily updated to future versions of the corresponding Linux files.
+cmdline.c -> linux/lib/cmdline.c
efi.c -> linux/arch/ia64/kernel/efi.c
entry.h -> linux/arch/ia64/kernel/entry.h
entry.S -> linux/arch/ia64/kernel/entry.S
diff --git a/xen/arch/ia64/linux/cmdline.c b/xen/arch/ia64/linux-xen/cmdline.c
index b6dddcd808..48e6de1ccf 100644
--- a/xen/arch/ia64/linux/cmdline.c
+++ b/xen/arch/ia64/linux-xen/cmdline.c
@@ -15,7 +15,9 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
+#ifdef XEN
#include <xen/lib.h>
+#endif
/**
@@ -38,7 +40,11 @@ int get_option (char **str, int *pint)
if (!cur || !(*cur))
return 0;
+#ifndef XEN
*pint = simple_strtol (cur, str, 0);
+#else
+ *pint = simple_strtol (cur, (const char**)str, 0);
+#endif
if (cur == *str)
return 0;
if (**str == ',') {
@@ -96,7 +102,11 @@ char *get_options(const char *str, int nints, int *ints)
unsigned long long memparse (char *ptr, char **retptr)
{
+#ifndef XEN
unsigned long long ret = simple_strtoull (ptr, retptr, 0);
+#else
+ unsigned long long ret = simple_strtoull (ptr, (const char**)retptr, 0);
+#endif
switch (**retptr) {
case 'G':
diff --git a/xen/arch/ia64/linux-xen/entry.S b/xen/arch/ia64/linux-xen/entry.S
index 2c73fc5d2b..0f6aaf80d7 100644
--- a/xen/arch/ia64/linux-xen/entry.S
+++ b/xen/arch/ia64/linux-xen/entry.S
@@ -676,7 +676,9 @@ GLOBAL_ENTRY(ia64_ret_from_syscall)
cmp.ge p6,p7=r8,r0 // syscall executed successfully?
adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8
mov r10=r0 // clear error indication in r10
+#ifndef XEN
(p7) br.cond.spnt handle_syscall_error // handle potential syscall failure
+#endif
END(ia64_ret_from_syscall)
// fall through
/*
@@ -764,7 +766,9 @@ ENTRY(ia64_leave_syscall)
ld8 r19=[r2],PT(B6)-PT(LOADRS) // load ar.rsc value for "loadrs"
nop.i 0
;;
+#ifndef XEN
mov r16=ar.bsp // M2 get existing backing store pointer
+#endif
ld8 r18=[r2],PT(R9)-PT(B6) // load b6
#ifndef XEN
(p6) and r15=TIF_WORK_MASK,r31 // any work other than TIF_SYSCALL_TRACE?
@@ -814,7 +818,11 @@ ENTRY(ia64_leave_syscall)
mov f8=f0 // F clear f8
;;
ld8.fill r12=[r2] // M0|1 restore r12 (sp)
+#ifdef XEN
+ ld8.fill r2=[r3] // M0|1
+#else
ld8.fill r15=[r3] // M0|1 restore r15
+#endif
mov b6=r18 // I0 restore b6
#ifdef XEN
@@ -827,7 +835,9 @@ ENTRY(ia64_leave_syscall)
srlz.d // M0 ensure interruption collection is off (for cover)
shr.u r18=r19,16 // I0|1 get byte size of existing "dirty" partition
+#ifndef XEN
cover // B add current frame into dirty partition & set cr.ifs
+#endif
;;
(pUStk) ld4 r17=[r17] // M0|1 r17 = cpu_data->phys_stacked_size_p8
mov r19=ar.bsp // M2 get new backing store pointer
@@ -893,24 +903,19 @@ GLOBAL_ENTRY(ia64_leave_kernel)
.work_processed_kernel:
#ifdef XEN
;;
-(pUStk) ssm psr.i
-(pUStk) br.call.sptk.many b0=do_softirq
-(pUStk) rsm psr.i
- ;;
- alloc loc0=ar.pfs,0,1,1,0
- adds out0=16,r12
+(pUStk) ssm psr.i
+(pUStk) br.call.sptk.many b0=do_softirq
+(pUStk) ssm psr.i
+ ;;
+(pUStk) br.call.sptk.many b0=reflect_event
+ ;;
adds r7 = PT(EML_UNAT)+16,r12
;;
ld8 r7 = [r7]
;;
-(pUStk) br.call.sptk.many b0=reflect_event
-//(pUStk) br.call.sptk.many b0=deliver_pending_interrupt
- ;;
- mov ar.pfs=loc0
mov ar.unat=r7 /* load eml_unat */
mov r31=r0
-
#else
adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
;;
@@ -1184,8 +1189,11 @@ skip_rbs_switch:
mov cr.ipsr=r29 // M2
mov ar.pfs=r26 // I0
(pLvSys)mov r17=r0 // A clear r17 for leave_syscall, no-op otherwise
-
+#ifdef XEN
+ mov cr.ifs=r30 // M2
+#else
(p9) mov cr.ifs=r30 // M2
+#endif
mov b0=r21 // I0
(pLvSys)mov r18=r0 // A clear r18 for leave_syscall, no-op otherwise
@@ -1195,7 +1203,11 @@ skip_rbs_switch:
;;
(pUStk) mov ar.rnat=r24 // M2 must happen with RSE in lazy mode
nop 0
+#ifdef XEN
+(pLvSys)mov r15=r0
+#else
(pLvSys)mov r2=r0
+#endif
mov ar.rsc=r27 // M2
mov pr=r31,-1 // I0
@@ -1459,7 +1471,89 @@ GLOBAL_ENTRY(unw_init_running)
br.ret.sptk.many rp
END(unw_init_running)
-#ifndef XEN
+#ifdef XEN
+GLOBAL_ENTRY(ia64_do_multicall_call)
+ movl r2=ia64_hypercall_table;;
+ shladd r2=r38,3,r2;;
+ ld8 r2=[r2];;
+ mov b6=r2
+ br.sptk.many b6;;
+END(ia64_do_multicall_call)
+
+
+ .rodata
+ .align 8
+ .globl ia64_hypercall_table
+ia64_hypercall_table:
+ data8 do_ni_hypercall /* do_set_trap_table *//* 0 */
+ data8 do_ni_hypercall /* do_mmu_update */
+ data8 do_ni_hypercall /* do_set_gdt */
+ data8 do_ni_hypercall /* do_stack_switch */
+ data8 do_ni_hypercall /* do_set_callbacks */
+ data8 do_ni_hypercall /* do_fpu_taskswitch *//* 5 */
+ data8 do_sched_op_compat
+ data8 do_ni_hypercall
+ data8 do_ni_hypercall /* do_set_debugreg */
+ data8 do_ni_hypercall /* do_get_debugreg */
+ data8 do_ni_hypercall /* do_update_descriptor * 10 */
+ data8 do_ni_hypercall /* do_ni_hypercall */
+ data8 do_memory_op
+ data8 do_multicall
+ data8 do_ni_hypercall /* do_update_va_mapping */
+ data8 do_ni_hypercall /* do_set_timer_op */ /* 15 */
+ data8 do_ni_hypercall
+ data8 do_xen_version
+ data8 do_console_io
+ data8 do_ni_hypercall
+ data8 do_grant_table_op /* 20 */
+ data8 do_ni_hypercall /* do_vm_assist */
+ data8 do_ni_hypercall /* do_update_va_mapping_othe */
+ data8 do_ni_hypercall /* (x86 only) */
+ data8 do_ni_hypercall /* do_vcpu_op */
+ data8 do_ni_hypercall /* (x86_64 only) */ /* 25 */
+ data8 do_ni_hypercall /* do_mmuext_op */
+ data8 do_ni_hypercall /* do_acm_op */
+ data8 do_ni_hypercall /* do_nmi_op */
+ data8 do_sched_op
+ data8 do_callback_op /* */ /* 30 */
+ data8 do_xenoprof_op /* */
+ data8 do_event_channel_op
+ data8 do_physdev_op
+ data8 do_hvm_op /* */
+ data8 do_sysctl /* */ /* 35 */
+ data8 do_domctl /* */
+ data8 do_ni_hypercall /* */
+ data8 do_ni_hypercall /* */
+ data8 do_ni_hypercall /* */
+ data8 do_ni_hypercall /* */ /* 40 */
+ data8 do_ni_hypercall /* */
+ data8 do_ni_hypercall /* */
+ data8 do_ni_hypercall /* */
+ data8 do_ni_hypercall /* */
+ data8 do_ni_hypercall /* */ /* 45 */
+ data8 do_ni_hypercall /* */
+ data8 do_ni_hypercall /* */
+ data8 do_dom0vp_op /* dom0vp_op */
+ data8 do_pirq_guest_eoi /* arch_1 */
+ data8 do_ni_hypercall /* arch_2 */ /* 50 */
+ data8 do_ni_hypercall /* arch_3 */
+ data8 do_ni_hypercall /* arch_4 */
+ data8 do_ni_hypercall /* arch_5 */
+ data8 do_ni_hypercall /* arch_6 */
+ data8 do_ni_hypercall /* arch_7 */ /* 55 */
+ data8 do_ni_hypercall
+ data8 do_ni_hypercall
+ data8 do_ni_hypercall
+ data8 do_ni_hypercall
+ data8 do_ni_hypercall /* 60 */
+ data8 do_ni_hypercall
+ data8 do_ni_hypercall
+ data8 do_ni_hypercall
+
+ // guard against failures to increase NR_hypercalls
+ .org ia64_hypercall_table + 8*NR_hypercalls
+
+#else
.rodata
.align 8
.globl sys_call_table
diff --git a/xen/arch/ia64/linux-xen/sn/Makefile b/xen/arch/ia64/linux-xen/sn/Makefile
new file mode 100644
index 0000000000..96dd76f238
--- /dev/null
+++ b/xen/arch/ia64/linux-xen/sn/Makefile
@@ -0,0 +1 @@
+subdir-y += kernel
diff --git a/xen/arch/ia64/linux-xen/sn/kernel/Makefile b/xen/arch/ia64/linux-xen/sn/kernel/Makefile
new file mode 100644
index 0000000000..dc0d142533
--- /dev/null
+++ b/xen/arch/ia64/linux-xen/sn/kernel/Makefile
@@ -0,0 +1,5 @@
+obj-y += sn2_smp.o
+obj-y += setup.o
+obj-y += iomv.o
+obj-y += irq.o
+obj-y += io_init.o
diff --git a/xen/arch/ia64/linux-xen/sn/kernel/README.origin b/xen/arch/ia64/linux-xen/sn/kernel/README.origin
new file mode 100644
index 0000000000..312a90add1
--- /dev/null
+++ b/xen/arch/ia64/linux-xen/sn/kernel/README.origin
@@ -0,0 +1,12 @@
+# Source files in this directory are near-identical copies of linux-2.6.19
+# files:
+
+# NOTE: ALL changes to these files should be clearly marked
+# (e.g. with #ifdef XEN or XEN in a comment) so that they can be
+# easily updated to future versions of the corresponding Linux files.
+
+io_init.c -> linux/arch/ia64/sn/kernel/io_init.c
+iomv.c -> linux/arch/ia64/sn/kernel/iomv.c
+irq.c -> linux/arch/ia64/sn/kernel/irq.c
+setup.c -> linux/arch/ia64/sn/kernel/setup.c
+sn2_smp.c -> linux/arch/ia64/sn/kernel/sn2/sn2_smp.c
diff --git a/xen/arch/ia64/linux-xen/sn/kernel/io_init.c b/xen/arch/ia64/linux-xen/sn/kernel/io_init.c
new file mode 100644
index 0000000000..3f1f5b7a3c
--- /dev/null
+++ b/xen/arch/ia64/linux-xen/sn/kernel/io_init.c
@@ -0,0 +1,783 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/bootmem.h>
+#include <linux/nodemask.h>
+#ifdef XEN
+#include <linux/init.h>
+#endif
+#include <asm/sn/types.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/sn_feature_sets.h>
+#include <asm/sn/geo.h>
+#include <asm/sn/io.h>
+#include <asm/sn/l1.h>
+#include <asm/sn/module.h>
+#include <asm/sn/pcibr_provider.h>
+#include <asm/sn/pcibus_provider_defs.h>
+#ifndef XEN
+#include <asm/sn/pcidev.h>
+#endif
+#include <asm/sn/simulator.h>
+#include <asm/sn/sn_sal.h>
+#ifndef XEN
+#include <asm/sn/tioca_provider.h>
+#include <asm/sn/tioce_provider.h>
+#endif
+#ifdef XEN
+#include "asm/sn/hubdev.h"
+#include "asm/sn/xwidgetdev.h"
+#else
+#include "xtalk/hubdev.h"
+#include "xtalk/xwidgetdev.h"
+#endif
+
+
+extern void sn_init_cpei_timer(void);
+extern void register_sn_procfs(void);
+#ifdef XEN
+extern void sn_irq_lh_init(void);
+#endif
+
+static struct list_head sn_sysdata_list;
+
+/* sysdata list struct */
+struct sysdata_el {
+ struct list_head entry;
+ void *sysdata;
+};
+
+struct slab_info {
+ struct hubdev_info hubdev;
+};
+
+struct brick {
+ moduleid_t id; /* Module ID of this module */
+ struct slab_info slab_info[MAX_SLABS + 1];
+};
+
+int sn_ioif_inited; /* SN I/O infrastructure initialized? */
+
+struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */
+
+#ifndef XEN
+static int max_segment_number; /* Default highest segment number */
+static int max_pcibus_number = 255; /* Default highest pci bus number */
+
+/*
+ * Hooks and struct for unsupported pci providers
+ */
+
+static dma_addr_t
+sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size, int type)
+{
+ return 0;
+}
+
+static void
+sn_default_pci_unmap(struct pci_dev *pdev, dma_addr_t addr, int direction)
+{
+ return;
+}
+
+static void *
+sn_default_pci_bus_fixup(struct pcibus_bussoft *soft, struct pci_controller *controller)
+{
+ return NULL;
+}
+
+static struct sn_pcibus_provider sn_pci_default_provider = {
+ .dma_map = sn_default_pci_map,
+ .dma_map_consistent = sn_default_pci_map,
+ .dma_unmap = sn_default_pci_unmap,
+ .bus_fixup = sn_default_pci_bus_fixup,
+};
+#endif
+
+/*
+ * Retrieve the DMA Flush List given nasid, widget, and device.
+ * This list is needed to implement the WAR - Flush DMA data on PIO Reads.
+ */
+static inline u64
+sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num,
+ u64 address)
+{
+ struct ia64_sal_retval ret_stuff;
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+
+ SAL_CALL_NOLOCK(ret_stuff,
+ (u64) SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST,
+ (u64) nasid, (u64) widget_num,
+ (u64) device_num, (u64) address, 0, 0, 0);
+ return ret_stuff.status;
+}
+
+/*
+ * Retrieve the hub device info structure for the given nasid.
+ */
+static inline u64 sal_get_hubdev_info(u64 handle, u64 address)
+{
+ struct ia64_sal_retval ret_stuff;
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+
+ SAL_CALL_NOLOCK(ret_stuff,
+ (u64) SN_SAL_IOIF_GET_HUBDEV_INFO,
+ (u64) handle, (u64) address, 0, 0, 0, 0, 0);
+ return ret_stuff.v0;
+}
+
+/*
+ * Retrieve the pci bus information given the bus number.
+ */
+static inline u64 sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
+{
+ struct ia64_sal_retval ret_stuff;
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+
+ SAL_CALL_NOLOCK(ret_stuff,
+ (u64) SN_SAL_IOIF_GET_PCIBUS_INFO,
+ (u64) segment, (u64) busnum, (u64) address, 0, 0, 0, 0);
+ return ret_stuff.v0;
+}
+
+#ifndef XEN
+/*
+ * Retrieve the pci device information given the bus and device|function number.
+ */
+static inline u64
+sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
+ u64 sn_irq_info)
+{
+ struct ia64_sal_retval ret_stuff;
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+
+ SAL_CALL_NOLOCK(ret_stuff,
+ (u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
+ (u64) segment, (u64) bus_number, (u64) devfn,
+ (u64) pci_dev,
+ sn_irq_info, 0, 0);
+ return ret_stuff.v0;
+}
+
+/*
+ * sn_pcidev_info_get() - Retrieve the pcidev_info struct for the specified
+ * device.
+ */
+inline struct pcidev_info *
+sn_pcidev_info_get(struct pci_dev *dev)
+{
+ struct pcidev_info *pcidev;
+
+ list_for_each_entry(pcidev,
+ &(SN_PCI_CONTROLLER(dev)->pcidev_info), pdi_list) {
+ if (pcidev->pdi_linux_pcidev == dev) {
+ return pcidev;
+ }
+ }
+ return NULL;
+}
+
+/* Older PROM flush WAR
+ *
+ * 01/16/06 -- This war will be in place until a new official PROM is released.
+ * Additionally note that the struct sn_flush_device_war also has to be
+ * removed from arch/ia64/sn/include/xtalk/hubdev.h
+ */
+static u8 war_implemented = 0;
+
+static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device,
+ struct sn_flush_device_common *common)
+{
+ struct sn_flush_device_war *war_list;
+ struct sn_flush_device_war *dev_entry;
+ struct ia64_sal_retval isrv = {0,0,0,0};
+
+ if (!war_implemented) {
+ printk(KERN_WARNING "PROM version < 4.50 -- implementing old "
+ "PROM flush WAR\n");
+ war_implemented = 1;
+ }
+
+ war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL);
+ if (!war_list)
+ BUG();
+
+ SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST,
+ nasid, widget, __pa(war_list), 0, 0, 0 ,0);
+ if (isrv.status)
+ panic("sn_device_fixup_war failed: %s\n",
+ ia64_sal_strerror(isrv.status));
+
+ dev_entry = war_list + device;
+ memcpy(common,dev_entry, sizeof(*common));
+ kfree(war_list);
+
+ return isrv.status;
+}
+
+/*
+ * sn_fixup_ionodes() - This routine initializes the HUB data structure for
+ * each node in the system.
+ */
+static void __init sn_fixup_ionodes(void)
+{
+ struct sn_flush_device_kernel *sn_flush_device_kernel;
+ struct sn_flush_device_kernel *dev_entry;
+ struct hubdev_info *hubdev;
+ u64 status;
+ u64 nasid;
+ int i, widget, device, size;
+
+ /*
+ * Get SGI Specific HUB chipset information.
+ * Inform Prom that this kernel can support domain bus numbering.
+ */
+ for (i = 0; i < num_cnodes; i++) {
+ hubdev = (struct hubdev_info *)(NODEPDA(i)->pdinfo);
+ nasid = cnodeid_to_nasid(i);
+ hubdev->max_segment_number = 0xffffffff;
+ hubdev->max_pcibus_number = 0xff;
+ status = sal_get_hubdev_info(nasid, (u64) __pa(hubdev));
+ if (status)
+ continue;
+
+ /* Save the largest Domain and pcibus numbers found. */
+ if (hubdev->max_segment_number) {
+ /*
+ * Dealing with a Prom that supports segments.
+ */
+ max_segment_number = hubdev->max_segment_number;
+ max_pcibus_number = hubdev->max_pcibus_number;
+ }
+
+ /* Attach the error interrupt handlers */
+ if (nasid & 1)
+ ice_error_init(hubdev);
+ else
+ hub_error_init(hubdev);
+
+ for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++)
+ hubdev->hdi_xwidget_info[widget].xwi_hubinfo = hubdev;
+
+ if (!hubdev->hdi_flush_nasid_list.widget_p)
+ continue;
+
+ size = (HUB_WIDGET_ID_MAX + 1) *
+ sizeof(struct sn_flush_device_kernel *);
+ hubdev->hdi_flush_nasid_list.widget_p =
+ kzalloc(size, GFP_KERNEL);
+ if (!hubdev->hdi_flush_nasid_list.widget_p)
+ BUG();
+
+ for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) {
+ size = DEV_PER_WIDGET *
+ sizeof(struct sn_flush_device_kernel);
+ sn_flush_device_kernel = kzalloc(size, GFP_KERNEL);
+ if (!sn_flush_device_kernel)
+ BUG();
+
+ dev_entry = sn_flush_device_kernel;
+ for (device = 0; device < DEV_PER_WIDGET;
+ device++,dev_entry++) {
+ size = sizeof(struct sn_flush_device_common);
+ dev_entry->common = kzalloc(size, GFP_KERNEL);
+ if (!dev_entry->common)
+ BUG();
+
+ if (sn_prom_feature_available(
+ PRF_DEVICE_FLUSH_LIST))
+ status = sal_get_device_dmaflush_list(
+ nasid, widget, device,
+ (u64)(dev_entry->common));
+ else
+#ifdef XEN
+ BUG();
+#else
+ status = sn_device_fixup_war(nasid,
+ widget, device,
+ dev_entry->common);
+#endif
+ if (status != SALRET_OK)
+ panic("SAL call failed: %s\n",
+ ia64_sal_strerror(status));
+
+ spin_lock_init(&dev_entry->sfdl_flush_lock);
+ }
+
+ if (sn_flush_device_kernel)
+ hubdev->hdi_flush_nasid_list.widget_p[widget] =
+ sn_flush_device_kernel;
+ }
+ }
+}
+
+/*
+ * sn_pci_window_fixup() - Create a pci_window for each device resource.
+ * Until ACPI support is added, we need this code
+ * to setup pci_windows for use by
+ * pcibios_bus_to_resource(),
+ * pcibios_resource_to_bus(), etc.
+ */
+static void
+sn_pci_window_fixup(struct pci_dev *dev, unsigned int count,
+ s64 * pci_addrs)
+{
+ struct pci_controller *controller = PCI_CONTROLLER(dev->bus);
+ unsigned int i;
+ unsigned int idx;
+ unsigned int new_count;
+ struct pci_window *new_window;
+
+ if (count == 0)
+ return;
+ idx = controller->windows;
+ new_count = controller->windows + count;
+ new_window = kcalloc(new_count, sizeof(struct pci_window), GFP_KERNEL);
+ if (new_window == NULL)
+ BUG();
+ if (controller->window) {
+ memcpy(new_window, controller->window,
+ sizeof(struct pci_window) * controller->windows);
+ kfree(controller->window);
+ }
+
+ /* Setup a pci_window for each device resource. */
+ for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
+ if (pci_addrs[i] == -1)
+ continue;
+
+ new_window[idx].offset = dev->resource[i].start - pci_addrs[i];
+ new_window[idx].resource = dev->resource[i];
+ idx++;
+ }
+
+ controller->windows = new_count;
+ controller->window = new_window;
+}
+
+void sn_pci_unfixup_slot(struct pci_dev *dev)
+{
+ struct pci_dev *host_pci_dev = SN_PCIDEV_INFO(dev)->host_pci_dev;
+
+ sn_irq_unfixup(dev);
+ pci_dev_put(host_pci_dev);
+ pci_dev_put(dev);
+}
+
+/*
+ * sn_pci_fixup_slot() - This routine sets up a slot's resources
+ * consistent with the Linux PCI abstraction layer. Resources acquired
+ * from our PCI provider include PIO maps to BAR space and interrupt
+ * objects.
+ */
+void sn_pci_fixup_slot(struct pci_dev *dev)
+{
+ unsigned int count = 0;
+ int idx;
+ int segment = pci_domain_nr(dev->bus);
+ int status = 0;
+ struct pcibus_bussoft *bs;
+ struct pci_bus *host_pci_bus;
+ struct pci_dev *host_pci_dev;
+ struct pcidev_info *pcidev_info;
+ s64 pci_addrs[PCI_ROM_RESOURCE + 1];
+ struct sn_irq_info *sn_irq_info;
+ unsigned long size;
+ unsigned int bus_no, devfn;
+
+ pci_dev_get(dev); /* for the sysdata pointer */
+ pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
+ if (!pcidev_info)
+ BUG(); /* Cannot afford to run out of memory */
+
+ sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
+ if (!sn_irq_info)
+ BUG(); /* Cannot afford to run out of memory */
+
+ /* Call to retrieve pci device information needed by kernel. */
+ status = sal_get_pcidev_info((u64) segment, (u64) dev->bus->number,
+ dev->devfn,
+ (u64) __pa(pcidev_info),
+ (u64) __pa(sn_irq_info));
+ if (status)
+ BUG(); /* Cannot get platform pci device information */
+
+ /* Add pcidev_info to list in sn_pci_controller struct */
+ list_add_tail(&pcidev_info->pdi_list,
+ &(SN_PCI_CONTROLLER(dev->bus)->pcidev_info));
+
+ /* Copy over PIO Mapped Addresses */
+ for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
+ unsigned long start, end, addr;
+
+ if (!pcidev_info->pdi_pio_mapped_addr[idx]) {
+ pci_addrs[idx] = -1;
+ continue;
+ }
+
+ start = dev->resource[idx].start;
+ end = dev->resource[idx].end;
+ size = end - start;
+ if (size == 0) {
+ pci_addrs[idx] = -1;
+ continue;
+ }
+ pci_addrs[idx] = start;
+ count++;
+ addr = pcidev_info->pdi_pio_mapped_addr[idx];
+ addr = ((addr << 4) >> 4) | __IA64_UNCACHED_OFFSET;
+ dev->resource[idx].start = addr;
+ dev->resource[idx].end = addr + size;
+ if (dev->resource[idx].flags & IORESOURCE_IO)
+ dev->resource[idx].parent = &ioport_resource;
+ else
+ dev->resource[idx].parent = &iomem_resource;
+ }
+ /* Create a pci_window in the pci_controller struct for
+ * each device resource.
+ */
+ if (count > 0)
+ sn_pci_window_fixup(dev, count, pci_addrs);
+
+ /*
+ * Using the PROMs values for the PCI host bus, get the Linux
+ * PCI host_pci_dev struct and set up host bus linkages
+ */
+
+ bus_no = (pcidev_info->pdi_slot_host_handle >> 32) & 0xff;
+ devfn = pcidev_info->pdi_slot_host_handle & 0xffffffff;
+ host_pci_bus = pci_find_bus(segment, bus_no);
+ host_pci_dev = pci_get_slot(host_pci_bus, devfn);
+
+ pcidev_info->host_pci_dev = host_pci_dev;
+ pcidev_info->pdi_linux_pcidev = dev;
+ pcidev_info->pdi_host_pcidev_info = SN_PCIDEV_INFO(host_pci_dev);
+ bs = SN_PCIBUS_BUSSOFT(dev->bus);
+ pcidev_info->pdi_pcibus_info = bs;
+
+ if (bs && bs->bs_asic_type < PCIIO_ASIC_MAX_TYPES) {
+ SN_PCIDEV_BUSPROVIDER(dev) = sn_pci_provider[bs->bs_asic_type];
+ } else {
+ SN_PCIDEV_BUSPROVIDER(dev) = &sn_pci_default_provider;
+ }
+
+ /* Only set up IRQ stuff if this device has a host bus context */
+ if (bs && sn_irq_info->irq_irq) {
+ pcidev_info->pdi_sn_irq_info = sn_irq_info;
+ dev->irq = pcidev_info->pdi_sn_irq_info->irq_irq;
+ sn_irq_fixup(dev, sn_irq_info);
+ } else {
+ pcidev_info->pdi_sn_irq_info = NULL;
+ kfree(sn_irq_info);
+ }
+}
+
+/*
+ * sn_pci_controller_fixup() - This routine sets up a bus's resources
+ * consistent with the Linux PCI abstraction layer.
+ */
+void sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus)
+{
+ int status;
+ int nasid, cnode;
+ struct pci_controller *controller;
+ struct sn_pci_controller *sn_controller;
+ struct pcibus_bussoft *prom_bussoft_ptr;
+ struct hubdev_info *hubdev_info;
+ void *provider_soft;
+ struct sn_pcibus_provider *provider;
+
+ status = sal_get_pcibus_info((u64) segment, (u64) busnum,
+ (u64) ia64_tpa(&prom_bussoft_ptr));
+ if (status > 0)
+ return; /*bus # does not exist */
+ prom_bussoft_ptr = __va(prom_bussoft_ptr);
+
+ /* Allocate a sn_pci_controller, which has a pci_controller struct
+ * as the first member.
+ */
+ sn_controller = kzalloc(sizeof(struct sn_pci_controller), GFP_KERNEL);
+ if (!sn_controller)
+ BUG();
+ INIT_LIST_HEAD(&sn_controller->pcidev_info);
+ controller = &sn_controller->pci_controller;
+ controller->segment = segment;
+
+ if (bus == NULL) {
+ bus = pci_scan_bus(busnum, &pci_root_ops, controller);
+ if (bus == NULL)
+ goto error_return; /* error, or bus already scanned */
+ bus->sysdata = NULL;
+ }
+
+ if (bus->sysdata)
+ goto error_return; /* sysdata already alloc'd */
+
+ /*
+ * Per-provider fixup. Copies the contents from prom to local
+ * area and links SN_PCIBUS_BUSSOFT().
+ */
+
+ if (prom_bussoft_ptr->bs_asic_type >= PCIIO_ASIC_MAX_TYPES)
+ goto error_return; /* unsupported asic type */
+
+ if (prom_bussoft_ptr->bs_asic_type == PCIIO_ASIC_TYPE_PPB)
+ goto error_return; /* no further fixup necessary */
+
+ provider = sn_pci_provider[prom_bussoft_ptr->bs_asic_type];
+ if (provider == NULL)
+		goto error_return; /* no provider registered for this asic */
+
+ bus->sysdata = controller;
+ if (provider->bus_fixup)
+ provider_soft = (*provider->bus_fixup) (prom_bussoft_ptr, controller);
+ else
+ provider_soft = NULL;
+
+ if (provider_soft == NULL) {
+ /* fixup failed or not applicable */
+ bus->sysdata = NULL;
+ goto error_return;
+ }
+
+ /*
+ * Setup pci_windows for legacy IO and MEM space.
+ * (Temporary until ACPI support is in place.)
+ */
+ controller->window = kcalloc(2, sizeof(struct pci_window), GFP_KERNEL);
+ if (controller->window == NULL)
+ BUG();
+ controller->window[0].offset = prom_bussoft_ptr->bs_legacy_io;
+ controller->window[0].resource.name = "legacy_io";
+ controller->window[0].resource.flags = IORESOURCE_IO;
+ controller->window[0].resource.start = prom_bussoft_ptr->bs_legacy_io;
+ controller->window[0].resource.end =
+ controller->window[0].resource.start + 0xffff;
+ controller->window[0].resource.parent = &ioport_resource;
+ controller->window[1].offset = prom_bussoft_ptr->bs_legacy_mem;
+ controller->window[1].resource.name = "legacy_mem";
+ controller->window[1].resource.flags = IORESOURCE_MEM;
+ controller->window[1].resource.start = prom_bussoft_ptr->bs_legacy_mem;
+ controller->window[1].resource.end =
+ controller->window[1].resource.start + (1024 * 1024) - 1;
+ controller->window[1].resource.parent = &iomem_resource;
+ controller->windows = 2;
+
+ /*
+ * Generic bus fixup goes here. Don't reference prom_bussoft_ptr
+ * after this point.
+ */
+
+ PCI_CONTROLLER(bus)->platform_data = provider_soft;
+ nasid = NASID_GET(SN_PCIBUS_BUSSOFT(bus)->bs_base);
+ cnode = nasid_to_cnodeid(nasid);
+ hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
+ SN_PCIBUS_BUSSOFT(bus)->bs_xwidget_info =
+ &(hubdev_info->hdi_xwidget_info[SN_PCIBUS_BUSSOFT(bus)->bs_xid]);
+
+ /*
+ * If the node information we obtained during the fixup phase is invalid
+ * then set controller->node to -1 (undetermined)
+ */
+ if (controller->node >= num_online_nodes()) {
+ struct pcibus_bussoft *b = SN_PCIBUS_BUSSOFT(bus);
+
+ printk(KERN_WARNING "Device ASIC=%u XID=%u PBUSNUM=%u"
+ "L_IO=%lx L_MEM=%lx BASE=%lx\n",
+ b->bs_asic_type, b->bs_xid, b->bs_persist_busnum,
+ b->bs_legacy_io, b->bs_legacy_mem, b->bs_base);
+ printk(KERN_WARNING "on node %d but only %d nodes online."
+ "Association set to undetermined.\n",
+ controller->node, num_online_nodes());
+ controller->node = -1;
+ }
+ return;
+
+error_return:
+
+ kfree(sn_controller);
+ return;
+}
+
+void sn_bus_store_sysdata(struct pci_dev *dev)
+{
+ struct sysdata_el *element;
+
+ element = kzalloc(sizeof(struct sysdata_el), GFP_KERNEL);
+ if (!element) {
+ dev_dbg(dev, "%s: out of memory!\n", __FUNCTION__);
+ return;
+ }
+ element->sysdata = SN_PCIDEV_INFO(dev);
+ list_add(&element->entry, &sn_sysdata_list);
+}
+
+void sn_bus_free_sysdata(void)
+{
+ struct sysdata_el *element;
+ struct list_head *list, *safe;
+
+ list_for_each_safe(list, safe, &sn_sysdata_list) {
+ element = list_entry(list, struct sysdata_el, entry);
+ list_del(&element->entry);
+ list_del(&(((struct pcidev_info *)
+ (element->sysdata))->pdi_list));
+ kfree(element->sysdata);
+ kfree(element);
+ }
+ return;
+}
+#endif
+
+/*
+ * Ugly hack to get PCI setup until we have a proper ACPI namespace.
+ */
+
+#define PCI_BUSES_TO_SCAN 256
+
+static int __init sn_pci_init(void)
+{
+#ifndef XEN
+ int i, j;
+ struct pci_dev *pci_dev = NULL;
+#endif
+
+ if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM())
+ return 0;
+
+#ifndef XEN
+ /*
+	 * prime sn_pci_provider[]. Individual provider init routines will
+ * override their respective default entries.
+ */
+
+ for (i = 0; i < PCIIO_ASIC_MAX_TYPES; i++)
+ sn_pci_provider[i] = &sn_pci_default_provider;
+
+ pcibr_init_provider();
+ tioca_init_provider();
+ tioce_init_provider();
+#endif
+
+ /*
+ * This is needed to avoid bounce limit checks in the blk layer
+ */
+ ia64_max_iommu_merge_mask = ~PAGE_MASK;
+#ifndef XEN
+ sn_fixup_ionodes();
+#endif
+ sn_irq_lh_init();
+ INIT_LIST_HEAD(&sn_sysdata_list);
+#ifndef XEN
+ sn_init_cpei_timer();
+
+#ifdef CONFIG_PROC_FS
+ register_sn_procfs();
+#endif
+
+ /* busses are not known yet ... */
+ for (i = 0; i <= max_segment_number; i++)
+ for (j = 0; j <= max_pcibus_number; j++)
+ sn_pci_controller_fixup(i, j, NULL);
+
+ /*
+ * Generic Linux PCI Layer has created the pci_bus and pci_dev
+	 * structures - time for us to add our SN Platform specific
+ * information.
+ */
+
+ while ((pci_dev =
+ pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pci_dev)) != NULL)
+ sn_pci_fixup_slot(pci_dev);
+#endif
+
+ sn_ioif_inited = 1; /* sn I/O infrastructure now initialized */
+
+ return 0;
+}
+
+/*
+ * hubdev_init_node() - Creates the HUB data structure and links them to its
+ * own NODE specific data area.
+ */
+void hubdev_init_node(nodepda_t * npda, cnodeid_t node)
+{
+ struct hubdev_info *hubdev_info;
+ int size;
+#ifndef XEN
+ pg_data_t *pg;
+#else
+ struct pglist_data *pg;
+#endif
+
+ size = sizeof(struct hubdev_info);
+
+ if (node >= num_online_nodes()) /* Headless/memless IO nodes */
+ pg = NODE_DATA(0);
+ else
+ pg = NODE_DATA(node);
+
+ hubdev_info = (struct hubdev_info *)alloc_bootmem_node(pg, size);
+
+ npda->pdinfo = (void *)hubdev_info;
+}
+
+geoid_t
+cnodeid_get_geoid(cnodeid_t cnode)
+{
+ struct hubdev_info *hubdev;
+
+ hubdev = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
+ return hubdev->hdi_geoid;
+}
+
+#ifndef XEN
+void sn_generate_path(struct pci_bus *pci_bus, char *address)
+{
+ nasid_t nasid;
+ cnodeid_t cnode;
+ geoid_t geoid;
+ moduleid_t moduleid;
+ u16 bricktype;
+
+ nasid = NASID_GET(SN_PCIBUS_BUSSOFT(pci_bus)->bs_base);
+ cnode = nasid_to_cnodeid(nasid);
+ geoid = cnodeid_get_geoid(cnode);
+ moduleid = geo_module(geoid);
+
+ sprintf(address, "module_%c%c%c%c%.2d",
+ '0'+RACK_GET_CLASS(MODULE_GET_RACK(moduleid)),
+ '0'+RACK_GET_GROUP(MODULE_GET_RACK(moduleid)),
+ '0'+RACK_GET_NUM(MODULE_GET_RACK(moduleid)),
+ MODULE_GET_BTCHAR(moduleid), MODULE_GET_BPOS(moduleid));
+
+ /* Tollhouse requires slot id to be displayed */
+ bricktype = MODULE_GET_BTYPE(moduleid);
+ if ((bricktype == L1_BRICKTYPE_191010) ||
+ (bricktype == L1_BRICKTYPE_1932))
+ sprintf(address, "%s^%d", address, geo_slot(geoid));
+}
+#endif
+
+#ifdef XEN
+__initcall(sn_pci_init);
+#else
+subsys_initcall(sn_pci_init);
+#endif
+#ifndef XEN
+EXPORT_SYMBOL(sn_pci_fixup_slot);
+EXPORT_SYMBOL(sn_pci_unfixup_slot);
+EXPORT_SYMBOL(sn_pci_controller_fixup);
+EXPORT_SYMBOL(sn_bus_store_sysdata);
+EXPORT_SYMBOL(sn_bus_free_sysdata);
+EXPORT_SYMBOL(sn_generate_path);
+#endif
diff --git a/xen/arch/ia64/linux-xen/sn/kernel/iomv.c b/xen/arch/ia64/linux-xen/sn/kernel/iomv.c
new file mode 100644
index 0000000000..3e38cb05e0
--- /dev/null
+++ b/xen/arch/ia64/linux-xen/sn/kernel/iomv.c
@@ -0,0 +1,82 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <asm/io.h>
+#include <asm/delay.h>
+#ifndef XEN
+#include <asm/vga.h>
+#endif
+#include <asm/sn/nodepda.h>
+#include <asm/sn/simulator.h>
+#include <asm/sn/pda.h>
+#include <asm/sn/sn_cpuid.h>
+#include <asm/sn/shub_mmr.h>
+
+#define IS_LEGACY_VGA_IOPORT(p) \
+ (((p) >= 0x3b0 && (p) <= 0x3bb) || ((p) >= 0x3c0 && (p) <= 0x3df))
+
+#ifdef XEN
+#define vga_console_iobase 0
+#endif
+
+/**
+ * sn_io_addr - convert an in/out port to an i/o address
+ * @port: port to convert
+ *
+ * Legacy in/out instructions are converted to ld/st instructions
+ * on IA64. This routine will convert a port number into a valid
+ * SN i/o address. Used by sn_in*() and sn_out*().
+ */
+void *sn_io_addr(unsigned long port)
+{
+ if (!IS_RUNNING_ON_SIMULATOR()) {
+ if (IS_LEGACY_VGA_IOPORT(port))
+ port += vga_console_iobase;
+ /* On sn2, legacy I/O ports don't point at anything */
+ if (port < (64 * 1024))
+ return NULL;
+ return ((void *)(port | __IA64_UNCACHED_OFFSET));
+ } else {
+ /* but the simulator uses them... */
+ unsigned long addr;
+
+ /*
+ * word align port, but need more than 10 bits
+ * for accessing registers in bedrock local block
+ * (so we don't do port&0xfff)
+ */
+ addr = (is_shub2() ? 0xc00000028c000000UL : 0xc0000087cc000000UL) | ((port >> 2) << 12);
+ if ((port >= 0x1f0 && port <= 0x1f7) || port == 0x3f6 || port == 0x3f7)
+ addr |= port;
+ return (void *)addr;
+ }
+}
+
+EXPORT_SYMBOL(sn_io_addr);
+
+/**
+ * __sn_mmiowb - I/O space memory barrier
+ *
+ * See include/asm-ia64/io.h and Documentation/DocBook/deviceiobook.tmpl
+ * for details.
+ *
+ * On SN2, we wait for the PIO_WRITE_STATUS SHub register to clear.
+ * See PV 871084 for details about the WAR about zero value.
+ *
+ */
+void __sn_mmiowb(void)
+{
+ volatile unsigned long *adr = pda->pio_write_status_addr;
+ unsigned long val = pda->pio_write_status_val;
+
+ while ((*adr & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) != val)
+ cpu_relax();
+}
+
+EXPORT_SYMBOL(__sn_mmiowb);
diff --git a/xen/arch/ia64/linux-xen/sn/kernel/irq.c b/xen/arch/ia64/linux-xen/sn/kernel/irq.c
new file mode 100644
index 0000000000..cc126d71ee
--- /dev/null
+++ b/xen/arch/ia64/linux-xen/sn/kernel/irq.c
@@ -0,0 +1,542 @@
+/*
+ * Platform dependent support for SGI SN
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+#include <linux/irq.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#ifdef XEN
+#include <linux/pci.h>
+#include <asm/hw_irq.h>
+#endif
+#include <asm/sn/addrs.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/intr.h>
+#include <asm/sn/pcibr_provider.h>
+#include <asm/sn/pcibus_provider_defs.h>
+#ifndef XEN
+#include <asm/sn/pcidev.h>
+#endif
+#include <asm/sn/shub_mmr.h>
+#include <asm/sn/sn_sal.h>
+
+#ifdef XEN
+#define move_native_irq(foo) do {} while(0)
+#endif
+
+static void force_interrupt(int irq);
+#ifndef XEN
+static void register_intr_pda(struct sn_irq_info *sn_irq_info);
+static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);
+#endif
+
+int sn_force_interrupt_flag = 1;
+extern int sn_ioif_inited;
+struct list_head **sn_irq_lh;
+static DEFINE_SPINLOCK(sn_irq_info_lock); /* non-IRQ lock */
+
+u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
+ struct sn_irq_info *sn_irq_info,
+ int req_irq, nasid_t req_nasid,
+ int req_slice)
+{
+ struct ia64_sal_retval ret_stuff;
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+
+ SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
+ (u64) SAL_INTR_ALLOC, (u64) local_nasid,
+ (u64) local_widget, __pa(sn_irq_info), (u64) req_irq,
+ (u64) req_nasid, (u64) req_slice);
+
+ return ret_stuff.status;
+}
+
+void sn_intr_free(nasid_t local_nasid, int local_widget,
+ struct sn_irq_info *sn_irq_info)
+{
+ struct ia64_sal_retval ret_stuff;
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+
+ SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
+ (u64) SAL_INTR_FREE, (u64) local_nasid,
+ (u64) local_widget, (u64) sn_irq_info->irq_irq,
+ (u64) sn_irq_info->irq_cookie, 0, 0);
+}
+
+static unsigned int sn_startup_irq(unsigned int irq)
+{
+ return 0;
+}
+
+static void sn_shutdown_irq(unsigned int irq)
+{
+}
+
+static void sn_disable_irq(unsigned int irq)
+{
+}
+
+static void sn_enable_irq(unsigned int irq)
+{
+}
+
+static void sn_ack_irq(unsigned int irq)
+{
+ u64 event_occurred, mask;
+
+ irq = irq & 0xff;
+ event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED));
+ mask = event_occurred & SH_ALL_INT_MASK;
+ HUB_S((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), mask);
+ __set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);
+
+ move_native_irq(irq);
+}
+
+static void sn_end_irq(unsigned int irq)
+{
+ int ivec;
+ u64 event_occurred;
+
+ ivec = irq & 0xff;
+ if (ivec == SGI_UART_VECTOR) {
+ event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR (SH_EVENT_OCCURRED));
+ /* If the UART bit is set here, we may have received an
+ * interrupt from the UART that the driver missed. To
+ * make sure, we IPI ourselves to force us to look again.
+ */
+ if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
+ platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR,
+ IA64_IPI_DM_INT, 0);
+ }
+ }
+ __clear_bit(ivec, (volatile void *)pda->sn_in_service_ivecs);
+ if (sn_force_interrupt_flag)
+ force_interrupt(irq);
+}
+
+#ifndef XEN
+static void sn_irq_info_free(struct rcu_head *head);
+
+struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
+ nasid_t nasid, int slice)
+{
+ int vector;
+ int cpuphys;
+ int64_t bridge;
+ int local_widget, status;
+ nasid_t local_nasid;
+ struct sn_irq_info *new_irq_info;
+ struct sn_pcibus_provider *pci_provider;
+
+ new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
+ if (new_irq_info == NULL)
+ return NULL;
+
+ memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));
+
+ bridge = (u64) new_irq_info->irq_bridge;
+ if (!bridge) {
+ kfree(new_irq_info);
+ return NULL; /* irq is not a device interrupt */
+ }
+
+ local_nasid = NASID_GET(bridge);
+
+ if (local_nasid & 1)
+ local_widget = TIO_SWIN_WIDGETNUM(bridge);
+ else
+ local_widget = SWIN_WIDGETNUM(bridge);
+
+ vector = sn_irq_info->irq_irq;
+ /* Free the old PROM new_irq_info structure */
+ sn_intr_free(local_nasid, local_widget, new_irq_info);
+ /* Update kernels new_irq_info with new target info */
+ unregister_intr_pda(new_irq_info);
+
+ /* allocate a new PROM new_irq_info struct */
+ status = sn_intr_alloc(local_nasid, local_widget,
+ new_irq_info, vector,
+ nasid, slice);
+
+ /* SAL call failed */
+ if (status) {
+ kfree(new_irq_info);
+ return NULL;
+ }
+
+ cpuphys = nasid_slice_to_cpuid(nasid, slice);
+ new_irq_info->irq_cpuid = cpuphys;
+ register_intr_pda(new_irq_info);
+
+ pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];
+
+ /*
+ * If this represents a line interrupt, target it. If it's
+ * an msi (irq_int_bit < 0), it's already targeted.
+ */
+ if (new_irq_info->irq_int_bit >= 0 &&
+ pci_provider && pci_provider->target_interrupt)
+ (pci_provider->target_interrupt)(new_irq_info);
+
+ spin_lock(&sn_irq_info_lock);
+#ifdef XEN
+ list_replace(&sn_irq_info->list, &new_irq_info->list);
+#else
+ list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
+#endif
+ spin_unlock(&sn_irq_info_lock);
+#ifndef XEN
+ call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
+#endif
+
+#ifdef CONFIG_SMP
+ set_irq_affinity_info((vector & 0xff), cpuphys, 0);
+#endif
+
+ return new_irq_info;
+}
+
+static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
+{
+ struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
+ nasid_t nasid;
+ int slice;
+
+ nasid = cpuid_to_nasid(first_cpu(mask));
+ slice = cpuid_to_slice(first_cpu(mask));
+
+ list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
+ sn_irq_lh[irq], list)
+ (void)sn_retarget_vector(sn_irq_info, nasid, slice);
+}
+#endif
+
+struct hw_interrupt_type irq_type_sn = {
+#ifndef XEN
+ .name = "SN hub",
+#endif
+ .startup = sn_startup_irq,
+ .shutdown = sn_shutdown_irq,
+ .enable = sn_enable_irq,
+ .disable = sn_disable_irq,
+ .ack = sn_ack_irq,
+ .end = sn_end_irq,
+#ifndef XEN
+ .set_affinity = sn_set_affinity_irq
+#endif
+};
+
+unsigned int sn_local_vector_to_irq(u8 vector)
+{
+ return (CPU_VECTOR_TO_IRQ(smp_processor_id(), vector));
+}
+
+void sn_irq_init(void)
+{
+#ifndef XEN
+ int i;
+ irq_desc_t *base_desc = irq_desc;
+
+ ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR;
+ ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR;
+
+ for (i = 0; i < NR_IRQS; i++) {
+ if (base_desc[i].chip == &no_irq_type) {
+ base_desc[i].chip = &irq_type_sn;
+ }
+ }
+#endif
+}
+
+#ifndef XEN
+static void register_intr_pda(struct sn_irq_info *sn_irq_info)
+{
+ int irq = sn_irq_info->irq_irq;
+ int cpu = sn_irq_info->irq_cpuid;
+
+ if (pdacpu(cpu)->sn_last_irq < irq) {
+ pdacpu(cpu)->sn_last_irq = irq;
+ }
+
+ if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq)
+ pdacpu(cpu)->sn_first_irq = irq;
+}
+
+static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
+{
+ int irq = sn_irq_info->irq_irq;
+ int cpu = sn_irq_info->irq_cpuid;
+ struct sn_irq_info *tmp_irq_info;
+ int i, foundmatch;
+
+#ifndef XEN
+ rcu_read_lock();
+#else
+ spin_lock(&sn_irq_info_lock);
+#endif
+ if (pdacpu(cpu)->sn_last_irq == irq) {
+ foundmatch = 0;
+ for (i = pdacpu(cpu)->sn_last_irq - 1;
+ i && !foundmatch; i--) {
+#ifdef XEN
+ list_for_each_entry(tmp_irq_info,
+ sn_irq_lh[i],
+ list) {
+#else
+ list_for_each_entry_rcu(tmp_irq_info,
+ sn_irq_lh[i],
+ list) {
+#endif
+ if (tmp_irq_info->irq_cpuid == cpu) {
+ foundmatch = 1;
+ break;
+ }
+ }
+ }
+ pdacpu(cpu)->sn_last_irq = i;
+ }
+
+ if (pdacpu(cpu)->sn_first_irq == irq) {
+ foundmatch = 0;
+ for (i = pdacpu(cpu)->sn_first_irq + 1;
+ i < NR_IRQS && !foundmatch; i++) {
+#ifdef XEN
+ list_for_each_entry(tmp_irq_info,
+ sn_irq_lh[i],
+ list) {
+#else
+ list_for_each_entry_rcu(tmp_irq_info,
+ sn_irq_lh[i],
+ list) {
+#endif
+ if (tmp_irq_info->irq_cpuid == cpu) {
+ foundmatch = 1;
+ break;
+ }
+ }
+ }
+ pdacpu(cpu)->sn_first_irq = ((i == NR_IRQS) ? 0 : i);
+ }
+#ifndef XEN
+ rcu_read_unlock();
+#else
+ spin_unlock(&sn_irq_info_lock);
+#endif
+}
+#endif /* XEN */
+
+#ifndef XEN
+static void sn_irq_info_free(struct rcu_head *head)
+{
+ struct sn_irq_info *sn_irq_info;
+
+ sn_irq_info = container_of(head, struct sn_irq_info, rcu);
+ kfree(sn_irq_info);
+}
+#endif
+
+#ifndef XEN
+void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
+{
+ nasid_t nasid = sn_irq_info->irq_nasid;
+ int slice = sn_irq_info->irq_slice;
+ int cpu = nasid_slice_to_cpuid(nasid, slice);
+
+ pci_dev_get(pci_dev);
+ sn_irq_info->irq_cpuid = cpu;
+ sn_irq_info->irq_pciioinfo = SN_PCIDEV_INFO(pci_dev);
+
+ /* link it into the sn_irq[irq] list */
+ spin_lock(&sn_irq_info_lock);
+#ifdef XEN
+ list_add(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
+#else
+ list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
+#endif
+#ifndef XEN
+ reserve_irq_vector(sn_irq_info->irq_irq);
+#endif
+ spin_unlock(&sn_irq_info_lock);
+
+ register_intr_pda(sn_irq_info);
+}
+
+void sn_irq_unfixup(struct pci_dev *pci_dev)
+{
+ struct sn_irq_info *sn_irq_info;
+
+ /* Only cleanup IRQ stuff if this device has a host bus context */
+ if (!SN_PCIDEV_BUSSOFT(pci_dev))
+ return;
+
+ sn_irq_info = SN_PCIDEV_INFO(pci_dev)->pdi_sn_irq_info;
+ if (!sn_irq_info)
+ return;
+ if (!sn_irq_info->irq_irq) {
+ kfree(sn_irq_info);
+ return;
+ }
+
+ unregister_intr_pda(sn_irq_info);
+ spin_lock(&sn_irq_info_lock);
+#ifdef XEN
+ list_del(&sn_irq_info->list);
+#else
+ list_del_rcu(&sn_irq_info->list);
+#endif
+ spin_unlock(&sn_irq_info_lock);
+ if (list_empty(sn_irq_lh[sn_irq_info->irq_irq]))
+ free_irq_vector(sn_irq_info->irq_irq);
+#ifndef XEN
+ call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
+#endif
+ pci_dev_put(pci_dev);
+
+}
+#endif
+
+static inline void
+sn_call_force_intr_provider(struct sn_irq_info *sn_irq_info)
+{
+ struct sn_pcibus_provider *pci_provider;
+
+ pci_provider = sn_pci_provider[sn_irq_info->irq_bridge_type];
+ if (pci_provider && pci_provider->force_interrupt)
+ (*pci_provider->force_interrupt)(sn_irq_info);
+}
+
+static void force_interrupt(int irq)
+{
+ struct sn_irq_info *sn_irq_info;
+
+#ifndef XEN
+ if (!sn_ioif_inited)
+ return;
+#endif
+
+#ifdef XEN
+ spin_lock(&sn_irq_info_lock);
+#else
+ rcu_read_lock();
+#endif
+#ifdef XEN
+ list_for_each_entry(sn_irq_info, sn_irq_lh[irq], list)
+#else
+ list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[irq], list)
+#endif
+ sn_call_force_intr_provider(sn_irq_info);
+
+#ifdef XEN
+ spin_unlock(&sn_irq_info_lock);
+#else
+ rcu_read_unlock();
+#endif
+}
+
+#ifndef XEN
+/*
+ * Check for lost interrupts. If the PIC int_status reg. says that
+ * an interrupt has been sent, but not handled, and the interrupt
+ * is not pending in either the cpu irr regs or in the soft irr regs,
+ * and the interrupt is not in service, then the interrupt may have
+ * been lost. Force an interrupt on that pin. It is possible that
+ * the interrupt is in flight, so we may generate a spurious interrupt,
+ * but we should never miss a real lost interrupt.
+ */
+static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
+{
+ u64 regval;
+ struct pcidev_info *pcidev_info;
+ struct pcibus_info *pcibus_info;
+
+ /*
+ * Bridge types attached to TIO (anything but PIC) do not need this WAR
+ * since they do not target Shub II interrupt registers. If that
+	 * ever changes, this check needs to accommodate.
+ */
+ if (sn_irq_info->irq_bridge_type != PCIIO_ASIC_TYPE_PIC)
+ return;
+
+ pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
+ if (!pcidev_info)
+ return;
+
+ pcibus_info =
+ (struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
+ pdi_pcibus_info;
+ regval = pcireg_intr_status_get(pcibus_info);
+
+ if (!ia64_get_irr(irq_to_vector(irq))) {
+ if (!test_bit(irq, pda->sn_in_service_ivecs)) {
+ regval &= 0xff;
+ if (sn_irq_info->irq_int_bit & regval &
+ sn_irq_info->irq_last_intr) {
+ regval &= ~(sn_irq_info->irq_int_bit & regval);
+ sn_call_force_intr_provider(sn_irq_info);
+ }
+ }
+ }
+ sn_irq_info->irq_last_intr = regval;
+}
+#endif
+
+void sn_lb_int_war_check(void)
+{
+#ifndef XEN
+ struct sn_irq_info *sn_irq_info;
+ int i;
+
+#ifdef XEN
+ if (pda->sn_first_irq == 0)
+#else
+ if (!sn_ioif_inited || pda->sn_first_irq == 0)
+#endif
+ return;
+
+#ifdef XEN
+ spin_lock(&sn_irq_info_lock);
+#else
+ rcu_read_lock();
+#endif
+ for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) {
+#ifdef XEN
+ list_for_each_entry(sn_irq_info, sn_irq_lh[i], list) {
+#else
+ list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[i], list) {
+#endif
+ sn_check_intr(i, sn_irq_info);
+ }
+ }
+#ifdef XEN
+ spin_unlock(&sn_irq_info_lock);
+#else
+ rcu_read_unlock();
+#endif
+#endif
+}
+
+void __init sn_irq_lh_init(void)
+{
+ int i;
+
+ sn_irq_lh = kmalloc(sizeof(struct list_head *) * NR_IRQS, GFP_KERNEL);
+ if (!sn_irq_lh)
+ panic("SN PCI INIT: Failed to allocate memory for PCI init\n");
+
+ for (i = 0; i < NR_IRQS; i++) {
+ sn_irq_lh[i] = kmalloc(sizeof(struct list_head), GFP_KERNEL);
+ if (!sn_irq_lh[i])
+ panic("SN PCI INIT: Failed IRQ memory allocation\n");
+
+ INIT_LIST_HEAD(sn_irq_lh[i]);
+ }
+}
diff --git a/xen/arch/ia64/linux-xen/sn/kernel/setup.c b/xen/arch/ia64/linux-xen/sn/kernel/setup.c
new file mode 100644
index 0000000000..e1d48fa9fd
--- /dev/null
+++ b/xen/arch/ia64/linux-xen/sn/kernel/setup.c
@@ -0,0 +1,808 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999,2001-2006 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#ifndef XEN
+#include <linux/kdev_t.h>
+#endif
+#include <linux/string.h>
+#ifndef XEN
+#include <linux/screen_info.h>
+#endif
+#include <linux/console.h>
+#include <linux/timex.h>
+#include <linux/sched.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/serial.h>
+#include <linux/irq.h>
+#include <linux/bootmem.h>
+#include <linux/mmzone.h>
+#include <linux/interrupt.h>
+#include <linux/acpi.h>
+#include <linux/compiler.h>
+#include <linux/sched.h>
+#ifndef XEN
+#include <linux/root_dev.h>
+#endif
+#include <linux/nodemask.h>
+#include <linux/pm.h>
+#include <linux/efi.h>
+
+#include <asm/io.h>
+#include <asm/sal.h>
+#include <asm/machvec.h>
+#include <asm/system.h>
+#include <asm/processor.h>
+#ifndef XEN
+#include <asm/vga.h>
+#endif
+#include <asm/sn/arch.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/pda.h>
+#include <asm/sn/nodepda.h>
+#include <asm/sn/sn_cpuid.h>
+#include <asm/sn/simulator.h>
+#include <asm/sn/leds.h>
+#ifndef XEN
+#include <asm/sn/bte.h>
+#endif
+#include <asm/sn/shub_mmr.h>
+#ifndef XEN
+#include <asm/sn/clksupport.h>
+#endif
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/geo.h>
+#include <asm/sn/sn_feature_sets.h>
+#ifndef XEN
+#include "xtalk/xwidgetdev.h"
+#include "xtalk/hubdev.h"
+#else
+#include "asm/sn/xwidgetdev.h"
+#include "asm/sn/hubdev.h"
+#endif
+#include <asm/sn/klconfig.h>
+#ifdef XEN
+#include <asm/sn/shubio.h>
+
+/* Xen has no clue about NUMA .... grrrr */
+#define pxm_to_node(foo) 0
+#define node_to_pxm(foo) 0
+#define numa_node_id() 0
+#endif
+
+
+DEFINE_PER_CPU(struct pda_s, pda_percpu);
+
+#define MAX_PHYS_MEMORY (1UL << IA64_MAX_PHYS_BITS) /* Max physical address supported */
+
+extern void bte_init_node(nodepda_t *, cnodeid_t);
+
+extern void sn_timer_init(void);
+extern unsigned long last_time_offset;
+extern void (*ia64_mark_idle) (int);
+extern void snidle(int);
+extern unsigned long long (*ia64_printk_clock)(void);
+
+unsigned long sn_rtc_cycles_per_second;
+EXPORT_SYMBOL(sn_rtc_cycles_per_second);
+
+DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
+EXPORT_PER_CPU_SYMBOL(__sn_hub_info);
+
+DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]);
+EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid);
+
+DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda);
+EXPORT_PER_CPU_SYMBOL(__sn_nodepda);
+
+char sn_system_serial_number_string[128];
+EXPORT_SYMBOL(sn_system_serial_number_string);
+u64 sn_partition_serial_number;
+EXPORT_SYMBOL(sn_partition_serial_number);
+u8 sn_partition_id;
+EXPORT_SYMBOL(sn_partition_id);
+u8 sn_system_size;
+EXPORT_SYMBOL(sn_system_size);
+u8 sn_sharing_domain_size;
+EXPORT_SYMBOL(sn_sharing_domain_size);
+u8 sn_coherency_id;
+EXPORT_SYMBOL(sn_coherency_id);
+u8 sn_region_size;
+EXPORT_SYMBOL(sn_region_size);
+int sn_prom_type; /* 0=hardware, 1=medusa/realprom, 2=medusa/fakeprom */
+
+short physical_node_map[MAX_NUMALINK_NODES];
+static unsigned long sn_prom_features[MAX_PROM_FEATURE_SETS];
+
+EXPORT_SYMBOL(physical_node_map);
+
+int num_cnodes;
+
+static void sn_init_pdas(char **);
+static void build_cnode_tables(void);
+
+static nodepda_t *nodepdaindr[MAX_COMPACT_NODES];
+
+#ifndef XEN
+/*
+ * The format of "screen_info" is strange, and due to early i386-setup
+ * code. This is just enough to make the console code think we're on a
+ * VGA color display.
+ */
+struct screen_info sn_screen_info = {
+ .orig_x = 0,
+ .orig_y = 0,
+ .orig_video_mode = 3,
+ .orig_video_cols = 80,
+ .orig_video_ega_bx = 3,
+ .orig_video_lines = 25,
+ .orig_video_isVGA = 1,
+ .orig_video_points = 16
+};
+#endif
+
+/*
+ * This routine can only be used during init, since
+ * smp_boot_data is an init data structure.
+ * We have to use smp_boot_data.cpu_phys_id to find
+ * the physical id of the processor because the normal
+ * cpu_physical_id() relies on data structures that
+ * may not be initialized yet.
+ */
+
+static int __init pxm_to_nasid(int pxm)
+{
+ int i;
+ int nid;
+
+ nid = pxm_to_node(pxm);
+ for (i = 0; i < num_node_memblks; i++) {
+ if (node_memblk[i].nid == nid) {
+ return NASID_GET(node_memblk[i].start_paddr);
+ }
+ }
+ return -1;
+}
+
+/**
+ * early_sn_setup - early setup routine for SN platforms
+ *
+ * Sets up an initial console to aid debugging. Intended primarily
+ * for bringup. See start_kernel() in init/main.c.
+ */
+
+void __init early_sn_setup(void)
+{
+ efi_system_table_t *efi_systab;
+ efi_config_table_t *config_tables;
+ struct ia64_sal_systab *sal_systab;
+ struct ia64_sal_desc_entry_point *ep;
+ char *p;
+ int i, j;
+
+ /*
+ * Parse enough of the SAL tables to locate the SAL entry point. Since, console
+ * IO on SN2 is done via SAL calls, early_printk won't work without this.
+ *
+ * This code duplicates some of the ACPI table parsing that is in efi.c & sal.c.
+	 * Any changes to those files may have to be made here as well.
+ */
+ efi_systab = (efi_system_table_t *) __va(ia64_boot_param->efi_systab);
+ config_tables = __va(efi_systab->tables);
+ for (i = 0; i < efi_systab->nr_tables; i++) {
+ if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) ==
+ 0) {
+ sal_systab = __va(config_tables[i].table);
+ p = (char *)(sal_systab + 1);
+ for (j = 0; j < sal_systab->entry_count; j++) {
+ if (*p == SAL_DESC_ENTRY_POINT) {
+ ep = (struct ia64_sal_desc_entry_point
+ *)p;
+ ia64_sal_handler_init(__va
+ (ep->sal_proc),
+ __va(ep->gp));
+ return;
+ }
+ p += SAL_DESC_SIZE(*p);
+ }
+ }
+ }
+ /* Uh-oh, SAL not available?? */
+ printk(KERN_ERR "failed to find SAL entry point\n");
+}
+
+extern int platform_intr_list[];
+static int __initdata shub_1_1_found;
+
+/*
+ * sn_check_for_wars
+ *
+ * Set flag for enabling shub specific wars
+ */
+
+static inline int __init is_shub_1_1(int nasid)
+{
+ unsigned long id;
+ int rev;
+
+ if (is_shub2())
+ return 0;
+ id = REMOTE_HUB_L(nasid, SH1_SHUB_ID);
+ rev = (id & SH1_SHUB_ID_REVISION_MASK) >> SH1_SHUB_ID_REVISION_SHFT;
+ return rev <= 2;
+}
+
+static void __init sn_check_for_wars(void)
+{
+ int cnode;
+
+ if (is_shub2()) {
+ /* none yet */
+ } else {
+ for_each_online_node(cnode) {
+ if (is_shub_1_1(cnodeid_to_nasid(cnode)))
+ shub_1_1_found = 1;
+ }
+ }
+}
+
+#ifndef XEN
+/*
+ * Scan the EFI PCDP table (if it exists) for an acceptable VGA console
+ * output device. If one exists, pick it and set sn_legacy_{io,mem} to
+ * reflect the bus offsets needed to address it.
+ *
+ * Since pcdp support in SN is not supported in the 2.4 kernel (or at least
+ * the one lbs is based on) just declare the needed structs here.
+ *
+ * Reference spec http://www.dig64.org/specifications/DIG64_PCDPv20.pdf
+ *
+ * Returns 0 if no acceptable vga is found, !0 otherwise.
+ *
+ * Note: This stuff is duped here because Altix requires the PCDP to
+ * locate a usable VGA device due to lack of proper ACPI support. Structures
+ * could be used from drivers/firmware/pcdp.h, but it was decided that moving
+ * this file to a more public location just for Altix use was undesirable.
+ */
+
+struct hcdp_uart_desc {
+ u8 pad[45];
+};
+
+struct pcdp {
+ u8 signature[4]; /* should be 'HCDP' */
+ u32 length;
+ u8 rev; /* should be >=3 for pcdp, <3 for hcdp */
+ u8 sum;
+ u8 oem_id[6];
+ u64 oem_tableid;
+ u32 oem_rev;
+ u32 creator_id;
+ u32 creator_rev;
+ u32 num_type0;
+ struct hcdp_uart_desc uart[0]; /* num_type0 of these */
+ /* pcdp descriptors follow */
+} __attribute__((packed));
+
+struct pcdp_device_desc {
+ u8 type;
+ u8 primary;
+ u16 length;
+ u16 index;
+ /* interconnect specific structure follows */
+ /* device specific structure follows that */
+} __attribute__((packed));
+
+struct pcdp_interface_pci {
+ u8 type; /* 1 == pci */
+ u8 reserved;
+ u16 length;
+ u8 segment;
+ u8 bus;
+ u8 dev;
+ u8 fun;
+ u16 devid;
+ u16 vendid;
+ u32 acpi_interrupt;
+ u64 mmio_tra;
+ u64 ioport_tra;
+ u8 flags;
+ u8 translation;
+} __attribute__((packed));
+
+struct pcdp_vga_device {
+ u8 num_eas_desc;
+ /* ACPI Extended Address Space Desc follows */
+} __attribute__((packed));
+
+/* from pcdp_device_desc.primary */
+#define PCDP_PRIMARY_CONSOLE 0x01
+
+/* from pcdp_device_desc.type */
+#define PCDP_CONSOLE_INOUT 0x0
+#define PCDP_CONSOLE_DEBUG 0x1
+#define PCDP_CONSOLE_OUT 0x2
+#define PCDP_CONSOLE_IN 0x3
+#define PCDP_CONSOLE_TYPE_VGA 0x8
+
+#define PCDP_CONSOLE_VGA (PCDP_CONSOLE_TYPE_VGA | PCDP_CONSOLE_OUT)
+
+/* from pcdp_interface_pci.type */
+#define PCDP_IF_PCI 1
+
+/* from pcdp_interface_pci.translation */
+#define PCDP_PCI_TRANS_IOPORT 0x02
+#define PCDP_PCI_TRANS_MMIO 0x01
+
+#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
+static void
+sn_scan_pcdp(void)
+{
+ u8 *bp;
+ struct pcdp *pcdp;
+ struct pcdp_device_desc device;
+ struct pcdp_interface_pci if_pci;
+ extern struct efi efi;
+
+ if (efi.hcdp == EFI_INVALID_TABLE_ADDR)
+ return; /* no hcdp/pcdp table */
+
+ pcdp = __va(efi.hcdp);
+
+ if (pcdp->rev < 3)
+ return; /* only support PCDP (rev >= 3) */
+
+ for (bp = (u8 *)&pcdp->uart[pcdp->num_type0];
+ bp < (u8 *)pcdp + pcdp->length;
+ bp += device.length) {
+ memcpy(&device, bp, sizeof(device));
+ if (! (device.primary & PCDP_PRIMARY_CONSOLE))
+ continue; /* not primary console */
+
+ if (device.type != PCDP_CONSOLE_VGA)
+ continue; /* not VGA descriptor */
+
+ memcpy(&if_pci, bp+sizeof(device), sizeof(if_pci));
+ if (if_pci.type != PCDP_IF_PCI)
+ continue; /* not PCI interconnect */
+
+ if (if_pci.translation & PCDP_PCI_TRANS_IOPORT)
+ vga_console_iobase =
+ if_pci.ioport_tra | __IA64_UNCACHED_OFFSET;
+
+ if (if_pci.translation & PCDP_PCI_TRANS_MMIO)
+ vga_console_membase =
+ if_pci.mmio_tra | __IA64_UNCACHED_OFFSET;
+
+ break; /* once we find the primary, we're done */
+ }
+}
+#endif
+
+static unsigned long sn2_rtc_initial;
+
+static unsigned long long ia64_sn2_printk_clock(void)
+{
+ unsigned long rtc_now = rtc_time();
+
+ return (rtc_now - sn2_rtc_initial) *
+ (1000000000 / sn_rtc_cycles_per_second);
+}
+#endif
+
+/**
+ * sn_setup - SN platform setup routine
+ * @cmdline_p: kernel command line
+ *
+ * Handles platform setup for SN machines. This includes determining
+ * the RTC frequency (via a SAL call), initializing secondary CPUs, and
+ * setting up per-node data areas. The console is also initialized here.
+ */
+#ifdef XEN
+void __cpuinit sn_cpu_init(void);
+#endif
+
+void __init sn_setup(char **cmdline_p)
+{
+#ifndef XEN
+ long status, ticks_per_sec, drift;
+#else
+ unsigned long status, ticks_per_sec, drift;
+#endif
+ u32 version = sn_sal_rev();
+#ifndef XEN
+ extern void sn_cpu_init(void);
+
+ sn2_rtc_initial = rtc_time();
+ ia64_sn_plat_set_error_handling_features(); // obsolete
+ ia64_sn_set_os_feature(OSF_MCA_SLV_TO_OS_INIT_SLV);
+ ia64_sn_set_os_feature(OSF_FEAT_LOG_SBES);
+
+
+#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
+ /*
+ * Handle SN vga console.
+ *
+ * SN systems do not have enough ACPI table information
+ * being passed from prom to identify VGA adapters and the legacy
+ * addresses to access them. Until that is done, SN systems rely
+ * on the PCDP table to identify the primary VGA console if one
+ * exists.
+ *
+ * However, kernel PCDP support is optional, and even if it is built
+ * into the kernel, it will not be used if the boot cmdline contains
+ * console= directives.
+ *
+ * So, to work around this mess, we duplicate some of the PCDP code
+ * here so that the primary VGA console (as defined by PCDP) will
+ * work on SN systems even if a different console (e.g. serial) is
+ * selected on the boot line (or CONFIG_EFI_PCDP is off).
+ */
+
+ if (! vga_console_membase)
+ sn_scan_pcdp();
+
+ if (vga_console_membase) {
+ /* usable vga ... make tty0 the preferred default console */
+ if (!strstr(*cmdline_p, "console="))
+ add_preferred_console("tty", 0, NULL);
+ } else {
+ printk(KERN_DEBUG "SGI: Disabling VGA console\n");
+ if (!strstr(*cmdline_p, "console="))
+ add_preferred_console("ttySG", 0, NULL);
+#ifdef CONFIG_DUMMY_CONSOLE
+ conswitchp = &dummy_con;
+#else
+ conswitchp = NULL;
+#endif /* CONFIG_DUMMY_CONSOLE */
+ }
+#endif /* def(CONFIG_VT) && def(CONFIG_VGA_CONSOLE) */
+
+ MAX_DMA_ADDRESS = PAGE_OFFSET + MAX_PHYS_MEMORY;
+#endif
+
+ /*
+ * Build the tables for managing cnodes.
+ */
+ build_cnode_tables();
+
+ status =
+ ia64_sal_freq_base(SAL_FREQ_BASE_REALTIME_CLOCK, &ticks_per_sec,
+ &drift);
+ if (status != 0 || ticks_per_sec < 100000) {
+ printk(KERN_WARNING
+ "unable to determine platform RTC clock frequency, guessing.\n");
+ /* PROM gives wrong value for clock freq. so guess */
+ sn_rtc_cycles_per_second = 1000000000000UL / 30000UL;
+ } else
+ sn_rtc_cycles_per_second = ticks_per_sec;
+#ifndef XEN
+
+ platform_intr_list[ACPI_INTERRUPT_CPEI] = IA64_CPE_VECTOR;
+
+ ia64_printk_clock = ia64_sn2_printk_clock;
+#endif
+
+ printk("SGI SAL version %x.%02x\n", version >> 8, version & 0x00FF);
+
+ /*
+ * we set the default root device to /dev/hda
+ * to make simulation easy
+ */
+#ifndef XEN
+ ROOT_DEV = Root_HDA1;
+#endif
+
+ /*
+ * Create the PDAs and NODEPDAs for all the cpus.
+ */
+ sn_init_pdas(cmdline_p);
+
+#ifndef XEN
+ ia64_mark_idle = &snidle;
+#endif
+
+ /*
+ * For the bootcpu, we do this here. All other cpus will make the
+ * call as part of cpu_init in slave cpu initialization.
+ */
+ sn_cpu_init();
+
+#ifndef XEN
+#ifdef CONFIG_SMP
+ init_smp_config();
+#endif
+ screen_info = sn_screen_info;
+
+ sn_timer_init();
+
+ /*
+ * set pm_power_off to a SAL call to allow
+ * sn machines to power off. The SAL call can be replaced
+ * by an ACPI interface call when ACPI is fully implemented
+ * for sn.
+ */
+ pm_power_off = ia64_sn_power_down;
+ current->thread.flags |= IA64_THREAD_MIGRATION;
+#endif
+}
+
+/**
+ * sn_init_pdas - setup node data areas
+ *
+ * One time setup for Node Data Area. Called by sn_setup().
+ */
+static void __init sn_init_pdas(char **cmdline_p)
+{
+ cnodeid_t cnode;
+
+ /*
+	 * Allocate & initialize the nodepda for each node.
+ */
+ for_each_online_node(cnode) {
+ nodepdaindr[cnode] =
+ alloc_bootmem_node(NODE_DATA(cnode), sizeof(nodepda_t));
+ memset(nodepdaindr[cnode], 0, sizeof(nodepda_t));
+ memset(nodepdaindr[cnode]->phys_cpuid, -1,
+ sizeof(nodepdaindr[cnode]->phys_cpuid));
+ spin_lock_init(&nodepdaindr[cnode]->ptc_lock);
+ }
+
+ /*
+ * Allocate & initialize nodepda for TIOs. For now, put them on node 0.
+ */
+ for (cnode = num_online_nodes(); cnode < num_cnodes; cnode++) {
+ nodepdaindr[cnode] =
+ alloc_bootmem_node(NODE_DATA(0), sizeof(nodepda_t));
+ memset(nodepdaindr[cnode], 0, sizeof(nodepda_t));
+ }
+
+ /*
+ * Now copy the array of nodepda pointers to each nodepda.
+ */
+ for (cnode = 0; cnode < num_cnodes; cnode++)
+ memcpy(nodepdaindr[cnode]->pernode_pdaindr, nodepdaindr,
+ sizeof(nodepdaindr));
+
+#ifndef XEN
+ /*
+ * Set up IO related platform-dependent nodepda fields.
+ * The following routine actually sets up the hubinfo struct
+ * in nodepda.
+ */
+ for_each_online_node(cnode) {
+ bte_init_node(nodepdaindr[cnode], cnode);
+ }
+
+ /*
+ * Initialize the per node hubdev. This includes IO Nodes and
+ * headless/memless nodes.
+ */
+ for (cnode = 0; cnode < num_cnodes; cnode++) {
+ hubdev_init_node(nodepdaindr[cnode], cnode);
+ }
+#endif
+}
+
+/**
+ * sn_cpu_init - initialize per-cpu data areas
+ * @cpuid: cpuid of the caller
+ *
+ * Called during cpu initialization on each cpu as it starts.
+ * Currently, initializes the per-cpu data area for SNIA.
+ * Also sets up a few fields in the nodepda. Also known as
+ * platform_cpu_init() by the ia64 machvec code.
+ */
+void __cpuinit sn_cpu_init(void)
+{
+ int cpuid;
+ int cpuphyid;
+ int nasid;
+ int subnode;
+ int slice;
+ int cnode;
+ int i;
+ static int wars_have_been_checked;
+
+ cpuid = smp_processor_id();
+#ifndef XEN
+ if (cpuid == 0 && IS_MEDUSA()) {
+ if (ia64_sn_is_fake_prom())
+ sn_prom_type = 2;
+ else
+ sn_prom_type = 1;
+ printk(KERN_INFO "Running on medusa with %s PROM\n",
+ (sn_prom_type == 1) ? "real" : "fake");
+ }
+#endif
+
+ memset(pda, 0, sizeof(pda));
+ if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2,
+ &sn_hub_info->nasid_bitmask,
+ &sn_hub_info->nasid_shift,
+ &sn_system_size, &sn_sharing_domain_size,
+ &sn_partition_id, &sn_coherency_id,
+ &sn_region_size))
+ BUG();
+ sn_hub_info->as_shift = sn_hub_info->nasid_shift - 2;
+
+ /*
+ * Don't check status. The SAL call is not supported on all PROMs
+ * but a failure is harmless.
+ */
+ (void) ia64_sn_set_cpu_number(cpuid);
+
+ /*
+ * The boot cpu makes this call again after platform initialization is
+ * complete.
+ */
+ if (nodepdaindr[0] == NULL)
+ return;
+
+ for (i = 0; i < MAX_PROM_FEATURE_SETS; i++)
+ if (ia64_sn_get_prom_feature_set(i, &sn_prom_features[i]) != 0)
+ break;
+
+ cpuphyid = get_sapicid();
+
+ if (ia64_sn_get_sapic_info(cpuphyid, &nasid, &subnode, &slice))
+ BUG();
+
+ for (i=0; i < MAX_NUMNODES; i++) {
+ if (nodepdaindr[i]) {
+ nodepdaindr[i]->phys_cpuid[cpuid].nasid = nasid;
+ nodepdaindr[i]->phys_cpuid[cpuid].slice = slice;
+ nodepdaindr[i]->phys_cpuid[cpuid].subnode = subnode;
+ }
+ }
+
+ cnode = nasid_to_cnodeid(nasid);
+
+ sn_nodepda = nodepdaindr[cnode];
+
+ pda->led_address =
+ (typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT));
+ pda->led_state = LED_ALWAYS_SET;
+ pda->hb_count = HZ / 2;
+ pda->hb_state = 0;
+ pda->idle_flag = 0;
+
+ if (cpuid != 0) {
+ /* copy cpu 0's sn_cnodeid_to_nasid table to this cpu's */
+ memcpy(sn_cnodeid_to_nasid,
+ (&per_cpu(__sn_cnodeid_to_nasid, 0)),
+ sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid)));
+ }
+
+ /*
+ * Check for WARs.
+ * Only needs to be done once, on BSP.
+ * Has to be done after loop above, because it uses this cpu's
+ * sn_cnodeid_to_nasid table which was just initialized if this
+ * isn't cpu 0.
+ * Has to be done before assignment below.
+ */
+ if (!wars_have_been_checked) {
+ sn_check_for_wars();
+ wars_have_been_checked = 1;
+ }
+ sn_hub_info->shub_1_1_found = shub_1_1_found;
+
+ /*
+ * Set up addresses of PIO/MEM write status registers.
+ */
+ {
+ u64 pio1[] = {SH1_PIO_WRITE_STATUS_0, 0, SH1_PIO_WRITE_STATUS_1, 0};
+ u64 pio2[] = {SH2_PIO_WRITE_STATUS_0, SH2_PIO_WRITE_STATUS_2,
+ SH2_PIO_WRITE_STATUS_1, SH2_PIO_WRITE_STATUS_3};
+ u64 *pio;
+ pio = is_shub1() ? pio1 : pio2;
+ pda->pio_write_status_addr =
+ (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid, pio[slice]);
+ pda->pio_write_status_val = is_shub1() ? SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK : 0;
+ }
+
+#ifndef XEN /* local_node_data is not allocated .... yet */
+ /*
+ * WAR addresses for SHUB 1.x.
+ */
+ if (local_node_data->active_cpu_count++ == 0 && is_shub1()) {
+ int buddy_nasid;
+ buddy_nasid =
+ cnodeid_to_nasid(numa_node_id() ==
+ num_online_nodes() - 1 ? 0 : numa_node_id() + 1);
+ pda->pio_shub_war_cam_addr =
+ (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid,
+ SH1_PI_CAM_CONTROL);
+ }
+#endif
+}
+
+/*
+ * Build tables for converting between NASIDs and cnodes.
+ */
+static inline int __init board_needs_cnode(int type)
+{
+ return (type == KLTYPE_SNIA || type == KLTYPE_TIO);
+}
+
+void __init build_cnode_tables(void)
+{
+ int nasid;
+ int node;
+ lboard_t *brd;
+
+ memset(physical_node_map, -1, sizeof(physical_node_map));
+ memset(sn_cnodeid_to_nasid, -1,
+ sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid)));
+
+ /*
+ * First populate the tables with C/M bricks. This ensures that
+ * cnode == node for all C & M bricks.
+ */
+ for_each_online_node(node) {
+ nasid = pxm_to_nasid(node_to_pxm(node));
+ sn_cnodeid_to_nasid[node] = nasid;
+ physical_node_map[nasid] = node;
+ }
+
+ /*
+ * num_cnodes is total number of C/M/TIO bricks. Because of the 256 node
+ * limit on the number of nodes, we can't use the generic node numbers
+ * for this. Note that num_cnodes is incremented below as TIOs or
+ * headless/memoryless nodes are discovered.
+ */
+ num_cnodes = num_online_nodes();
+
+ /* fakeprom does not support klgraph */
+ if (IS_RUNNING_ON_FAKE_PROM())
+ return;
+
+ /* Find TIOs & headless/memoryless nodes and add them to the tables */
+ for_each_online_node(node) {
+ kl_config_hdr_t *klgraph_header;
+ nasid = cnodeid_to_nasid(node);
+ klgraph_header = ia64_sn_get_klconfig_addr(nasid);
+ if (klgraph_header == NULL)
+ BUG();
+ brd = NODE_OFFSET_TO_LBOARD(nasid, klgraph_header->ch_board_info);
+ while (brd) {
+ if (board_needs_cnode(brd->brd_type) && physical_node_map[brd->brd_nasid] < 0) {
+ sn_cnodeid_to_nasid[num_cnodes] = brd->brd_nasid;
+ physical_node_map[brd->brd_nasid] = num_cnodes++;
+ }
+ brd = find_lboard_next(brd);
+ }
+ }
+}
+
+int
+nasid_slice_to_cpuid(int nasid, int slice)
+{
+ long cpu;
+
+ for (cpu = 0; cpu < NR_CPUS; cpu++)
+ if (cpuid_to_nasid(cpu) == nasid &&
+ cpuid_to_slice(cpu) == slice)
+ return cpu;
+
+ return -1;
+}
+
+int sn_prom_feature_available(int id)
+{
+ if (id >= BITS_PER_LONG * MAX_PROM_FEATURE_SETS)
+ return 0;
+ return test_bit(id, sn_prom_features);
+}
+EXPORT_SYMBOL(sn_prom_feature_available);
+
diff --git a/xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c b/xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c
new file mode 100644
index 0000000000..954a2f4bd8
--- /dev/null
+++ b/xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c
@@ -0,0 +1,548 @@
+/*
+ * SN2 Platform specific SMP Support
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2006 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/threads.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mmzone.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/nodemask.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+
+#include <asm/processor.h>
+#include <asm/irq.h>
+#include <asm/sal.h>
+#include <asm/system.h>
+#include <asm/delay.h>
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/tlb.h>
+#include <asm/numa.h>
+#include <asm/hw_irq.h>
+#include <asm/current.h>
+#ifdef XEN
+#include <asm/sn/arch.h>
+#endif
+#include <asm/sn/sn_cpuid.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/shub_mmr.h>
+#include <asm/sn/nodepda.h>
+#include <asm/sn/rw_mmr.h>
+
+DEFINE_PER_CPU(struct ptc_stats, ptcstats);
+DECLARE_PER_CPU(struct ptc_stats, ptcstats);
+
+static __cacheline_aligned DEFINE_SPINLOCK(sn2_global_ptc_lock);
+
+extern unsigned long
+sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long,
+ volatile unsigned long *, unsigned long,
+ volatile unsigned long *, unsigned long);
+void
+sn2_ptc_deadlock_recovery(short *, short, short, int,
+ volatile unsigned long *, unsigned long,
+ volatile unsigned long *, unsigned long);
+
+/*
+ * Note: some of the following is captured here to make debugging easier
+ * (the macros make more sense if you see the debug patch - not posted)
+ */
+#define sn2_ptctest 0
+#define local_node_uses_ptc_ga(sh1) ((sh1) ? 1 : 0)
+#define max_active_pio(sh1) ((sh1) ? 32 : 7)
+#define reset_max_active_on_deadlock() 1
+#ifndef XEN
+#define PTC_LOCK(sh1) ((sh1) ? &sn2_global_ptc_lock : &sn_nodepda->ptc_lock)
+#else
+#define PTC_LOCK(sh1) &sn2_global_ptc_lock
+#endif
+
+struct ptc_stats {
+ unsigned long ptc_l;
+ unsigned long change_rid;
+ unsigned long shub_ptc_flushes;
+ unsigned long nodes_flushed;
+ unsigned long deadlocks;
+ unsigned long deadlocks2;
+ unsigned long lock_itc_clocks;
+ unsigned long shub_itc_clocks;
+ unsigned long shub_itc_clocks_max;
+ unsigned long shub_ptc_flushes_not_my_mm;
+};
+
+#define sn2_ptctest 0
+
+static inline unsigned long wait_piowc(void)
+{
+ volatile unsigned long *piows;
+ unsigned long zeroval, ws;
+
+ piows = pda->pio_write_status_addr;
+ zeroval = pda->pio_write_status_val;
+ do {
+ cpu_relax();
+ } while (((ws = *piows) & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) != zeroval);
+ return (ws & SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK) != 0;
+}
+
+#ifndef XEN /* No idea if Xen will ever support this */
+/**
+ * sn_migrate - SN-specific task migration actions
+ * @task: Task being migrated to new CPU
+ *
+ * SN2 PIO writes from separate CPUs are not guaranteed to arrive in order.
+ * Context switching user threads which have memory-mapped MMIO may cause
+ * PIOs to issue from separate CPUs, thus the PIO writes must be drained
+ * from the previous CPU's Shub before execution resumes on the new CPU.
+ */
+void sn_migrate(struct task_struct *task)
+{
+ pda_t *last_pda = pdacpu(task_thread_info(task)->last_cpu);
+ volatile unsigned long *adr = last_pda->pio_write_status_addr;
+ unsigned long val = last_pda->pio_write_status_val;
+
+ /* Drain PIO writes from old CPU's Shub */
+ while (unlikely((*adr & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK)
+ != val))
+ cpu_relax();
+}
+
+void sn_tlb_migrate_finish(struct mm_struct *mm)
+{
+ /* flush_tlb_mm is inefficient if more than 1 users of mm */
+#ifndef XEN
+ if (mm == current->mm && mm && atomic_read(&mm->mm_users) == 1)
+#else
+ if (mm == &current->arch.mm && mm && atomic_read(&mm->mm_users) == 1)
+#endif
+ flush_tlb_mm(mm);
+}
+#endif
+
+/**
+ * sn2_global_tlb_purge - globally purge translation cache of virtual address range
+ * @mm: mm_struct containing virtual address range
+ * @start: start of virtual address range
+ * @end: end of virtual address range
+ * @nbits: specifies number of bytes to purge per instruction (num = 1<<(nbits & 0xfc))
+ *
+ * Purges the translation caches of all processors of the given virtual address
+ * range.
+ *
+ * Note:
+ * - cpu_vm_mask is a bit mask that indicates which cpus have loaded the context.
+ * - cpu_vm_mask is converted into a nodemask of the nodes containing the
+ * cpus in cpu_vm_mask.
+ * - if only one bit is set in cpu_vm_mask & it is the current cpu & the
+ * process is purging its own virtual address range, then only the
+ * local TLB needs to be flushed. This flushing can be done using
+ * ptc.l. This is the common case & avoids the global spinlock.
+ * - if multiple cpus have loaded the context, then flushing has to be
+ * done with ptc.g/MMRs under protection of the global ptc_lock.
+ */
+
+#ifdef XEN /* Xen is soooooooo stupid! */
+// static cpumask_t mask_all = CPU_MASK_ALL;
+#endif
+
+void
+#ifndef XEN
+sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
+#else
+sn2_global_tlb_purge(unsigned long start,
+#endif
+ unsigned long end, unsigned long nbits)
+{
+ int i, ibegin, shub1, cnode, mynasid, cpu, lcpu = 0, nasid;
+#ifndef XEN
+ int mymm = (mm == current->active_mm && mm == current->mm);
+#else
+ // struct mm_struct *mm;
+ int mymm = 0;
+#endif
+ int use_cpu_ptcga;
+ volatile unsigned long *ptc0, *ptc1;
+ unsigned long itc, itc2, flags, data0 = 0, data1 = 0, rr_value, old_rr = 0;
+ short nasids[MAX_NUMNODES], nix;
+ nodemask_t nodes_flushed;
+ int active, max_active, deadlock;
+
+ nodes_clear(nodes_flushed);
+ i = 0;
+
+#ifndef XEN /* One day Xen will grow up! */
+ for_each_cpu_mask(cpu, mm->cpu_vm_mask) {
+ cnode = cpu_to_node(cpu);
+ node_set(cnode, nodes_flushed);
+ lcpu = cpu;
+ i++;
+ }
+#else
+ for_each_cpu(cpu) {
+ cnode = cpu_to_node(cpu);
+ node_set(cnode, nodes_flushed);
+ lcpu = cpu;
+ i++;
+ }
+#endif
+
+ if (i == 0)
+ return;
+
+ preempt_disable();
+
+ if (likely(i == 1 && lcpu == smp_processor_id() && mymm)) {
+ do {
+ ia64_ptcl(start, nbits << 2);
+ start += (1UL << nbits);
+ } while (start < end);
+ ia64_srlz_i();
+ __get_cpu_var(ptcstats).ptc_l++;
+ preempt_enable();
+ return;
+ }
+
+#ifndef XEN
+ if (atomic_read(&mm->mm_users) == 1 && mymm) {
+#ifndef XEN /* I hate Xen! */
+ flush_tlb_mm(mm);
+#else
+ flush_tlb_mask(mask_all);
+#endif
+ __get_cpu_var(ptcstats).change_rid++;
+ preempt_enable();
+ return;
+ }
+#endif
+
+ itc = ia64_get_itc();
+ nix = 0;
+ for_each_node_mask(cnode, nodes_flushed)
+ nasids[nix++] = cnodeid_to_nasid(cnode);
+
+#ifndef XEN
+ rr_value = (mm->context << 3) | REGION_NUMBER(start);
+#else
+ rr_value = REGION_NUMBER(start);
+#endif
+
+ shub1 = is_shub1();
+ if (shub1) {
+ data0 = (1UL << SH1_PTC_0_A_SHFT) |
+ (nbits << SH1_PTC_0_PS_SHFT) |
+ (rr_value << SH1_PTC_0_RID_SHFT) |
+ (1UL << SH1_PTC_0_START_SHFT);
+#ifndef XEN
+ ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_0);
+ ptc1 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_1);
+#else
+ ptc0 = (unsigned long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_0);
+ ptc1 = (unsigned long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_1);
+#endif
+ } else {
+ data0 = (1UL << SH2_PTC_A_SHFT) |
+ (nbits << SH2_PTC_PS_SHFT) |
+ (1UL << SH2_PTC_START_SHFT);
+#ifndef XEN
+ ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH2_PTC +
+#else
+ ptc0 = (unsigned long *)GLOBAL_MMR_PHYS_ADDR(0, SH2_PTC +
+#endif
+ (rr_value << SH2_PTC_RID_SHFT));
+ ptc1 = NULL;
+ }
+
+
+ mynasid = get_nasid();
+ use_cpu_ptcga = local_node_uses_ptc_ga(shub1);
+ max_active = max_active_pio(shub1);
+
+ itc = ia64_get_itc();
+ spin_lock_irqsave(PTC_LOCK(shub1), flags);
+ itc2 = ia64_get_itc();
+
+ __get_cpu_var(ptcstats).lock_itc_clocks += itc2 - itc;
+ __get_cpu_var(ptcstats).shub_ptc_flushes++;
+ __get_cpu_var(ptcstats).nodes_flushed += nix;
+ if (!mymm)
+ __get_cpu_var(ptcstats).shub_ptc_flushes_not_my_mm++;
+
+ if (use_cpu_ptcga && !mymm) {
+ old_rr = ia64_get_rr(start);
+ ia64_set_rr(start, (old_rr & 0xff) | (rr_value << 8));
+ ia64_srlz_d();
+ }
+
+ wait_piowc();
+ do {
+ if (shub1)
+ data1 = start | (1UL << SH1_PTC_1_START_SHFT);
+ else
+ data0 = (data0 & ~SH2_PTC_ADDR_MASK) | (start & SH2_PTC_ADDR_MASK);
+ deadlock = 0;
+ active = 0;
+ for (ibegin = 0, i = 0; i < nix; i++) {
+ nasid = nasids[i];
+ if (use_cpu_ptcga && unlikely(nasid == mynasid)) {
+ ia64_ptcga(start, nbits << 2);
+ ia64_srlz_i();
+ } else {
+ ptc0 = CHANGE_NASID(nasid, ptc0);
+ if (ptc1)
+ ptc1 = CHANGE_NASID(nasid, ptc1);
+ pio_atomic_phys_write_mmrs(ptc0, data0, ptc1, data1);
+ active++;
+ }
+ if (active >= max_active || i == (nix - 1)) {
+ if ((deadlock = wait_piowc())) {
+ sn2_ptc_deadlock_recovery(nasids, ibegin, i, mynasid, ptc0, data0, ptc1, data1);
+ if (reset_max_active_on_deadlock())
+ max_active = 1;
+ }
+ active = 0;
+ ibegin = i + 1;
+ }
+ }
+ start += (1UL << nbits);
+ } while (start < end);
+
+ itc2 = ia64_get_itc() - itc2;
+ __get_cpu_var(ptcstats).shub_itc_clocks += itc2;
+ if (itc2 > __get_cpu_var(ptcstats).shub_itc_clocks_max)
+ __get_cpu_var(ptcstats).shub_itc_clocks_max = itc2;
+
+ if (old_rr) {
+ ia64_set_rr(start, old_rr);
+ ia64_srlz_d();
+ }
+
+ spin_unlock_irqrestore(PTC_LOCK(shub1), flags);
+
+ preempt_enable();
+}
+
+/*
+ * sn2_ptc_deadlock_recovery
+ *
+ * Recover from PTC deadlock conditions. Recovery requires stepping thru each
+ * TLB flush transaction. The recovery sequence is somewhat tricky & is
+ * coded in assembly language.
+ */
+
+void
+sn2_ptc_deadlock_recovery(short *nasids, short ib, short ie, int mynasid,
+ volatile unsigned long *ptc0, unsigned long data0,
+ volatile unsigned long *ptc1, unsigned long data1)
+{
+ short nasid, i;
+ unsigned long *piows, zeroval, n;
+
+ __get_cpu_var(ptcstats).deadlocks++;
+
+ piows = (unsigned long *) pda->pio_write_status_addr;
+ zeroval = pda->pio_write_status_val;
+
+
+ for (i=ib; i <= ie; i++) {
+ nasid = nasids[i];
+ if (local_node_uses_ptc_ga(is_shub1()) && nasid == mynasid)
+ continue;
+ ptc0 = CHANGE_NASID(nasid, ptc0);
+ if (ptc1)
+ ptc1 = CHANGE_NASID(nasid, ptc1);
+
+ n = sn2_ptc_deadlock_recovery_core(ptc0, data0, ptc1, data1, piows, zeroval);
+ __get_cpu_var(ptcstats).deadlocks2 += n;
+ }
+
+}
+
+/**
+ * sn_send_IPI_phys - send an IPI to a Nasid and slice
+ * @nasid: nasid to receive the interrupt (may be outside partition)
+ * @physid: physical cpuid to receive the interrupt.
+ * @vector: command to send
+ * @delivery_mode: delivery mechanism
+ *
+ * Sends an IPI (interprocessor interrupt) to the processor specified by
+ * @physid
+ *
+ * @delivery_mode can be one of the following
+ *
+ * %IA64_IPI_DM_INT - pend an interrupt
+ * %IA64_IPI_DM_PMI - pend a PMI
+ * %IA64_IPI_DM_NMI - pend an NMI
+ * %IA64_IPI_DM_INIT - pend an INIT interrupt
+ */
+void sn_send_IPI_phys(int nasid, long physid, int vector, int delivery_mode)
+{
+ long val;
+ unsigned long flags = 0;
+ volatile long *p;
+
+ p = (long *)GLOBAL_MMR_PHYS_ADDR(nasid, SH_IPI_INT);
+ val = (1UL << SH_IPI_INT_SEND_SHFT) |
+ (physid << SH_IPI_INT_PID_SHFT) |
+ ((long)delivery_mode << SH_IPI_INT_TYPE_SHFT) |
+ ((long)vector << SH_IPI_INT_IDX_SHFT) |
+ (0x000feeUL << SH_IPI_INT_BASE_SHFT);
+
+ mb();
+ if (enable_shub_wars_1_1()) {
+ spin_lock_irqsave(&sn2_global_ptc_lock, flags);
+ }
+ pio_phys_write_mmr(p, val);
+ if (enable_shub_wars_1_1()) {
+ wait_piowc();
+ spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);
+ }
+
+}
+
+EXPORT_SYMBOL(sn_send_IPI_phys);
+
+/**
+ * sn2_send_IPI - send an IPI to a processor
+ * @cpuid: target of the IPI
+ * @vector: command to send
+ * @delivery_mode: delivery mechanism
+ * @redirect: redirect the IPI?
+ *
+ * Sends an IPI (InterProcessor Interrupt) to the processor specified by
+ * @cpuid. @vector specifies the command to send, while @delivery_mode can
+ * be one of the following
+ *
+ * %IA64_IPI_DM_INT - pend an interrupt
+ * %IA64_IPI_DM_PMI - pend a PMI
+ * %IA64_IPI_DM_NMI - pend an NMI
+ * %IA64_IPI_DM_INIT - pend an INIT interrupt
+ */
+void sn2_send_IPI(int cpuid, int vector, int delivery_mode, int redirect)
+{
+ long physid;
+ int nasid;
+
+ physid = cpu_physical_id(cpuid);
+#ifdef XEN
+ if (!sn_nodepda) {
+ ia64_sn_get_sapic_info(physid, &nasid, NULL, NULL);
+ } else
+#endif
+ nasid = cpuid_to_nasid(cpuid);
+
+ /* the following is used only when starting cpus at boot time */
+ if (unlikely(nasid == -1))
+ ia64_sn_get_sapic_info(physid, &nasid, NULL, NULL);
+
+ sn_send_IPI_phys(nasid, physid, vector, delivery_mode);
+}
+
+#ifdef CONFIG_PROC_FS
+
+#define PTC_BASENAME "sgi_sn/ptc_statistics"
+
+static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset)
+{
+ if (*offset < NR_CPUS)
+ return offset;
+ return NULL;
+}
+
+static void *sn2_ptc_seq_next(struct seq_file *file, void *data, loff_t * offset)
+{
+ (*offset)++;
+ if (*offset < NR_CPUS)
+ return offset;
+ return NULL;
+}
+
+static void sn2_ptc_seq_stop(struct seq_file *file, void *data)
+{
+}
+
+static int sn2_ptc_seq_show(struct seq_file *file, void *data)
+{
+ struct ptc_stats *stat;
+ int cpu;
+
+ cpu = *(loff_t *) data;
+
+ if (!cpu) {
+ seq_printf(file,
+ "# cpu ptc_l newrid ptc_flushes nodes_flushed deadlocks lock_nsec shub_nsec shub_nsec_max not_my_mm deadlock2\n");
+ seq_printf(file, "# ptctest %d\n", sn2_ptctest);
+ }
+
+ if (cpu < NR_CPUS && cpu_online(cpu)) {
+ stat = &per_cpu(ptcstats, cpu);
+ seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
+ stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed,
+ stat->deadlocks,
+ 1000 * stat->lock_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
+ 1000 * stat->shub_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
+ 1000 * stat->shub_itc_clocks_max / per_cpu(cpu_info, cpu).cyc_per_usec,
+ stat->shub_ptc_flushes_not_my_mm,
+ stat->deadlocks2);
+ }
+ return 0;
+}
+
+static struct seq_operations sn2_ptc_seq_ops = {
+ .start = sn2_ptc_seq_start,
+ .next = sn2_ptc_seq_next,
+ .stop = sn2_ptc_seq_stop,
+ .show = sn2_ptc_seq_show
+};
+
+static int sn2_ptc_proc_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &sn2_ptc_seq_ops);
+}
+
+static struct file_operations proc_sn2_ptc_operations = {
+ .open = sn2_ptc_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static struct proc_dir_entry *proc_sn2_ptc;
+
+static int __init sn2_ptc_init(void)
+{
+ if (!ia64_platform_is("sn2"))
+ return 0;
+
+ if (!(proc_sn2_ptc = create_proc_entry(PTC_BASENAME, 0444, NULL))) {
+ printk(KERN_ERR "unable to create %s proc entry", PTC_BASENAME);
+ return -EINVAL;
+ }
+ proc_sn2_ptc->proc_fops = &proc_sn2_ptc_operations;
+ spin_lock_init(&sn2_global_ptc_lock);
+ return 0;
+}
+
+static void __exit sn2_ptc_exit(void)
+{
+ remove_proc_entry(PTC_BASENAME, NULL);
+}
+
+module_init(sn2_ptc_init);
+module_exit(sn2_ptc_exit);
+#endif /* CONFIG_PROC_FS */
+
diff --git a/xen/arch/ia64/linux/Makefile b/xen/arch/ia64/linux/Makefile
index 6acc02583d..4e55bcb3a4 100644
--- a/xen/arch/ia64/linux/Makefile
+++ b/xen/arch/ia64/linux/Makefile
@@ -1,6 +1,9 @@
+subdir-y += dig
+subdir-y += hp
+subdir-y += sn
+
obj-y += bitop.o
obj-y += clear_page.o
-obj-y += cmdline.o
obj-y += copy_page_mck.o
obj-y += efi_stub.o
obj-y += extable.o
@@ -23,6 +26,7 @@ obj-y += __udivdi3.o
obj-y += __moddi3.o
obj-y += __umoddi3.o
obj-y += carta_random.o
+obj-y += io.o
## variants of divide/modulo
## see files in xen/arch/ia64/linux/lib (linux/arch/ia64/lib)
diff --git a/xen/arch/ia64/linux/README.origin b/xen/arch/ia64/linux/README.origin
index 87da72ac8f..f5f4677115 100644
--- a/xen/arch/ia64/linux/README.origin
+++ b/xen/arch/ia64/linux/README.origin
@@ -4,7 +4,6 @@ NOTE: DO NOT commit changes to these files! If a file
needs to be changed, move it to ../linux-xen and follow
the instructions in the README there.
-cmdline.c -> linux/lib/cmdline.c
efi_stub.S -> linux/arch/ia64/kernel/efi_stub.S
extable.c -> linux/arch/ia64/mm/extable.c
hpsim.S -> linux/arch/ia64/hp/sim/hpsim.S
@@ -27,3 +26,6 @@ strlen.S -> linux/arch/ia64/lib/strlen.S
# The files below are from Linux-2.6.16.33
carta_random.S -> linux/arch/ia64/lib/carta_random.S
+
+# The files below are from Linux-2.6.19
+io.c -> linux/arch/ia64/lib/io.c
diff --git a/xen/arch/ia64/linux/dig/Makefile b/xen/arch/ia64/linux/dig/Makefile
new file mode 100644
index 0000000000..38f6b9375d
--- /dev/null
+++ b/xen/arch/ia64/linux/dig/Makefile
@@ -0,0 +1 @@
+obj-y += machvec.o
diff --git a/xen/arch/ia64/linux/dig/README.origin b/xen/arch/ia64/linux/dig/README.origin
new file mode 100644
index 0000000000..0976d1dd60
--- /dev/null
+++ b/xen/arch/ia64/linux/dig/README.origin
@@ -0,0 +1,7 @@
+Source files in this directory are identical copies of linux-2.6.19 files:
+
+NOTE: DO NOT commit changes to these files! If a file
+needs to be changed, move it to ../linux-xen and follow
+the instructions in the README there.
+
+machvec.c -> linux/arch/ia64/dig/machvec.c
diff --git a/xen/arch/ia64/linux/dig/machvec.c b/xen/arch/ia64/linux/dig/machvec.c
new file mode 100644
index 0000000000..0c55bdafb4
--- /dev/null
+++ b/xen/arch/ia64/linux/dig/machvec.c
@@ -0,0 +1,3 @@
+#define MACHVEC_PLATFORM_NAME dig
+#define MACHVEC_PLATFORM_HEADER <asm/machvec_dig.h>
+#include <asm/machvec_init.h>
diff --git a/xen/arch/ia64/linux/hp/Makefile b/xen/arch/ia64/linux/hp/Makefile
new file mode 100644
index 0000000000..fc712eb29b
--- /dev/null
+++ b/xen/arch/ia64/linux/hp/Makefile
@@ -0,0 +1 @@
+subdir-y += zx1
diff --git a/xen/arch/ia64/linux/hp/zx1/Makefile b/xen/arch/ia64/linux/hp/zx1/Makefile
new file mode 100644
index 0000000000..c80bcd4ed7
--- /dev/null
+++ b/xen/arch/ia64/linux/hp/zx1/Makefile
@@ -0,0 +1 @@
+obj-y += hpzx1_machvec.o
diff --git a/xen/arch/ia64/linux/hp/zx1/README.origin b/xen/arch/ia64/linux/hp/zx1/README.origin
new file mode 100644
index 0000000000..598e743752
--- /dev/null
+++ b/xen/arch/ia64/linux/hp/zx1/README.origin
@@ -0,0 +1,7 @@
+Source files in this directory are identical copies of linux-2.6.19 files:
+
+NOTE: DO NOT commit changes to these files! If a file
+needs to be changed, move it to ../linux-xen and follow
+the instructions in the README there.
+
+hpzx1_machvec.c -> linux/arch/ia64/hp/zx1/hpzx1_machvec.c
diff --git a/xen/arch/ia64/linux/hp/zx1/hpzx1_machvec.c b/xen/arch/ia64/linux/hp/zx1/hpzx1_machvec.c
new file mode 100644
index 0000000000..32518b0f92
--- /dev/null
+++ b/xen/arch/ia64/linux/hp/zx1/hpzx1_machvec.c
@@ -0,0 +1,3 @@
+#define MACHVEC_PLATFORM_NAME hpzx1
+#define MACHVEC_PLATFORM_HEADER <asm/machvec_hpzx1.h>
+#include <asm/machvec_init.h>
diff --git a/xen/arch/ia64/linux/io.c b/xen/arch/ia64/linux/io.c
new file mode 100644
index 0000000000..bcd16f8ad9
--- /dev/null
+++ b/xen/arch/ia64/linux/io.c
@@ -0,0 +1,164 @@
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include <asm/io.h>
+
+/*
+ * Copy data from IO memory space to "real" memory space.
+ * This needs to be optimized.
+ */
+void memcpy_fromio(void *to, const volatile void __iomem *from, long count)
+{
+ char *dst = to;
+
+ while (count) {
+ count--;
+ *dst++ = readb(from++);
+ }
+}
+EXPORT_SYMBOL(memcpy_fromio);
+
+/*
+ * Copy data from "real" memory space to IO memory space.
+ * This needs to be optimized.
+ */
+void memcpy_toio(volatile void __iomem *to, const void *from, long count)
+{
+ const char *src = from;
+
+ while (count) {
+ count--;
+ writeb(*src++, to++);
+ }
+}
+EXPORT_SYMBOL(memcpy_toio);
+
+/*
+ * "memset" on IO memory space.
+ * This needs to be optimized.
+ */
+void memset_io(volatile void __iomem *dst, int c, long count)
+{
+ unsigned char ch = (char)(c & 0xff);
+
+ while (count) {
+ count--;
+ writeb(ch, dst);
+ dst++;
+ }
+}
+EXPORT_SYMBOL(memset_io);
+
+#ifdef CONFIG_IA64_GENERIC
+
+#undef __ia64_inb
+#undef __ia64_inw
+#undef __ia64_inl
+#undef __ia64_outb
+#undef __ia64_outw
+#undef __ia64_outl
+#undef __ia64_readb
+#undef __ia64_readw
+#undef __ia64_readl
+#undef __ia64_readq
+#undef __ia64_readb_relaxed
+#undef __ia64_readw_relaxed
+#undef __ia64_readl_relaxed
+#undef __ia64_readq_relaxed
+#undef __ia64_writeb
+#undef __ia64_writew
+#undef __ia64_writel
+#undef __ia64_writeq
+#undef __ia64_mmiowb
+
+unsigned int
+__ia64_inb (unsigned long port)
+{
+ return ___ia64_inb(port);
+}
+
+unsigned int
+__ia64_inw (unsigned long port)
+{
+ return ___ia64_inw(port);
+}
+
+unsigned int
+__ia64_inl (unsigned long port)
+{
+ return ___ia64_inl(port);
+}
+
+void
+__ia64_outb (unsigned char val, unsigned long port)
+{
+ ___ia64_outb(val, port);
+}
+
+void
+__ia64_outw (unsigned short val, unsigned long port)
+{
+ ___ia64_outw(val, port);
+}
+
+void
+__ia64_outl (unsigned int val, unsigned long port)
+{
+ ___ia64_outl(val, port);
+}
+
+unsigned char
+__ia64_readb (void __iomem *addr)
+{
+ return ___ia64_readb (addr);
+}
+
+unsigned short
+__ia64_readw (void __iomem *addr)
+{
+ return ___ia64_readw (addr);
+}
+
+unsigned int
+__ia64_readl (void __iomem *addr)
+{
+ return ___ia64_readl (addr);
+}
+
+unsigned long
+__ia64_readq (void __iomem *addr)
+{
+ return ___ia64_readq (addr);
+}
+
+unsigned char
+__ia64_readb_relaxed (void __iomem *addr)
+{
+ return ___ia64_readb (addr);
+}
+
+unsigned short
+__ia64_readw_relaxed (void __iomem *addr)
+{
+ return ___ia64_readw (addr);
+}
+
+unsigned int
+__ia64_readl_relaxed (void __iomem *addr)
+{
+ return ___ia64_readl (addr);
+}
+
+unsigned long
+__ia64_readq_relaxed (void __iomem *addr)
+{
+ return ___ia64_readq (addr);
+}
+
+void
+__ia64_mmiowb(void)
+{
+ ___ia64_mmiowb();
+}
+
+#endif /* CONFIG_IA64_GENERIC */
diff --git a/xen/arch/ia64/linux/sn/Makefile b/xen/arch/ia64/linux/sn/Makefile
new file mode 100644
index 0000000000..24687f3086
--- /dev/null
+++ b/xen/arch/ia64/linux/sn/Makefile
@@ -0,0 +1,2 @@
+subdir-y += kernel
+subdir-y += pci
diff --git a/xen/arch/ia64/linux/sn/kernel/Makefile b/xen/arch/ia64/linux/sn/kernel/Makefile
new file mode 100644
index 0000000000..54a9da7ca7
--- /dev/null
+++ b/xen/arch/ia64/linux/sn/kernel/Makefile
@@ -0,0 +1,3 @@
+obj-y += machvec.o
+obj-y += pio_phys.o
+obj-y += ptc_deadlock.o
diff --git a/xen/arch/ia64/linux/sn/kernel/README.origin b/xen/arch/ia64/linux/sn/kernel/README.origin
new file mode 100644
index 0000000000..9f77d132f9
--- /dev/null
+++ b/xen/arch/ia64/linux/sn/kernel/README.origin
@@ -0,0 +1,9 @@
+Source files in this directory are identical copies of linux-2.6.19 files:
+
+NOTE: DO NOT commit changes to these files! If a file
+needs to be changed, move it to ../linux-xen and follow
+the instructions in the README there.
+
+machvec.c -> linux/arch/ia64/sn/kernel/machvec.c
+pio_phys.S -> linux/arch/ia64/sn/kernel/pio_phys.S
+ptc_deadlock.S -> linux/arch/ia64/sn/kernel/sn2/ptc_deadlock.S
diff --git a/xen/arch/ia64/linux/sn/kernel/machvec.c b/xen/arch/ia64/linux/sn/kernel/machvec.c
new file mode 100644
index 0000000000..02bb915584
--- /dev/null
+++ b/xen/arch/ia64/linux/sn/kernel/machvec.c
@@ -0,0 +1,11 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+#define MACHVEC_PLATFORM_NAME sn2
+#define MACHVEC_PLATFORM_HEADER <asm/machvec_sn2.h>
+#include <asm/machvec_init.h>
diff --git a/xen/arch/ia64/linux/sn/kernel/pio_phys.S b/xen/arch/ia64/linux/sn/kernel/pio_phys.S
new file mode 100644
index 0000000000..3c7d48d6ec
--- /dev/null
+++ b/xen/arch/ia64/linux/sn/kernel/pio_phys.S
@@ -0,0 +1,71 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ *
+ * This file contains macros used to access MMR registers via
+ * uncached physical addresses.
+ * pio_phys_read_mmr - read an MMR
+ * pio_phys_write_mmr - write an MMR
+ * pio_atomic_phys_write_mmrs - atomically write 1 or 2 MMRs with psr.ic=0
+ * Second MMR will be skipped if address is NULL
+ *
+ * Addresses passed to these routines should be uncached physical addresses
+ * ie., 0x80000....
+ */
+
+
+
+#include <asm/asmmacro.h>
+#include <asm/page.h>
+
+GLOBAL_ENTRY(pio_phys_read_mmr)
+ .prologue
+ .regstk 1,0,0,0
+ .body
+ mov r2=psr
+ rsm psr.i | psr.dt
+ ;;
+ srlz.d
+ ld8.acq r8=[r32]
+ ;;
+ mov psr.l=r2;;
+ srlz.d
+ br.ret.sptk.many rp
+END(pio_phys_read_mmr)
+
+GLOBAL_ENTRY(pio_phys_write_mmr)
+ .prologue
+ .regstk 2,0,0,0
+ .body
+ mov r2=psr
+ rsm psr.i | psr.dt
+ ;;
+ srlz.d
+ st8.rel [r32]=r33
+ ;;
+ mov psr.l=r2;;
+ srlz.d
+ br.ret.sptk.many rp
+END(pio_phys_write_mmr)
+
+GLOBAL_ENTRY(pio_atomic_phys_write_mmrs)
+ .prologue
+ .regstk 4,0,0,0
+ .body
+ mov r2=psr
+ cmp.ne p9,p0=r34,r0;
+ rsm psr.i | psr.dt | psr.ic
+ ;;
+ srlz.d
+ st8.rel [r32]=r33
+(p9) st8.rel [r34]=r35
+ ;;
+ mov psr.l=r2;;
+ srlz.d
+ br.ret.sptk.many rp
+END(pio_atomic_phys_write_mmrs)
+
+
diff --git a/xen/arch/ia64/linux/sn/kernel/ptc_deadlock.S b/xen/arch/ia64/linux/sn/kernel/ptc_deadlock.S
new file mode 100644
index 0000000000..bebbcc4f8d
--- /dev/null
+++ b/xen/arch/ia64/linux/sn/kernel/ptc_deadlock.S
@@ -0,0 +1,92 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <asm/types.h>
+#include <asm/sn/shub_mmr.h>
+
+#define DEADLOCKBIT SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_SHFT
+#define WRITECOUNTMASK SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK
+#define ALIAS_OFFSET 8
+
+
+ .global sn2_ptc_deadlock_recovery_core
+ .proc sn2_ptc_deadlock_recovery_core
+
+sn2_ptc_deadlock_recovery_core:
+ .regstk 6,0,0,0
+
+ ptc0 = in0
+ data0 = in1
+ ptc1 = in2
+ data1 = in3
+ piowc = in4
+ zeroval = in5
+ piowcphy = r30
+ psrsave = r2
+ scr1 = r16
+ scr2 = r17
+ mask = r18
+
+
+ extr.u piowcphy=piowc,0,61;; // Convert piowc to uncached physical address
+ dep piowcphy=-1,piowcphy,63,1
+ movl mask=WRITECOUNTMASK
+ mov r8=r0
+
+1:
+ cmp.ne p8,p9=r0,ptc1 // Test for shub type (ptc1 non-null on shub1)
+ // p8 = 1 if shub1, p9 = 1 if shub2
+
+ add scr2=ALIAS_OFFSET,piowc // Address of WRITE_STATUS alias register
+ mov scr1=7;; // Clear DEADLOCK, WRITE_ERROR, MULTI_WRITE_ERROR
+(p8) st8.rel [scr2]=scr1;;
+(p9) ld8.acq scr1=[scr2];;
+
+5: ld8.acq scr1=[piowc];; // Wait for PIOs to complete.
+ hint @pause
+ and scr2=scr1,mask;; // mask of writecount bits
+ cmp.ne p6,p0=zeroval,scr2
+(p6) br.cond.sptk 5b
+
+
+
+ ////////////// BEGIN PHYSICAL MODE ////////////////////
+ mov psrsave=psr // Disable IC (no PMIs)
+ rsm psr.i | psr.dt | psr.ic;;
+ srlz.i;;
+
+ st8.rel [ptc0]=data0 // Write PTC0 & wait for completion.
+
+5: ld8.acq scr1=[piowcphy];; // Wait for PIOs to complete.
+ hint @pause
+ and scr2=scr1,mask;; // mask of writecount bits
+ cmp.ne p6,p0=zeroval,scr2
+(p6) br.cond.sptk 5b;;
+
+ tbit.nz p8,p7=scr1,DEADLOCKBIT;;// Test for DEADLOCK
+(p7) cmp.ne p7,p0=r0,ptc1;; // Test for non-null ptc1
+
+(p7) st8.rel [ptc1]=data1;; // Now write PTC1.
+
+5: ld8.acq scr1=[piowcphy];; // Wait for PIOs to complete.
+ hint @pause
+ and scr2=scr1,mask;; // mask of writecount bits
+ cmp.ne p6,p0=zeroval,scr2
+(p6) br.cond.sptk 5b
+
+ tbit.nz p8,p0=scr1,DEADLOCKBIT;;// Test for DEADLOCK
+
+ mov psr.l=psrsave;; // Reenable IC
+ srlz.i;;
+ ////////////// END PHYSICAL MODE ////////////////////
+
+(p8) add r8=1,r8
+(p8) br.cond.spnt 1b;; // Repeat if DEADLOCK occurred.
+
+ br.ret.sptk rp
+ .endp sn2_ptc_deadlock_recovery_core
diff --git a/xen/arch/ia64/linux/sn/pci/Makefile b/xen/arch/ia64/linux/sn/pci/Makefile
new file mode 100644
index 0000000000..4c866bc776
--- /dev/null
+++ b/xen/arch/ia64/linux/sn/pci/Makefile
@@ -0,0 +1 @@
+subdir-y += pcibr
diff --git a/xen/arch/ia64/linux/sn/pci/pcibr/Makefile b/xen/arch/ia64/linux/sn/pci/pcibr/Makefile
new file mode 100644
index 0000000000..4039c25ab4
--- /dev/null
+++ b/xen/arch/ia64/linux/sn/pci/pcibr/Makefile
@@ -0,0 +1 @@
+obj-y += pcibr_reg.o
diff --git a/xen/arch/ia64/linux/sn/pci/pcibr/README.origin b/xen/arch/ia64/linux/sn/pci/pcibr/README.origin
new file mode 100644
index 0000000000..45d03b049c
--- /dev/null
+++ b/xen/arch/ia64/linux/sn/pci/pcibr/README.origin
@@ -0,0 +1,7 @@
+Source files in this directory are identical copies of linux-2.6.19 files:
+
+NOTE: DO NOT commit changes to these files! If a file
+needs to be changed, move it to ../linux-xen and follow
+the instructions in the README there.
+
+pcibr_reg.c -> linux/arch/ia64/sn/pci/pcibr/pcibr_reg.c
diff --git a/xen/arch/ia64/linux/sn/pci/pcibr/pcibr_reg.c b/xen/arch/ia64/linux/sn/pci/pcibr/pcibr_reg.c
new file mode 100644
index 0000000000..8b8bbd51d4
--- /dev/null
+++ b/xen/arch/ia64/linux/sn/pci/pcibr/pcibr_reg.c
@@ -0,0 +1,285 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <asm/sn/io.h>
+#include <asm/sn/pcibr_provider.h>
+#include <asm/sn/pcibus_provider_defs.h>
+#include <asm/sn/pcidev.h>
+#include <asm/sn/pic.h>
+#include <asm/sn/tiocp.h>
+
+union br_ptr {
+ struct tiocp tio;
+ struct pic pic;
+};
+
+/*
+ * Control Register Access -- Read/Write 0000_0020
+ */
+void pcireg_control_bit_clr(struct pcibus_info *pcibus_info, u64 bits)
+{
+ union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
+
+ if (pcibus_info) {
+ switch (pcibus_info->pbi_bridge_type) {
+ case PCIBR_BRIDGETYPE_TIOCP:
+ __sn_clrq_relaxed(&ptr->tio.cp_control, bits);
+ break;
+ case PCIBR_BRIDGETYPE_PIC:
+ __sn_clrq_relaxed(&ptr->pic.p_wid_control, bits);
+ break;
+ default:
+ panic
+ ("pcireg_control_bit_clr: unknown bridgetype bridge 0x%p",
+ ptr);
+ }
+ }
+}
+
+void pcireg_control_bit_set(struct pcibus_info *pcibus_info, u64 bits)
+{
+ union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
+
+ if (pcibus_info) {
+ switch (pcibus_info->pbi_bridge_type) {
+ case PCIBR_BRIDGETYPE_TIOCP:
+ __sn_setq_relaxed(&ptr->tio.cp_control, bits);
+ break;
+ case PCIBR_BRIDGETYPE_PIC:
+ __sn_setq_relaxed(&ptr->pic.p_wid_control, bits);
+ break;
+ default:
+ panic
+ ("pcireg_control_bit_set: unknown bridgetype bridge 0x%p",
+ ptr);
+ }
+ }
+}
+
+/*
+ * PCI/PCIX Target Flush Register Access -- Read Only 0000_0050
+ */
+u64 pcireg_tflush_get(struct pcibus_info *pcibus_info)
+{
+ union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
+ u64 ret = 0;
+
+ if (pcibus_info) {
+ switch (pcibus_info->pbi_bridge_type) {
+ case PCIBR_BRIDGETYPE_TIOCP:
+ ret = __sn_readq_relaxed(&ptr->tio.cp_tflush);
+ break;
+ case PCIBR_BRIDGETYPE_PIC:
+ ret = __sn_readq_relaxed(&ptr->pic.p_wid_tflush);
+ break;
+ default:
+ panic
+ ("pcireg_tflush_get: unknown bridgetype bridge 0x%p",
+ ptr);
+ }
+ }
+
+ /* Read of the Target Flush should always return zero */
+ if (ret != 0)
+ panic("pcireg_tflush_get:Target Flush failed\n");
+
+ return ret;
+}
+
+/*
+ * Interrupt Status Register Access -- Read Only 0000_0100
+ */
+u64 pcireg_intr_status_get(struct pcibus_info * pcibus_info)
+{
+ union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
+ u64 ret = 0;
+
+ if (pcibus_info) {
+ switch (pcibus_info->pbi_bridge_type) {
+ case PCIBR_BRIDGETYPE_TIOCP:
+ ret = __sn_readq_relaxed(&ptr->tio.cp_int_status);
+ break;
+ case PCIBR_BRIDGETYPE_PIC:
+ ret = __sn_readq_relaxed(&ptr->pic.p_int_status);
+ break;
+ default:
+ panic
+ ("pcireg_intr_status_get: unknown bridgetype bridge 0x%p",
+ ptr);
+ }
+ }
+ return ret;
+}
+
+/*
+ * Interrupt Enable Register Access -- Read/Write 0000_0108
+ */
+void pcireg_intr_enable_bit_clr(struct pcibus_info *pcibus_info, u64 bits)
+{
+ union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
+
+ if (pcibus_info) {
+ switch (pcibus_info->pbi_bridge_type) {
+ case PCIBR_BRIDGETYPE_TIOCP:
+ __sn_clrq_relaxed(&ptr->tio.cp_int_enable, bits);
+ break;
+ case PCIBR_BRIDGETYPE_PIC:
+ __sn_clrq_relaxed(&ptr->pic.p_int_enable, bits);
+ break;
+ default:
+ panic
+ ("pcireg_intr_enable_bit_clr: unknown bridgetype bridge 0x%p",
+ ptr);
+ }
+ }
+}
+
+void pcireg_intr_enable_bit_set(struct pcibus_info *pcibus_info, u64 bits)
+{
+ union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
+
+ if (pcibus_info) {
+ switch (pcibus_info->pbi_bridge_type) {
+ case PCIBR_BRIDGETYPE_TIOCP:
+ __sn_setq_relaxed(&ptr->tio.cp_int_enable, bits);
+ break;
+ case PCIBR_BRIDGETYPE_PIC:
+ __sn_setq_relaxed(&ptr->pic.p_int_enable, bits);
+ break;
+ default:
+ panic
+ ("pcireg_intr_enable_bit_set: unknown bridgetype bridge 0x%p",
+ ptr);
+ }
+ }
+}
+
+/*
+ * Intr Host Address Register (int_addr) -- Read/Write 0000_0130 - 0000_0168
+ */
+void pcireg_intr_addr_addr_set(struct pcibus_info *pcibus_info, int int_n,
+ u64 addr)
+{
+ union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
+
+ if (pcibus_info) {
+ switch (pcibus_info->pbi_bridge_type) {
+ case PCIBR_BRIDGETYPE_TIOCP:
+ __sn_clrq_relaxed(&ptr->tio.cp_int_addr[int_n],
+ TIOCP_HOST_INTR_ADDR);
+ __sn_setq_relaxed(&ptr->tio.cp_int_addr[int_n],
+ (addr & TIOCP_HOST_INTR_ADDR));
+ break;
+ case PCIBR_BRIDGETYPE_PIC:
+ __sn_clrq_relaxed(&ptr->pic.p_int_addr[int_n],
+ PIC_HOST_INTR_ADDR);
+ __sn_setq_relaxed(&ptr->pic.p_int_addr[int_n],
+ (addr & PIC_HOST_INTR_ADDR));
+ break;
+ default:
+ panic
+ ("pcireg_intr_addr_addr_get: unknown bridgetype bridge 0x%p",
+ ptr);
+ }
+ }
+}
+
+/*
+ * Force Interrupt Register Access -- Write Only 0000_01C0 - 0000_01F8
+ */
+void pcireg_force_intr_set(struct pcibus_info *pcibus_info, int int_n)
+{
+ union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
+
+ if (pcibus_info) {
+ switch (pcibus_info->pbi_bridge_type) {
+ case PCIBR_BRIDGETYPE_TIOCP:
+ writeq(1, &ptr->tio.cp_force_pin[int_n]);
+ break;
+ case PCIBR_BRIDGETYPE_PIC:
+ writeq(1, &ptr->pic.p_force_pin[int_n]);
+ break;
+ default:
+ panic
+ ("pcireg_force_intr_set: unknown bridgetype bridge 0x%p",
+ ptr);
+ }
+ }
+}
+
+/*
+ * Device(x) Write Buffer Flush Reg Access -- Read Only 0000_0240 - 0000_0258
+ */
+u64 pcireg_wrb_flush_get(struct pcibus_info *pcibus_info, int device)
+{
+ union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
+ u64 ret = 0;
+
+ if (pcibus_info) {
+ switch (pcibus_info->pbi_bridge_type) {
+ case PCIBR_BRIDGETYPE_TIOCP:
+ ret =
+ __sn_readq_relaxed(&ptr->tio.cp_wr_req_buf[device]);
+ break;
+ case PCIBR_BRIDGETYPE_PIC:
+ ret =
+ __sn_readq_relaxed(&ptr->pic.p_wr_req_buf[device]);
+ break;
+ default:
+ panic("pcireg_wrb_flush_get: unknown bridgetype bridge 0x%p", ptr);
+ }
+
+ }
+ /* Read of the Write Buffer Flush should always return zero */
+ return ret;
+}
+
+void pcireg_int_ate_set(struct pcibus_info *pcibus_info, int ate_index,
+ u64 val)
+{
+ union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
+
+ if (pcibus_info) {
+ switch (pcibus_info->pbi_bridge_type) {
+ case PCIBR_BRIDGETYPE_TIOCP:
+ writeq(val, &ptr->tio.cp_int_ate_ram[ate_index]);
+ break;
+ case PCIBR_BRIDGETYPE_PIC:
+ writeq(val, &ptr->pic.p_int_ate_ram[ate_index]);
+ break;
+ default:
+ panic
+ ("pcireg_int_ate_set: unknown bridgetype bridge 0x%p",
+ ptr);
+ }
+ }
+}
+
+u64 __iomem *pcireg_int_ate_addr(struct pcibus_info *pcibus_info, int ate_index)
+{
+ union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
+ u64 __iomem *ret = NULL;
+
+ if (pcibus_info) {
+ switch (pcibus_info->pbi_bridge_type) {
+ case PCIBR_BRIDGETYPE_TIOCP:
+ ret = &ptr->tio.cp_int_ate_ram[ate_index];
+ break;
+ case PCIBR_BRIDGETYPE_PIC:
+ ret = &ptr->pic.p_int_ate_ram[ate_index];
+ break;
+ default:
+ panic
+ ("pcireg_int_ate_addr: unknown bridgetype bridge 0x%p",
+ ptr);
+ }
+ }
+ return ret;
+}
diff --git a/xen/arch/ia64/vmx/mmio.c b/xen/arch/ia64/vmx/mmio.c
index 737220df8f..08247e1c14 100644
--- a/xen/arch/ia64/vmx/mmio.c
+++ b/xen/arch/ia64/vmx/mmio.c
@@ -362,7 +362,7 @@ void emulate_io_inst(VCPU *vcpu, u64 padr, u64 ma)
temp += post_update;
vcpu_set_gr(vcpu,inst.M15.r3,temp,0);
- vmx_vcpu_increment_iip(vcpu);
+ vcpu_increment_iip(vcpu);
return;
}
// Floating-point Load Pair + Imm ldfp8 M12
@@ -382,9 +382,9 @@ void emulate_io_inst(VCPU *vcpu, u64 padr, u64 ma)
vcpu_set_fpreg(vcpu,inst.M12.f2,&v);
padr += 8;
vcpu_set_gr(vcpu,inst.M12.r3,padr,0);
- vmx_vcpu_increment_iip(vcpu);
+ vcpu_increment_iip(vcpu);
return;
- }
+ }
else{
panic_domain
(NULL,"This memory access instr can't be emulated: %lx pc=%lx\n ",
@@ -420,5 +420,5 @@ void emulate_io_inst(VCPU *vcpu, u64 padr, u64 ma)
*/
}
}
- vmx_vcpu_increment_iip(vcpu);
+ vcpu_increment_iip(vcpu);
}
diff --git a/xen/arch/ia64/vmx/vmx_process.c b/xen/arch/ia64/vmx/vmx_process.c
index 94011beb71..acce2c0bf9 100644
--- a/xen/arch/ia64/vmx/vmx_process.c
+++ b/xen/arch/ia64/vmx/vmx_process.c
@@ -93,7 +93,7 @@ void vmx_reflect_interruption(u64 ifa, u64 isr, u64 iim,
if (vector == IA64_FP_FAULT_VECTOR) {
status = handle_fpu_swa(1, regs, isr);
if (!status) {
- vmx_vcpu_increment_iip(vcpu);
+ vcpu_increment_iip(vcpu);
return;
} else if (IA64_RETRY == status)
return;
@@ -104,7 +104,7 @@ void vmx_reflect_interruption(u64 ifa, u64 isr, u64 iim,
if (!status)
return;
else if (IA64_RETRY == status) {
- vmx_vcpu_decrement_iip(vcpu);
+ vcpu_decrement_iip(vcpu);
return;
}
}
@@ -143,16 +143,16 @@ vmx_ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long is
/* Allow hypercalls only when cpl = 0. */
if (iim == d->arch.breakimm) {
ia64_hypercall(regs);
- vmx_vcpu_increment_iip(v);
+ vcpu_increment_iip(v);
return IA64_NO_FAULT;
}
else if(iim == DOMN_PAL_REQUEST){
pal_emul(v);
- vmx_vcpu_increment_iip(v);
+ vcpu_increment_iip(v);
return IA64_NO_FAULT;
}else if(iim == DOMN_SAL_REQUEST){
sal_emul(v);
- vmx_vcpu_increment_iip(v);
+ vcpu_increment_iip(v);
return IA64_NO_FAULT;
}
}
@@ -212,8 +212,17 @@ void leave_hypervisor_tail(struct pt_regs *regs)
if (callback_irq != 0 && local_events_need_delivery()) {
/* change level for para-device callback irq */
/* use level irq to send discrete event */
- viosapic_set_irq(d, callback_irq, 1);
- viosapic_set_irq(d, callback_irq, 0);
+ if (callback_irq & IA64_CALLBACK_IRQ_RID) {
+ /* case of using Requester-ID as callback irq */
+ /* RID: '<#bus(8)><#dev(5)><#func(3)>' */
+ int dev = (callback_irq >> 3) & 0x1f;
+ viosapic_set_pci_irq(d, dev, 0, 1);
+ viosapic_set_pci_irq(d, dev, 0, 0);
+ } else {
+ /* case of using GSI as callback irq */
+ viosapic_set_irq(d, callback_irq, 1);
+ viosapic_set_irq(d, callback_irq, 0);
+ }
}
}
diff --git a/xen/arch/ia64/vmx/vmx_vcpu.c b/xen/arch/ia64/vmx/vmx_vcpu.c
index 322afe3542..5c1ab3305f 100644
--- a/xen/arch/ia64/vmx/vmx_vcpu.c
+++ b/xen/arch/ia64/vmx/vmx_vcpu.c
@@ -145,48 +145,6 @@ vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value)
return ;
}
-/* Adjust slot both in pt_regs and vpd, upon vpsr.ri which
- * should have sync with ipsr in entry.
- *
- * Clear some bits due to successfully emulation.
- */
-IA64FAULT vmx_vcpu_increment_iip(VCPU *vcpu)
-{
- // TODO: trap_bounce?? Eddie
- REGS *regs = vcpu_regs(vcpu);
- IA64_PSR *ipsr = (IA64_PSR *)&regs->cr_ipsr;
-
- if (ipsr->ri == 2) {
- ipsr->ri = 0;
- regs->cr_iip += 16;
- } else {
- ipsr->ri++;
- }
-
- ipsr->val &=
- (~ (IA64_PSR_ID |IA64_PSR_DA | IA64_PSR_DD |
- IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA
- ));
-
- return (IA64_NO_FAULT);
-}
-
-
-IA64FAULT vmx_vcpu_decrement_iip(VCPU *vcpu)
-{
- REGS *regs = vcpu_regs(vcpu);
- IA64_PSR *ipsr = (IA64_PSR *)&regs->cr_ipsr;
-
- if (ipsr->ri == 0) {
- ipsr->ri = 2;
- regs->cr_iip -= 16;
- } else {
- ipsr->ri--;
- }
- return (IA64_NO_FAULT);
-}
-
-
IA64FAULT vmx_vcpu_cover(VCPU *vcpu)
{
REGS *regs = vcpu_regs(vcpu);
@@ -199,15 +157,12 @@ IA64FAULT vmx_vcpu_cover(VCPU *vcpu)
return (IA64_NO_FAULT);
}
-
struct virtual_platform_def *
vmx_vcpu_get_plat(VCPU *vcpu)
{
return &(vcpu->domain->arch.vmx_platform);
}
-
-
IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u64 reg, u64 val)
{
ia64_rr oldrr,newrr;
diff --git a/xen/arch/ia64/vmx/vmx_virt.c b/xen/arch/ia64/vmx/vmx_virt.c
index 54c2e67b25..9926c6ed8f 100644
--- a/xen/arch/ia64/vmx/vmx_virt.c
+++ b/xen/arch/ia64/vmx/vmx_virt.c
@@ -1568,7 +1568,7 @@ if ( (cause == 0xff && opcode == 0x1e000000000) || cause == 0 ) {
#endif
if ( status == IA64_NO_FAULT && cause !=EVENT_RFI ) {
- vmx_vcpu_increment_iip(vcpu);
+ vcpu_increment_iip(vcpu);
}
recover_if_physical_mode(vcpu);
diff --git a/xen/arch/ia64/xen/dom0_ops.c b/xen/arch/ia64/xen/dom0_ops.c
index e21c96f827..a2c0903ed6 100644
--- a/xen/arch/ia64/xen/dom0_ops.c
+++ b/xen/arch/ia64/xen/dom0_ops.c
@@ -103,16 +103,6 @@ long arch_do_domctl(xen_domctl_t *op, XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
ret = -EINVAL;
break;
}
- if (!d->arch.is_vti) {
- struct vcpu *v;
- for_each_vcpu(d, v) {
- BUG_ON(v->arch.privregs == NULL);
- free_domheap_pages(virt_to_page(v->arch.privregs),
- get_order_from_shift(XMAPPEDREGS_SHIFT));
- v->arch.privregs = NULL;
- relinquish_vcpu_resources(v);
- }
- }
d->arch.is_vti = 1;
vmx_setup_platform(d);
}
@@ -226,6 +216,7 @@ long arch_do_sysctl(xen_sysctl_t *op, XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
#endif
pi->total_pages = total_pages;
pi->free_pages = avail_domheap_pages();
+ pi->scrub_pages = avail_scrub_pages();
pi->cpu_khz = local_cpu_data->proc_freq / 1000;
memset(pi->hw_cap, 0, sizeof(pi->hw_cap));
//memcpy(pi->hw_cap, boot_cpu_data.x86_capability, NCAPINTS*4);
diff --git a/xen/arch/ia64/xen/dom_fw.c b/xen/arch/ia64/xen/dom_fw.c
index cfb1cb13ce..b122a793ca 100644
--- a/xen/arch/ia64/xen/dom_fw.c
+++ b/xen/arch/ia64/xen/dom_fw.c
@@ -516,10 +516,10 @@ complete_dom0_memmap(struct domain *d,
{
efi_memory_desc_t *md;
u64 addr;
- int j;
void *efi_map_start, *efi_map_end, *p;
u64 efi_desc_size;
int i;
+ unsigned long dom_mem = maxmem - (d->tot_pages << PAGE_SHIFT);
/* Walk through all MDT entries.
Copy all interesting entries. */
@@ -533,6 +533,7 @@ complete_dom0_memmap(struct domain *d,
u64 start = md->phys_addr;
u64 size = md->num_pages << EFI_PAGE_SHIFT;
u64 end = start + size;
+ unsigned long flags;
switch (md->type) {
case EFI_RUNTIME_SERVICES_CODE:
@@ -540,9 +541,20 @@ complete_dom0_memmap(struct domain *d,
case EFI_ACPI_RECLAIM_MEMORY:
case EFI_ACPI_MEMORY_NVS:
case EFI_RESERVED_TYPE:
- /* Map into dom0 - All these are writable. */
- assign_domain_mach_page(d, start, size,
- ASSIGN_writable);
+ /*
+ * Map into dom0 - We must respect protection
+ * and cache attributes. Not all of these pages
+ * are writable!!!
+ */
+ flags = ASSIGN_writable; /* dummy - zero */
+ if (md->attribute & EFI_MEMORY_WP)
+ flags |= ASSIGN_readonly;
+ if ((md->attribute & EFI_MEMORY_UC) &&
+ !(md->attribute & EFI_MEMORY_WB))
+ flags |= ASSIGN_nocache;
+
+ assign_domain_mach_page(d, start, size, flags);
+
/* Fall-through. */
case EFI_MEMORY_MAPPED_IO:
/* Will be mapped with ioremap. */
@@ -566,26 +578,23 @@ complete_dom0_memmap(struct domain *d,
case EFI_LOADER_DATA:
case EFI_BOOT_SERVICES_CODE:
case EFI_BOOT_SERVICES_DATA:
- /* Create dom0 MDT entries for conventional memory
- below 1MB. Without this Linux will assume VGA is
- present because 0xA0000 will always be either a hole
- in the MDT or an I/O region via the passthrough. */
-
- end = min(ONE_MB, end);
+ if (!(md->attribute & EFI_MEMORY_WB))
+ break;
- /* Avoid firmware and hypercall area.
- We know they are 0-based. */
- if (end < FW_END_PADDR || start >= ONE_MB)
+ start = max(FW_END_PADDR, start);
+ end = min(start + dom_mem, end);
+ if (end <= start)
break;
- if (start < FW_END_PADDR)
- start = FW_END_PADDR;
-
+
dom_md->type = EFI_CONVENTIONAL_MEMORY;
dom_md->phys_addr = start;
dom_md->virt_addr = 0;
dom_md->num_pages = (end - start) >> EFI_PAGE_SHIFT;
- dom_md->attribute = md->attribute;
+ dom_md->attribute = EFI_MEMORY_WB;
num_mds++;
+
+ dom_mem -= dom_md->num_pages << EFI_PAGE_SHIFT;
+ d->arch.convmem_end = end;
break;
case EFI_UNUSABLE_MEMORY:
@@ -604,66 +613,13 @@ complete_dom0_memmap(struct domain *d,
sort(tables->efi_memmap, num_mds, sizeof(efi_memory_desc_t),
efi_mdt_cmp, NULL);
- /* find gaps and fill them with conventional memory */
- i = num_mds;
- for (j = 0; j < num_mds; j++) {
- unsigned long end;
- unsigned long next_start;
-
- md = &tables->efi_memmap[j];
- end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
-
- if (j + 1 < num_mds) {
- efi_memory_desc_t* next_md;
- next_md = &tables->efi_memmap[j + 1];
- next_start = next_md->phys_addr;
-
- /* Have just been sorted. */
- BUG_ON(end > next_start);
-
- /* No room for memory! */
- if (end == next_start)
- continue;
-
- if (next_start > maxmem)
- next_start = maxmem;
- }
- else
- next_start = maxmem;
-
- /* Avoid "legacy" low memory addresses
- and the HYPERCALL area. */
- if (end < ONE_MB)
- end = ONE_MB;
-
- // clip the range and align to PAGE_SIZE
- next_start = next_start & PAGE_MASK;
- end = PAGE_ALIGN(end);
-
- /* No room for memory. */
- if (end >= next_start)
- continue;
-
- MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB,
- end, next_start);
-
- if (next_start >= maxmem)
- break;
- }
- num_mds = i;
- BUG_ON(num_mds > NUM_MEM_DESCS);
- sort(tables->efi_memmap, num_mds, sizeof(efi_memory_desc_t),
- efi_mdt_cmp, NULL);
-
/* setup_guest() @ libxc/xc_linux_build() arranges memory for domU.
* however no one arranges memory for dom0,
* instead we allocate pages manually.
*/
for (i = 0; i < num_mds; i++) {
md = &tables->efi_memmap[i];
- if (md->phys_addr > maxmem)
- break;
-
+
if (md->type == EFI_LOADER_DATA ||
md->type == EFI_PAL_CODE ||
md->type == EFI_CONVENTIONAL_MEMORY) {
@@ -675,8 +631,6 @@ complete_dom0_memmap(struct domain *d,
/* md->num_pages = 0 is allowed. */
continue;
}
- if (end > (max_page << PAGE_SHIFT))
- end = (max_page << PAGE_SHIFT);
for (addr = start; addr < end; addr += PAGE_SIZE)
assign_new_domain0_page(d, addr);
@@ -863,6 +817,7 @@ dom_fw_init(struct domain *d,
/* Memory. */
MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB,
FW_END_PADDR, maxmem);
+ d->arch.convmem_end = maxmem;
/* Create an entry for IO ports. */
MAKE_MD(EFI_MEMORY_MAPPED_IO_PORT_SPACE, EFI_MEMORY_UC,
diff --git a/xen/arch/ia64/xen/domain.c b/xen/arch/ia64/xen/domain.c
index 073614f4f6..f28d2040e0 100644
--- a/xen/arch/ia64/xen/domain.c
+++ b/xen/arch/ia64/xen/domain.c
@@ -305,24 +305,14 @@ void hlt_timer_fn(void *data)
void relinquish_vcpu_resources(struct vcpu *v)
{
- if (HAS_PERVCPU_VHPT(v->domain))
- pervcpu_vhpt_free(v);
- if (v->arch.privregs != NULL) {
- // this might be called by arch_do_domctl() with XEN_DOMCTL_arch_setup()
- // for domVTi.
- if (!(atomic_read(&v->domain->refcnt) & DOMAIN_DESTROYED)) {
- unsigned long i;
- for (i = 0; i < XMAPPEDREGS_SIZE; i += PAGE_SIZE)
- guest_physmap_remove_page(v->domain,
- IA64_XMAPPEDREGS_PADDR(v->vcpu_id) + i,
- virt_to_maddr(v->arch.privregs + i));
- }
-
- free_xenheap_pages(v->arch.privregs,
- get_order_from_shift(XMAPPEDREGS_SHIFT));
- v->arch.privregs = NULL;
- }
- kill_timer(&v->arch.hlt_timer);
+ if (HAS_PERVCPU_VHPT(v->domain))
+ pervcpu_vhpt_free(v);
+ if (v->arch.privregs != NULL) {
+ free_xenheap_pages(v->arch.privregs,
+ get_order_from_shift(XMAPPEDREGS_SHIFT));
+ v->arch.privregs = NULL;
+ }
+ kill_timer(&v->arch.hlt_timer);
}
struct vcpu *alloc_vcpu_struct(void)
@@ -361,36 +351,8 @@ void free_vcpu_struct(struct vcpu *v)
int vcpu_initialise(struct vcpu *v)
{
struct domain *d = v->domain;
- int rc, order, i;
if (!is_idle_domain(d)) {
- if (!d->arch.is_vti) {
- if (HAS_PERVCPU_VHPT(d))
- if ((rc = pervcpu_vhpt_alloc(v)) != 0)
- return rc;
-
- /* Create privregs page only if not VTi. */
- order = get_order_from_shift(XMAPPEDREGS_SHIFT);
- v->arch.privregs = alloc_xenheap_pages(order);
- BUG_ON(v->arch.privregs == NULL);
- memset(v->arch.privregs, 0, 1 << XMAPPEDREGS_SHIFT);
- for (i = 0; i < (1 << order); i++)
- share_xen_page_with_guest(virt_to_page(v->arch.privregs) +
- i, d, XENSHARE_writable);
- /*
- * XXX IA64_XMAPPEDREGS_PADDR
- * assign these pages into guest pseudo physical address
- * space for dom0 to map this page by gmfn.
- * this is necessary for domain save, restore and dump-core.
- */
- for (i = 0; i < XMAPPEDREGS_SIZE; i += PAGE_SIZE)
- assign_domain_page(d, IA64_XMAPPEDREGS_PADDR(v->vcpu_id) + i,
- virt_to_maddr(v->arch.privregs + i));
-
- tlbflush_update_time(&v->arch.tlbflush_timestamp,
- tlbflush_current_time());
- }
-
v->arch.metaphysical_rr0 = d->arch.metaphysical_rr0;
v->arch.metaphysical_rr4 = d->arch.metaphysical_rr4;
v->arch.metaphysical_saved_rr0 = d->arch.metaphysical_rr0;
@@ -420,6 +382,41 @@ int vcpu_initialise(struct vcpu *v)
return 0;
}
+int vcpu_late_initialise(struct vcpu *v)
+{
+ struct domain *d = v->domain;
+ int rc, order, i;
+
+ if (HAS_PERVCPU_VHPT(d)) {
+ rc = pervcpu_vhpt_alloc(v);
+ if (rc != 0)
+ return rc;
+ }
+
+ /* Create privregs page. */
+ order = get_order_from_shift(XMAPPEDREGS_SHIFT);
+ v->arch.privregs = alloc_xenheap_pages(order);
+ BUG_ON(v->arch.privregs == NULL);
+ memset(v->arch.privregs, 0, 1 << XMAPPEDREGS_SHIFT);
+ for (i = 0; i < (1 << order); i++)
+ share_xen_page_with_guest(virt_to_page(v->arch.privregs) + i,
+ d, XENSHARE_writable);
+ /*
+ * XXX IA64_XMAPPEDREGS_PADDR
+ * assign these pages into guest pseudo physical address
+ * space for dom0 to map this page by gmfn.
+ * this is necessary for domain save, restore and dump-core.
+ */
+ for (i = 0; i < XMAPPEDREGS_SIZE; i += PAGE_SIZE)
+ assign_domain_page(d, IA64_XMAPPEDREGS_PADDR(v->vcpu_id) + i,
+ virt_to_maddr(v->arch.privregs + i));
+
+ tlbflush_update_time(&v->arch.tlbflush_timestamp,
+ tlbflush_current_time());
+
+ return 0;
+}
+
void vcpu_destroy(struct vcpu *v)
{
if (v->domain->arch.is_vti)
@@ -553,6 +550,7 @@ int arch_set_info_guest(struct vcpu *v, vcpu_guest_context_u c)
{
struct pt_regs *regs = vcpu_regs (v);
struct domain *d = v->domain;
+ int rc;
*regs = c.nat->user_regs;
@@ -582,16 +580,25 @@ int arch_set_info_guest(struct vcpu *v, vcpu_guest_context_u c)
v->arch.event_callback_ip = er->event_callback_ip;
v->arch.dcr = er->dcr;
v->arch.iva = er->iva;
- }
-
- if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
- return 0;
- if (d->arch.is_vti)
- vmx_final_setup_guest(v);
-
- /* This overrides some registers. */
- vcpu_init_regs(v);
-
+ }
+
+ if (test_bit(_VCPUF_initialised, &v->vcpu_flags))
+ return 0;
+
+ if (d->arch.is_vti)
+ vmx_final_setup_guest(v);
+ else {
+ rc = vcpu_late_initialise(v);
+ if (rc != 0)
+ return rc;
+ VCPU(v, interrupt_mask_addr) =
+ (unsigned char *) d->arch.shared_info_va +
+ INT_ENABLE_OFFSET(v);
+ }
+
+ /* This overrides some registers. */
+ vcpu_init_regs(v);
+
/* Don't redo final setup */
set_bit(_VCPUF_initialised, &v->vcpu_flags);
return 0;
@@ -683,7 +690,6 @@ domain_set_shared_info_va (unsigned long va)
{
struct vcpu *v = current;
struct domain *d = v->domain;
- struct vcpu *v1;
/* Check virtual address:
must belong to region 7,
@@ -699,10 +705,8 @@ domain_set_shared_info_va (unsigned long va)
printk ("Domain set shared_info_va to 0x%016lx\n", va);
d->arch.shared_info_va = va;
- for_each_vcpu (d, v1) {
- VCPU(v1, interrupt_mask_addr) =
- (unsigned char *)va + INT_ENABLE_OFFSET(v1);
- }
+ VCPU(v, interrupt_mask_addr) = (unsigned char *)va +
+ INT_ENABLE_OFFSET(v);
__ia64_per_cpu_var(current_psr_ic_addr) = (int *)(va + XSI_PSR_IC_OFS);
@@ -760,8 +764,9 @@ int shadow_mode_control(struct domain *d, xen_domctl_shadow_op_t *sc)
atomic64_set(&d->arch.shadow_fault_count, 0);
atomic64_set(&d->arch.shadow_dirty_count, 0);
- d->arch.shadow_bitmap_size = (d->max_pages + BITS_PER_LONG-1) &
- ~(BITS_PER_LONG-1);
+ d->arch.shadow_bitmap_size =
+ ((d->arch.convmem_end >> PAGE_SHIFT) +
+ BITS_PER_LONG - 1) & ~(BITS_PER_LONG - 1);
d->arch.shadow_bitmap = xmalloc_array(unsigned long,
d->arch.shadow_bitmap_size / BITS_PER_LONG);
if (d->arch.shadow_bitmap == NULL) {
@@ -1051,10 +1056,11 @@ int construct_dom0(struct domain *d,
if(initrd_start && initrd_len){
unsigned long offset;
- pinitrd_start= dom0_size - (PAGE_ALIGN(initrd_len) + 4*1024*1024);
- if (pinitrd_start <= pstart_info)
- panic("%s:enough memory is not assigned to dom0", __func__);
-
+ /* The next page aligned boundary after the start info.
+ Note: EFI_PAGE_SHIFT = 12 <= PAGE_SHIFT */
+ pinitrd_start = pstart_info + PAGE_SIZE;
+ if (pinitrd_start + initrd_len >= dom0_size)
+ panic("%s: not enough memory assigned to dom0", __func__);
for (offset = 0; offset < initrd_len; offset += PAGE_SIZE) {
struct page_info *p;
p = assign_new_domain_page(d, pinitrd_start + offset);
@@ -1104,15 +1110,11 @@ int construct_dom0(struct domain *d,
printk ("Dom0 max_vcpus=%d\n", dom0_max_vcpus);
for ( i = 1; i < dom0_max_vcpus; i++ )
if (alloc_vcpu(d, i, i) == NULL)
- printk ("Cannot allocate dom0 vcpu %d\n", i);
+ panic("Cannot allocate dom0 vcpu %d\n", i);
/* Copy the OS image. */
loaddomainelfimage(d,image_start);
- /* Copy the initial ramdisk. */
- //if ( initrd_len != 0 )
- // memcpy((void *)vinitrd_start, initrd_start, initrd_len);
-
BUILD_BUG_ON(sizeof(start_info_t) + sizeof(dom0_vga_console_info_t) +
sizeof(struct ia64_boot_param) > PAGE_SIZE);
@@ -1161,8 +1163,7 @@ int construct_dom0(struct domain *d,
bp->console_info.orig_y = bp->console_info.num_rows == 0 ?
0 : bp->console_info.num_rows - 1;
- bp->initrd_start = dom0_size -
- (PAGE_ALIGN(ia64_boot_param->initrd_size) + 4*1024*1024);
+ bp->initrd_start = pinitrd_start;
bp->initrd_size = ia64_boot_param->initrd_size;
ci = (dom0_vga_console_info_t *)((unsigned char *)si +
diff --git a/xen/arch/ia64/xen/faults.c b/xen/arch/ia64/xen/faults.c
index 3c8741a1e6..02606b1228 100644
--- a/xen/arch/ia64/xen/faults.c
+++ b/xen/arch/ia64/xen/faults.c
@@ -83,16 +83,14 @@ void reflect_interruption(unsigned long isr, struct pt_regs *regs,
check_bad_nested_interruption(isr, regs, vector);
PSCB(v, unat) = regs->ar_unat; // not sure if this is really needed?
PSCB(v, precover_ifs) = regs->cr_ifs;
- vcpu_bsw0(v);
PSCB(v, ipsr) = vcpu_get_ipsr_int_state(v, regs->cr_ipsr);
+ vcpu_bsw0(v);
PSCB(v, isr) = isr;
PSCB(v, iip) = regs->cr_iip;
PSCB(v, ifs) = 0;
- PSCB(v, incomplete_regframe) = 0;
regs->cr_iip = ((unsigned long)PSCBX(v, iva) + vector) & ~0xffUL;
regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
- regs->r31 = current->domain->arch.shared_info_va + XSI_IPSR_OFS;
v->vcpu_info->evtchn_upcall_mask = 1;
PSCB(v, interrupt_collection_enabled) = 0;
@@ -120,19 +118,24 @@ void reflect_extint(struct pt_regs *regs)
reflect_interruption(isr, regs, IA64_EXTINT_VECTOR);
}
-void reflect_event(struct pt_regs *regs)
+void reflect_event(void)
{
- unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
struct vcpu *v = current;
+ struct pt_regs *regs;
+ unsigned long isr;
+
+ if (!event_pending(v))
+ return;
/* Sanity check */
- if (is_idle_vcpu(v) || !user_mode(regs)) {
+ if (is_idle_vcpu(v)) {
//printk("WARN: invocation to reflect_event in nested xen\n");
return;
}
- if (!event_pending(v))
- return;
+ regs = vcpu_regs(v);
+
+ isr = regs->cr_ipsr & IA64_PSR_RI;
if (!PSCB(v, interrupt_collection_enabled))
printk("psr.ic off, delivering event, ipsr=%lx,iip=%lx,"
@@ -140,16 +143,14 @@ void reflect_event(struct pt_regs *regs)
regs->cr_ipsr, regs->cr_iip, isr, PSCB(v, iip));
PSCB(v, unat) = regs->ar_unat; // not sure if this is really needed?
PSCB(v, precover_ifs) = regs->cr_ifs;
- vcpu_bsw0(v);
PSCB(v, ipsr) = vcpu_get_ipsr_int_state(v, regs->cr_ipsr);
+ vcpu_bsw0(v);
PSCB(v, isr) = isr;
PSCB(v, iip) = regs->cr_iip;
PSCB(v, ifs) = 0;
- PSCB(v, incomplete_regframe) = 0;
regs->cr_iip = v->arch.event_callback_ip;
regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
- regs->r31 = current->domain->arch.shared_info_va + XSI_IPSR_OFS;
v->vcpu_info->evtchn_upcall_mask = 1;
PSCB(v, interrupt_collection_enabled) = 0;
@@ -175,7 +176,6 @@ static int handle_lazy_cover(struct vcpu *v, struct pt_regs *regs)
{
if (!PSCB(v, interrupt_collection_enabled)) {
PSCB(v, ifs) = regs->cr_ifs;
- PSCB(v, incomplete_regframe) = 1;
regs->cr_ifs = 0;
perfc_incrc(lazy_cover);
return 1; // retry same instruction with cr.ifs off
@@ -215,8 +215,8 @@ void ia64_do_page_fault(unsigned long address, unsigned long isr,
unsigned long m_pteval;
m_pteval = translate_domain_pte(pteval, address, itir,
&logps, &entry);
- vcpu_itc_no_srlz(current, (is_data ? 2 : 1) | 4,
- address, m_pteval, pteval, logps, &entry);
+ vcpu_itc_no_srlz(current, is_data ? 2 : 1, address,
+ m_pteval, pteval, logps, &entry);
if ((fault == IA64_USE_TLB && !current->arch.dtlb.pte.p) ||
p2m_entry_retry(&entry)) {
/* dtlb has been purged in-between. This dtlb was
@@ -261,8 +261,6 @@ void ia64_do_page_fault(unsigned long address, unsigned long isr,
((unsigned long)PSCBX(current, iva) + fault) & ~0xffUL;
regs->cr_ipsr =
(regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
- // NOTE: nested trap must NOT pass PSCB address
- //regs->r31 = (unsigned long) &PSCB(current);
perfc_incra(slow_reflect, fault >> 8);
return;
}
@@ -545,7 +543,8 @@ ia64_handle_break(unsigned long ifa, struct pt_regs *regs, unsigned long isr,
vcpu_increment_iip(current);
} else
reflect_interruption(isr, regs, vector);
- } else if (!PSCB(v, interrupt_collection_enabled)) {
+ } else if ((iim - HYPERPRIVOP_START) < HYPERPRIVOP_MAX
+ && ia64_get_cpl(regs->cr_ipsr) == 2) {
if (ia64_hyperprivop(iim, regs))
vcpu_increment_iip(current);
} else {
diff --git a/xen/arch/ia64/xen/hypercall.c b/xen/arch/ia64/xen/hypercall.c
index 69be18efcd..0a0890c28f 100644
--- a/xen/arch/ia64/xen/hypercall.c
+++ b/xen/arch/ia64/xen/hypercall.c
@@ -17,6 +17,7 @@
#include <asm/sal.h> /* FOR struct ia64_sal_retval */
#include <asm/fpswa.h> /* FOR struct fpswa_ret_t */
+#include <asm/vmx_vcpu.h>
#include <asm/vcpu.h>
#include <asm/dom_fw.h>
#include <public/domctl.h>
@@ -32,94 +33,14 @@
#include <xen/event.h>
#include <xen/perfc.h>
-static long do_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg);
-static long do_callback_op(int cmd, XEN_GUEST_HANDLE(void) arg);
-
-const hypercall_t ia64_hypercall_table[NR_hypercalls] =
-{
- (hypercall_t)do_ni_hypercall, /* do_set_trap_table *//* 0 */
- (hypercall_t)do_ni_hypercall, /* do_mmu_update */
- (hypercall_t)do_ni_hypercall, /* do_set_gdt */
- (hypercall_t)do_ni_hypercall, /* do_stack_switch */
- (hypercall_t)do_ni_hypercall, /* do_set_callbacks */
- (hypercall_t)do_ni_hypercall, /* do_fpu_taskswitch *//* 5 */
- (hypercall_t)do_sched_op_compat,
- (hypercall_t)do_ni_hypercall,
- (hypercall_t)do_ni_hypercall, /* do_set_debugreg */
- (hypercall_t)do_ni_hypercall, /* do_get_debugreg */
- (hypercall_t)do_ni_hypercall, /* do_update_descriptor * 10 */
- (hypercall_t)do_ni_hypercall, /* do_ni_hypercall */
- (hypercall_t)do_memory_op,
- (hypercall_t)do_multicall,
- (hypercall_t)do_ni_hypercall, /* do_update_va_mapping */
- (hypercall_t)do_ni_hypercall, /* do_set_timer_op */ /* 15 */
- (hypercall_t)do_ni_hypercall,
- (hypercall_t)do_xen_version,
- (hypercall_t)do_console_io,
- (hypercall_t)do_ni_hypercall,
- (hypercall_t)do_grant_table_op, /* 20 */
- (hypercall_t)do_ni_hypercall, /* do_vm_assist */
- (hypercall_t)do_ni_hypercall, /* do_update_va_mapping_othe */
- (hypercall_t)do_ni_hypercall, /* (x86 only) */
- (hypercall_t)do_ni_hypercall, /* do_vcpu_op */
- (hypercall_t)do_ni_hypercall, /* (x86_64 only) */ /* 25 */
- (hypercall_t)do_ni_hypercall, /* do_mmuext_op */
- (hypercall_t)do_ni_hypercall, /* do_acm_op */
- (hypercall_t)do_ni_hypercall, /* do_nmi_op */
- (hypercall_t)do_sched_op,
- (hypercall_t)do_callback_op, /* */ /* 30 */
- (hypercall_t)do_xenoprof_op, /* */
- (hypercall_t)do_event_channel_op,
- (hypercall_t)do_physdev_op,
- (hypercall_t)do_hvm_op, /* */
- (hypercall_t)do_sysctl, /* */ /* 35 */
- (hypercall_t)do_domctl, /* */
- (hypercall_t)do_ni_hypercall, /* */
- (hypercall_t)do_ni_hypercall, /* */
- (hypercall_t)do_ni_hypercall, /* */
- (hypercall_t)do_ni_hypercall, /* */ /* 40 */
- (hypercall_t)do_ni_hypercall, /* */
- (hypercall_t)do_ni_hypercall, /* */
- (hypercall_t)do_ni_hypercall, /* */
- (hypercall_t)do_ni_hypercall, /* */
- (hypercall_t)do_ni_hypercall, /* */ /* 45 */
- (hypercall_t)do_ni_hypercall, /* */
- (hypercall_t)do_ni_hypercall, /* */
- (hypercall_t)do_dom0vp_op, /* dom0vp_op */
- (hypercall_t)do_ni_hypercall, /* arch_1 */
- (hypercall_t)do_ni_hypercall, /* arch_2 */ /* 50 */
- (hypercall_t)do_ni_hypercall, /* arch_3 */
- (hypercall_t)do_ni_hypercall, /* arch_4 */
- (hypercall_t)do_ni_hypercall, /* arch_5 */
- (hypercall_t)do_ni_hypercall, /* arch_6 */
- (hypercall_t)do_ni_hypercall, /* arch_7 */ /* 55 */
- (hypercall_t)do_ni_hypercall,
- (hypercall_t)do_ni_hypercall,
- (hypercall_t)do_ni_hypercall,
- (hypercall_t)do_ni_hypercall,
- (hypercall_t)do_ni_hypercall, /* 60 */
- (hypercall_t)do_ni_hypercall,
- (hypercall_t)do_ni_hypercall,
- (hypercall_t)do_ni_hypercall
-};
+extern long do_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg);
+extern long do_callback_op(int cmd, XEN_GUEST_HANDLE(void) arg);
static IA64FAULT
xen_hypercall (struct pt_regs *regs)
{
uint32_t cmd = (uint32_t)regs->r2;
-
- if (cmd < NR_hypercalls) {
- perfc_incra(hypercalls, cmd);
- regs->r8 = (*ia64_hypercall_table[cmd])(
- regs->r14,
- regs->r15,
- regs->r16,
- regs->r17,
- regs->r18,
- regs->r19);
- } else
- regs->r8 = -ENOSYS;
-
+ printk("Warning %s should not be called %d\n", __FUNCTION__, cmd);
return IA64_NO_FAULT;
}
@@ -129,7 +50,8 @@ xen_fast_hypercall (struct pt_regs *regs)
uint32_t cmd = (uint32_t)regs->r2;
switch (cmd) {
case __HYPERVISOR_ia64_fast_eoi:
- regs->r8 = pirq_guest_eoi(current->domain, regs->r14);
+ printk("Warning %s should not be called %d\n",
+ __FUNCTION__, cmd);
break;
default:
regs->r8 = -ENOSYS;
@@ -137,6 +59,12 @@ xen_fast_hypercall (struct pt_regs *regs)
return IA64_NO_FAULT;
}
+long do_pirq_guest_eoi(int pirq)
+{
+ return pirq_guest_eoi(current->domain, pirq);
+}
+
+
static void
fw_hypercall_ipi (struct pt_regs *regs)
{
@@ -320,42 +248,42 @@ unsigned long hypercall_create_continuation(
va_list args;
va_start(args, format);
- if ( test_bit(_MCSF_in_multicall, &mcs->flags) ) {
- panic("PREEMPT happen in multicall\n"); // Not support yet
- } else {
- vcpu_set_gr(v, 2, op, 0);
- for ( i = 0; *p != '\0'; i++) {
- switch ( *p++ )
- {
- case 'i':
- arg = (unsigned long)va_arg(args, unsigned int);
- break;
- case 'l':
- arg = (unsigned long)va_arg(args, unsigned long);
- break;
- case 'h':
- arg = (unsigned long)va_arg(args, void *);
- break;
- default:
- arg = 0;
- BUG();
- }
- switch (i) {
- case 0: vcpu_set_gr(v, 14, arg, 0);
- break;
- case 1: vcpu_set_gr(v, 15, arg, 0);
- break;
- case 2: vcpu_set_gr(v, 16, arg, 0);
- break;
- case 3: vcpu_set_gr(v, 17, arg, 0);
- break;
- case 4: vcpu_set_gr(v, 18, arg, 0);
- break;
- default: panic("Too many args for hypercall continuation\n");
- break;
- }
- }
+ if (test_bit(_MCSF_in_multicall, &mcs->flags))
+ panic("PREEMPT happen in multicall\n"); // Not support yet
+
+ vcpu_set_gr(v, 15, op, 0);
+
+ for (i = 0; *p != '\0'; i++) {
+ switch ( *p++ )
+ {
+ case 'i':
+ arg = (unsigned long)va_arg(args, unsigned int);
+ break;
+ case 'l':
+ arg = (unsigned long)va_arg(args, unsigned long);
+ break;
+ case 'h':
+ arg = (unsigned long)va_arg(args, void *);
+ break;
+ default:
+ arg = 0;
+ BUG();
+ }
+ vcpu_set_gr(v, 16 + i, arg, 0);
+ }
+
+ if (i >= 6)
+ panic("Too many args for hypercall continuation\n");
+
+ // Clean other argument to 0
+ while (i < 6) {
+ vcpu_set_gr(v, 16 + i, 0, 0);
+ i++;
}
+
+ // re-execute break;
+ vcpu_decrement_iip(v);
+
v->arch.hypercall_continuation = 1;
va_end(args);
return op;
@@ -369,7 +297,7 @@ extern int
iosapic_guest_write(
unsigned long physbase, unsigned int reg, u32 pval);
-static long do_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg)
+long do_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg)
{
int irq;
long ret;
@@ -524,7 +452,7 @@ static long unregister_guest_callback(struct callback_unregister *unreg)
/* First time to add callback to xen/ia64, so let's just stick to
* the newer callback interface.
*/
-static long do_callback_op(int cmd, XEN_GUEST_HANDLE(void) arg)
+long do_callback_op(int cmd, XEN_GUEST_HANDLE(void) arg)
{
long ret;
diff --git a/xen/arch/ia64/xen/hyperprivop.S b/xen/arch/ia64/xen/hyperprivop.S
index 763f66b072..2b940d0922 100644
--- a/xen/arch/ia64/xen/hyperprivop.S
+++ b/xen/arch/ia64/xen/hyperprivop.S
@@ -37,10 +37,10 @@
# define FAST_BREAK
# undef FAST_ACCESS_REFLECT //XXX TODO fast_access_reflect
// doesn't support dom0 vp yet.
-//# define FAST_RFI
+# define FAST_RFI
// TODO: Since we use callback to deliver interrupt,
// FAST_SSM_I needs to be rewritten.
-//# define FAST_SSM_I
+# define FAST_SSM_I
# define FAST_PTC_GA
# undef RFI_TO_INTERRUPT // not working yet
#endif
@@ -81,9 +81,6 @@
// r19 == vpsr.ic
// r31 == pr
GLOBAL_ENTRY(fast_hyperprivop)
-#ifndef FAST_HYPERPRIVOPS // see beginning of file
- br.sptk.many dispatch_break_fault ;;
-#endif
// HYPERPRIVOP_SSM_I?
// assumes domain interrupts pending, so just do it
cmp.eq p7,p6=HYPERPRIVOP_SSM_I,r17
@@ -102,7 +99,9 @@ GLOBAL_ENTRY(fast_hyperprivop)
cmp.eq p7,p6=HYPERPRIVOP_RFI,r17
(p7) br.sptk.many hyper_rfi
;;
-
+#ifndef FAST_HYPERPRIVOPS // see beginning of file
+ br.sptk.many dispatch_break_fault ;;
+#endif
// if event enabled and there are pending events
cmp.ne p7,p0=r20,r0
;;
@@ -140,6 +139,11 @@ GLOBAL_ENTRY(fast_hyperprivop)
(p7) br.sptk.many hyper_get_rr
;;
+ // HYPERPRIVOP_GET_PSR?
+ cmp.eq p7,p6=HYPERPRIVOP_GET_PSR,r17
+(p7) br.sptk.many hyper_get_psr
+ ;;
+
// HYPERPRIVOP_PTC_GA?
cmp.eq p7,p6=HYPERPRIVOP_PTC_GA,r17
(p7) br.sptk.many hyper_ptc_ga
@@ -192,7 +196,6 @@ END(fast_hyperprivop)
// and isr.ri to cr.isr.ri (all other bits zero)
// - cover and set shared_mem precover_ifs to cr.ifs
// ^^^ MISSED THIS FOR fast_break??
-// - set shared_mem ifs and incomplete_regframe to 0
// - set shared_mem interrupt_delivery_enabled to 0
// - set shared_mem interrupt_collection_enabled to 0
// - set r31 to SHAREDINFO_ADDR
@@ -272,32 +275,26 @@ ENTRY(hyper_ssm_i)
st1 [r22]=r20
st4 [r18]=r0
// cover and set shared_mem precover_ifs to cr.ifs
- // set shared_mem ifs and incomplete_regframe to 0
+ // set shared_mem ifs to 0
cover ;;
mov r20=cr.ifs;;
- adds r21=XSI_INCOMPL_REGFR_OFS-XSI_PSR_IC_OFS,r18 ;;
- st4 [r21]=r0 ;;
adds r21=XSI_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
st8 [r21]=r0 ;;
adds r21=XSI_PRECOVER_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
st8 [r21]=r20 ;;
// leave cr.ifs alone for later rfi
- // set iip to go to domain IVA break instruction vector
+ // set iip to go to event callback handler
movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
ld8 r22=[r22];;
- adds r22=IA64_VCPU_IVA_OFFSET,r22;;
- ld8 r23=[r22];;
- movl r24=0x3000;;
- add r24=r24,r23;;
+ adds r22=IA64_VCPU_EVENT_CALLBACK_IP_OFFSET,r22;;
+ ld8 r24=[r22];;
mov cr.iip=r24;;
// OK, now all set to go except for switch to virtual bank0
mov r30=r2
mov r29=r3
- mov r28=r4
;;
adds r2=XSI_BANK1_R16_OFS-XSI_PSR_IC_OFS,r18;
adds r3=(XSI_BANK1_R16_OFS+8)-XSI_PSR_IC_OFS,r18;;
- adds r4=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18
bsw.1;;
// FIXME?: ar.unat is not really handled correctly,
// but may not matter if the OS is NaT-clean
@@ -317,11 +314,9 @@ ENTRY(hyper_ssm_i)
.mem.offset 8,0; st8.spill [r3]=r29,16 ;;
.mem.offset 0,0; st8.spill [r2]=r30,16;
.mem.offset 8,0; st8.spill [r3]=r31,16 ;;
- mov r31=r4
bsw.0 ;;
mov r2=r30
mov r3=r29
- mov r4=r28
adds r20=XSI_BANKNUM_OFS-XSI_PSR_IC_OFS,r18 ;;
st4 [r20]=r0 ;;
mov pr=r31,-1 ;;
@@ -461,11 +456,9 @@ GLOBAL_ENTRY(fast_tick_reflect)
st1 [r22]=r20;;
st4 [r18]=r0;;
// cover and set shared_mem precover_ifs to cr.ifs
- // set shared_mem ifs and incomplete_regframe to 0
+ // set shared_mem ifs to 0
cover ;;
mov r20=cr.ifs;;
- adds r21=XSI_INCOMPL_REGFR_OFS-XSI_PSR_IC_OFS,r18 ;;
- st4 [r21]=r0 ;;
adds r21=XSI_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
st8 [r21]=r0 ;;
adds r21=XSI_PRECOVER_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
@@ -480,14 +473,12 @@ GLOBAL_ENTRY(fast_tick_reflect)
// OK, now all set to go except for switch to virtual bank0
mov r30=r2
mov r29=r3
- mov r27=r4
#ifdef HANDLE_AR_UNAT
mov r28=ar.unat;
#endif
;;
adds r2=XSI_BANK1_R16_OFS-XSI_PSR_IC_OFS,r18
adds r3=(XSI_BANK1_R16_OFS+8)-XSI_PSR_IC_OFS,r18
- adds r4=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18
;;
bsw.1;;
.mem.offset 0,0; st8.spill [r2]=r16,16;
@@ -519,13 +510,11 @@ GLOBAL_ENTRY(fast_tick_reflect)
ld8 r22=[r2],16;
ld8 r23=[r3],16;;
#endif
- mov r31=r4
;;
bsw.0 ;;
mov r24=ar.unat;
mov r2=r30
mov r3=r29
- mov r4=r27
#ifdef HANDLE_AR_UNAT
mov ar.unat=r28;
#endif
@@ -647,11 +636,9 @@ ENTRY(fast_reflect)
st1 [r24]=r22
st4 [r18]=r0;;
// cover and set shared_mem precover_ifs to cr.ifs
- // set shared_mem ifs and incomplete_regframe to 0
+ // set shared_mem ifs to 0
cover ;;
mov r24=cr.ifs;;
- adds r21=XSI_INCOMPL_REGFR_OFS-XSI_PSR_IC_OFS,r18 ;;
- st4 [r21]=r0 ;;
adds r21=XSI_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
st8 [r21]=r0 ;;
adds r21=XSI_PRECOVER_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
@@ -670,10 +657,8 @@ ENTRY(fast_reflect)
#ifdef HANDLE_AR_UNAT
mov r28=ar.unat;
#endif
- mov r27=r4
adds r2=XSI_BANK1_R16_OFS-XSI_PSR_IC_OFS,r18;
adds r3=(XSI_BANK1_R16_OFS+8)-XSI_PSR_IC_OFS,r18
- adds r4=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18
;;
bsw.1;;
.mem.offset 0,0; st8.spill [r2]=r16,16;
@@ -705,7 +690,6 @@ ENTRY(fast_reflect)
ld8 r22=[r2],16;
ld8 r23=[r3],16;;
#endif
- mov r31=r4
;;
bsw.0 ;;
mov r24=ar.unat;
@@ -714,7 +698,6 @@ ENTRY(fast_reflect)
#ifdef HANDLE_AR_UNAT
mov ar.unat=r28;
#endif
- mov r4=r27
;;
adds r25=XSI_B1NATS_OFS-XSI_PSR_IC_OFS,r18 ;
adds r20=XSI_BANKNUM_OFS-XSI_PSR_IC_OFS,r18 ;;
@@ -1014,10 +997,22 @@ page_not_present:
#endif
END(fast_tlb_miss_reflect)
+ENTRY(slow_vcpu_rfi)
+ adds r22=XSI_IFS_OFS-XSI_PSR_IC_OFS,r18;;
+ ld8 r22=[r22];;
+ tbit.z p6,p0=r22,63
+(p6) br.spnt.few dispatch_break_fault ;;
+ // if vips is valid, discard current register frame
+ // don't need dorfirfi any more
+ alloc r22=ar.pfs,0,0,0,0
+ br.spnt.few dispatch_break_fault
+ ;;
+END(slow_vcpu_rfi)
+
// ensure that, if giving up, registers at entry to fast_hyperprivop unchanged
ENTRY(hyper_rfi)
#ifndef FAST_RFI
- br.spnt.few dispatch_break_fault ;;
+ br.spnt.few slow_vcpu_rfi ;;
#endif
// if no interrupts pending, proceed
mov r30=r0
@@ -1027,33 +1022,37 @@ ENTRY(hyper_rfi)
adds r20=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 ;;
ld8 r21=[r20];; // r21 = vcr.ipsr
extr.u r22=r21,IA64_PSR_I_BIT,1 ;;
- mov r30=r22
+ mov r30=r22;;
// r30 determines whether we might deliver an immediate extint
+#ifndef RFI_TO_INTERRUPT // see beginning of file
+ cmp.ne p6,p0=r30,r0
+(p6) br.cond.spnt.few slow_vcpu_rfi ;;
+#endif
1:
adds r20=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 ;;
ld8 r21=[r20];; // r21 = vcr.ipsr
extr.u r22=r21,IA64_PSR_BE_BIT,1 ;;
// if turning on psr.be, give up for now and do it the slow way
cmp.ne p7,p0=r22,r0
-(p7) br.spnt.few dispatch_break_fault ;;
+(p7) br.spnt.few slow_vcpu_rfi ;;
// if (!(vpsr.dt && vpsr.rt && vpsr.it)), do it the slow way
movl r20=(IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_IT);;
and r22=r20,r21
;;
cmp.ne p7,p0=r22,r20
-(p7) br.spnt.few dispatch_break_fault ;;
+(p7) br.spnt.few slow_vcpu_rfi ;;
// if was in metaphys mode, do it the slow way (FIXME later?)
adds r20=XSI_METAPHYS_OFS-XSI_PSR_IC_OFS,r18 ;;
ld4 r20=[r20];;
cmp.ne p7,p0=r20,r0
-(p7) br.spnt.few dispatch_break_fault ;;
+(p7) br.spnt.few slow_vcpu_rfi ;;
// if domain hasn't already done virtual bank switch
// do it the slow way (FIXME later?)
#if 0
adds r20=XSI_BANKNUM_OFS-XSI_PSR_IC_OFS,r18 ;;
ld4 r20=[r20];;
cmp.eq p7,p0=r20,r0
-(p7) br.spnt.few dispatch_break_fault ;;
+(p7) br.spnt.few slow_vcpu_rfi ;;
#endif
// validate vcr.iip, if in Xen range, do it the slow way
adds r20=XSI_IIP_OFS-XSI_PSR_IC_OFS,r18 ;;
@@ -1062,11 +1061,7 @@ ENTRY(hyper_rfi)
movl r24=HYPERVISOR_VIRT_END;;
cmp.ltu p0,p7=r22,r23 ;; // if !(iip<low) &&
(p7) cmp.geu p0,p7=r22,r24 ;; // !(iip>=high)
-(p7) br.spnt.few dispatch_break_fault ;;
-#ifndef RFI_TO_INTERRUPT // see beginning of file
- cmp.ne p6,p0=r30,r0
-(p6) br.cond.spnt.few dispatch_break_fault ;;
-#endif
+(p7) br.spnt.few slow_vcpu_rfi ;;
1: // OK now, let's do an rfi.
#ifdef FAST_HYPERPRIVOP_CNT
@@ -1084,8 +1079,6 @@ ENTRY(hyper_rfi)
just_do_rfi:
// r18=&vpsr.i|vpsr.ic, r21==vpsr, r22=vcr.iip
mov cr.iip=r22;;
- adds r20=XSI_INCOMPL_REGFR_OFS-XSI_PSR_IC_OFS,r18 ;;
- st4 [r20]=r0 ;;
adds r20=XSI_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
ld8 r20=[r20];;
dep r20=0,r20,38,25;; // ensure ifs has no reserved bits set
@@ -1261,20 +1254,16 @@ ENTRY(rfi_with_interrupt)
st1 [r22]=r20
st4 [r18]=r0;;
// cover and set shared_mem precover_ifs to cr.ifs
- // set shared_mem ifs and incomplete_regframe to 0
+ // set shared_mem ifs to 0
#if 0
cover ;;
mov r20=cr.ifs;;
- adds r22=XSI_INCOMPL_REG_OFS-XSI_PSR_IC_OFS,r18 ;;
- st4 [r22]=r0 ;;
adds r22=XSI_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
st8 [r22]=r0 ;;
adds r22=XSI_PRECOVER_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
st8 [r22]=r20 ;;
// leave cr.ifs alone for later rfi
#else
- adds r22=XSI_INCOMPL_REG_OFS-XSI_PSR_IC_OFS,r18 ;;
- st4 [r22]=r0 ;;
adds r22=XSI_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
ld8 r20=[r22];;
st8 [r22]=r0 ;;
@@ -1310,13 +1299,8 @@ ENTRY(rfi_with_interrupt)
.mem.offset 8,0; st8.spill [r3]=r29,16 ;;
.mem.offset 0,0; st8.spill [r2]=r30,16;
.mem.offset 8,0; st8.spill [r3]=r31,16 ;;
- movl r31=XSI_IPSR;;
bsw.0 ;;
mov r2=r30; mov r3=r29;;
-#else
- bsw.1;;
- movl r31=XSI_IPSR;;
- bsw.0 ;;
#endif
adds r20=XSI_BANKNUM_OFS-XSI_PSR_IC_OFS,r18 ;;
st4 [r20]=r0 ;;
@@ -1336,13 +1320,9 @@ ENTRY(hyper_cover)
mov r25=cr.iip;;
// skip test for vpsr.ic.. it's a prerequisite for hyperprivops
cover ;;
- adds r20=XSI_INCOMPL_REGFR_OFS-XSI_PSR_IC_OFS,r18 ;;
- mov r30=cr.ifs;;
- adds r22=XSI_IFS_OFS-XSI_PSR_IC_OFS,r18
- ld4 r21=[r20] ;;
- cmp.eq p6,p7=r21,r0 ;;
-(p6) st8 [r22]=r30;;
-(p7) st4 [r20]=r0;;
+ mov r30=cr.ifs
+ adds r22=XSI_IFS_OFS-XSI_PSR_IC_OFS,r18;;
+ st8 [r22]=r30;;
mov cr.ifs=r0;;
// adjust return address to skip over break instruction
extr.u r26=r24,41,2 ;;
@@ -1479,6 +1459,53 @@ ENTRY(hyper_set_itm)
;;
END(hyper_set_itm)
+ENTRY(hyper_get_psr)
+#ifdef FAST_HYPERPRIVOP_CNT
+ movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_GET_PSR);;
+ ld4 r21=[r20];;
+ adds r21=1,r21;;
+ st4 [r20]=r21;;
+#endif
+ mov r24=cr.ipsr
+ movl r8=0x18ffffffff;;
+ // only return PSR{36:35,31:0}
+ and r8=r8,r24
+ // set vpsr.ic
+ ld4 r21=[r18];;
+ dep r8=r21,r8,IA64_PSR_IC_BIT,1
+ // set vpsr.pp
+ adds r20=XSI_VPSR_PP_OFS-XSI_PSR_IC_OFS,r18 ;;
+ ld1 r21=[r20];;
+ dep r8=r21,r8,IA64_PSR_PP_BIT,1
+ // set vpsr.dt
+ adds r20=XSI_METAPHYS_OFS-XSI_PSR_IC_OFS,r18 ;;
+ ld4 r21=[r20];;
+ cmp.ne p6,p0=r21,r0
+ ;;
+(p6) dep.z r8=r8,IA64_PSR_DT_BIT,1
+ // set vpsr.i
+ adds r20=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18 ;;
+ ld8 r20=[r20];;
+ ld1 r21=[r20];;
+ dep r8=r21,r8,IA64_PSR_I_BIT,1
+ ;;
+ mov r25=cr.iip
+ extr.u r26=r24,41,2 ;;
+ cmp.eq p6,p7=2,r26 ;;
+(p6) mov r26=0
+(p6) adds r25=16,r25
+(p7) adds r26=1,r26
+ ;;
+ dep r24=r26,r24,41,2
+ ;;
+ mov cr.ipsr=r24
+ mov cr.iip=r25
+ mov pr=r31,-1 ;;
+ rfi
+ ;;
+END(hyper_get_psr)
+
+
ENTRY(hyper_get_rr)
#ifdef FAST_HYPERPRIVOP_CNT
movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_GET_RR);;
diff --git a/xen/arch/ia64/xen/irq.c b/xen/arch/ia64/xen/irq.c
index 6211e9b418..079f445def 100644
--- a/xen/arch/ia64/xen/irq.c
+++ b/xen/arch/ia64/xen/irq.c
@@ -48,6 +48,13 @@
#include <xen/event.h>
#define apicid_to_phys_cpu_present(x) 1
+#ifdef CONFIG_IA64_GENERIC
+unsigned int __ia64_local_vector_to_irq (ia64_vector vec)
+{
+ return (unsigned int) vec;
+}
+#endif
+
/*
* Linux has a controller-independent x86 interrupt architecture.
* every controller has a 'controller-template', that is used
diff --git a/xen/arch/ia64/xen/ivt.S b/xen/arch/ia64/xen/ivt.S
index 4eec37f89a..ac55769642 100644
--- a/xen/arch/ia64/xen/ivt.S
+++ b/xen/arch/ia64/xen/ivt.S
@@ -1,6 +1,8 @@
#ifdef XEN
#include <asm/debugger.h>
#include <asm/vhpt.h>
+#include <public/arch-ia64.h>
+#include <asm/config.h>
#endif
/*
* arch/ia64/kernel/ivt.S
@@ -1054,7 +1056,6 @@ ENTRY(break_fault)
* level.
*/
DBG_FAULT(11)
-#ifdef XEN
mov r16=cr.isr
mov r17=cr.iim
mov r31=pr
@@ -1074,121 +1075,229 @@ ENTRY(break_fault)
#endif
movl r18=THIS_CPU(current_psr_ic_addr)
;;
- ld8 r18=[r18]
- ;;
- ld4 r19=[r18]
+ ld8 r18=[r18]
;;
cmp.eq p7,p0=r0,r17 // is this a psuedo-cover?
(p7) br.spnt.many dispatch_privop_fault
;;
- // If vpsr.ic is off, we have a hyperprivop. A hyperprivop is
- // hand-coded assembly with psr.ic off which means it can make
- // no calls, cannot use r1-r15, and it can have no memory accesses
- // unless they are to pinned addresses!
- cmp4.eq p7,p0=r0,r19
+ // if (ipsr.cpl == 2 && (iim - HYPERPRIVOP_START) < HYPERPRIVOP_MAX)
+ // this is a hyperprivop. A hyperprivop is hand-coded assembly with
+ // psr.ic off which means it can make no calls, cannot use r1-r15,
+ // and it can have no memory accesses unless they are to pinned
+ // addresses!
+ mov r19= cr.ipsr
+ movl r20=HYPERPRIVOP_START
+ mov r21=HYPERPRIVOP_MAX
+ ;;
+ sub r20=r17,r20
+ extr.u r19=r19,IA64_PSR_CPL0_BIT,2 // extract cpl field from cr.ipsr
+ ;;
+ cmp.gtu p7,p0=r21,r20
+ ;;
+ cmp.eq.and p7,p0=2,r19 // ipsr.cpl==2
(p7) br.sptk.many fast_hyperprivop
;;
movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET
;;
ld8 r22 = [r22]
;;
- adds r22=IA64_VCPU_BREAKIMM_OFFSET,r22
+ adds r23=IA64_VCPU_BREAKIMM_OFFSET,r22
;;
- ld4 r23=[r22];;
- cmp4.eq p6,p7=r23,r17 // Xen-reserved breakimm?
-(p6) br.spnt.many dispatch_break_fault
+ ld4 r23=[r23];;
+ cmp4.eq p6,p0=r23,r17;; // Xen-reserved breakimm?
+ cmp.eq.and p6,p0=2,r19
+(p6) br.spnt.many fast_hypercall
;;
br.sptk.many fast_break_reflect
;;
-#else /* !XEN */
- movl r16=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
- ld8 r16=[r16]
- mov r17=cr.iim
- mov r18=__IA64_BREAK_SYSCALL
- mov r21=ar.fpsr
- mov r29=cr.ipsr
- mov r19=b6
- mov r25=ar.unat
- mov r27=ar.rsc
- mov r26=ar.pfs
- mov r28=cr.iip
-#ifndef XEN
- mov r31=pr // prepare to save predicates
-#endif
- mov r20=r1
+
+
+fast_hypercall:
+ shr r25=r2,8;;
+ cmp.ne p7,p0=r0,r25
+(p7) br.spnt.few dispatch_break_fault
;;
+ // fall through
+
+
+ /*
+ * The streamlined system call entry/exit paths only save/restore the initial part
+ * of pt_regs. This implies that the callers of system-calls must adhere to the
+ * normal procedure calling conventions.
+ *
+ * Registers to be saved & restored:
+ * CR registers: cr.ipsr, cr.iip, cr.ifs
+ * AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
+ * others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
+ * Registers to be restored only:
+ * r8-r11: output value from the system call.
+ *
+ * During system call exit, scratch registers (including r15) are modified/cleared
+ * to prevent leaking bits from kernel to user level.
+ */
+
+// DBG_FAULT(11)
+// mov.m r16=IA64_KR(CURRENT) // M2 r16 <- current task (12 cyc)
+ mov r16=r22
+ mov r29=cr.ipsr // M2 (12 cyc)
+// mov r31=pr // I0 (2 cyc)
+ mov r15=r2
+
+// mov r17=cr.iim // M2 (2 cyc)
+ mov.m r27=ar.rsc // M2 (12 cyc)
+// mov r18=__IA64_BREAK_SYSCALL // A
+
+ mov.m ar.rsc=0 // M2
+ mov.m r21=ar.fpsr // M2 (12 cyc)
+ mov r19=b6 // I0 (2 cyc)
+ ;;
+ mov.m r23=ar.bspstore // M2 (12 cyc)
+ mov.m r24=ar.rnat // M2 (5 cyc)
+ mov.i r26=ar.pfs // I0 (2 cyc)
+
+ invala // M0|1
+ nop.m 0 // M
+ mov r20=r1 // A save r1
+
+ nop.m 0
+// movl r30=sys_call_table // X
+ movl r30=ia64_hypercall_table // X
+
+ mov r28=cr.iip // M2 (2 cyc)
+// cmp.eq p0,p7=r18,r17 // I0 is this a system call?
+//(p7) br.cond.spnt non_syscall // B no ->
+ //
+ // From this point on, we are definitely on the syscall-path
+ // and we can use (non-banked) scratch registers.
+ //
+///////////////////////////////////////////////////////////////////////
+ mov r1=r16 // A move task-pointer to "addl"-addressable reg
+ mov r2=r16 // A setup r2 for ia64_syscall_setup
+// add r9=TI_FLAGS+IA64_TASK_SIZE,r16 // A r9 = &current_thread_info()->flags
+
adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
- cmp.eq p0,p7=r18,r17 // is this a system call?
- // (p7 <- false, if so)
-(p7) br.cond.spnt non_syscall
+// adds r15=-1024,r15 // A subtract 1024 from syscall number
+// mov r3=NR_syscalls - 1
+ mov r3=NR_hypercalls - 1
;;
- ld1 r17=[r16] // load current->thread.on_ustack flag
- st1 [r16]=r0 // clear current->thread.on_ustack flag
- add r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
- // set r1 for
- // MINSTATE_START_SAVE_MIN_VIRT
+ ld1.bias r17=[r16] // M0|1 r17 = current->thread.on_ustack flag
+// ld4 r9=[r9] // M0|1 r9 = current_thread_info()->flags
+ mov r9=r0 // force flags = 0
+ extr.u r8=r29,41,2 // I0 extract ei field from cr.ipsr
+
+ shladd r30=r15,3,r30 // A r30 = sys_call_table + 8*(syscall-1024)
+ addl r22=IA64_RBS_OFFSET,r1 // A compute base of RBS
+ cmp.leu p6,p7=r15,r3 // A syscall number in range?
;;
- invala
- /* adjust return address so we skip over the break instruction: */
+ lfetch.fault.excl.nt1 [r22] // M0|1 prefetch RBS
+(p6) ld8 r30=[r30] // M0|1 load address of syscall entry point
+ tnat.nz.or p7,p0=r15 // I0 is syscall nr a NaT?
- extr.u r8=r29,41,2 // extract ei field from cr.ipsr
+ mov.m ar.bspstore=r22 // M2 switch to kernel RBS
+ cmp.eq p8,p9=2,r8 // A isr.ei==2?
;;
- cmp.eq p6,p7=2,r8 // isr.ei==2?
- mov r2=r1 // setup r2 for ia64_syscall_setup
+
+(p8) mov r8=0 // A clear ei to 0
+//(p7) movl r30=sys_ni_syscall // X
+(p7) movl r30=do_ni_hypercall // X
+
+(p8) adds r28=16,r28 // A switch cr.iip to next bundle
+(p9) adds r8=1,r8 // A increment ei to next slot
+ nop.i 0
;;
-(p6) mov r8=0 // clear ei to 0
-(p6) adds r28=16,r28 // switch cr.iip to next bundle
- // cr.ipsr.ei wrapped
-(p7) adds r8=1,r8 // increment ei to next slot
+
+ mov.m r25=ar.unat // M2 (5 cyc)
+ dep r29=r8,r29,41,2 // I0 insert new ei into cr.ipsr
+// adds r15=1024,r15 // A restore original syscall number
+ //
+ // If any of the above loads miss in L1D, we'll stall here until
+ // the data arrives.
+ //
+///////////////////////////////////////////////////////////////////////
+ st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag
+ mov b6=r30 // I0 setup syscall handler branch reg early
+ cmp.eq pKStk,pUStk=r0,r17 // A were we on kernel stacks already?
+
+// and r9=_TIF_SYSCALL_TRACEAUDIT,r9 // A mask trace or audit
+ mov r18=ar.bsp // M2 (12 cyc)
+ ;;
+(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1 // A compute base of memory stack
+// cmp.eq p14,p0=r9,r0 // A are syscalls being traced/audited?
+ br.call.sptk.many b7=ia64_syscall_setup // B
+1:
+ mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0
+ nop 0
+ bsw.1 // B (6 cyc) regs are saved, switch to bank 1
;;
- cmp.eq pKStk,pUStk=r0,r17 // are we in kernel mode already?
- dep r29=r8,r29,41,2 // insert new ei into cr.ipsr
+
+ ssm psr.ic | PSR_DEFAULT_BITS // M2 now it's safe to re-enable intr.-collection
+// movl r3=ia64_ret_from_syscall // X
;;
- // switch from user to kernel RBS:
- MINSTATE_START_SAVE_MIN_VIRT
- br.call.sptk.many b7=ia64_syscall_setup
+ srlz.i // M0 ensure interruption collection is on
+// mov rp=r3 // I0 set the real return addr
+//(p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT
+(p15) ssm psr.i // M2 restore psr.i
+//(p14) br.call.sptk.many b6=b6 // B invoke syscall-handker (ignore return addr)
+// br.call.sptk.many b6=b6 // B invoke syscall-handker (ignore return addr)
+ br.call.sptk.many b0=b6 // B invoke syscall-handker (ignore return addr)
+// br.cond.spnt.many ia64_trace_syscall // B do syscall-tracing thingamagic
;;
- MINSTATE_END_SAVE_MIN_VIRT // switch to bank 1
- ssm psr.ic | PSR_DEFAULT_BITS
+ adds r2=PT(R8)+16,r12
;;
- srlz.i // guarantee that interruption
- // collection is on
- mov r3=NR_syscalls - 1
+ st8 [r2]=r8
;;
-(p15) ssm psr.i // restore psr.i
- // p10==true means out registers are more than 8 or r15's Nat is true
-(p10) br.cond.spnt.many ia64_ret_from_syscall
+ br.call.sptk.many b0=do_softirq
;;
- movl r16=sys_call_table
-
- adds r15=-1024,r15 // r15 contains the syscall number --
- // subtract 1024 from it
- movl r2=ia64_ret_from_syscall
+ //restore hypercall argument if continuation
+ adds r2=IA64_VCPU_HYPERCALL_CONTINUATION_OFS,r13
;;
- shladd r20=r15,3,r16 // r20 = sys_call_table +
- // 8*(syscall-1024)
- cmp.leu p6,p7=r15,r3 // (syscall > 0 && syscall < 1024 +
- // NR_syscalls) ?
- mov rp=r2 // set the real return addr
+ ld1 r20=[r2]
;;
-(p6) ld8 r20=[r20] // load address of syscall entry point
-(p7) movl r20=sys_ni_syscall
-
- add r2=TI_FLAGS+IA64_TASK_SIZE,r13
+ st1 [r2]=r0
;;
- ld4 r2=[r2] // r2 = current_thread_info()->flags
+ cmp.ne p6,p0=r20,r0
;;
- and r2=_TIF_SYSCALL_TRACEAUDIT,r2 // mask trace or audit
+(p6) adds r2=PT(R16)+16,r12
+(p6) adds r3=PT(R17)+16,r12
;;
- cmp.eq p8,p0=r2,r0
- mov b6=r20
+(p6) ld8 r32=[r2],16
+(p6) ld8 r33=[r3],16
+ ;;
+(p6) ld8 r34=[r2],16
+(p6) ld8 r35=[r3],16
+ ;;
+(p6) ld8 r36=[r2],16
+ ;;
+//save ar.bsp before cover
+ mov r16=ar.bsp
+ add r2=PT(R14)+16,r12
+ ;;
+ st8 [r2]=r16
+ ;;
+ rsm psr.i|psr.ic
+ ;;
+ srlz.i
+ ;;
+ cover
+ ;;
+ mov r20=cr.ifs
+ adds r2=PT(CR_IFS)+16,r12
+ ;;
+ st8 [r2]=r20
+ ;;
+ br.call.sptk.many b0=reflect_event
+ ;;
+ adds r2=PT(R14)+16,r12
+ adds r3=PT(R8)+16,r12
+ ;;
+ //r16 contains ar.bsp before cover
+ ld8 r16=[r2]
+ ld8 r8=[r3]
+ ;;
+ br.sptk.many ia64_ret_from_syscall
;;
-(p8) br.call.sptk.many b6=b6 // ignore this return addr
- br.cond.sptk ia64_trace_syscall
- // NOT REACHED
-#endif
END(break_fault)
.org ia64_ivt+0x3000
@@ -1288,7 +1397,150 @@ END(dispatch_break_fault)
DBG_FAULT(14)
FAULT(14)
-#ifndef XEN
+#ifdef XEN
+ // this code segment is from 2.6.16.13
+
+ /*
+ * There is no particular reason for this code to be here, other than that
+ * there happens to be space here that would go unused otherwise. If this
+ * fault ever gets "unreserved", simply moved the following code to a more
+ * suitable spot...
+ *
+ * ia64_syscall_setup() is a separate subroutine so that it can
+ * allocate stacked registers so it can safely demine any
+ * potential NaT values from the input registers.
+ *
+ * On entry:
+ * - executing on bank 0 or bank 1 register set (doesn't matter)
+ * - r1: stack pointer
+ * - r2: current task pointer
+ * - r3: preserved
+ * - r11: original contents (saved ar.pfs to be saved)
+ * - r12: original contents (sp to be saved)
+ * - r13: original contents (tp to be saved)
+ * - r15: original contents (syscall # to be saved)
+ * - r18: saved bsp (after switching to kernel stack)
+ * - r19: saved b6
+ * - r20: saved r1 (gp)
+ * - r21: saved ar.fpsr
+ * - r22: kernel's register backing store base (krbs_base)
+ * - r23: saved ar.bspstore
+ * - r24: saved ar.rnat
+ * - r25: saved ar.unat
+ * - r26: saved ar.pfs
+ * - r27: saved ar.rsc
+ * - r28: saved cr.iip
+ * - r29: saved cr.ipsr
+ * - r31: saved pr
+ * - b0: original contents (to be saved)
+ * On exit:
+ * - p10: TRUE if syscall is invoked with more than 8 out
+ * registers or r15's Nat is true
+ * - r1: kernel's gp
+ * - r3: preserved (same as on entry)
+ * - r8: -EINVAL if p10 is true
+ * - r12: points to kernel stack
+ * - r13: points to current task
+ * - r14: preserved (same as on entry)
+ * - p13: preserved
+ * - p15: TRUE if interrupts need to be re-enabled
+ * - ar.fpsr: set to kernel settings
+ * - b6: preserved (same as on entry)
+ */
+GLOBAL_ENTRY(ia64_syscall_setup)
+#if PT(B6) != 0
+# error This code assumes that b6 is the first field in pt_regs.
+#endif
+ st8 [r1]=r19 // save b6
+ add r16=PT(CR_IPSR),r1 // initialize first base pointer
+ add r17=PT(R11),r1 // initialize second base pointer
+ ;;
+ alloc r19=ar.pfs,8,0,0,0 // ensure in0-in7 are writable
+ st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR) // save cr.ipsr
+ tnat.nz p8,p0=in0
+
+ st8.spill [r17]=r11,PT(CR_IIP)-PT(R11) // save r11
+ tnat.nz p9,p0=in1
+(pKStk) mov r18=r0 // make sure r18 isn't NaT
+ ;;
+
+ st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS) // save ar.pfs
+ st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP) // save cr.iip
+ mov r28=b0 // save b0 (2 cyc)
+ ;;
+
+ st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT) // save ar.unat
+ dep r19=0,r19,38,26 // clear all bits but 0..37 [I0]
+(p8) mov in0=-1
+ ;;
+
+ st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS) // store ar.pfs.pfm in cr.ifs
+ extr.u r11=r19,7,7 // I0 // get sol of ar.pfs
+ and r8=0x7f,r19 // A // get sof of ar.pfs
+
+ st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc
+ tbit.nz p15,p0=r29,IA64_PSR_I_BIT // I0
+(p9) mov in1=-1
+ ;;
+
+(pUStk) sub r18=r18,r22 // r18=RSE.ndirty*8
+ tnat.nz p10,p0=in2
+ add r11=8,r11
+ ;;
+(pKStk) adds r16=PT(PR)-PT(AR_RNAT),r16 // skip over ar_rnat field
+(pKStk) adds r17=PT(B0)-PT(AR_BSPSTORE),r17 // skip over ar_bspstore field
+ tnat.nz p11,p0=in3
+ ;;
+(p10) mov in2=-1
+ tnat.nz p12,p0=in4 // [I0]
+(p11) mov in3=-1
+ ;;
+(pUStk) st8 [r16]=r24,PT(PR)-PT(AR_RNAT) // save ar.rnat
+(pUStk) st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE) // save ar.bspstore
+ shl r18=r18,16 // compute ar.rsc to be used for "loadrs"
+ ;;
+ st8 [r16]=r31,PT(LOADRS)-PT(PR) // save predicates
+ st8 [r17]=r28,PT(R1)-PT(B0) // save b0
+ tnat.nz p13,p0=in5 // [I0]
+ ;;
+ st8 [r16]=r18,PT(R12)-PT(LOADRS) // save ar.rsc value for "loadrs"
+ st8.spill [r17]=r20,PT(R13)-PT(R1) // save original r1
+(p12) mov in4=-1
+ ;;
+
+.mem.offset 0,0; st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12) // save r12
+.mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13) // save r13
+(p13) mov in5=-1
+ ;;
+ st8 [r16]=r21,PT(R8)-PT(AR_FPSR) // save ar.fpsr
+ tnat.nz p13,p0=in6
+ cmp.lt p10,p9=r11,r8 // frame size can't be more than local+8
+ ;;
+ mov r8=1
+(p9) tnat.nz p10,p0=r15
+ adds r12=-16,r1 // switch to kernel memory stack (with 16 bytes of scratch)
+
+ st8.spill [r17]=r15 // save r15
+ tnat.nz p8,p0=in7
+ nop.i 0
+
+ mov r13=r2 // establish `current'
+ movl r1=__gp // establish kernel global pointer
+ ;;
+ st8 [r16]=r8 // ensure pt_regs.r8 != 0 (see handle_syscall_error)
+(p13) mov in6=-1
+(p8) mov in7=-1
+
+ cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
+ movl r17=FPSR_DEFAULT
+ ;;
+ mov.m ar.fpsr=r17 // set ar.fpsr to kernel default value
+(p10) mov r8=-EINVAL
+ br.ret.sptk.many b7
+END(ia64_syscall_setup)
+
+
+#else
/*
* There is no particular reason for this code to be here, other
* than that there happens to be space here that would go unused
diff --git a/xen/arch/ia64/xen/mm.c b/xen/arch/ia64/xen/mm.c
index 0a5acc0636..aec322870c 100644
--- a/xen/arch/ia64/xen/mm.c
+++ b/xen/arch/ia64/xen/mm.c
@@ -700,12 +700,12 @@ unsigned long lookup_domain_mpa(struct domain *d, unsigned long mpaddr,
printk("%s: d 0x%p id %d current 0x%p id %d\n",
__func__, d, d->domain_id, current, current->vcpu_id);
- if ((mpaddr >> PAGE_SHIFT) < d->max_pages)
+ if (mpaddr < d->arch.convmem_end)
printk("%s: non-allocated mpa 0x%lx (< 0x%lx)\n", __func__,
- mpaddr, (unsigned long)d->max_pages << PAGE_SHIFT);
+ mpaddr, d->arch.convmem_end);
else
printk("%s: bad mpa 0x%lx (=> 0x%lx)\n", __func__,
- mpaddr, (unsigned long)d->max_pages << PAGE_SHIFT);
+ mpaddr, d->arch.convmem_end);
if (entry != NULL)
p2m_entry_set(entry, NULL, __pte(0));
@@ -972,7 +972,7 @@ ioports_deny_access(struct domain *d, unsigned long fp, unsigned long lp)
// clear pte
old_pte = ptep_get_and_clear(mm, mpaddr, pte);
}
- domain_flush_vtlb_all();
+ domain_flush_vtlb_all(d);
return 0;
}
@@ -1439,10 +1439,9 @@ dom0vp_expose_p2m(struct domain* d,
for (i = 0; i < expose_num_pfn / PTRS_PER_PTE + 1; i++) {
assign_pte = lookup_noalloc_domain_pte(d, (assign_start_gpfn + i) <<
PAGE_SHIFT);
- BUG_ON(assign_pte == NULL);
- if (pte_present(*assign_pte)) {
+ if (assign_pte == NULL || pte_present(*assign_pte))
continue;
- }
+
if (expose_p2m_page(d, (assign_start_gpfn + i) << PAGE_SHIFT,
p2m_pte_zero_page) < 0) {
gdprintk(XENLOG_INFO, "%s failed to assign zero-pte page\n", __func__);
@@ -1731,7 +1730,7 @@ domain_page_flush_and_put(struct domain* d, unsigned long mpaddr,
#ifndef CONFIG_XEN_IA64_TLB_TRACK
//XXX sledgehammer.
// flush finer range.
- domain_flush_vtlb_all();
+ domain_flush_vtlb_all(d);
put_page(page);
#else
switch (tlb_track_search_and_remove(d->arch.tlb_track,
@@ -1750,7 +1749,7 @@ domain_page_flush_and_put(struct domain* d, unsigned long mpaddr,
* queue the page and flush vTLB only once.
* I.e. The caller must call dfree_flush() explicitly.
*/
- domain_flush_vtlb_all();
+ domain_flush_vtlb_all(d);
put_page(page);
break;
case TLB_TRACK_NOT_FOUND:
@@ -1784,7 +1783,7 @@ domain_page_flush_and_put(struct domain* d, unsigned long mpaddr,
* So we abondaned to track virtual addresses.
* full vTLB flush is necessary.
*/
- domain_flush_vtlb_all();
+ domain_flush_vtlb_all(d);
put_page(page);
break;
case TLB_TRACK_AGAIN:
diff --git a/xen/arch/ia64/xen/tlb_track.c b/xen/arch/ia64/xen/tlb_track.c
index 07816a4acb..1cbf6126de 100644
--- a/xen/arch/ia64/xen/tlb_track.c
+++ b/xen/arch/ia64/xen/tlb_track.c
@@ -454,6 +454,31 @@ tlb_track_search_and_remove(struct tlb_track* tlb_track,
continue;
if (pte_pfn(entry->pte_val) == mfn) {
+ /*
+ * PARANOIA
+ * We're here after zapping p2m entry. However another pCPU
+ * may update the same p2m entry with the same mfn at the
+ * same time in theory. In such a case, we can't determine
+ * whether this entry is for us or for the racy p2m update.
+ * Such a guest domain's racy behaviour doesn't make sense,
+ * but is allowed. Go the very pessimistic way. Leave this
+ * entry to be found later and do full flush at this time.
+ *
+ * NOTE: Updating tlb tracking hash is protected by spin lock and
+ * setting _PAGE_TLB_INSERTED and _PAGE_TLB_INSERTED_MANY bits
+ * is serialized by the same spin lock.
+ * See tlb_track_insert_or_dirty().
+ */
+ pte_t current_pte = *ptep;
+ if (unlikely(pte_pfn(current_pte) == mfn &&
+ pte_tlb_tracking(current_pte) &&
+ pte_tlb_inserted(current_pte))) {
+ BUG_ON(pte_tlb_inserted_many(current_pte));
+ spin_unlock(&tlb_track->hash_lock);
+ perfc_incrc(tlb_track_sar_many);
+ return TLB_TRACK_MANY;
+ }
+
list_del(&entry->list);
spin_unlock(&tlb_track->hash_lock);
*entryp = entry;
diff --git a/xen/arch/ia64/xen/vcpu.c b/xen/arch/ia64/xen/vcpu.c
index 6c3861a3a9..9b554b1b64 100644
--- a/xen/arch/ia64/xen/vcpu.c
+++ b/xen/arch/ia64/xen/vcpu.c
@@ -450,8 +450,6 @@ IA64FAULT vcpu_get_psr(VCPU * vcpu, u64 * pval)
struct ia64_psr newpsr;
newpsr = *(struct ia64_psr *)&regs->cr_ipsr;
- if (newpsr.cpl == 2)
- newpsr.cpl = 0;
if (!vcpu->vcpu_info->evtchn_upcall_mask)
newpsr.i = 1;
else
@@ -469,6 +467,7 @@ IA64FAULT vcpu_get_psr(VCPU * vcpu, u64 * pval)
else
newpsr.pp = 0;
*pval = *(unsigned long *)&newpsr;
+ *pval &= (MASK(0, 32) | MASK(35, 2));
return IA64_NO_FAULT;
}
@@ -614,7 +613,6 @@ IA64FAULT vcpu_get_ifs(VCPU * vcpu, u64 * pval)
//PSCB(vcpu,ifs) = PSCB(vcpu)->regs.cr_ifs;
//*pval = PSCB(vcpu,regs).cr_ifs;
*pval = PSCB(vcpu, ifs);
- PSCB(vcpu, incomplete_regframe) = 0;
return IA64_NO_FAULT;
}
@@ -696,6 +694,20 @@ IA64FAULT vcpu_increment_iip(VCPU * vcpu)
return IA64_NO_FAULT;
}
+IA64FAULT vcpu_decrement_iip(VCPU * vcpu)
+{
+ REGS *regs = vcpu_regs(vcpu);
+ struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
+
+ if (ipsr->ri == 0) {
+ ipsr->ri = 2;
+ regs->cr_iip -= 16;
+ } else
+ ipsr->ri--;
+
+ return IA64_NO_FAULT;
+}
+
IA64FAULT vcpu_set_ifa(VCPU * vcpu, u64 val)
{
PSCB(vcpu, ifa) = val;
@@ -1208,8 +1220,8 @@ IA64FAULT vcpu_set_itc(VCPU * vcpu, u64 val)
#ifdef DISALLOW_SETTING_ITC_FOR_NOW
static int did_print;
if (!did_print) {
- printk("vcpu_set_itc: Setting ar.itc is currently disabled\n");
- printk("(this message is only displayed one)\n");
+ printk("vcpu_set_itc: Setting ar.itc is currently disabled "
+ "(this message is only displayed once)\n");
did_print = 1;
}
#else
@@ -1324,10 +1336,8 @@ IA64FAULT vcpu_rfi(VCPU * vcpu)
{
// TODO: Only allowed for current vcpu
PSR psr;
- u64 int_enable, regspsr = 0;
- u64 ifs;
+ u64 int_enable, ifs;
REGS *regs = vcpu_regs(vcpu);
- extern void dorfirfi(void);
psr.i64 = PSCB(vcpu, ipsr);
if (psr.ia64_psr.cpl < 3)
@@ -1350,23 +1360,13 @@ IA64FAULT vcpu_rfi(VCPU * vcpu)
printk("*** DOMAIN TRYING TO TURN ON BIG-ENDIAN!!!\n");
return IA64_ILLOP_FAULT;
}
- PSCB(vcpu, incomplete_regframe) = 0; // is this necessary?
+
ifs = PSCB(vcpu, ifs);
- //if ((ifs & regs->cr_ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
- //if ((ifs & 0x8000000000000000L) && ifs != regs->cr_ifs) {
- if (ifs & regs->cr_ifs & 0x8000000000000000L) {
- // TODO: validate PSCB(vcpu,iip)
- // TODO: PSCB(vcpu,ipsr) = psr;
- PSCB(vcpu, ipsr) = psr.i64;
- // now set up the trampoline
- regs->cr_iip = *(unsigned long *)dorfirfi; // function pointer!!
- __asm__ __volatile("mov %0=psr;;":"=r"(regspsr)::"memory");
- regs->cr_ipsr =
- regspsr & ~(IA64_PSR_I | IA64_PSR_IC | IA64_PSR_BN);
- } else {
- regs->cr_ipsr = psr.i64;
- regs->cr_iip = PSCB(vcpu, iip);
- }
+ if (ifs & 0x8000000000000000UL)
+ regs->cr_ifs = ifs;
+
+ regs->cr_ipsr = psr.i64;
+ regs->cr_iip = PSCB(vcpu, iip);
PSCB(vcpu, interrupt_collection_enabled) = 1;
vcpu_bsw1(vcpu);
vcpu->vcpu_info->evtchn_upcall_mask = !int_enable;
@@ -1379,10 +1379,7 @@ IA64FAULT vcpu_cover(VCPU * vcpu)
REGS *regs = vcpu_regs(vcpu);
if (!PSCB(vcpu, interrupt_collection_enabled)) {
- if (!PSCB(vcpu, incomplete_regframe))
- PSCB(vcpu, ifs) = regs->cr_ifs;
- else
- PSCB(vcpu, incomplete_regframe) = 0;
+ PSCB(vcpu, ifs) = regs->cr_ifs;
}
regs->cr_ifs = 0;
return IA64_NO_FAULT;
@@ -1685,9 +1682,18 @@ IA64FAULT vcpu_translate(VCPU * vcpu, u64 address, BOOLEAN is_data,
// note: architecturally, iha is optionally set for alt faults but
// xenlinux depends on it so should document it as part of PV interface
vcpu_thash(vcpu, address, iha);
- if (!(rr & RR_VE_MASK) || !(pta & IA64_PTA_VE))
+ if (!(rr & RR_VE_MASK) || !(pta & IA64_PTA_VE)) {
+ REGS *regs = vcpu_regs(vcpu);
+ // NOTE: This is specific code for linux kernel
+ // We assume region 7 is identity mapped
+ if (region == 7 && ia64_psr(regs)->cpl == 2) {
+ pte.val = address & _PAGE_PPN_MASK;
+ pte.val = pte.val | pgprot_val(PAGE_KERNEL);
+ goto out;
+ }
return is_data ? IA64_ALT_DATA_TLB_VECTOR :
IA64_ALT_INST_TLB_VECTOR;
+ }
/* avoid recursively walking (short format) VHPT */
if (((address ^ pta) & ((itir_mask(pta) << 3) >> 3)) == 0)
@@ -1707,6 +1713,7 @@ IA64FAULT vcpu_translate(VCPU * vcpu, u64 address, BOOLEAN is_data,
return is_data ? IA64_DATA_TLB_VECTOR : IA64_INST_TLB_VECTOR;
/* found mapping in guest VHPT! */
+out:
*itir = rr & RR_PS_MASK;
*pteval = pte.val;
perfc_incrc(vhpt_translate);
@@ -2182,14 +2189,6 @@ vcpu_itc_no_srlz(VCPU * vcpu, u64 IorD, u64 vaddr, u64 pte,
else
vhpt_insert(vaddr, pte, PAGE_SHIFT << 2);
#endif
- if (IorD & 0x4) /* don't place in 1-entry TLB */
- return;
- if (IorD & 0x1) {
- vcpu_set_tr_entry(&PSCBX(vcpu, itlb), mp_pte, ps << 2, vaddr);
- }
- if (IorD & 0x2) {
- vcpu_set_tr_entry(&PSCBX(vcpu, dtlb), mp_pte, ps << 2, vaddr);
- }
}
IA64FAULT vcpu_itc_d(VCPU * vcpu, u64 pte, u64 itir, u64 ifa)
@@ -2216,6 +2215,7 @@ IA64FAULT vcpu_itc_d(VCPU * vcpu, u64 pte, u64 itir, u64 ifa)
vcpu_flush_tlb_vhpt_range(ifa, logps);
goto again;
}
+ vcpu_set_tr_entry(&PSCBX(vcpu, dtlb), pte, itir, ifa);
return IA64_NO_FAULT;
}
@@ -2242,6 +2242,7 @@ IA64FAULT vcpu_itc_i(VCPU * vcpu, u64 pte, u64 itir, u64 ifa)
vcpu_flush_tlb_vhpt_range(ifa, logps);
goto again;
}
+ vcpu_set_tr_entry(&PSCBX(vcpu, itlb), pte, itir, ifa);
return IA64_NO_FAULT;
}
diff --git a/xen/arch/ia64/xen/vhpt.c b/xen/arch/ia64/xen/vhpt.c
index 05382370cc..c8846efc45 100644
--- a/xen/arch/ia64/xen/vhpt.c
+++ b/xen/arch/ia64/xen/vhpt.c
@@ -21,9 +21,6 @@
#include <asm/vcpumask.h>
#include <asm/vmmu.h>
-/* Defined in tlb.c */
-extern void ia64_global_tlb_purge(u64 start, u64 end, u64 nbits);
-
extern long running_on_sim;
DEFINE_PER_CPU (unsigned long, vhpt_paddr);
@@ -261,12 +258,13 @@ static void __vcpu_flush_vtlb_all(void *vcpu)
vcpu_flush_vtlb_all((struct vcpu*)vcpu);
}
-void domain_flush_vtlb_all (void)
+// caller must have incremented the reference count of d somehow.
+void domain_flush_vtlb_all(struct domain* d)
{
int cpu = smp_processor_id ();
struct vcpu *v;
- for_each_vcpu (current->domain, v) {
+ for_each_vcpu(d, v) {
if (!test_bit(_VCPUF_initialised, &v->vcpu_flags))
continue;
@@ -364,7 +362,7 @@ void domain_flush_vtlb_range (struct domain *d, u64 vadr, u64 addr_range)
// ptc.ga has release semantics.
/* ptc.ga */
- ia64_global_tlb_purge(vadr,vadr+addr_range,PAGE_SHIFT);
+ platform_global_tlb_purge(vadr, vadr + addr_range, PAGE_SHIFT);
perfc_incrc(domain_flush_vtlb_range);
}
@@ -442,7 +440,8 @@ __domain_flush_vtlb_track_entry(struct domain* d,
perfc_incrc(domain_flush_vtlb_local);
} else {
/* ptc.ga has release semantics. */
- ia64_global_tlb_purge(vaddr, vaddr + PAGE_SIZE, PAGE_SHIFT);
+ platform_global_tlb_purge(vaddr, vaddr + PAGE_SIZE,
+ PAGE_SHIFT);
perfc_incrc(domain_flush_vtlb_global);
}
diff --git a/xen/arch/ia64/xen/xenasm.S b/xen/arch/ia64/xen/xenasm.S
index af5f31c5b2..8a819bfac9 100644
--- a/xen/arch/ia64/xen/xenasm.S
+++ b/xen/arch/ia64/xen/xenasm.S
@@ -256,28 +256,6 @@ GLOBAL_ENTRY(__get_domain_bundle)
;;
END(__get_domain_bundle)
-GLOBAL_ENTRY(dorfirfi)
- // Read current vcpu shared info
- movl r16=THIS_CPU(current_psr_ic_addr)
- ;;
- ld8 r19 = [r16]
- ;;
- add r16 = XSI_IIP_OFS - XSI_PSR_IC_OFS, r19
- add r17 = XSI_IPSR_OFS - XSI_PSR_IC_OFS, r19
- add r18 = XSI_IFS_OFS - XSI_PSR_IC_OFS, r19
- ;;
- ld8 r16 = [r16]
- ld8 r17 = [r17]
- ld8 r18 = [r18]
- ;;
- mov cr.iip=r16
- mov cr.ipsr=r17
- mov cr.ifs=r18
- ;;
- rfi
- ;;
-END(dorfirfi)
-
/* derived from linux/arch/ia64/hp/sim/boot/boot_head.S */
GLOBAL_ENTRY(pal_emulator_static)
mov r8=-1
diff --git a/xen/arch/ia64/xen/xencomm.c b/xen/arch/ia64/xen/xencomm.c
index 3b8e40b5b3..21eef3a51a 100644
--- a/xen/arch/ia64/xen/xencomm.c
+++ b/xen/arch/ia64/xen/xencomm.c
@@ -153,7 +153,7 @@ xencomm_copy_from_guest(
chunksz -= chunk_skip;
skip -= chunk_skip;
- if (skip == 0) {
+ if (skip == 0 && chunksz > 0) {
unsigned int bytes = min(chunksz, n - to_pos);
int res;
@@ -295,7 +295,7 @@ xencomm_copy_to_guest(
skip -= chunk_skip;
dest_paddr += chunk_skip;
- if (skip == 0) {
+ if (skip == 0 && chunksz > 0) {
unsigned int bytes = min(chunksz, n - from_pos);
int res;
diff --git a/xen/arch/ia64/xen/xensetup.c b/xen/arch/ia64/xen/xensetup.c
index 7833642fb4..0d1eaf23dd 100644
--- a/xen/arch/ia64/xen/xensetup.c
+++ b/xen/arch/ia64/xen/xensetup.c
@@ -249,6 +249,7 @@ void start_kernel(void)
unsigned long dom0_initrd_start, dom0_initrd_size;
unsigned long md_end, relo_start, relo_end, relo_size = 0;
struct domain *idle_domain;
+ struct vcpu *dom0_vcpu0;
efi_memory_desc_t *kern_md, *last_md, *md;
#ifdef CONFIG_SMP
int i;
@@ -503,8 +504,11 @@ printk("num_online_cpus=%d, max_cpus=%d\n",num_online_cpus(),max_cpus);
/* Create initial domain 0. */
dom0 = domain_create(0, 0);
- if ( (dom0 == NULL) || (alloc_vcpu(dom0, 0, 0) == NULL) )
+ if (dom0 == NULL)
panic("Error creating domain 0\n");
+ dom0_vcpu0 = alloc_vcpu(dom0, 0, 0);
+ if (dom0_vcpu0 == NULL || vcpu_late_initialise(dom0_vcpu0) != 0)
+ panic("Cannot allocate dom0 vcpu 0\n");
dom0->is_privileged = 1;
diff --git a/xen/arch/x86/hvm/hpet.c b/xen/arch/x86/hvm/hpet.c
index 5d8ec92150..e86df4e735 100644
--- a/xen/arch/x86/hvm/hpet.c
+++ b/xen/arch/x86/hvm/hpet.c
@@ -142,9 +142,13 @@ static void hpet_stop_timer(HPETState *h, unsigned int tn)
stop_timer(&h->timers[tn]);
}
+/* the number of HPET ticks that stands for
+ * 1/(2^10) second, namely, 0.9765625 milliseconds */
+#define HPET_TINY_TIME_SPAN (h->tsc_freq >> 10)
+
static void hpet_set_timer(HPETState *h, unsigned int tn)
{
- uint64_t tn_cmp, cur_tick;
+ uint64_t tn_cmp, cur_tick, diff;
ASSERT(tn < HPET_TIMER_NUM);
@@ -167,11 +171,19 @@ static void hpet_set_timer(HPETState *h, unsigned int tn)
cur_tick = (uint32_t)cur_tick;
}
- if ( (int64_t)(tn_cmp - cur_tick) > 0 )
- set_timer(&h->timers[tn], NOW() +
- hpet_tick_to_ns(h, tn_cmp-cur_tick));
- else
- set_timer(&h->timers[tn], NOW());
+ diff = tn_cmp - cur_tick;
+
+ /*
+ * Detect time values set in the past. This is hard to do for 32-bit
+ * comparators as the timer does not have to be set that far in the future
+ * for the counter difference to wrap a 32-bit signed integer. We fudge
+ * by looking for a 'small' time value in the past.
+ */
+ if ( (int64_t)diff < 0 )
+ diff = (timer_is_32bit(h, tn) && (-diff > HPET_TINY_TIME_SPAN))
+ ? (uint32_t)diff : 0;
+
+ set_timer(&h->timers[tn], NOW() + hpet_tick_to_ns(h, diff));
}
static inline uint64_t hpet_fixup_reg(
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index febc278c23..9a46f7c555 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -149,11 +149,19 @@ int hvm_domain_initialise(struct domain *d)
void hvm_domain_destroy(struct domain *d)
{
+ HVMStateEntry *se, *dse;
pit_deinit(d);
rtc_deinit(d);
pmtimer_deinit(d);
hpet_deinit(d);
+ se = d->arch.hvm_domain.first_se;
+ while (se) {
+ dse = se;
+ se = se->next;
+ xfree(dse);
+ }
+
if ( d->arch.hvm_domain.shared_page_va )
unmap_domain_page_global(
(void *)d->arch.hvm_domain.shared_page_va);
@@ -189,6 +197,9 @@ int hvm_vcpu_initialise(struct vcpu *v)
rtc_init(v, RTC_PORT(0), RTC_IRQ);
pmtimer_init(v, ACPI_PM_TMR_BLK_ADDRESS);
hpet_init(v);
+
+ /* init hvm sharepage */
+ shpage_init(v->domain, get_sp(v->domain));
/* Init guest TSC to start from zero. */
hvm_set_guest_time(v, 0);
diff --git a/xen/arch/x86/hvm/i8254.c b/xen/arch/x86/hvm/i8254.c
index 1ab512d2ee..cef7fc9a46 100644
--- a/xen/arch/x86/hvm/i8254.c
+++ b/xen/arch/x86/hvm/i8254.c
@@ -207,11 +207,11 @@ static inline void pit_load_count(PITChannelState *s, int channel, int val)
switch (s->mode) {
case 2:
/* create periodic time */
- create_periodic_time(&s->pt, period, 0, 0, pit_time_fired, s);
+ create_periodic_time(current, &s->pt, period, 0, 0, pit_time_fired, s);
break;
case 1:
/* create one shot time */
- create_periodic_time(&s->pt, period, 0, 1, pit_time_fired, s);
+ create_periodic_time(current, &s->pt, period, 0, 1, pit_time_fired, s);
#ifdef DEBUG_PIT
printk("HVM_PIT: create one shot time.\n");
#endif
@@ -356,6 +356,154 @@ void pit_stop_channel0_irq(PITState * pit)
destroy_periodic_time(&s->pt);
}
+#ifdef HVM_DEBUG_SUSPEND
+static void pit_info(PITState *pit)
+{
+ PITChannelState *s;
+ int i;
+
+ for(i = 0; i < 3; i++) {
+ printk("*****pit channel %d's state:*****\n", i);
+ s = &pit->channels[i];
+ printk("pit 0x%x.\n", s->count);
+ printk("pit 0x%x.\n", s->latched_count);
+ printk("pit 0x%x.\n", s->count_latched);
+ printk("pit 0x%x.\n", s->status_latched);
+ printk("pit 0x%x.\n", s->status);
+ printk("pit 0x%x.\n", s->read_state);
+ printk("pit 0x%x.\n", s->write_state);
+ printk("pit 0x%x.\n", s->write_latch);
+ printk("pit 0x%x.\n", s->rw_mode);
+ printk("pit 0x%x.\n", s->mode);
+ printk("pit 0x%x.\n", s->bcd);
+ printk("pit 0x%x.\n", s->gate);
+ printk("pit %"PRId64"\n", s->count_load_time);
+
+ if (s->pt) {
+ struct periodic_time *pt = s->pt;
+ printk("pit channel %d has a periodic timer:\n", i);
+ printk("pt %d.\n", pt->enabled);
+ printk("pt %d.\n", pt->one_shot);
+ printk("pt %d.\n", pt->irq);
+ printk("pt %d.\n", pt->first_injected);
+
+ printk("pt %d.\n", pt->pending_intr_nr);
+ printk("pt %d.\n", pt->period);
+ printk("pt %"PRId64"\n", pt->period_cycles);
+ printk("pt %"PRId64"\n", pt->last_plt_gtime);
+ }
+ }
+
+}
+#else
+static void pit_info(PITState *pit)
+{
+}
+#endif
+
+static void pit_save(hvm_domain_context_t *h, void *opaque)
+{
+ struct domain *d = opaque;
+ PITState *pit = &d->arch.hvm_domain.pl_time.vpit;
+ PITChannelState *s;
+ struct periodic_time *pt;
+ int i, pti = -1;
+
+ pit_info(pit);
+
+ for(i = 0; i < 3; i++) {
+ s = &pit->channels[i];
+ hvm_put_32u(h, s->count);
+ hvm_put_16u(h, s->latched_count);
+ hvm_put_8u(h, s->count_latched);
+ hvm_put_8u(h, s->status_latched);
+ hvm_put_8u(h, s->status);
+ hvm_put_8u(h, s->read_state);
+ hvm_put_8u(h, s->write_state);
+ hvm_put_8u(h, s->write_latch);
+ hvm_put_8u(h, s->rw_mode);
+ hvm_put_8u(h, s->mode);
+ hvm_put_8u(h, s->bcd);
+ hvm_put_8u(h, s->gate);
+ hvm_put_64u(h, s->count_load_time);
+
+ if (s->pt.enabled && pti == -1)
+ pti = i;
+ }
+
+ pt = &pit->channels[pti].pt;
+
+ /* save the vcpu for pt */
+ hvm_put_32u(h, pt->vcpu->vcpu_id);
+
+ /* save guest time */
+ hvm_put_8u(h, pti);
+ hvm_put_32u(h, pt->pending_intr_nr);
+ hvm_put_64u(h, pt->last_plt_gtime);
+
+}
+
+static int pit_load(hvm_domain_context_t *h, void *opaque, int version_id)
+{
+ struct domain *d = opaque;
+ PITState *pit = &d->arch.hvm_domain.pl_time.vpit;
+ PITChannelState *s;
+ int i, pti, vcpu_id;
+ u32 period;
+
+ if (version_id != 1)
+ return -EINVAL;
+
+ for(i = 0; i < 3; i++) {
+ s = &pit->channels[i];
+ s->count = hvm_get_32u(h);
+ s->latched_count = hvm_get_16u(h);
+ s->count_latched = hvm_get_8u(h);
+ s->status_latched = hvm_get_8u(h);
+ s->status = hvm_get_8u(h);
+ s->read_state = hvm_get_8u(h);
+ s->write_state = hvm_get_8u(h);
+ s->write_latch = hvm_get_8u(h);
+ s->rw_mode = hvm_get_8u(h);
+ s->mode = hvm_get_8u(h);
+ s->bcd = hvm_get_8u(h);
+ s->gate = hvm_get_8u(h);
+ s->count_load_time = hvm_get_64u(h);
+ }
+
+ vcpu_id = hvm_get_32u(h);
+
+ pti = hvm_get_8u(h);
+ if ( pti < 0 || pti > 2) {
+ printk("pit load get a wrong channel %d when HVM resume.\n", pti);
+ return -EINVAL;
+ }
+
+ s = &pit->channels[pti];
+ period = DIV_ROUND((s->count * 1000000000ULL), PIT_FREQ);
+
+ printk("recreate periodic timer %d in mode %d, freq=%d.\n", pti, s->mode, period);
+ switch (s->mode) {
+ case 2:
+ /* create periodic time */
+ create_periodic_time(d->vcpu[vcpu_id], &s->pt, period, 0, 0, pit_time_fired, s);
+ break;
+ case 1:
+ /* create one shot time */
+ create_periodic_time(d->vcpu[vcpu_id], &s->pt, period, 0, 1, pit_time_fired, s);
+ break;
+ default:
+ printk("pit mode %"PRId8" should not use periodic timer!\n", s->mode);
+ return -EINVAL;
+ }
+ s->pt.pending_intr_nr = hvm_get_32u(h);
+ s->pt.last_plt_gtime = hvm_get_64u(h);
+
+ pit_info(pit);
+
+ return 0;
+}
+
static void pit_reset(void *opaque)
{
PITState *pit = opaque;
@@ -383,6 +531,7 @@ void pit_init(struct vcpu *v, unsigned long cpu_khz)
s++; s->pt.vcpu = v;
s++; s->pt.vcpu = v;
+ hvm_register_savevm(v->domain, "xen_hvm_i8254", PIT_BASE, 1, pit_save, pit_load, v->domain);
register_portio_handler(v->domain, PIT_BASE, 4, handle_pit_io);
/* register the speaker port */
register_portio_handler(v->domain, 0x61, 1, handle_speaker_io);
diff --git a/xen/arch/x86/hvm/intercept.c b/xen/arch/x86/hvm/intercept.c
index 3de58812c3..c7b56c0111 100644
--- a/xen/arch/x86/hvm/intercept.c
+++ b/xen/arch/x86/hvm/intercept.c
@@ -29,6 +29,8 @@
#include <asm/current.h>
#include <io_ports.h>
#include <xen/event.h>
+#include <xen/compile.h>
+#include <public/version.h>
extern struct hvm_mmio_handler hpet_mmio_handler;
@@ -155,6 +157,307 @@ static inline void hvm_mmio_access(struct vcpu *v,
}
}
+/* save/restore support */
+#define HVM_FILE_MAGIC 0x54381286
+#define HVM_FILE_VERSION 0x00000001
+
+int hvm_register_savevm(struct domain *d,
+ const char *idstr,
+ int instance_id,
+ int version_id,
+ SaveStateHandler *save_state,
+ LoadStateHandler *load_state,
+ void *opaque)
+{
+ HVMStateEntry *se, **pse;
+
+ if ( (se = xmalloc(struct HVMStateEntry)) == NULL ){
+ printk("allocat hvmstate entry fail.\n");
+ return -1;
+ }
+
+ strncpy(se->idstr, idstr, HVM_SE_IDSTR_LEN);
+
+ se->instance_id = instance_id;
+ se->version_id = version_id;
+ se->save_state = save_state;
+ se->load_state = load_state;
+ se->opaque = opaque;
+ se->next = NULL;
+
+ /* add at the end of list */
+ pse = &d->arch.hvm_domain.first_se;
+ while (*pse != NULL)
+ pse = &(*pse)->next;
+ *pse = se;
+ return 0;
+}
+
+int hvm_save(struct vcpu *v, hvm_domain_context_t *h)
+{
+ uint32_t len, len_pos, cur_pos;
+ uint32_t eax, ebx, ecx, edx;
+ HVMStateEntry *se;
+ char *chgset;
+
+ if (!is_hvm_vcpu(v)) {
+ printk("hvm_save only for hvm guest!\n");
+ return -1;
+ }
+
+ memset(h, 0, sizeof(hvm_domain_context_t));
+ hvm_put_32u(h, HVM_FILE_MAGIC);
+ hvm_put_32u(h, HVM_FILE_VERSION);
+
+ /* save xen changeset */
+ chgset = strrchr(XEN_CHANGESET, ' ');
+ if ( chgset )
+ chgset++;
+ else
+ chgset = XEN_CHANGESET;
+
+ len = strlen(chgset);
+ hvm_put_8u(h, len);
+ hvm_put_buffer(h, chgset, len);
+
+ /* save cpuid */
+ cpuid(1, &eax, &ebx, &ecx, &edx);
+ hvm_put_32u(h, eax);
+
+ for(se = v->domain->arch.hvm_domain.first_se; se != NULL; se = se->next) {
+ /* ID string */
+ len = strnlen(se->idstr, HVM_SE_IDSTR_LEN);
+ hvm_put_8u(h, len);
+ hvm_put_buffer(h, se->idstr, len);
+
+ hvm_put_32u(h, se->instance_id);
+ hvm_put_32u(h, se->version_id);
+
+ /* record size */
+ len_pos = hvm_ctxt_tell(h);
+ hvm_put_32u(h, 0);
+
+ se->save_state(h, se->opaque);
+
+ cur_pos = hvm_ctxt_tell(h);
+ len = cur_pos - len_pos - 4;
+ hvm_ctxt_seek(h, len_pos);
+ hvm_put_32u(h, len);
+ hvm_ctxt_seek(h, cur_pos);
+
+ }
+
+ h->size = hvm_ctxt_tell(h);
+ hvm_ctxt_seek(h, 0);
+
+ if (h->size >= HVM_CTXT_SIZE) {
+ printk("hvm_domain_context overflow when hvm_save! need %"PRId32" bytes for use.\n", h->size);
+ return -1;
+ }
+
+ return 0;
+
+}
+
+static HVMStateEntry *find_se(struct domain *d, const char *idstr, int instance_id)
+{
+ HVMStateEntry *se;
+
+ for(se = d->arch.hvm_domain.first_se; se != NULL; se = se->next) {
+ if (!strncmp(se->idstr, idstr, HVM_SE_IDSTR_LEN) &&
+ instance_id == se->instance_id){
+ return se;
+ }
+ }
+ return NULL;
+}
+
+int hvm_load(struct vcpu *v, hvm_domain_context_t *h)
+{
+ uint32_t len, rec_len, rec_pos, magic, instance_id, version_id;
+ uint32_t eax, ebx, ecx, edx;
+ HVMStateEntry *se;
+ char idstr[HVM_SE_IDSTR_LEN];
+ xen_changeset_info_t chgset;
+ char *cur_chgset;
+ int ret;
+
+ if (!is_hvm_vcpu(v)) {
+ printk("hvm_load only for hvm guest!\n");
+ return -1;
+ }
+
+ if (h->size >= HVM_CTXT_SIZE) {
+ printk("hvm_load fail! seems hvm_domain_context overflow when hvm_save! need %"PRId32" bytes.\n", h->size);
+ return -1;
+ }
+
+ hvm_ctxt_seek(h, 0);
+
+ magic = hvm_get_32u(h);
+ if (magic != HVM_FILE_MAGIC) {
+ printk("HVM restore magic dismatch!\n");
+ return -1;
+ }
+
+ magic = hvm_get_32u(h);
+ if (magic != HVM_FILE_VERSION) {
+ printk("HVM restore version dismatch!\n");
+ return -1;
+ }
+
+ /* check xen change set */
+ cur_chgset = strrchr(XEN_CHANGESET, ' ');
+ if ( cur_chgset )
+ cur_chgset++;
+ else
+ cur_chgset = XEN_CHANGESET;
+
+ len = hvm_get_8u(h);
+ if (len > 20) { /*typical length is 18 -- "revision number:changeset id" */
+ printk("wrong change set length %d when hvm restore!\n", len);
+ return -1;
+ }
+
+ hvm_get_buffer(h, chgset, len);
+ chgset[len] = '\0';
+ if (strncmp(cur_chgset, chgset, len + 1))
+ printk("warnings: try to restore hvm guest(%s) on a different changeset %s.\n",
+ chgset, cur_chgset);
+
+
+ if ( !strcmp(cur_chgset, "unavailable") )
+ printk("warnings: try to restore hvm guest when changeset is unavailable.\n");
+
+
+ /* check cpuid */
+ cpuid(1, &eax, &ebx, &ecx, &edx);
+ ebx = hvm_get_32u(h);
+ /*TODO: need to define how big a difference is acceptable */
+ if (ebx != eax)
+ printk("warnings: try to restore hvm guest(0x%"PRIx32") "
+ "on a different type processor(0x%"PRIx32").\n",
+ ebx,
+ eax);
+
+ while(1) {
+ if (hvm_ctxt_end(h)) {
+ break;
+ }
+
+ /* ID string */
+ len = hvm_get_8u(h);
+ if (len > HVM_SE_IDSTR_LEN) {
+ printk("wrong HVM save entry idstr len %d!", len);
+ return -1;
+ }
+
+ hvm_get_buffer(h, idstr, len);
+ idstr[len] = '\0';
+
+ instance_id = hvm_get_32u(h);
+ version_id = hvm_get_32u(h);
+
+ rec_len = hvm_get_32u(h);
+ rec_pos = hvm_ctxt_tell(h);
+
+ se = find_se(v->domain, idstr, instance_id);
+ if (se == NULL) {
+ printk("warnings: hvm load can't find device %s's instance %d!\n",
+ idstr, instance_id);
+ } else {
+ ret = se->load_state(h, se->opaque, version_id);
+ if (ret < 0)
+ printk("warnings: loading state fail for device %s instance %d!\n",
+ idstr, instance_id);
+ }
+
+
+ /* make sure to jump end of record */
+ if ( hvm_ctxt_tell(h) - rec_pos != rec_len) {
+ printk("wrong hvm record size, maybe some dismatch between save&restore handler!\n");
+ }
+ hvm_ctxt_seek(h, rec_pos + rec_len);
+ }
+
+ return 0;
+}
+
+int arch_gethvm_ctxt(
+ struct vcpu *v, struct hvm_domain_context *c)
+{
+ if ( !is_hvm_vcpu(v) )
+ return -1;
+
+ return hvm_save(v, c);
+
+}
+
+int arch_sethvm_ctxt(
+ struct vcpu *v, struct hvm_domain_context *c)
+{
+ return hvm_load(v, c);
+}
+
+#ifdef HVM_DEBUG_SUSPEND
+static void shpage_info(shared_iopage_t *sh)
+{
+
+ vcpu_iodata_t *p = &sh->vcpu_iodata[0];
+ ioreq_t *req = &p->vp_ioreq;
+ printk("*****sharepage_info******!\n");
+ printk("vp_eport=%d\n", p->vp_eport);
+ printk("io packet: "
+ "state:%x, pvalid: %x, dir:%x, port: %"PRIx64", "
+ "data: %"PRIx64", count: %"PRIx64", size: %"PRIx64"\n",
+ req->state, req->data_is_ptr, req->dir, req->addr,
+ req->data, req->count, req->size);
+}
+#else
+static void shpage_info(shared_iopage_t *sh)
+{
+}
+#endif
+
+static void shpage_save(hvm_domain_context_t *h, void *opaque)
+{
+ /* XXX:no action required for shpage save/restore, since it's in guest memory
+ * keep it for debug purpose only */
+
+#if 0
+ struct shared_iopage *s = opaque;
+ /* XXX:smp */
+ struct ioreq *req = &s->vcpu_iodata[0].vp_ioreq;
+
+ shpage_info(s);
+
+ hvm_put_buffer(h, (char*)req, sizeof(struct ioreq));
+#endif
+}
+
+static int shpage_load(hvm_domain_context_t *h, void *opaque, int version_id)
+{
+ struct shared_iopage *s = opaque;
+#if 0
+ /* XXX:smp */
+ struct ioreq *req = &s->vcpu_iodata[0].vp_ioreq;
+
+ if (version_id != 1)
+ return -EINVAL;
+
+ hvm_get_buffer(h, (char*)req, sizeof(struct ioreq));
+
+
+#endif
+ shpage_info(s);
+ return 0;
+}
+
+void shpage_init(struct domain *d, shared_iopage_t *sp)
+{
+ hvm_register_savevm(d, "xen_hvm_shpage", 0x10, 1, shpage_save, shpage_load, sp);
+}
+
int hvm_buffered_io_intercept(ioreq_t *p)
{
struct vcpu *v = current;
diff --git a/xen/arch/x86/hvm/rtc.c b/xen/arch/x86/hvm/rtc.c
index 07e79d7c3c..c8c76fbc3d 100644
--- a/xen/arch/x86/hvm/rtc.c
+++ b/xen/arch/x86/hvm/rtc.c
@@ -62,7 +62,7 @@ static void rtc_timer_update(RTCState *s)
#ifdef DEBUG_RTC
printk("HVM_RTC: period = %uns\n", period);
#endif
- create_periodic_time(&s->pt, period, RTC_IRQ, 0, rtc_periodic_cb, s);
+ create_periodic_time(current, &s->pt, period, RTC_IRQ, 0, rtc_periodic_cb, s);
}
else
destroy_periodic_time(&s->pt);
diff --git a/xen/arch/x86/hvm/vioapic.c b/xen/arch/x86/hvm/vioapic.c
index 6f68a85b77..7e23726b29 100644
--- a/xen/arch/x86/hvm/vioapic.c
+++ b/xen/arch/x86/hvm/vioapic.c
@@ -473,11 +473,143 @@ void vioapic_update_EOI(struct domain *d, int vector)
spin_unlock(&hvm_irq->lock);
}
+#ifdef HVM_DEBUG_SUSPEND
+static void ioapic_info(struct vioapic *s)
+{
+ int i;
+ printk("*****ioapic state:*****\n");
+ printk("ioapic 0x%x.\n", s->ioregsel);
+ printk("ioapic 0x%x.\n", s->id);
+ printk("ioapic 0x%lx.\n", s->base_address);
+ for (i = 0; i < VIOAPIC_NUM_PINS; i++) {
+ printk("ioapic redirtbl[%d]:0x%"PRIx64"\n", i, s->redirtbl[i].bits);
+ }
+
+}
+static void hvmirq_info(struct hvm_irq *hvm_irq)
+{
+ int i;
+ printk("*****hvmirq state:*****\n");
+ for (i = 0; i < BITS_TO_LONGS(32*4); i++)
+ printk("hvmirq pci_intx[%d]:0x%lx.\n", i, hvm_irq->pci_intx[i]);
+
+ for (i = 0; i < BITS_TO_LONGS(16); i++)
+ printk("hvmirq isa_irq[%d]:0x%lx.\n", i, hvm_irq->isa_irq[i]);
+
+ for (i = 0; i < BITS_TO_LONGS(1); i++)
+ printk("hvmirq callback_irq_wire[%d]:0x%lx.\n", i, hvm_irq->callback_irq_wire[i]);
+
+ printk("hvmirq callback_via_type:0x%x.\n", hvm_irq->callback_via_type);
+ printk("hvmirq callback_via:0x%x.\n", hvm_irq->callback_via.gsi);
+
+
+ for (i = 0; i < 4; i++)
+ printk("hvmirq pci_link_route[%d]:0x%"PRIx8".\n", i, hvm_irq->pci_link_route[i]);
+
+ for (i = 0; i < 4; i++)
+ printk("hvmirq pci_link_assert_count[%d]:0x%"PRIx8".\n", i, hvm_irq->pci_link_assert_count[i]);
+
+ for (i = 0; i < VIOAPIC_NUM_PINS; i++)
+ printk("hvmirq gsi_assert_count[%d]:0x%"PRIx8".\n", i, hvm_irq->gsi_assert_count[i]);
+
+ printk("hvmirq round_robin_prev_vcpu:0x%"PRIx8".\n", hvm_irq->round_robin_prev_vcpu);
+}
+#else
+static void ioapic_info(struct vioapic *s)
+{
+}
+static void hvmirq_info(struct hvm_irq *hvm_irq)
+{
+}
+#endif
+
+static void ioapic_save(hvm_domain_context_t *h, void *opaque)
+{
+ int i;
+ struct domain *d = opaque;
+ struct vioapic *s = domain_vioapic(d);
+ struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
+
+ ioapic_info(s);
+ hvmirq_info(hvm_irq);
+
+ /* save iopaic state*/
+ hvm_put_32u(h, s->ioregsel);
+ hvm_put_32u(h, s->id);
+ hvm_put_64u(h, s->base_address);
+ for (i = 0; i < VIOAPIC_NUM_PINS; i++) {
+ hvm_put_64u(h, s->redirtbl[i].bits);
+ }
+
+ /* save hvm irq state */
+ hvm_put_buffer(h, (char*)hvm_irq->pci_intx, 16);
+ hvm_put_buffer(h, (char*)hvm_irq->isa_irq, 2);
+ hvm_put_32u(h, hvm_irq->callback_via_asserted);
+ hvm_put_32u(h, hvm_irq->callback_via_type);
+ hvm_put_32u(h, hvm_irq->callback_via.gsi);
+
+ for (i = 0; i < 4; i++)
+ hvm_put_8u(h, hvm_irq->pci_link_route[i]);
+
+ for (i = 0; i < 4; i++)
+ hvm_put_8u(h, hvm_irq->pci_link_assert_count[i]);
+
+ for (i = 0; i < VIOAPIC_NUM_PINS; i++)
+ hvm_put_8u(h, hvm_irq->gsi_assert_count[i]);
+
+ hvm_put_8u(h, hvm_irq->round_robin_prev_vcpu);
+
+}
+
+static int ioapic_load(hvm_domain_context_t *h, void *opaque, int version_id)
+{
+ int i;
+ struct domain *d = opaque;
+ struct vioapic *s = domain_vioapic(d);
+ struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
+
+ if (version_id != 1)
+ return -EINVAL;
+
+ /* restore ioapic state */
+ s->ioregsel = hvm_get_32u(h);
+ s->id = hvm_get_32u(h);
+ s->base_address = hvm_get_64u(h);
+ for (i = 0; i < VIOAPIC_NUM_PINS; i++) {
+ s->redirtbl[i].bits = hvm_get_64u(h);
+ }
+
+ /* restore irq state */
+ hvm_get_buffer(h, (char*)hvm_irq->pci_intx, 16);
+ hvm_get_buffer(h, (char*)hvm_irq->isa_irq, 2);
+ hvm_irq->callback_via_asserted = hvm_get_32u(h);
+ hvm_irq->callback_via_type = hvm_get_32u(h);
+ hvm_irq->callback_via.gsi = hvm_get_32u(h);
+
+ for (i = 0; i < 4; i++)
+ hvm_irq->pci_link_route[i] = hvm_get_8u(h);
+
+ for (i = 0; i < 4; i++)
+ hvm_irq->pci_link_assert_count[i] = hvm_get_8u(h);
+
+ for (i = 0; i < VIOAPIC_NUM_PINS; i++)
+ hvm_irq->gsi_assert_count[i] = hvm_get_8u(h);
+
+ hvm_irq->round_robin_prev_vcpu = hvm_get_8u(h);
+
+ ioapic_info(s);
+ hvmirq_info(hvm_irq);
+
+ return 0;
+}
+
void vioapic_init(struct domain *d)
{
struct vioapic *vioapic = domain_vioapic(d);
int i;
+ hvm_register_savevm(d, "xen_hvm_ioapic", 0, 1, ioapic_save, ioapic_load, d);
+
memset(vioapic, 0, sizeof(*vioapic));
for ( i = 0; i < VIOAPIC_NUM_PINS; i++ )
vioapic->redirtbl[i].fields.mask = 1;
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index b1b42650f4..4be3f55e1e 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -659,7 +659,7 @@ static void vlapic_write(struct vcpu *v, unsigned long address,
uint64_t period = APIC_BUS_CYCLE_NS * (uint32_t)val * vlapic->timer_divisor;
vlapic_set_reg(vlapic, APIC_TMICT, val);
- create_periodic_time(&vlapic->pt, period, vlapic->pt.irq,
+ create_periodic_time(current, &vlapic->pt, period, vlapic->pt.irq,
vlapic_lvtt_period(vlapic), NULL, vlapic);
HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
@@ -795,6 +795,77 @@ static int vlapic_reset(struct vlapic *vlapic)
return 1;
}
+#ifdef HVM_DEBUG_SUSPEND
+static void lapic_info(struct vlapic *s)
+{
+ printk("*****lapic state:*****\n");
+ printk("lapic 0x%"PRIx64".\n", s->apic_base_msr);
+ printk("lapic 0x%x.\n", s->disabled);
+ printk("lapic 0x%x.\n", s->timer_divisor);
+ printk("lapic 0x%x.\n", s->timer_pending_count);
+}
+#else
+static void lapic_info(struct vlapic *s)
+{
+}
+#endif
+
+static void lapic_save(hvm_domain_context_t *h, void *opaque)
+{
+ struct vlapic *s = opaque;
+
+ lapic_info(s);
+
+ hvm_put_64u(h, s->apic_base_msr);
+ hvm_put_32u(h, s->disabled);
+ hvm_put_32u(h, s->timer_divisor);
+
+ /*XXX: need this?*/
+ hvm_put_32u(h, s->timer_pending_count);
+
+ hvm_put_buffer(h, (char*)s->regs, 0x3f0);
+
+}
+
+static int lapic_load(hvm_domain_context_t *h, void *opaque, int version_id)
+{
+ struct vlapic *s = opaque;
+ struct vcpu *v = vlapic_vcpu(s);
+ unsigned long tmict;
+
+ if (version_id != 1)
+ return -EINVAL;
+
+ s->apic_base_msr = hvm_get_64u(h);
+ s->disabled = hvm_get_32u(h);
+ s->timer_divisor = hvm_get_32u(h);
+
+ /*XXX: need this?*/
+ s->timer_pending_count = hvm_get_32u(h);
+
+ hvm_get_buffer(h, (char*)s->regs, 0x3f0);
+
+ /* rearm the actimer if needed */
+ tmict = vlapic_get_reg(s, APIC_TMICT);
+ if (tmict > 0) {
+ uint64_t period = APIC_BUS_CYCLE_NS * (uint32_t)tmict * s->timer_divisor;
+
+ create_periodic_time(v, &s->pt, period, s->pt.irq,
+ vlapic_lvtt_period(s), NULL, s);
+
+ printk("lapic_load to rearm the actimer:"
+ "bus cycle is %uns, "
+ "saved tmict count %lu, period %"PRIu64"ns\n",
+ APIC_BUS_CYCLE_NS, tmict, period);
+
+ }
+
+
+ lapic_info(s);
+
+ return 0;
+}
+
int vlapic_init(struct vcpu *v)
{
struct vlapic *vlapic = vcpu_vlapic(v);
@@ -813,6 +884,7 @@ int vlapic_init(struct vcpu *v)
vlapic->regs = map_domain_page_global(page_to_mfn(vlapic->regs_page));
memset(vlapic->regs, 0, PAGE_SIZE);
+ hvm_register_savevm(v->domain, "xen_hvm_lapic", v->vcpu_id, 1, lapic_save, lapic_load, vlapic);
vlapic_reset(vlapic);
vlapic->apic_base_msr = MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index a665367c6d..77abecec63 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -363,6 +363,299 @@ static inline void __restore_debug_registers(struct vcpu *v)
/* DR7 is loaded from the VMCS. */
}
+static int __get_instruction_length(void);
+int vmx_vmcs_save(struct vcpu *v, struct vmcs_data *c)
+{
+ unsigned long inst_len;
+
+ inst_len = __get_instruction_length();
+ c->eip = __vmread(GUEST_RIP);
+
+#ifdef HVM_DEBUG_SUSPEND
+ printk("vmx_vmcs_save: inst_len=0x%lx, eip=0x%"PRIx64".\n",
+ inst_len, c->eip);
+#endif
+
+ c->esp = __vmread(GUEST_RSP);
+ c->eflags = __vmread(GUEST_RFLAGS);
+
+ c->cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
+ c->cr3 = v->arch.hvm_vmx.cpu_cr3;
+ c->cr4 = v->arch.hvm_vmx.cpu_shadow_cr4;
+
+#ifdef HVM_DEBUG_SUSPEND
+ printk("vmx_vmcs_save: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
+ c->cr3,
+ c->cr0,
+ c->cr4);
+#endif
+
+ c->idtr_limit = __vmread(GUEST_IDTR_LIMIT);
+ c->idtr_base = __vmread(GUEST_IDTR_BASE);
+
+ c->gdtr_limit = __vmread(GUEST_GDTR_LIMIT);
+ c->gdtr_base = __vmread(GUEST_GDTR_BASE);
+
+ c->cs_sel = __vmread(GUEST_CS_SELECTOR);
+ c->cs_limit = __vmread(GUEST_CS_LIMIT);
+ c->cs_base = __vmread(GUEST_CS_BASE);
+ c->cs_arbytes = __vmread(GUEST_CS_AR_BYTES);
+
+ c->ds_sel = __vmread(GUEST_DS_SELECTOR);
+ c->ds_limit = __vmread(GUEST_DS_LIMIT);
+ c->ds_base = __vmread(GUEST_DS_BASE);
+ c->ds_arbytes = __vmread(GUEST_DS_AR_BYTES);
+
+ c->es_sel = __vmread(GUEST_ES_SELECTOR);
+ c->es_limit = __vmread(GUEST_ES_LIMIT);
+ c->es_base = __vmread(GUEST_ES_BASE);
+ c->es_arbytes = __vmread(GUEST_ES_AR_BYTES);
+
+ c->ss_sel = __vmread(GUEST_SS_SELECTOR);
+ c->ss_limit = __vmread(GUEST_SS_LIMIT);
+ c->ss_base = __vmread(GUEST_SS_BASE);
+ c->ss_arbytes = __vmread(GUEST_SS_AR_BYTES);
+
+ c->fs_sel = __vmread(GUEST_FS_SELECTOR);
+ c->fs_limit = __vmread(GUEST_FS_LIMIT);
+ c->fs_base = __vmread(GUEST_FS_BASE);
+ c->fs_arbytes = __vmread(GUEST_FS_AR_BYTES);
+
+ c->gs_sel = __vmread(GUEST_GS_SELECTOR);
+ c->gs_limit = __vmread(GUEST_GS_LIMIT);
+ c->gs_base = __vmread(GUEST_GS_BASE);
+ c->gs_arbytes = __vmread(GUEST_GS_AR_BYTES);
+
+ c->tr_sel = __vmread(GUEST_TR_SELECTOR);
+ c->tr_limit = __vmread(GUEST_TR_LIMIT);
+ c->tr_base = __vmread(GUEST_TR_BASE);
+ c->tr_arbytes = __vmread(GUEST_TR_AR_BYTES);
+
+ c->ldtr_sel = __vmread(GUEST_LDTR_SELECTOR);
+ c->ldtr_limit = __vmread(GUEST_LDTR_LIMIT);
+ c->ldtr_base = __vmread(GUEST_LDTR_BASE);
+ c->ldtr_arbytes = __vmread(GUEST_LDTR_AR_BYTES);
+
+ c->sysenter_cs = __vmread(GUEST_SYSENTER_CS);
+ c->sysenter_esp = __vmread(GUEST_SYSENTER_ESP);
+ c->sysenter_eip = __vmread(GUEST_SYSENTER_EIP);
+
+ return 1;
+}
+
+int vmx_vmcs_restore(struct vcpu *v, struct vmcs_data *c)
+{
+ unsigned long mfn, old_base_mfn;
+
+ vmx_vmcs_enter(v);
+
+ __vmwrite(GUEST_RIP, c->eip);
+ __vmwrite(GUEST_RSP, c->esp);
+ __vmwrite(GUEST_RFLAGS, c->eflags);
+
+ v->arch.hvm_vmx.cpu_shadow_cr0 = c->cr0;
+ __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
+
+#ifdef HVM_DEBUG_SUSPEND
+ printk("vmx_vmcs_restore: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
+ c->cr3,
+ c->cr0,
+ c->cr4);
+#endif
+
+ if (!vmx_paging_enabled(v)) {
+ printk("vmx_vmcs_restore: paging not enabled.");
+ goto skip_cr3;
+ }
+
+ if (c->cr3 == v->arch.hvm_vmx.cpu_cr3) {
+ /*
+ * This is simple TLB flush, implying the guest has
+ * removed some translation or changed page attributes.
+ * We simply invalidate the shadow.
+ */
+ mfn = gmfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT);
+ if (mfn != pagetable_get_pfn(v->arch.guest_table)) {
+ goto bad_cr3;
+ }
+ } else {
+ /*
+ * If different, make a shadow. Check if the PDBR is valid
+ * first.
+ */
+ HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %"PRIx64"", c->cr3);
+ /* current!=vcpu as not called by arch_vmx_do_launch */
+ mfn = gmfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT);
+ if( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain)) {
+ goto bad_cr3;
+ }
+ old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
+ v->arch.guest_table = pagetable_from_pfn(mfn);
+ if (old_base_mfn)
+ put_page(mfn_to_page(old_base_mfn));
+ /*
+ * arch.shadow_table should now hold the next CR3 for shadow
+ */
+ v->arch.hvm_vmx.cpu_cr3 = c->cr3;
+ }
+
+ skip_cr3:
+#if defined(__x86_64__)
+ if (vmx_long_mode_enabled(v)) {
+ unsigned long vm_entry_value;
+ vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
+ vm_entry_value |= VM_ENTRY_IA32E_MODE;
+ __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
+ }
+#endif
+
+ __vmwrite(GUEST_CR4, (c->cr4 | VMX_CR4_HOST_MASK));
+ v->arch.hvm_vmx.cpu_shadow_cr4 = c->cr4;
+ __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
+
+ __vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
+ __vmwrite(GUEST_IDTR_BASE, c->idtr_base);
+
+ __vmwrite(GUEST_GDTR_LIMIT, c->gdtr_limit);
+ __vmwrite(GUEST_GDTR_BASE, c->gdtr_base);
+
+ __vmwrite(GUEST_CS_SELECTOR, c->cs_sel);
+ __vmwrite(GUEST_CS_LIMIT, c->cs_limit);
+ __vmwrite(GUEST_CS_BASE, c->cs_base);
+ __vmwrite(GUEST_CS_AR_BYTES, c->cs_arbytes);
+
+ __vmwrite(GUEST_DS_SELECTOR, c->ds_sel);
+ __vmwrite(GUEST_DS_LIMIT, c->ds_limit);
+ __vmwrite(GUEST_DS_BASE, c->ds_base);
+ __vmwrite(GUEST_DS_AR_BYTES, c->ds_arbytes);
+
+ __vmwrite(GUEST_ES_SELECTOR, c->es_sel);
+ __vmwrite(GUEST_ES_LIMIT, c->es_limit);
+ __vmwrite(GUEST_ES_BASE, c->es_base);
+ __vmwrite(GUEST_ES_AR_BYTES, c->es_arbytes);
+
+ __vmwrite(GUEST_SS_SELECTOR, c->ss_sel);
+ __vmwrite(GUEST_SS_LIMIT, c->ss_limit);
+ __vmwrite(GUEST_SS_BASE, c->ss_base);
+ __vmwrite(GUEST_SS_AR_BYTES, c->ss_arbytes);
+
+ __vmwrite(GUEST_FS_SELECTOR, c->fs_sel);
+ __vmwrite(GUEST_FS_LIMIT, c->fs_limit);
+ __vmwrite(GUEST_FS_BASE, c->fs_base);
+ __vmwrite(GUEST_FS_AR_BYTES, c->fs_arbytes);
+
+ __vmwrite(GUEST_GS_SELECTOR, c->gs_sel);
+ __vmwrite(GUEST_GS_LIMIT, c->gs_limit);
+ __vmwrite(GUEST_GS_BASE, c->gs_base);
+ __vmwrite(GUEST_GS_AR_BYTES, c->gs_arbytes);
+
+ __vmwrite(GUEST_TR_SELECTOR, c->tr_sel);
+ __vmwrite(GUEST_TR_LIMIT, c->tr_limit);
+ __vmwrite(GUEST_TR_BASE, c->tr_base);
+ __vmwrite(GUEST_TR_AR_BYTES, c->tr_arbytes);
+
+ __vmwrite(GUEST_LDTR_SELECTOR, c->ldtr_sel);
+ __vmwrite(GUEST_LDTR_LIMIT, c->ldtr_limit);
+ __vmwrite(GUEST_LDTR_BASE, c->ldtr_base);
+ __vmwrite(GUEST_LDTR_AR_BYTES, c->ldtr_arbytes);
+
+ __vmwrite(GUEST_SYSENTER_CS, c->sysenter_cs);
+ __vmwrite(GUEST_SYSENTER_ESP, c->sysenter_esp);
+ __vmwrite(GUEST_SYSENTER_EIP, c->sysenter_eip);
+
+ vmx_vmcs_exit(v);
+
+ shadow_update_paging_modes(v);
+ return 0;
+
+ bad_cr3:
+ gdprintk(XENLOG_ERR, "Invalid CR3 value=0x%"PRIx64"", c->cr3);
+ vmx_vmcs_exit(v);
+ return -EINVAL;
+}
+
+#ifdef HVM_DEBUG_SUSPEND
+static void dump_msr_state(struct vmx_msr_state *m)
+{
+ int i = 0;
+ printk("**** msr state ****\n");
+ printk("shadow_gs=0x%lx, flags=0x%lx, msr_items:", m->shadow_gs, m->flags);
+ for (i = 0; i < VMX_MSR_COUNT; i++)
+ printk("0x%lx,", m->msrs[i]);
+ printk("\n");
+}
+#else
+static void dump_msr_state(struct vmx_msr_state *m)
+{
+}
+#endif
+
+void vmx_save_cpu_state(struct vcpu *v, struct hvmcpu_context *ctxt)
+{
+ struct vmcs_data *data = &ctxt->data;
+ struct vmx_msr_state *guest_state = &v->arch.hvm_vmx.msr_state;
+ unsigned long guest_flags = guest_state->flags;
+ int i = 0;
+
+ data->shadow_gs = guest_state->shadow_gs;
+ data->vmxassist_enabled = v->arch.hvm_vmx.vmxassist_enabled;
+ /* save msrs */
+ data->flags = guest_flags;
+ for (i = 0; i < VMX_MSR_COUNT; i++)
+ data->msr_items[i] = guest_state->msrs[i];
+
+ dump_msr_state(guest_state);
+}
+
+void vmx_load_cpu_state(struct vcpu *v, struct hvmcpu_context *ctxt)
+{
+ int i = 0;
+ struct vmcs_data *data = &ctxt->data;
+ struct vmx_msr_state *guest_state = &v->arch.hvm_vmx.msr_state;
+
+ /* restore msrs */
+ guest_state->flags = data->flags;
+ for (i = 0; i < VMX_MSR_COUNT; i++)
+ guest_state->msrs[i] = data->msr_items[i];
+
+ guest_state->shadow_gs = data->shadow_gs;
+
+ /*XXX:no need to restore msrs, current!=vcpu as not called by arch_vmx_do_launch */
+/* vmx_restore_guest_msrs(v);*/
+
+ v->arch.hvm_vmx.vmxassist_enabled = data->vmxassist_enabled;
+
+ dump_msr_state(guest_state);
+}
+
+void vmx_save_vmcs_ctxt(struct vcpu *v, struct hvmcpu_context *ctxt)
+{
+ struct vmcs_data *data = &ctxt->data;
+
+ vmx_save_cpu_state(v, ctxt);
+
+ vmx_vmcs_enter(v);
+
+ vmx_vmcs_save(v, data);
+
+ vmx_vmcs_exit(v);
+
+}
+
+void vmx_load_vmcs_ctxt(struct vcpu *v, struct hvmcpu_context *ctxt)
+{
+ vmx_load_cpu_state(v, ctxt);
+
+ if (vmx_vmcs_restore(v, &ctxt->data)) {
+ printk("vmx_vmcs restore failed!\n");
+ domain_crash(v->domain);
+ }
+
+ /* only load vmcs once */
+ ctxt->valid = 0;
+
+}
+
/*
* DR7 is saved and restored on every vmexit. Other debug registers only
* need to be restored if their value is going to affect execution -- i.e.,
@@ -721,6 +1014,9 @@ static void vmx_setup_hvm_funcs(void)
hvm_funcs.store_cpu_guest_regs = vmx_store_cpu_guest_regs;
hvm_funcs.load_cpu_guest_regs = vmx_load_cpu_guest_regs;
+ hvm_funcs.save_cpu_ctxt = vmx_save_vmcs_ctxt;
+ hvm_funcs.load_cpu_ctxt = vmx_load_vmcs_ctxt;
+
hvm_funcs.paging_enabled = vmx_paging_enabled;
hvm_funcs.long_mode_enabled = vmx_long_mode_enabled;
hvm_funcs.pae_enabled = vmx_pae_enabled;
diff --git a/xen/arch/x86/hvm/vpic.c b/xen/arch/x86/hvm/vpic.c
index edd7e6b4ac..4f36dd3bdf 100644
--- a/xen/arch/x86/hvm/vpic.c
+++ b/xen/arch/x86/hvm/vpic.c
@@ -378,6 +378,87 @@ static int vpic_intercept_elcr_io(ioreq_t *p)
return 1;
}
+#ifdef HVM_DEBUG_SUSPEND
+static void vpic_info(struct vpic *s)
+{
+ printk("*****pic state:*****\n");
+ printk("pic 0x%x.\n", s->irr);
+ printk("pic 0x%x.\n", s->imr);
+ printk("pic 0x%x.\n", s->isr);
+ printk("pic 0x%x.\n", s->irq_base);
+ printk("pic 0x%x.\n", s->init_state);
+ printk("pic 0x%x.\n", s->priority_add);
+ printk("pic 0x%x.\n", s->readsel_isr);
+ printk("pic 0x%x.\n", s->poll);
+ printk("pic 0x%x.\n", s->auto_eoi);
+ printk("pic 0x%x.\n", s->rotate_on_auto_eoi);
+ printk("pic 0x%x.\n", s->special_fully_nested_mode);
+ printk("pic 0x%x.\n", s->special_mask_mode);
+ printk("pic 0x%x.\n", s->elcr);
+ printk("pic 0x%x.\n", s->int_output);
+ printk("pic 0x%x.\n", s->is_master);
+}
+#else
+static void vpic_info(struct vpic *s)
+{
+}
+#endif
+
+static void vpic_save(hvm_domain_context_t *h, void *opaque)
+{
+ struct vpic *s = opaque;
+
+ vpic_info(s);
+
+ hvm_put_8u(h, s->irr);
+ hvm_put_8u(h, s->imr);
+ hvm_put_8u(h, s->isr);
+ hvm_put_8u(h, s->irq_base);
+ hvm_put_8u(h, s->init_state);
+ hvm_put_8u(h, s->priority_add);
+ hvm_put_8u(h, s->readsel_isr);
+
+ hvm_put_8u(h, s->poll);
+ hvm_put_8u(h, s->auto_eoi);
+
+ hvm_put_8u(h, s->rotate_on_auto_eoi);
+ hvm_put_8u(h, s->special_fully_nested_mode);
+ hvm_put_8u(h, s->special_mask_mode);
+
+ hvm_put_8u(h, s->elcr);
+ hvm_put_8u(h, s->int_output);
+}
+
+static int vpic_load(hvm_domain_context_t *h, void *opaque, int version_id)
+{
+ struct vpic *s = opaque;
+
+ if (version_id != 1)
+ return -EINVAL;
+
+ s->irr = hvm_get_8u(h);
+ s->imr = hvm_get_8u(h);
+ s->isr = hvm_get_8u(h);
+ s->irq_base = hvm_get_8u(h);
+ s->init_state = hvm_get_8u(h);
+ s->priority_add = hvm_get_8u(h);
+ s->readsel_isr = hvm_get_8u(h);
+
+ s->poll = hvm_get_8u(h);
+ s->auto_eoi = hvm_get_8u(h);
+
+ s->rotate_on_auto_eoi = hvm_get_8u(h);
+ s->special_fully_nested_mode = hvm_get_8u(h);
+ s->special_mask_mode = hvm_get_8u(h);
+
+ s->elcr = hvm_get_8u(h);
+ s->int_output = hvm_get_8u(h);
+
+ vpic_info(s);
+
+ return 0;
+}
+
void vpic_init(struct domain *d)
{
struct vpic *vpic;
@@ -387,12 +468,14 @@ void vpic_init(struct domain *d)
memset(vpic, 0, sizeof(*vpic));
vpic->is_master = 1;
vpic->elcr = 1 << 2;
+ hvm_register_savevm(d, "xen_hvm_i8259", 0x20, 1, vpic_save, vpic_load, vpic);
register_portio_handler(d, 0x20, 2, vpic_intercept_pic_io);
register_portio_handler(d, 0x4d0, 1, vpic_intercept_elcr_io);
/* Slave PIC. */
vpic++;
memset(vpic, 0, sizeof(*vpic));
+ hvm_register_savevm(d, "xen_hvm_i8259", 0xa0, 1, vpic_save, vpic_load, vpic);
register_portio_handler(d, 0xa0, 2, vpic_intercept_pic_io);
register_portio_handler(d, 0x4d1, 1, vpic_intercept_elcr_io);
}
diff --git a/xen/arch/x86/hvm/vpt.c b/xen/arch/x86/hvm/vpt.c
index 12e8999582..626ad6ed44 100644
--- a/xen/arch/x86/hvm/vpt.c
+++ b/xen/arch/x86/hvm/vpt.c
@@ -169,7 +169,9 @@ void pt_intr_post(struct vcpu *v, int vector, int type)
pt->pending_intr_nr--;
pt->last_plt_gtime += pt->period_cycles;
- hvm_set_guest_time(pt->vcpu, pt->last_plt_gtime);
+
+ if ( hvm_get_guest_time(pt->vcpu) < pt->last_plt_gtime )
+ hvm_set_guest_time(pt->vcpu, pt->last_plt_gtime);
if ( pt->cb != NULL )
pt->cb(pt->vcpu, pt->priv);
@@ -195,7 +197,7 @@ void pt_reset(struct vcpu *v)
}
}
-void create_periodic_time(struct periodic_time *pt, uint64_t period,
+void create_periodic_time(struct vcpu *v, struct periodic_time *pt, uint64_t period,
uint8_t irq, char one_shot, time_cb *cb, void *data)
{
destroy_periodic_time(pt);
@@ -209,7 +211,7 @@ void create_periodic_time(struct periodic_time *pt, uint64_t period,
period = 900000; /* force to 0.9ms */
}
pt->period = period;
- pt->vcpu = current;
+ pt->vcpu = v;
pt->last_plt_gtime = hvm_get_guest_time(pt->vcpu);
pt->irq = irq;
pt->period_cycles = (u64)period * cpu_khz / 1000000L;
@@ -218,7 +220,7 @@ void create_periodic_time(struct periodic_time *pt, uint64_t period,
pt->cb = cb;
pt->priv = data;
- list_add(&pt->list, &current->arch.hvm_vcpu.tm_list);
+ list_add(&pt->list, &v->arch.hvm_vcpu.tm_list);
set_timer(&pt->timer, pt->scheduled);
}
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 115ad74d72..b821222634 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -2184,10 +2184,11 @@ int sh_remove_all_mappings(struct vcpu *v, mfn_t gmfn)
expected_count = (page->count_info & PGC_allocated) ? 1 : 0;
if ( (page->count_info & PGC_count_mask) != expected_count )
{
- /* Don't complain if we're in HVM and there's one extra mapping:
- * The qemu helper process has an untyped mapping of this dom's RAM */
+ /* Don't complain if we're in HVM and there are some extra mappings:
+ * The qemu helper process has an untyped mapping of this dom's RAM
+ * and the HVM restore program takes another. */
if ( !(shadow_mode_external(v->domain)
- && (page->count_info & PGC_count_mask) <= 2
+ && (page->count_info & PGC_count_mask) <= 3
&& (page->u.inuse.type_info & PGT_count_mask) == 0) )
{
SHADOW_ERROR("can't find all mappings of mfn %lx: "
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 0546cb1a66..8b8b8585a7 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -1627,7 +1627,7 @@ sh_make_shadow(struct vcpu *v, mfn_t gmfn, u32 shadow_type)
default: /* Do nothing */ break;
}
}
-
+
shadow_promote(v, gmfn, shadow_type);
set_shadow_status(v, gmfn, shadow_type, smfn);
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 28530c40d5..aa232585c8 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -24,6 +24,7 @@
#include <xen/percpu.h>
#include <xen/multicall.h>
#include <asm/debugger.h>
+#include <asm/hvm/support.h>
#include <public/sched.h>
#include <public/vcpu.h>
#ifdef CONFIG_COMPAT
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index b9cd57e949..4494c84494 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -215,6 +215,39 @@ ret_t do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
}
break;
+ case XEN_DOMCTL_sethvmcontext:
+ {
+ struct hvm_domain_context *c;
+ struct domain *d;
+ struct vcpu *v;
+
+ ret = -ESRCH;
+ if ( (d = find_domain_by_id(op->domain)) == NULL )
+ break;
+
+ ret = -ENOMEM;
+ if ( (c = xmalloc(struct hvm_domain_context)) == NULL )
+ goto sethvmcontext_out;
+
+ v = d->vcpu[0];
+
+ ret = -EFAULT;
+
+#ifndef CONFIG_COMPAT
+ if ( copy_from_guest(c, op->u.hvmcontext.ctxt, 1) != 0 )
+ goto sethvmcontext_out;
+
+ ret = arch_sethvm_ctxt(v, c);
+#endif
+
+ xfree(c);
+
+ sethvmcontext_out:
+ put_domain(d);
+
+ }
+ break;
+
case XEN_DOMCTL_pausedomain:
{
struct domain *d = find_domain_by_id(op->domain);
@@ -552,6 +585,46 @@ ret_t do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
}
break;
+ case XEN_DOMCTL_gethvmcontext:
+ {
+ struct hvm_domain_context *c;
+ struct domain *d;
+ struct vcpu *v;
+
+ ret = -ESRCH;
+ if ( (d = find_domain_by_id(op->domain)) == NULL )
+ break;
+
+ ret = -ENOMEM;
+ if ( (c = xmalloc(struct hvm_domain_context)) == NULL )
+ goto gethvmcontext_out;
+
+ v = d->vcpu[0];
+
+ ret = -ENODATA;
+ if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
+ goto gethvmcontext_out;
+
+ ret = 0;
+ if (arch_gethvm_ctxt(v, c) == -1)
+ ret = -EFAULT;
+
+#ifndef CONFIG_COMPAT
+ if ( copy_to_guest(op->u.hvmcontext.ctxt, c, 1) )
+ ret = -EFAULT;
+
+ xfree(c);
+#endif
+
+ if ( copy_to_guest(u_domctl, op, 1) )
+ ret = -EFAULT;
+
+ gethvmcontext_out:
+ put_domain(d);
+
+ }
+ break;
+
case XEN_DOMCTL_getvcpuinfo:
{
struct domain *d;
diff --git a/xen/include/asm-ia64/config.h b/xen/include/asm-ia64/config.h
index e1306d744b..063e6e33eb 100644
--- a/xen/include/asm-ia64/config.h
+++ b/xen/include/asm-ia64/config.h
@@ -8,8 +8,9 @@
// manufactured from component pieces
// defined in linux/arch/ia64/defconfig
-//#define CONFIG_IA64_GENERIC
-#define CONFIG_IA64_HP_SIM
+#define CONFIG_IA64_GENERIC
+#define CONFIG_HZ 32
+
#define CONFIG_IA64_L1_CACHE_SHIFT 7
// needed by include/asm-ia64/page.h
#define CONFIG_IA64_PAGE_SIZE_16KB // 4KB doesn't work?!?
@@ -51,6 +52,9 @@
#define CONFIG_VGA 1
+
+#define NR_hypercalls 64
+
#ifndef __ASSEMBLY__
// can't find where this typedef was before?!?
@@ -106,9 +110,6 @@ extern char _end[]; /* standard ELF symbol */
// xen/include/asm/config.h
//#define HZ 1000
// FIXME SMP: leave SMP for a later time
-
-#define NR_hypercalls 64
-
///////////////////////////////////////////////////////////////
// xen/include/asm/config.h
// Natural boundary upon TR size to define xenheap space
@@ -145,14 +146,6 @@ extern int smp_num_siblings;
// avoid redefining task_struct in asm/current.h
#define task_struct vcpu
-// linux/include/asm-ia64/machvec.h (linux/arch/ia64/lib/io.c)
-#define platform_inb __ia64_inb
-#define platform_inw __ia64_inw
-#define platform_inl __ia64_inl
-#define platform_outb __ia64_outb
-#define platform_outw __ia64_outw
-#define platform_outl __ia64_outl
-
#include <xen/cache.h>
#ifndef CONFIG_SMP
#define __cacheline_aligned_in_smp
@@ -206,6 +199,16 @@ void sort_main_extable(void);
// Deprivated linux inf and put here for short time compatibility
#define kmalloc(s, t) xmalloc_bytes((s))
#define kfree(s) xfree((s))
+#define kzalloc(size, flags) \
+({ \
+ unsigned char *mem; \
+ mem = (unsigned char *)xmalloc_bytes(size); \
+ if (mem) \
+ memset(mem, 0, size); \
+ (void *)mem; \
+})
+#define kcalloc(n, size, flags) kzalloc(n * size, flags)
+#define alloc_bootmem_node(pg, size) xmalloc_bytes(size)
// see common/keyhandler.c
#define nop() asm volatile ("nop 0")
diff --git a/xen/include/asm-ia64/domain.h b/xen/include/asm-ia64/domain.h
index b327bcb27b..2090908bf2 100644
--- a/xen/include/asm-ia64/domain.h
+++ b/xen/include/asm-ia64/domain.h
@@ -21,6 +21,7 @@ struct tlb_track;
extern void domain_relinquish_resources(struct domain *);
struct vcpu;
extern void relinquish_vcpu_resources(struct vcpu *v);
+extern int vcpu_late_initialise(struct vcpu *v);
/* given a current domain metaphysical address, return the physical address */
extern unsigned long translate_domain_mpaddr(unsigned long mpaddr,
@@ -78,6 +79,9 @@ struct arch_domain {
};
};
+ /* maximum metaphysical address of conventional memory */
+ u64 convmem_end;
+
/* Allowed accesses to io ports. */
struct rangeset *ioport_caps;
diff --git a/xen/include/asm-ia64/hypercall.h b/xen/include/asm-ia64/hypercall.h
index 6e4a0320fd..ed0af4678d 100644
--- a/xen/include/asm-ia64/hypercall.h
+++ b/xen/include/asm-ia64/hypercall.h
@@ -13,6 +13,8 @@ extern long
do_event_channel_op_compat(
XEN_GUEST_HANDLE(evtchn_op_t) uop);
+extern long do_pirq_guest_eoi(int pirq);
+
extern int
vmx_do_mmu_update(
mmu_update_t *ureqs,
diff --git a/xen/include/asm-ia64/linux-null/linux/ioport.h b/xen/include/asm-ia64/linux-null/asm/nmi.h
index 29f4f4cd69..29f4f4cd69 100644
--- a/xen/include/asm-ia64/linux-null/linux/ioport.h
+++ b/xen/include/asm-ia64/linux-null/asm/nmi.h
diff --git a/xen/include/asm-ia64/linux-null/linux/dmapool.h b/xen/include/asm-ia64/linux-null/linux/dmapool.h
new file mode 100644
index 0000000000..29f4f4cd69
--- /dev/null
+++ b/xen/include/asm-ia64/linux-null/linux/dmapool.h
@@ -0,0 +1 @@
+/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-null/linux/rwsem.h b/xen/include/asm-ia64/linux-null/linux/rwsem.h
new file mode 100644
index 0000000000..29f4f4cd69
--- /dev/null
+++ b/xen/include/asm-ia64/linux-null/linux/rwsem.h
@@ -0,0 +1 @@
+/* This file is intentionally left empty. */
diff --git a/xen/include/asm-ia64/linux-xen/asm/README.origin b/xen/include/asm-ia64/linux-xen/asm/README.origin
index f8d8375770..23aa5b3ef4 100644
--- a/xen/include/asm-ia64/linux-xen/asm/README.origin
+++ b/xen/include/asm-ia64/linux-xen/asm/README.origin
@@ -34,3 +34,10 @@ iosapic.h -> linux/include/asm-ia64/iosapic.h
# The files below are from Linux-2.6.16.33
perfmon.h -> linux/include/asm-ia64/perfmon.h
perfmon_default_smpl.h -> linux/include/asm-ia64/perfmon_default_smpl.h
+
+# The files below are from Linux-2.6.19
+machvec.h -> linux/include/asm-ia64/machvec.h
+machvec_dig.h -> linux/include/asm-ia64/machvec_dig.h
+machvec_sn2.h -> linux/include/asm-ia64/machvec_sn2.h
+machvec_hpzx1.h -> linux/include/asm-ia64/machvec_hpzx1.h
+machvec_pci.h -> linux/include/asm-ia64/pci.h
diff --git a/xen/include/asm-ia64/linux/asm/machvec.h b/xen/include/asm-ia64/linux-xen/asm/machvec.h
index 79e89a7db5..fd1f883b63 100644
--- a/xen/include/asm-ia64/linux/asm/machvec.h
+++ b/xen/include/asm-ia64/linux-xen/asm/machvec.h
@@ -85,6 +85,114 @@ machvec_noop_mm (struct mm_struct *mm)
{
}
+#ifdef XEN
+#include <xen/lib.h>
+/*
+ * These should never get called, they just fill out the machine
+ * vectors and make the compiler happy.
+ */
+static inline void*
+machvec_noop_dma_alloc_coherent (struct device *dev, size_t size,
+ dma_addr_t *addr, int dir)
+{
+ panic("%s() called", __FUNCTION__);
+ return (void *)0;
+}
+
+static inline void
+machvec_noop_dma_free_coherent (struct device *dev, size_t size,
+ void *vaddr, dma_addr_t handle)
+{
+ panic("%s() called", __FUNCTION__);
+}
+
+static inline dma_addr_t
+machvec_noop_dma_map_single (struct device *dev, void *addr,
+ size_t size, int dir)
+{
+ panic("%s() called", __FUNCTION__);
+ return (dma_addr_t)0;
+}
+
+static inline void
+machvec_noop_dma_unmap_single (struct device *dev, dma_addr_t vaddr,
+ size_t size, int dir)
+{
+ panic("%s() called", __FUNCTION__);
+}
+
+static inline int
+machvec_noop_dma_map_sg (struct device *dev, struct scatterlist *sglist,
+ int nents, int dir)
+{
+ panic("%s() called", __FUNCTION__);
+ return 0;
+}
+
+static inline void
+machvec_noop_dma_unmap_sg (struct device *dev, struct scatterlist *sglist,
+ int nents, int dir)
+{
+ panic("%s() called", __FUNCTION__);
+}
+
+static inline void
+machvec_noop_dma_sync_single_for_cpu (struct device *dev, dma_addr_t vaddr,
+ size_t size, int dir)
+{
+ panic("%s() called", __FUNCTION__);
+}
+
+#define machvec_noop_dma_sync_single_for_device \
+ machvec_noop_dma_sync_single_for_cpu
+
+static inline void
+machvec_noop_dma_sync_sg_for_cpu (struct device *dev,
+ struct scatterlist *sglist,
+ int nents, int dir)
+{
+ panic("%s() called", __FUNCTION__);
+}
+
+#define machvec_noop_dma_sync_sg_for_device \
+ machvec_noop_dma_sync_sg_for_cpu
+
+static inline int
+machvec_noop_dma_mapping_error (dma_addr_t dma_addr)
+{
+ panic("%s() called", __FUNCTION__);
+ return 1;
+}
+
+static inline int
+machvec_noop_dma_supported (struct device *dev, u64 mask)
+{
+ panic("%s() called", __FUNCTION__);
+ return 0;
+}
+
+static inline char*
+machvec_noop_pci_get_legacy_mem (struct pci_bus *bus)
+{
+ panic("%s() called", __FUNCTION__);
+ return 0;
+}
+
+static inline int
+machvec_noop_pci_legacy_read (struct pci_bus *bus, u16 port, u32 *val, u8 size)
+{
+ panic("%s() called", __FUNCTION__);
+ return 0;
+}
+
+static inline int
+machvec_noop_pci_legacy_write (struct pci_bus *bus, u16 port, u32 val, u8 size)
+{
+ panic("%s() called", __FUNCTION__);
+ return 0;
+}
+#endif
+
extern void machvec_setup (char **);
extern void machvec_timer_interrupt (int, void *, struct pt_regs *);
extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int);
diff --git a/xen/include/asm-ia64/linux-xen/asm/machvec_dig.h b/xen/include/asm-ia64/linux-xen/asm/machvec_dig.h
new file mode 100644
index 0000000000..a8b658720f
--- /dev/null
+++ b/xen/include/asm-ia64/linux-xen/asm/machvec_dig.h
@@ -0,0 +1,46 @@
+#ifndef _ASM_IA64_MACHVEC_DIG_h
+#define _ASM_IA64_MACHVEC_DIG_h
+
+extern ia64_mv_setup_t dig_setup;
+
+/*
+ * This stuff has dual use!
+ *
+ * For a generic kernel, the macros are used to initialize the
+ * platform's machvec structure. When compiling a non-generic kernel,
+ * the macros are used directly.
+ */
+#define platform_name "dig"
+#ifdef XEN
+/*
+ * All the World is a PC .... yay! yay! yay!
+ */
+extern ia64_mv_setup_t hpsim_setup;
+#define platform_setup hpsim_setup
+
+#define platform_dma_init machvec_noop
+#define platform_dma_alloc_coherent machvec_noop_dma_alloc_coherent
+#define platform_dma_free_coherent machvec_noop_dma_free_coherent
+#define platform_dma_map_single machvec_noop_dma_map_single
+#define platform_dma_unmap_single machvec_noop_dma_unmap_single
+#define platform_dma_map_sg machvec_noop_dma_map_sg
+#define platform_dma_unmap_sg machvec_noop_dma_unmap_sg
+#define platform_dma_sync_single_for_cpu \
+ machvec_noop_dma_sync_single_for_cpu
+#define platform_dma_sync_sg_for_cpu \
+ machvec_noop_dma_sync_sg_for_cpu
+#define platform_dma_sync_single_for_device \
+ machvec_noop_dma_sync_single_for_device
+#define platform_dma_sync_sg_for_device \
+ machvec_noop_dma_sync_sg_for_device
+#define platform_dma_mapping_error machvec_noop_dma_mapping_error
+#define platform_dma_supported machvec_noop_dma_supported
+
+#define platform_pci_get_legacy_mem machvec_noop_pci_get_legacy_mem
+#define platform_pci_legacy_read machvec_noop_pci_legacy_read
+#define platform_pci_legacy_write machvec_noop_pci_legacy_write
+#else
+#define platform_setup dig_setup
+#endif
+
+#endif /* _ASM_IA64_MACHVEC_DIG_h */
diff --git a/xen/include/asm-ia64/linux-xen/asm/machvec_hpzx1.h b/xen/include/asm-ia64/linux-xen/asm/machvec_hpzx1.h
new file mode 100644
index 0000000000..96a85ce2ce
--- /dev/null
+++ b/xen/include/asm-ia64/linux-xen/asm/machvec_hpzx1.h
@@ -0,0 +1,66 @@
+#ifndef _ASM_IA64_MACHVEC_HPZX1_h
+#define _ASM_IA64_MACHVEC_HPZX1_h
+
+extern ia64_mv_setup_t dig_setup;
+extern ia64_mv_dma_alloc_coherent sba_alloc_coherent;
+extern ia64_mv_dma_free_coherent sba_free_coherent;
+extern ia64_mv_dma_map_single sba_map_single;
+extern ia64_mv_dma_unmap_single sba_unmap_single;
+extern ia64_mv_dma_map_sg sba_map_sg;
+extern ia64_mv_dma_unmap_sg sba_unmap_sg;
+extern ia64_mv_dma_supported sba_dma_supported;
+extern ia64_mv_dma_mapping_error sba_dma_mapping_error;
+
+/*
+ * This stuff has dual use!
+ *
+ * For a generic kernel, the macros are used to initialize the
+ * platform's machvec structure. When compiling a non-generic kernel,
+ * the macros are used directly.
+ */
+#define platform_name "hpzx1"
+#ifdef XEN
+extern ia64_mv_setup_t hpsim_setup;
+extern ia64_mv_irq_init_t hpsim_irq_init;
+#define platform_setup hpsim_setup
+#define platform_irq_init hpsim_irq_init
+
+#define platform_dma_init machvec_noop
+#define platform_dma_alloc_coherent machvec_noop_dma_alloc_coherent
+#define platform_dma_free_coherent machvec_noop_dma_free_coherent
+#define platform_dma_map_single machvec_noop_dma_map_single
+#define platform_dma_unmap_single machvec_noop_dma_unmap_single
+#define platform_dma_map_sg machvec_noop_dma_map_sg
+#define platform_dma_unmap_sg machvec_noop_dma_unmap_sg
+#define platform_dma_sync_single_for_cpu \
+ machvec_noop_dma_sync_single_for_cpu
+#define platform_dma_sync_sg_for_cpu \
+ machvec_noop_dma_sync_sg_for_cpu
+#define platform_dma_sync_single_for_device \
+ machvec_noop_dma_sync_single_for_device
+#define platform_dma_sync_sg_for_device \
+ machvec_noop_dma_sync_sg_for_device
+#define platform_dma_mapping_error machvec_noop_dma_mapping_error
+#define platform_dma_supported machvec_noop_dma_supported
+
+#define platform_pci_get_legacy_mem machvec_noop_pci_get_legacy_mem
+#define platform_pci_legacy_read machvec_noop_pci_legacy_read
+#define platform_pci_legacy_write machvec_noop_pci_legacy_write
+#else
+#define platform_setup dig_setup
+#define platform_dma_init machvec_noop
+#define platform_dma_alloc_coherent sba_alloc_coherent
+#define platform_dma_free_coherent sba_free_coherent
+#define platform_dma_map_single sba_map_single
+#define platform_dma_unmap_single sba_unmap_single
+#define platform_dma_map_sg sba_map_sg
+#define platform_dma_unmap_sg sba_unmap_sg
+#define platform_dma_sync_single_for_cpu machvec_dma_sync_single
+#define platform_dma_sync_sg_for_cpu machvec_dma_sync_sg
+#define platform_dma_sync_single_for_device machvec_dma_sync_single
+#define platform_dma_sync_sg_for_device machvec_dma_sync_sg
+#define platform_dma_supported sba_dma_supported
+#define platform_dma_mapping_error sba_dma_mapping_error
+#endif
+
+#endif /* _ASM_IA64_MACHVEC_HPZX1_h */
diff --git a/xen/include/asm-ia64/linux-xen/asm/machvec_sn2.h b/xen/include/asm-ia64/linux-xen/asm/machvec_sn2.h
new file mode 100644
index 0000000000..4af484a7d9
--- /dev/null
+++ b/xen/include/asm-ia64/linux-xen/asm/machvec_sn2.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2002-2003,2006 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/NoticeExplan
+ */
+
+#ifndef _ASM_IA64_MACHVEC_SN2_H
+#define _ASM_IA64_MACHVEC_SN2_H
+
+extern ia64_mv_setup_t sn_setup;
+extern ia64_mv_cpu_init_t sn_cpu_init;
+extern ia64_mv_irq_init_t sn_irq_init;
+extern ia64_mv_send_ipi_t sn2_send_IPI;
+extern ia64_mv_timer_interrupt_t sn_timer_interrupt;
+extern ia64_mv_global_tlb_purge_t sn2_global_tlb_purge;
+extern ia64_mv_tlb_migrate_finish_t sn_tlb_migrate_finish;
+extern ia64_mv_local_vector_to_irq sn_local_vector_to_irq;
+extern ia64_mv_pci_get_legacy_mem_t sn_pci_get_legacy_mem;
+extern ia64_mv_pci_legacy_read_t sn_pci_legacy_read;
+extern ia64_mv_pci_legacy_write_t sn_pci_legacy_write;
+extern ia64_mv_inb_t __sn_inb;
+extern ia64_mv_inw_t __sn_inw;
+extern ia64_mv_inl_t __sn_inl;
+extern ia64_mv_outb_t __sn_outb;
+extern ia64_mv_outw_t __sn_outw;
+extern ia64_mv_outl_t __sn_outl;
+extern ia64_mv_mmiowb_t __sn_mmiowb;
+extern ia64_mv_readb_t __sn_readb;
+extern ia64_mv_readw_t __sn_readw;
+extern ia64_mv_readl_t __sn_readl;
+extern ia64_mv_readq_t __sn_readq;
+extern ia64_mv_readb_t __sn_readb_relaxed;
+extern ia64_mv_readw_t __sn_readw_relaxed;
+extern ia64_mv_readl_t __sn_readl_relaxed;
+extern ia64_mv_readq_t __sn_readq_relaxed;
+extern ia64_mv_dma_alloc_coherent sn_dma_alloc_coherent;
+extern ia64_mv_dma_free_coherent sn_dma_free_coherent;
+extern ia64_mv_dma_map_single sn_dma_map_single;
+extern ia64_mv_dma_unmap_single sn_dma_unmap_single;
+extern ia64_mv_dma_map_sg sn_dma_map_sg;
+extern ia64_mv_dma_unmap_sg sn_dma_unmap_sg;
+extern ia64_mv_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu;
+extern ia64_mv_dma_sync_sg_for_cpu sn_dma_sync_sg_for_cpu;
+extern ia64_mv_dma_sync_single_for_device sn_dma_sync_single_for_device;
+extern ia64_mv_dma_sync_sg_for_device sn_dma_sync_sg_for_device;
+extern ia64_mv_dma_mapping_error sn_dma_mapping_error;
+extern ia64_mv_dma_supported sn_dma_supported;
+#ifndef XEN
+extern ia64_mv_migrate_t sn_migrate;
+extern ia64_mv_setup_msi_irq_t sn_setup_msi_irq;
+extern ia64_mv_teardown_msi_irq_t sn_teardown_msi_irq;
+#endif
+
+
+/*
+ * This stuff has dual use!
+ *
+ * For a generic kernel, the macros are used to initialize the
+ * platform's machvec structure. When compiling a non-generic kernel,
+ * the macros are used directly.
+ */
+#define platform_name "sn2"
+#define platform_setup sn_setup
+#define platform_cpu_init sn_cpu_init
+#define platform_irq_init sn_irq_init
+#define platform_send_ipi sn2_send_IPI
+#ifndef XEN
+#define platform_timer_interrupt sn_timer_interrupt
+#endif
+#define platform_global_tlb_purge sn2_global_tlb_purge
+#ifndef XEN
+#define platform_tlb_migrate_finish sn_tlb_migrate_finish
+#endif
+#define platform_pci_fixup sn_pci_fixup
+#define platform_inb __sn_inb
+#define platform_inw __sn_inw
+#define platform_inl __sn_inl
+#define platform_outb __sn_outb
+#define platform_outw __sn_outw
+#define platform_outl __sn_outl
+#define platform_mmiowb __sn_mmiowb
+#define platform_readb __sn_readb
+#define platform_readw __sn_readw
+#define platform_readl __sn_readl
+#define platform_readq __sn_readq
+#define platform_readb_relaxed __sn_readb_relaxed
+#define platform_readw_relaxed __sn_readw_relaxed
+#define platform_readl_relaxed __sn_readl_relaxed
+#define platform_readq_relaxed __sn_readq_relaxed
+#define platform_local_vector_to_irq sn_local_vector_to_irq
+#ifdef XEN
+#define platform_pci_get_legacy_mem machvec_noop_pci_get_legacy_mem
+#define platform_pci_legacy_read machvec_noop_pci_legacy_read
+#define platform_pci_legacy_write machvec_noop_pci_legacy_write
+#else
+#define platform_pci_get_legacy_mem sn_pci_get_legacy_mem
+#define platform_pci_legacy_read sn_pci_legacy_read
+#define platform_pci_legacy_write sn_pci_legacy_write
+#endif
+#define platform_dma_init machvec_noop
+#ifdef XEN
+#define platform_dma_alloc_coherent machvec_noop_dma_alloc_coherent
+#define platform_dma_free_coherent machvec_noop_dma_free_coherent
+#define platform_dma_map_single machvec_noop_dma_map_single
+#define platform_dma_unmap_single machvec_noop_dma_unmap_single
+#define platform_dma_map_sg machvec_noop_dma_map_sg
+#define platform_dma_unmap_sg machvec_noop_dma_unmap_sg
+#define platform_dma_sync_single_for_cpu \
+ machvec_noop_dma_sync_single_for_cpu
+#define platform_dma_sync_sg_for_cpu \
+ machvec_noop_dma_sync_sg_for_cpu
+#define platform_dma_sync_single_for_device \
+ machvec_noop_dma_sync_single_for_device
+#define platform_dma_sync_sg_for_device machvec_noop_dma_sync_sg_for_device
+#define platform_dma_mapping_error machvec_noop_dma_mapping_error
+#define platform_dma_supported machvec_noop_dma_supported
+#else
+#define platform_dma_alloc_coherent sn_dma_alloc_coherent
+#define platform_dma_free_coherent sn_dma_free_coherent
+#define platform_dma_map_single sn_dma_map_single
+#define platform_dma_unmap_single sn_dma_unmap_single
+#define platform_dma_map_sg sn_dma_map_sg
+#define platform_dma_unmap_sg sn_dma_unmap_sg
+#define platform_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu
+#define platform_dma_sync_sg_for_cpu sn_dma_sync_sg_for_cpu
+#define platform_dma_sync_single_for_device sn_dma_sync_single_for_device
+#define platform_dma_sync_sg_for_device sn_dma_sync_sg_for_device
+#define platform_dma_mapping_error sn_dma_mapping_error
+#define platform_dma_supported sn_dma_supported
+#define platform_migrate sn_migrate
+#endif
+
+#ifndef XEN
+#ifdef CONFIG_PCI_MSI
+#define platform_setup_msi_irq sn_setup_msi_irq
+#define platform_teardown_msi_irq sn_teardown_msi_irq
+#else
+#define platform_setup_msi_irq ((ia64_mv_setup_msi_irq_t*)NULL)
+#define platform_teardown_msi_irq ((ia64_mv_teardown_msi_irq_t*)NULL)
+#endif
+#endif
+
+#include <asm/sn/io.h>
+
+#endif /* _ASM_IA64_MACHVEC_SN2_H */
diff --git a/xen/include/asm-ia64/linux-xen/asm/page.h b/xen/include/asm-ia64/linux-xen/asm/page.h
index 84e1c320d0..28ec059605 100644
--- a/xen/include/asm-ia64/linux-xen/asm/page.h
+++ b/xen/include/asm-ia64/linux-xen/asm/page.h
@@ -12,6 +12,16 @@
#include <asm/intrinsics.h>
#include <asm/types.h>
+#ifdef XEN /* This will go away with newer upstream */
+#define RGN_SHIFT 61
+#define RGN_BASE(r) (r << RGN_SHIFT)
+#define RGN_BITS RGN_BASE(-1)
+#define RGN_HPAGE REGION_HPAGE
+#ifndef CONFIG_HUGETLB_PAGE
+# define REGION_HPAGE (4UL)
+#endif
+#endif
+
/*
* PAGE_SHIFT determines the actual kernel page size.
*/
diff --git a/xen/include/asm-ia64/linux/asm/pci.h b/xen/include/asm-ia64/linux-xen/asm/pci.h
index dba9f220be..0e4e0d85d1 100644
--- a/xen/include/asm-ia64/linux/asm/pci.h
+++ b/xen/include/asm-ia64/linux-xen/asm/pci.h
@@ -6,9 +6,14 @@
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
+#ifdef XEN
+#include <linux/ioport.h>
+#endif
#include <asm/io.h>
+#ifndef XEN
#include <asm/scatterlist.h>
+#endif
/*
* Can be used to override the logic in pci_scan_bus for skipping already-configured bus
@@ -55,7 +60,9 @@ pcibios_penalize_isa_irq (int irq, int active)
#define HAVE_ARCH_PCI_MWI 1
extern int pcibios_prep_mwi (struct pci_dev *);
+#ifndef XEN
#include <asm-generic/pci-dma-compat.h>
+#endif
/* pci_unmap_{single,page} is not a nop, thus... */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
@@ -107,6 +114,7 @@ extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
#define HAVE_PCI_LEGACY
extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
struct vm_area_struct *vma);
+#ifndef XEN
extern ssize_t pci_read_legacy_io(struct kobject *kobj, char *buf, loff_t off,
size_t count);
extern ssize_t pci_write_legacy_io(struct kobject *kobj, char *buf, loff_t off,
@@ -114,6 +122,7 @@ extern ssize_t pci_write_legacy_io(struct kobject *kobj, char *buf, loff_t off,
extern int pci_mmap_legacy_mem(struct kobject *kobj,
struct bin_attribute *attr,
struct vm_area_struct *vma);
+#endif
#define pci_get_legacy_mem platform_pci_get_legacy_mem
#define pci_legacy_read platform_pci_legacy_read
@@ -156,6 +165,21 @@ extern void pcibios_resource_to_bus(struct pci_dev *dev,
extern void pcibios_bus_to_resource(struct pci_dev *dev,
struct resource *res, struct pci_bus_region *region);
+#ifndef XEN
+static inline struct resource *
+pcibios_select_root(struct pci_dev *pdev, struct resource *res)
+{
+ struct resource *root = NULL;
+
+ if (res->flags & IORESOURCE_IO)
+ root = &ioport_resource;
+ if (res->flags & IORESOURCE_MEM)
+ root = &iomem_resource;
+
+ return root;
+}
+#endif
+
#define pcibios_scan_all_fns(a, b) 0
#endif /* _ASM_IA64_PCI_H */
diff --git a/xen/include/asm-ia64/linux-xen/asm/sn/README.origin b/xen/include/asm-ia64/linux-xen/asm/sn/README.origin
new file mode 100644
index 0000000000..762d1f5a9c
--- /dev/null
+++ b/xen/include/asm-ia64/linux-xen/asm/sn/README.origin
@@ -0,0 +1,16 @@
+# Source files in this directory are near-identical copies of linux-2.6.19
+# files:
+
+# NOTE: ALL changes to these files should be clearly marked
+# (e.g. with #ifdef XEN or XEN in a comment) so that they can be
+# easily updated to future versions of the corresponding Linux files.
+
+addrs.h -> linux/include/asm-ia64/sn/addrs.h
+arch.h -> linux/include/asm-ia64/sn/arch.h
+hubdev.h -> linux/arch/ia64/sn/include/xtalk/hubdev.h
+intr.h -> linux/include/asm-ia64/sn/intr.h
+io.h -> linux/include/asm-ia64/sn/io.h
+nodepda.h -> linux/include/asm-ia64/sn/nodepda.h
+pcibr_provider.h -> linux/include/asm-ia64/sn/pcibr_provider.h
+rw_mmr.h -> linux/include/asm-ia64/sn/rw_mmr.h
+types.h -> linux/include/asm-ia64/sn/types.h
diff --git a/xen/include/asm-ia64/linux-xen/asm/sn/addrs.h b/xen/include/asm-ia64/linux-xen/asm/sn/addrs.h
new file mode 100644
index 0000000000..531a880a7b
--- /dev/null
+++ b/xen/include/asm-ia64/linux-xen/asm/sn/addrs.h
@@ -0,0 +1,299 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1992-1999,2001-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_ADDRS_H
+#define _ASM_IA64_SN_ADDRS_H
+
+#include <asm/percpu.h>
+#include <asm/sn/types.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/pda.h>
+
+/*
+ * Memory/SHUB Address Format:
+ * +-+---------+--+--------------+
+ * |0| NASID |AS| NodeOffset |
+ * +-+---------+--+--------------+
+ *
+ * NASID: (low NASID bit is 0) Memory and SHUB MMRs
+ * AS: 2-bit Address Space Identifier. Used only if low NASID bit is 0
+ * 00: Local Resources and MMR space
+ * Top bit of NodeOffset
+ * 0: Local resources space
+ * node id:
+ * 0: IA64/NT compatibility space
+ * 2: Local MMR Space
+ * 4: Local memory, regardless of local node id
+ * 1: Global MMR space
+ * 01: GET space.
+ * 10: AMO space.
+ * 11: Cacheable memory space.
+ *
+ * NodeOffset: byte offset
+ *
+ *
+ * TIO address format:
+ * +-+----------+--+--------------+
+ * |0| NASID |AS| Nodeoffset |
+ * +-+----------+--+--------------+
+ *
+ * NASID: (low NASID bit is 1) TIO
+ * AS: 2-bit Chiplet Identifier
+ * 00: TIO LB (Indicates TIO MMR access.)
+ * 01: TIO ICE (indicates coretalk space access.)
+ *
+ * NodeOffset: top bit must be set.
+ *
+ *
+ * Note that in both of the above address formats, the low
+ * NASID bit indicates if the reference is to the SHUB or TIO MMRs.
+ */
+
+
+/*
+ * Define basic shift & mask constants for manipulating NASIDs and AS values.
+ */
+#define NASID_BITMASK (sn_hub_info->nasid_bitmask)
+#define NASID_SHIFT (sn_hub_info->nasid_shift)
+#define AS_SHIFT (sn_hub_info->as_shift)
+#define AS_BITMASK 0x3UL
+
+#define NASID_MASK ((u64)NASID_BITMASK << NASID_SHIFT)
+#define AS_MASK ((u64)AS_BITMASK << AS_SHIFT)
+
+
+/*
+ * AS values. These are the same on both SHUB1 & SHUB2.
+ */
+#define AS_GET_VAL 1UL
+#define AS_AMO_VAL 2UL
+#define AS_CAC_VAL 3UL
+#define AS_GET_SPACE (AS_GET_VAL << AS_SHIFT)
+#define AS_AMO_SPACE (AS_AMO_VAL << AS_SHIFT)
+#define AS_CAC_SPACE (AS_CAC_VAL << AS_SHIFT)
+
+
+/*
+ * Virtual Mode Local & Global MMR space.
+ */
+#define SH1_LOCAL_MMR_OFFSET 0x8000000000UL
+#define SH2_LOCAL_MMR_OFFSET 0x0200000000UL
+#define LOCAL_MMR_OFFSET (is_shub2() ? SH2_LOCAL_MMR_OFFSET : SH1_LOCAL_MMR_OFFSET)
+#define LOCAL_MMR_SPACE (__IA64_UNCACHED_OFFSET | LOCAL_MMR_OFFSET)
+#define LOCAL_PHYS_MMR_SPACE (RGN_BASE(RGN_HPAGE) | LOCAL_MMR_OFFSET)
+
+#define SH1_GLOBAL_MMR_OFFSET 0x0800000000UL
+#define SH2_GLOBAL_MMR_OFFSET 0x0300000000UL
+#define GLOBAL_MMR_OFFSET (is_shub2() ? SH2_GLOBAL_MMR_OFFSET : SH1_GLOBAL_MMR_OFFSET)
+#define GLOBAL_MMR_SPACE (__IA64_UNCACHED_OFFSET | GLOBAL_MMR_OFFSET)
+
+/*
+ * Physical mode addresses
+ */
+#define GLOBAL_PHYS_MMR_SPACE (RGN_BASE(RGN_HPAGE) | GLOBAL_MMR_OFFSET)
+
+
+/*
+ * Clear region & AS bits.
+ */
+#define TO_PHYS_MASK (~(RGN_BITS | AS_MASK))
+
+
+/*
+ * Misc NASID manipulation.
+ */
+#define NASID_SPACE(n) ((u64)(n) << NASID_SHIFT)
+#define REMOTE_ADDR(n,a) (NASID_SPACE(n) | (a))
+#define NODE_OFFSET(x) ((x) & (NODE_ADDRSPACE_SIZE - 1))
+#define NODE_ADDRSPACE_SIZE (1UL << AS_SHIFT)
+#define NASID_GET(x) (int) (((u64) (x) >> NASID_SHIFT) & NASID_BITMASK)
+#define LOCAL_MMR_ADDR(a) (LOCAL_MMR_SPACE | (a))
+#define GLOBAL_MMR_ADDR(n,a) (GLOBAL_MMR_SPACE | REMOTE_ADDR(n,a))
+#define GLOBAL_MMR_PHYS_ADDR(n,a) (GLOBAL_PHYS_MMR_SPACE | REMOTE_ADDR(n,a))
+#define GLOBAL_CAC_ADDR(n,a) (CAC_BASE | REMOTE_ADDR(n,a))
+#define CHANGE_NASID(n,x) ((void *)(((u64)(x) & ~NASID_MASK) | NASID_SPACE(n)))
+#define IS_TIO_NASID(n) ((n) & 1)
+
+
+/* non-II mmr's start at top of big window space (4G) */
+#define BWIN_TOP 0x0000000100000000UL
+
+/*
+ * general address defines
+ */
+#define CAC_BASE (PAGE_OFFSET | AS_CAC_SPACE)
+#define AMO_BASE (__IA64_UNCACHED_OFFSET | AS_AMO_SPACE)
+#define AMO_PHYS_BASE (RGN_BASE(RGN_HPAGE) | AS_AMO_SPACE)
+#define GET_BASE (PAGE_OFFSET | AS_GET_SPACE)
+
+/*
+ * Convert Memory addresses between various addressing modes.
+ */
+#define TO_PHYS(x) (TO_PHYS_MASK & (x))
+#define TO_CAC(x) (CAC_BASE | TO_PHYS(x))
+#if defined(CONFIG_SGI_SN) || defined(XEN)
+#define TO_AMO(x) (AMO_BASE | TO_PHYS(x))
+#define TO_GET(x) (GET_BASE | TO_PHYS(x))
+#else
+#define TO_AMO(x) ({ BUG(); x; })
+#define TO_GET(x) ({ BUG(); x; })
+#endif
+
+/*
+ * Covert from processor physical address to II/TIO physical address:
+ * II - squeeze out the AS bits
+ * TIO- requires a chiplet id in bits 38-39. For DMA to memory,
+ * the chiplet id is zero. If we implement TIO-TIO dma, we might need
+ * to insert a chiplet id into this macro. However, it is our belief
+ * right now that this chiplet id will be ICE, which is also zero.
+ */
+#define SH1_TIO_PHYS_TO_DMA(x) \
+ ((((u64)(NASID_GET(x))) << 40) | NODE_OFFSET(x))
+
+#define SH2_NETWORK_BANK_OFFSET(x) \
+ ((u64)(x) & ((1UL << (sn_hub_info->nasid_shift - 4)) -1))
+
+#define SH2_NETWORK_BANK_SELECT(x) \
+ ((((u64)(x) & (0x3UL << (sn_hub_info->nasid_shift - 4))) \
+ >> (sn_hub_info->nasid_shift - 4)) << 36)
+
+#define SH2_NETWORK_ADDRESS(x) \
+ (SH2_NETWORK_BANK_OFFSET(x) | SH2_NETWORK_BANK_SELECT(x))
+
+#define SH2_TIO_PHYS_TO_DMA(x) \
+ (((u64)(NASID_GET(x)) << 40) | SH2_NETWORK_ADDRESS(x))
+
+#define PHYS_TO_TIODMA(x) \
+ (is_shub1() ? SH1_TIO_PHYS_TO_DMA(x) : SH2_TIO_PHYS_TO_DMA(x))
+
+#define PHYS_TO_DMA(x) \
+ ((((u64)(x) & NASID_MASK) >> 2) | NODE_OFFSET(x))
+
+
+/*
+ * Macros to test for address type.
+ */
+#define IS_AMO_ADDRESS(x) (((u64)(x) & (RGN_BITS | AS_MASK)) == AMO_BASE)
+#define IS_AMO_PHYS_ADDRESS(x) (((u64)(x) & (RGN_BITS | AS_MASK)) == AMO_PHYS_BASE)
+
+
+/*
+ * The following definitions pertain to the IO special address
+ * space. They define the location of the big and little windows
+ * of any given node.
+ */
+#define BWIN_SIZE_BITS 29 /* big window size: 512M */
+#define TIO_BWIN_SIZE_BITS 30 /* big window size: 1G */
+#define NODE_SWIN_BASE(n, w) ((w == 0) ? NODE_BWIN_BASE((n), SWIN0_BIGWIN) \
+ : RAW_NODE_SWIN_BASE(n, w))
+#define TIO_SWIN_BASE(n, w) (TIO_IO_BASE(n) + \
+ ((u64) (w) << TIO_SWIN_SIZE_BITS))
+#define NODE_IO_BASE(n) (GLOBAL_MMR_SPACE | NASID_SPACE(n))
+#define TIO_IO_BASE(n) (__IA64_UNCACHED_OFFSET | NASID_SPACE(n))
+#define BWIN_SIZE (1UL << BWIN_SIZE_BITS)
+#define NODE_BWIN_BASE0(n) (NODE_IO_BASE(n) + BWIN_SIZE)
+#define NODE_BWIN_BASE(n, w) (NODE_BWIN_BASE0(n) + ((u64) (w) << BWIN_SIZE_BITS))
+#define RAW_NODE_SWIN_BASE(n, w) (NODE_IO_BASE(n) + ((u64) (w) << SWIN_SIZE_BITS))
+#define BWIN_WIDGET_MASK 0x7
+#define BWIN_WINDOWNUM(x) (((x) >> BWIN_SIZE_BITS) & BWIN_WIDGET_MASK)
+#define SH1_IS_BIG_WINDOW_ADDR(x) ((x) & BWIN_TOP)
+
+#define TIO_BWIN_WINDOW_SELECT_MASK 0x7
+#define TIO_BWIN_WINDOWNUM(x) (((x) >> TIO_BWIN_SIZE_BITS) & TIO_BWIN_WINDOW_SELECT_MASK)
+
+#define TIO_HWIN_SHIFT_BITS 33
+#define TIO_HWIN(x) (NODE_OFFSET(x) >> TIO_HWIN_SHIFT_BITS)
+
+/*
+ * The following definitions pertain to the IO special address
+ * space. They define the location of the big and little windows
+ * of any given node.
+ */
+
+#define SWIN_SIZE_BITS 24
+#define SWIN_WIDGET_MASK 0xF
+
+#define TIO_SWIN_SIZE_BITS 28
+#define TIO_SWIN_SIZE (1UL << TIO_SWIN_SIZE_BITS)
+#define TIO_SWIN_WIDGET_MASK 0x3
+
+/*
+ * Convert smallwindow address to xtalk address.
+ *
+ * 'addr' can be physical or virtual address, but will be converted
+ * to Xtalk address in the range 0 -> SWINZ_SIZEMASK
+ */
+#define SWIN_WIDGETNUM(x) (((x) >> SWIN_SIZE_BITS) & SWIN_WIDGET_MASK)
+#define TIO_SWIN_WIDGETNUM(x) (((x) >> TIO_SWIN_SIZE_BITS) & TIO_SWIN_WIDGET_MASK)
+
+
+/*
+ * The following macros produce the correct base virtual address for
+ * the hub registers. The REMOTE_HUB_* macro produce
+ * the address for the specified hub's registers. The intent is
+ * that the appropriate PI, MD, NI, or II register would be substituted
+ * for x.
+ *
+ * WARNING:
+ * When certain Hub chip workaround are defined, it's not sufficient
+ * to dereference the *_HUB_ADDR() macros. You should instead use
+ * HUB_L() and HUB_S() if you must deal with pointers to hub registers.
+ * Otherwise, the recommended approach is to use *_HUB_L() and *_HUB_S().
+ * They're always safe.
+ */
+/* Shub1 TIO & MMR addressing macros */
+#define SH1_TIO_IOSPACE_ADDR(n,x) \
+ GLOBAL_MMR_ADDR(n,x)
+
+#define SH1_REMOTE_BWIN_MMR(n,x) \
+ GLOBAL_MMR_ADDR(n,x)
+
+#define SH1_REMOTE_SWIN_MMR(n,x) \
+ (NODE_SWIN_BASE(n,1) + 0x800000UL + (x))
+
+#define SH1_REMOTE_MMR(n,x) \
+ (SH1_IS_BIG_WINDOW_ADDR(x) ? SH1_REMOTE_BWIN_MMR(n,x) : \
+ SH1_REMOTE_SWIN_MMR(n,x))
+
+/* Shub1 TIO & MMR addressing macros */
+#define SH2_TIO_IOSPACE_ADDR(n,x) \
+ ((__IA64_UNCACHED_OFFSET | REMOTE_ADDR(n,x) | 1UL << (NASID_SHIFT - 2)))
+
+#define SH2_REMOTE_MMR(n,x) \
+ GLOBAL_MMR_ADDR(n,x)
+
+
+/* TIO & MMR addressing macros that work on both shub1 & shub2 */
+#define TIO_IOSPACE_ADDR(n,x) \
+ ((u64 *)(is_shub1() ? SH1_TIO_IOSPACE_ADDR(n,x) : \
+ SH2_TIO_IOSPACE_ADDR(n,x)))
+
+#define SH_REMOTE_MMR(n,x) \
+ (is_shub1() ? SH1_REMOTE_MMR(n,x) : SH2_REMOTE_MMR(n,x))
+
+#define REMOTE_HUB_ADDR(n,x) \
+ (IS_TIO_NASID(n) ? ((volatile u64*)TIO_IOSPACE_ADDR(n,x)) : \
+ ((volatile u64*)SH_REMOTE_MMR(n,x)))
+
+
+#define HUB_L(x) (*((volatile typeof(*x) *)x))
+#define HUB_S(x,d) (*((volatile typeof(*x) *)x) = (d))
+
+#define REMOTE_HUB_L(n, a) HUB_L(REMOTE_HUB_ADDR((n), (a)))
+#define REMOTE_HUB_S(n, a, d) HUB_S(REMOTE_HUB_ADDR((n), (a)), (d))
+
+/*
+ * Coretalk address breakdown
+ */
+#define CTALK_NASID_SHFT 40
+#define CTALK_NASID_MASK (0x3FFFULL << CTALK_NASID_SHFT)
+#define CTALK_CID_SHFT 38
+#define CTALK_CID_MASK (0x3ULL << CTALK_CID_SHFT)
+#define CTALK_NODE_OFFSET 0x3FFFFFFFFF
+
+#endif /* _ASM_IA64_SN_ADDRS_H */
diff --git a/xen/include/asm-ia64/linux-xen/asm/sn/arch.h b/xen/include/asm-ia64/linux-xen/asm/sn/arch.h
new file mode 100644
index 0000000000..a159c728b9
--- /dev/null
+++ b/xen/include/asm-ia64/linux-xen/asm/sn/arch.h
@@ -0,0 +1,92 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * SGI specific setup.
+ *
+ * Copyright (C) 1995-1997,1999,2001-2005 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org)
+ */
+#ifndef _ASM_IA64_SN_ARCH_H
+#define _ASM_IA64_SN_ARCH_H
+
+#ifndef XEN
+#include <linux/numa.h>
+#include <asm/types.h>
+#include <asm/percpu.h>
+#include <asm/sn/types.h>
+#endif
+#include <asm/sn/sn_cpuid.h>
+
+/*
+ * This is the maximum number of NUMALINK nodes that can be part of a single
+ * SSI kernel. This number includes C-brick, M-bricks, and TIOs. Nodes in
+ * remote partitions are NOT included in this number.
+ * The number of compact nodes cannot exceed size of a coherency domain.
+ * The purpose of this define is to specify a node count that includes
+ * all C/M/TIO nodes in an SSI system.
+ *
+ * SGI system can currently support up to 256 C/M nodes plus additional TIO nodes.
+ *
+ * Note: ACPI20 has an architectural limit of 256 nodes. When we upgrade
+ * to ACPI3.0, this limit will be removed. The notion of "compact nodes"
+ * should be deleted and TIOs should be included in MAX_NUMNODES.
+ */
+#define MAX_TIO_NODES MAX_NUMNODES
+#define MAX_COMPACT_NODES (MAX_NUMNODES + MAX_TIO_NODES)
+
+/*
+ * Maximum number of nodes in all partitions and in all coherency domains.
+ * This is the total number of nodes accessible in the numalink fabric. It
+ * includes all C & M bricks, plus all TIOs.
+ *
+ * This value is also the value of the maximum number of NASIDs in the numalink
+ * fabric.
+ */
+#define MAX_NUMALINK_NODES 16384
+
+/*
+ * The following defines attributes of the HUB chip. These attributes are
+ * frequently referenced. They are kept in the per-cpu data areas of each cpu.
+ * They are kept together in a struct to minimize cache misses.
+ */
+struct sn_hub_info_s {
+ u8 shub2;
+ u8 nasid_shift;
+ u8 as_shift;
+ u8 shub_1_1_found;
+ u16 nasid_bitmask;
+};
+DECLARE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
+#define sn_hub_info (&__get_cpu_var(__sn_hub_info))
+#ifndef XEN
+#define is_shub2() (sn_hub_info->shub2)
+#define is_shub1() (sn_hub_info->shub2 == 0)
+#else
+#define is_shub2() 0
+#define is_shub1() 1
+#endif
+
+/*
+ * Use this macro to test if shub 1.1 wars should be enabled
+ */
+#define enable_shub_wars_1_1() (sn_hub_info->shub_1_1_found)
+
+
+/*
+ * Compact node ID to nasid mappings kept in the per-cpu data areas of each
+ * cpu.
+ */
+DECLARE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]);
+#define sn_cnodeid_to_nasid (&__get_cpu_var(__sn_cnodeid_to_nasid[0]))
+
+#ifndef XEN
+extern u8 sn_partition_id;
+extern u8 sn_system_size;
+extern u8 sn_sharing_domain_size;
+extern u8 sn_region_size;
+
+extern void sn_flush_all_caches(long addr, long bytes);
+#endif
+#endif /* _ASM_IA64_SN_ARCH_H */
diff --git a/xen/include/asm-ia64/linux-xen/asm/sn/hubdev.h b/xen/include/asm-ia64/linux-xen/asm/sn/hubdev.h
new file mode 100644
index 0000000000..96bbfe2a1f
--- /dev/null
+++ b/xen/include/asm-ia64/linux-xen/asm/sn/hubdev.h
@@ -0,0 +1,95 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_XTALK_HUBDEV_H
+#define _ASM_IA64_SN_XTALK_HUBDEV_H
+
+#ifndef XEN
+#include "xtalk/xwidgetdev.h"
+#else
+#include <asm/sn/xwidgetdev.h>
+#endif
+
+#define HUB_WIDGET_ID_MAX 0xf
+#define DEV_PER_WIDGET (2*2*8)
+#define IIO_ITTE_WIDGET_BITS 4 /* size of widget field */
+#define IIO_ITTE_WIDGET_MASK ((1<<IIO_ITTE_WIDGET_BITS)-1)
+#define IIO_ITTE_WIDGET_SHIFT 8
+
+#define IIO_ITTE_WIDGET(itte) \
+ (((itte) >> IIO_ITTE_WIDGET_SHIFT) & IIO_ITTE_WIDGET_MASK)
+
+/*
+ * Use the top big window as a surrogate for the first small window
+ */
+#define SWIN0_BIGWIN HUB_NUM_BIG_WINDOW
+#define IIO_NUM_ITTES 7
+#define HUB_NUM_BIG_WINDOW (IIO_NUM_ITTES - 1)
+
+/* This struct is shared between the PROM and the kernel.
+ * Changes to this struct will require corresponding changes to the kernel.
+ */
+struct sn_flush_device_common {
+ int sfdl_bus;
+ int sfdl_slot;
+ int sfdl_pin;
+ struct common_bar_list {
+ unsigned long start;
+ unsigned long end;
+ } sfdl_bar_list[6];
+ unsigned long sfdl_force_int_addr;
+ unsigned long sfdl_flush_value;
+ volatile unsigned long *sfdl_flush_addr;
+ u32 sfdl_persistent_busnum;
+ u32 sfdl_persistent_segment;
+ struct pcibus_info *sfdl_pcibus_info;
+};
+
+/* This struct is kernel only and is not used by the PROM */
+struct sn_flush_device_kernel {
+ spinlock_t sfdl_flush_lock;
+ struct sn_flush_device_common *common;
+};
+
+/* 01/16/06 This struct is the old PROM/kernel struct and needs to be included
+ * for older official PROMs to function on the new kernel base. This struct
+ * will be removed when the next official PROM release occurs. */
+
+struct sn_flush_device_war {
+ struct sn_flush_device_common common;
+ u32 filler; /* older PROMs expect the default size of a spinlock_t */
+};
+
+/*
+ * **widget_p - Used as an array[wid_num][device] of sn_flush_device_kernel.
+ */
+struct sn_flush_nasid_entry {
+ struct sn_flush_device_kernel **widget_p; // Used as an array of wid_num
+ u64 iio_itte[8];
+};
+
+struct hubdev_info {
+ geoid_t hdi_geoid;
+ short hdi_nasid;
+ short hdi_peer_nasid; /* Dual Porting Peer */
+
+ struct sn_flush_nasid_entry hdi_flush_nasid_list;
+ struct xwidget_info hdi_xwidget_info[HUB_WIDGET_ID_MAX + 1];
+
+
+ void *hdi_nodepda;
+ void *hdi_node_vertex;
+ u32 max_segment_number;
+ u32 max_pcibus_number;
+};
+
+extern void hubdev_init_node(nodepda_t *, cnodeid_t);
+extern void hub_error_init(struct hubdev_info *);
+extern void ice_error_init(struct hubdev_info *);
+
+
+#endif /* _ASM_IA64_SN_XTALK_HUBDEV_H */
diff --git a/xen/include/asm-ia64/linux-xen/asm/sn/intr.h b/xen/include/asm-ia64/linux-xen/asm/sn/intr.h
new file mode 100644
index 0000000000..5e7ef5c993
--- /dev/null
+++ b/xen/include/asm-ia64/linux-xen/asm/sn/intr.h
@@ -0,0 +1,73 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2006 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_INTR_H
+#define _ASM_IA64_SN_INTR_H
+
+#ifndef XEN
+#include <linux/rcupdate.h>
+#else
+#include <linux/list.h>
+#endif
+#include <asm/sn/types.h>
+
+#define SGI_UART_VECTOR 0xe9
+
+/* Reserved IRQs : Note, not to exceed IA64_SN2_FIRST_DEVICE_VECTOR */
+#define SGI_XPC_ACTIVATE 0x30
+#define SGI_II_ERROR 0x31
+#define SGI_XBOW_ERROR 0x32
+#define SGI_PCIASIC_ERROR 0x33
+#define SGI_ACPI_SCI_INT 0x34
+#define SGI_TIOCA_ERROR 0x35
+#define SGI_TIO_ERROR 0x36
+#define SGI_TIOCX_ERROR 0x37
+#define SGI_MMTIMER_VECTOR 0x38
+#define SGI_XPC_NOTIFY 0xe7
+
+#define IA64_SN2_FIRST_DEVICE_VECTOR 0x3c
+#define IA64_SN2_LAST_DEVICE_VECTOR 0xe6
+
+#define SN2_IRQ_RESERVED 0x1
+#define SN2_IRQ_CONNECTED 0x2
+#define SN2_IRQ_SHARED 0x4
+
+// The SN PROM irq struct
+struct sn_irq_info {
+ struct sn_irq_info *irq_next; /* deprecated DO NOT USE */
+ short irq_nasid; /* Nasid IRQ is assigned to */
+ int irq_slice; /* slice IRQ is assigned to */
+ int irq_cpuid; /* kernel logical cpuid */
+ int irq_irq; /* the IRQ number */
+ int irq_int_bit; /* Bridge interrupt pin */
+ /* <0 means MSI */
+ u64 irq_xtalkaddr; /* xtalkaddr IRQ is sent to */
+ int irq_bridge_type;/* pciio asic type (pciio.h) */
+ void *irq_bridge; /* bridge generating irq */
+ void *irq_pciioinfo; /* associated pciio_info_t */
+ int irq_last_intr; /* For Shub lb lost intr WAR */
+ int irq_cookie; /* unique cookie */
+ int irq_flags; /* flags */
+ int irq_share_cnt; /* num devices sharing IRQ */
+ struct list_head list; /* list of sn_irq_info structs */
+#ifndef XEN
+ struct rcu_head rcu; /* rcu callback list */
+#endif
+};
+
+extern void sn_send_IPI_phys(int, long, int, int);
+extern u64 sn_intr_alloc(nasid_t, int,
+ struct sn_irq_info *,
+ int, nasid_t, int);
+extern void sn_intr_free(nasid_t, int, struct sn_irq_info *);
+extern struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *, nasid_t, int);
+extern struct list_head **sn_irq_lh;
+
+#define CPU_VECTOR_TO_IRQ(cpuid,vector) (vector)
+
+#endif /* _ASM_IA64_SN_INTR_H */
diff --git a/xen/include/asm-ia64/linux-xen/asm/sn/io.h b/xen/include/asm-ia64/linux-xen/asm/sn/io.h
new file mode 100644
index 0000000000..78585a22cf
--- /dev/null
+++ b/xen/include/asm-ia64/linux-xen/asm/sn/io.h
@@ -0,0 +1,281 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_SN_IO_H
+#define _ASM_SN_IO_H
+#include <linux/compiler.h>
+#include <asm/intrinsics.h>
+
+extern void * sn_io_addr(unsigned long port) __attribute_const__; /* Forward definition */
+extern void __sn_mmiowb(void); /* Forward definition */
+
+extern int num_cnodes;
+
+#define __sn_mf_a() ia64_mfa()
+
+#ifdef XEN
+/*
+ * Xen doesn't deal with any PIC devices directly, it's all handled in dom0
+ */
+#define sn_dma_flush(foo) do {} while(0)
+#else
+extern void sn_dma_flush(unsigned long);
+#endif
+
+#define __sn_inb ___sn_inb
+#define __sn_inw ___sn_inw
+#define __sn_inl ___sn_inl
+#define __sn_outb ___sn_outb
+#define __sn_outw ___sn_outw
+#define __sn_outl ___sn_outl
+#define __sn_readb ___sn_readb
+#define __sn_readw ___sn_readw
+#define __sn_readl ___sn_readl
+#define __sn_readq ___sn_readq
+#define __sn_readb_relaxed ___sn_readb_relaxed
+#define __sn_readw_relaxed ___sn_readw_relaxed
+#define __sn_readl_relaxed ___sn_readl_relaxed
+#define __sn_readq_relaxed ___sn_readq_relaxed
+
+/*
+ * Convenience macros for setting/clearing bits using the above accessors
+ */
+
+#define __sn_setq_relaxed(addr, val) \
+ writeq((__sn_readq_relaxed(addr) | (val)), (addr))
+#define __sn_clrq_relaxed(addr, val) \
+ writeq((__sn_readq_relaxed(addr) & ~(val)), (addr))
+
+/*
+ * The following routines are SN Platform specific, called when
+ * a reference is made to inX/outX set macros. SN Platform
+ * inX set of macros ensures that Posted DMA writes on the
+ * Bridge is flushed.
+ *
+ * The routines should be self explanatory.
+ */
+
+static inline unsigned int
+___sn_inb (unsigned long port)
+{
+ volatile unsigned char *addr;
+ unsigned char ret = -1;
+
+ if ((addr = sn_io_addr(port))) {
+ ret = *addr;
+ __sn_mf_a();
+ sn_dma_flush((unsigned long)addr);
+ }
+ return ret;
+}
+
+static inline unsigned int
+___sn_inw (unsigned long port)
+{
+ volatile unsigned short *addr;
+ unsigned short ret = -1;
+
+ if ((addr = sn_io_addr(port))) {
+ ret = *addr;
+ __sn_mf_a();
+ sn_dma_flush((unsigned long)addr);
+ }
+ return ret;
+}
+
+static inline unsigned int
+___sn_inl (unsigned long port)
+{
+ volatile unsigned int *addr;
+ unsigned int ret = -1;
+
+ if ((addr = sn_io_addr(port))) {
+ ret = *addr;
+ __sn_mf_a();
+ sn_dma_flush((unsigned long)addr);
+ }
+ return ret;
+}
+
+static inline void
+___sn_outb (unsigned char val, unsigned long port)
+{
+ volatile unsigned char *addr;
+
+ if ((addr = sn_io_addr(port))) {
+ *addr = val;
+ __sn_mmiowb();
+ }
+}
+
+static inline void
+___sn_outw (unsigned short val, unsigned long port)
+{
+ volatile unsigned short *addr;
+
+ if ((addr = sn_io_addr(port))) {
+ *addr = val;
+ __sn_mmiowb();
+ }
+}
+
+static inline void
+___sn_outl (unsigned int val, unsigned long port)
+{
+ volatile unsigned int *addr;
+
+ if ((addr = sn_io_addr(port))) {
+ *addr = val;
+ __sn_mmiowb();
+ }
+}
+
+/*
+ * The following routines are SN Platform specific, called when
+ * a reference is made to readX/writeX set macros. SN Platform
+ * readX set of macros ensures that Posted DMA writes on the
+ * Bridge is flushed.
+ *
+ * The routines should be self explanatory.
+ */
+
+static inline unsigned char
+___sn_readb (const volatile void __iomem *addr)
+{
+ unsigned char val;
+
+ val = *(volatile unsigned char __force *)addr;
+ __sn_mf_a();
+ sn_dma_flush((unsigned long)addr);
+ return val;
+}
+
+static inline unsigned short
+___sn_readw (const volatile void __iomem *addr)
+{
+ unsigned short val;
+
+ val = *(volatile unsigned short __force *)addr;
+ __sn_mf_a();
+ sn_dma_flush((unsigned long)addr);
+ return val;
+}
+
+static inline unsigned int
+___sn_readl (const volatile void __iomem *addr)
+{
+ unsigned int val;
+
+ val = *(volatile unsigned int __force *)addr;
+ __sn_mf_a();
+ sn_dma_flush((unsigned long)addr);
+ return val;
+}
+
+static inline unsigned long
+___sn_readq (const volatile void __iomem *addr)
+{
+ unsigned long val;
+
+ val = *(volatile unsigned long __force *)addr;
+ __sn_mf_a();
+ sn_dma_flush((unsigned long)addr);
+ return val;
+}
+
+/*
+ * For generic and SN2 kernels, we have a set of fast access
+ * PIO macros. These macros are provided on SN Platform
+ * because the normal inX and readX macros perform an
+ * additional task of flushing Post DMA request on the Bridge.
+ *
+ * These routines should be self explanatory.
+ */
+
+static inline unsigned int
+sn_inb_fast (unsigned long port)
+{
+ volatile unsigned char *addr = (unsigned char *)port;
+ unsigned char ret;
+
+ ret = *addr;
+ __sn_mf_a();
+ return ret;
+}
+
+static inline unsigned int
+sn_inw_fast (unsigned long port)
+{
+ volatile unsigned short *addr = (unsigned short *)port;
+ unsigned short ret;
+
+ ret = *addr;
+ __sn_mf_a();
+ return ret;
+}
+
+static inline unsigned int
+sn_inl_fast (unsigned long port)
+{
+ volatile unsigned int *addr = (unsigned int *)port;
+ unsigned int ret;
+
+ ret = *addr;
+ __sn_mf_a();
+ return ret;
+}
+
+static inline unsigned char
+___sn_readb_relaxed (const volatile void __iomem *addr)
+{
+ return *(volatile unsigned char __force *)addr;
+}
+
+static inline unsigned short
+___sn_readw_relaxed (const volatile void __iomem *addr)
+{
+ return *(volatile unsigned short __force *)addr;
+}
+
+static inline unsigned int
+___sn_readl_relaxed (const volatile void __iomem *addr)
+{
+ return *(volatile unsigned int __force *) addr;
+}
+
+static inline unsigned long
+___sn_readq_relaxed (const volatile void __iomem *addr)
+{
+ return *(volatile unsigned long __force *) addr;
+}
+
+struct pci_dev;
+
+static inline int
+sn_pci_set_vchan(struct pci_dev *pci_dev, unsigned long *addr, int vchan)
+{
+
+ if (vchan > 1) {
+ return -1;
+ }
+
+ if (!(*addr >> 32)) /* Using a mask here would be cleaner */
+ return 0; /* but this generates better code */
+
+ if (vchan == 1) {
+ /* Set Bit 57 */
+ *addr |= (1UL << 57);
+ } else {
+ /* Clear Bit 57 */
+ *addr &= ~(1UL << 57);
+ }
+
+ return 0;
+}
+
+#endif /* _ASM_SN_IO_H */
diff --git a/xen/include/asm-ia64/linux-xen/asm/sn/nodepda.h b/xen/include/asm-ia64/linux-xen/asm/sn/nodepda.h
new file mode 100644
index 0000000000..ffa4302423
--- /dev/null
+++ b/xen/include/asm-ia64/linux-xen/asm/sn/nodepda.h
@@ -0,0 +1,87 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_NODEPDA_H
+#define _ASM_IA64_SN_NODEPDA_H
+
+
+#include <asm/semaphore.h>
+#include <asm/irq.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/intr.h>
+#ifndef XEN
+#include <asm/sn/bte.h>
+#endif
+
+/*
+ * NUMA Node-Specific Data structures are defined in this file.
+ * In particular, this is the location of the node PDA.
+ * A pointer to the right node PDA is saved in each CPU PDA.
+ */
+
+/*
+ * Node-specific data structure.
+ *
+ * One of these structures is allocated on each node of a NUMA system.
+ *
+ * This structure provides a convenient way of keeping together
+ * all per-node data structures.
+ */
+struct phys_cpuid {
+ short nasid;
+ char subnode;
+ char slice;
+};
+
+struct nodepda_s {
+ void *pdinfo; /* Platform-dependent per-node info */
+
+#ifndef XEN
+ /*
+ * The BTEs on this node are shared by the local cpus
+ */
+ struct bteinfo_s bte_if[MAX_BTES_PER_NODE]; /* Virtual Interface */
+ struct timer_list bte_recovery_timer;
+ spinlock_t bte_recovery_lock;
+#endif
+
+ /*
+ * Array of pointers to the nodepdas for each node.
+ */
+ struct nodepda_s *pernode_pdaindr[MAX_COMPACT_NODES];
+
+ /*
+ * Array of physical cpu identifiers. Indexed by cpuid.
+ */
+ struct phys_cpuid phys_cpuid[NR_CPUS];
+ spinlock_t ptc_lock ____cacheline_aligned_in_smp;
+};
+
+typedef struct nodepda_s nodepda_t;
+
+/*
+ * Access Functions for node PDA.
+ * Since there is one nodepda for each node, we need a convenient mechanism
+ * to access these nodepdas without cluttering code with #ifdefs.
+ * The next set of definitions provides this.
+ * Routines are expected to use
+ *
+ * sn_nodepda - to access node PDA for the node on which code is running
+ * NODEPDA(cnodeid) - to access node PDA for cnodeid
+ */
+
+DECLARE_PER_CPU(struct nodepda_s *, __sn_nodepda);
+#define sn_nodepda (__get_cpu_var(__sn_nodepda))
+#define NODEPDA(cnodeid) (sn_nodepda->pernode_pdaindr[cnodeid])
+
+/*
+ * Check if given a compact node id the corresponding node has all the
+ * cpus disabled.
+ */
+#define is_headless_node(cnodeid) (nr_cpus_node(cnodeid) == 0)
+
+#endif /* _ASM_IA64_SN_NODEPDA_H */
diff --git a/xen/include/asm-ia64/linux-xen/asm/sn/pcibr_provider.h b/xen/include/asm-ia64/linux-xen/asm/sn/pcibr_provider.h
new file mode 100644
index 0000000000..8f88ecd3a7
--- /dev/null
+++ b/xen/include/asm-ia64/linux-xen/asm/sn/pcibr_provider.h
@@ -0,0 +1,153 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992-1997,2000-2006 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H
+#define _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H
+
+#ifdef XEN
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#endif
+#include <asm/sn/intr.h>
+#include <asm/sn/pcibus_provider_defs.h>
+
+/* Workarounds */
+#define PV907516 (1 << 1) /* TIOCP: Don't write the write buffer flush reg */
+
+#define BUSTYPE_MASK 0x1
+
+/* Macros given a pcibus structure */
+#define IS_PCIX(ps) ((ps)->pbi_bridge_mode & BUSTYPE_MASK)
+#define IS_PCI_BRIDGE_ASIC(asic) (asic == PCIIO_ASIC_TYPE_PIC || \
+ asic == PCIIO_ASIC_TYPE_TIOCP)
+#define IS_PIC_SOFT(ps) (ps->pbi_bridge_type == PCIBR_BRIDGETYPE_PIC)
+
+
+/*
+ * The different PCI Bridge types supported on the SGI Altix platforms
+ */
+#define PCIBR_BRIDGETYPE_UNKNOWN -1
+#define PCIBR_BRIDGETYPE_PIC 2
+#define PCIBR_BRIDGETYPE_TIOCP 3
+
+/*
+ * Bridge 64bit Direct Map Attributes
+ */
+#define PCI64_ATTR_PREF (1ull << 59)
+#define PCI64_ATTR_PREC (1ull << 58)
+#define PCI64_ATTR_VIRTUAL (1ull << 57)
+#define PCI64_ATTR_BAR (1ull << 56)
+#define PCI64_ATTR_SWAP (1ull << 55)
+#define PCI64_ATTR_VIRTUAL1 (1ull << 54)
+
+#define PCI32_LOCAL_BASE 0
+#define PCI32_MAPPED_BASE 0x40000000
+#define PCI32_DIRECT_BASE 0x80000000
+
+#define IS_PCI32_MAPPED(x) ((u64)(x) < PCI32_DIRECT_BASE && \
+ (u64)(x) >= PCI32_MAPPED_BASE)
+#define IS_PCI32_DIRECT(x) ((u64)(x) >= PCI32_MAPPED_BASE)
+
+
+/*
+ * Bridge PMU Address Translation Entry Attributes
+ */
+#define PCI32_ATE_V (0x1 << 0)
+#define PCI32_ATE_CO (0x1 << 1)
+#define PCI32_ATE_PREC (0x1 << 2)
+#define PCI32_ATE_MSI (0x1 << 2)
+#define PCI32_ATE_PREF (0x1 << 3)
+#define PCI32_ATE_BAR (0x1 << 4)
+#define PCI32_ATE_ADDR_SHFT 12
+
+#define MINIMAL_ATES_REQUIRED(addr, size) \
+ (IOPG(IOPGOFF(addr) + (size) - 1) == IOPG((size) - 1))
+
+#define MINIMAL_ATE_FLAG(addr, size) \
+ (MINIMAL_ATES_REQUIRED((u64)addr, size) ? 1 : 0)
+
+/* bit 29 of the pci address is the SWAP bit */
+#define ATE_SWAPSHIFT 29
+#define ATE_SWAP_ON(x) ((x) |= (1 << ATE_SWAPSHIFT))
+#define ATE_SWAP_OFF(x) ((x) &= ~(1 << ATE_SWAPSHIFT))
+
+/*
+ * I/O page size
+ */
+#if PAGE_SIZE < 16384
+#define IOPFNSHIFT 12 /* 4K per mapped page */
+#else
+#define IOPFNSHIFT 14 /* 16K per mapped page */
+#endif
+
+#define IOPGSIZE (1 << IOPFNSHIFT)
+#define IOPG(x) ((x) >> IOPFNSHIFT)
+#define IOPGOFF(x) ((x) & (IOPGSIZE-1))
+
+#define PCIBR_DEV_SWAP_DIR (1ull << 19)
+#define PCIBR_CTRL_PAGE_SIZE (0x1 << 21)
+
+/*
+ * PMU resources.
+ */
+struct ate_resource{
+ u64 *ate;
+ u64 num_ate;
+ u64 lowest_free_index;
+};
+
+struct pcibus_info {
+ struct pcibus_bussoft pbi_buscommon; /* common header */
+ u32 pbi_moduleid;
+ short pbi_bridge_type;
+ short pbi_bridge_mode;
+
+ struct ate_resource pbi_int_ate_resource;
+ u64 pbi_int_ate_size;
+
+ u64 pbi_dir_xbase;
+ char pbi_hub_xid;
+
+ u64 pbi_devreg[8];
+
+ u32 pbi_valid_devices;
+ u32 pbi_enabled_devices;
+
+ spinlock_t pbi_lock;
+};
+
+extern int pcibr_init_provider(void);
+extern void *pcibr_bus_fixup(struct pcibus_bussoft *, struct pci_controller *);
+extern dma_addr_t pcibr_dma_map(struct pci_dev *, unsigned long, size_t, int type);
+extern dma_addr_t pcibr_dma_map_consistent(struct pci_dev *, unsigned long, size_t, int type);
+extern void pcibr_dma_unmap(struct pci_dev *, dma_addr_t, int);
+
+/*
+ * prototypes for the bridge asic register access routines in pcibr_reg.c
+ */
+extern void pcireg_control_bit_clr(struct pcibus_info *, u64);
+extern void pcireg_control_bit_set(struct pcibus_info *, u64);
+extern u64 pcireg_tflush_get(struct pcibus_info *);
+extern u64 pcireg_intr_status_get(struct pcibus_info *);
+extern void pcireg_intr_enable_bit_clr(struct pcibus_info *, u64);
+extern void pcireg_intr_enable_bit_set(struct pcibus_info *, u64);
+extern void pcireg_intr_addr_addr_set(struct pcibus_info *, int, u64);
+extern void pcireg_force_intr_set(struct pcibus_info *, int);
+extern u64 pcireg_wrb_flush_get(struct pcibus_info *, int);
+extern void pcireg_int_ate_set(struct pcibus_info *, int, u64);
+extern u64 __iomem * pcireg_int_ate_addr(struct pcibus_info *, int);
+extern void pcibr_force_interrupt(struct sn_irq_info *sn_irq_info);
+extern void pcibr_change_devices_irq(struct sn_irq_info *sn_irq_info);
+extern int pcibr_ate_alloc(struct pcibus_info *, int);
+extern void pcibr_ate_free(struct pcibus_info *, int);
+extern void ate_write(struct pcibus_info *, int, int, u64);
+extern int sal_pcibr_slot_enable(struct pcibus_info *soft, int device,
+ void *resp);
+extern int sal_pcibr_slot_disable(struct pcibus_info *soft, int device,
+ int action, void *resp);
+extern u16 sn_ioboard_to_pci_bus(struct pci_bus *pci_bus);
+#endif
diff --git a/xen/include/asm-ia64/linux-xen/asm/sn/rw_mmr.h b/xen/include/asm-ia64/linux-xen/asm/sn/rw_mmr.h
new file mode 100644
index 0000000000..cccef83367
--- /dev/null
+++ b/xen/include/asm-ia64/linux-xen/asm/sn/rw_mmr.h
@@ -0,0 +1,32 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002-2006 Silicon Graphics, Inc. All Rights Reserved.
+ */
+#ifndef _ASM_IA64_SN_RW_MMR_H
+#define _ASM_IA64_SN_RW_MMR_H
+
+
+/*
+ * This file contains routines that access MMRs via uncached physical addresses.
+ * pio_phys_read_mmr - read an MMR
+ * pio_phys_write_mmr - write an MMR
+ * pio_atomic_phys_write_mmrs - atomically write 1 or 2 MMRs with psr.ic=0
+ * Second MMR will be skipped if address is NULL
+ *
+ * Addresses passed to these routines should be uncached physical addresses
+ * ie., 0x80000....
+ */
+
+
+extern long pio_phys_read_mmr(volatile long *mmr);
+extern void pio_phys_write_mmr(volatile long *mmr, long val);
+#ifndef XEN
+extern void pio_atomic_phys_write_mmrs(volatile long *mmr1, long val1, volatile long *mmr2, long val2);
+#else
+extern void pio_atomic_phys_write_mmrs(volatile unsigned long *mmr1, long val1, volatile unsigned long *mmr2, long val2);
+#endif
+
+#endif /* _ASM_IA64_SN_RW_MMR_H */
diff --git a/xen/include/asm-ia64/linux-xen/asm/sn/types.h b/xen/include/asm-ia64/linux-xen/asm/sn/types.h
new file mode 100644
index 0000000000..6df228178a
--- /dev/null
+++ b/xen/include/asm-ia64/linux-xen/asm/sn/types.h
@@ -0,0 +1,28 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999,2001-2003 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 1999 by Ralf Baechle
+ */
+#ifndef _ASM_IA64_SN_TYPES_H
+#define _ASM_IA64_SN_TYPES_H
+
+#include <linux/types.h>
+
+typedef unsigned long cpuid_t;
+typedef signed short nasid_t; /* node id in numa-as-id space */
+typedef signed char partid_t; /* partition ID type */
+typedef unsigned int moduleid_t; /* user-visible module number type */
+typedef unsigned int cmoduleid_t; /* kernel compact module id type */
+typedef unsigned char slotid_t; /* slot (blade) within module */
+typedef unsigned char slabid_t; /* slab (asic) within slot */
+typedef u64 nic_t;
+typedef unsigned long iopaddr_t;
+#ifndef XEN
+typedef unsigned long paddr_t;
+#endif
+typedef short cnodeid_t;
+
+#endif /* _ASM_IA64_SN_TYPES_H */
diff --git a/xen/include/asm-ia64/linux-xen/asm/system.h b/xen/include/asm-ia64/linux-xen/asm/system.h
index 9f98e2a743..c1138c56fb 100644
--- a/xen/include/asm-ia64/linux-xen/asm/system.h
+++ b/xen/include/asm-ia64/linux-xen/asm/system.h
@@ -190,6 +190,7 @@ do { \
#ifdef XEN
#define local_irq_is_enabled() (!irqs_disabled())
extern struct vcpu *ia64_switch_to(struct vcpu *next_task);
+#define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
#else
#ifdef __KERNEL__
diff --git a/xen/include/asm-ia64/linux-xen/asm/types.h b/xen/include/asm-ia64/linux-xen/asm/types.h
index d2c04d3f6a..3dcfa34ae7 100644
--- a/xen/include/asm-ia64/linux-xen/asm/types.h
+++ b/xen/include/asm-ia64/linux-xen/asm/types.h
@@ -74,6 +74,14 @@ typedef unsigned short kmem_bufctl_t;
#ifdef XEN
#include <asm/xentypes.h>
+
+#ifndef __ASSEMBLY__
+typedef unsigned int gfp_t;
+typedef u64 resource_size_t;
+typedef u32 dev_t;
+typedef unsigned int mode_t;
+#define THIS_MODULE NULL
+#endif
#endif
#endif /* _ASM_IA64_TYPES_H */
diff --git a/xen/include/asm-ia64/linux-xen/linux/README.origin b/xen/include/asm-ia64/linux-xen/linux/README.origin
index 6cc513be16..b46632953f 100644
--- a/xen/include/asm-ia64/linux-xen/linux/README.origin
+++ b/xen/include/asm-ia64/linux-xen/linux/README.origin
@@ -12,3 +12,8 @@ interrupt.h -> linux/include/linux/interrupt.h
# The files below are from Linux-2.6.16.33
oprofile.h -> linux/include/linux/oprofile.h
+
+# The files below are from Linux-2.6.19
+pci.h -> linux/include/linux/pci.h
+kobject.h -> linux/include/linux/kobject.h
+device.h -> linux/include/linux/device.h
diff --git a/xen/include/asm-ia64/linux-xen/linux/device.h b/xen/include/asm-ia64/linux-xen/linux/device.h
new file mode 100644
index 0000000000..8ebe998659
--- /dev/null
+++ b/xen/include/asm-ia64/linux-xen/linux/device.h
@@ -0,0 +1,489 @@
+/*
+ * device.h - generic, centralized driver model
+ *
+ * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
+ *
+ * This file is released under the GPLv2
+ *
+ * See Documentation/driver-model/ for more information.
+ */
+
+#ifndef _DEVICE_H_
+#define _DEVICE_H_
+
+#include <linux/ioport.h>
+#include <linux/kobject.h>
+#include <linux/klist.h>
+#include <linux/list.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <asm/semaphore.h>
+#include <asm/atomic.h>
+
+#define DEVICE_NAME_SIZE 50
+#define DEVICE_NAME_HALF __stringify(20) /* Less than half to accommodate slop */
+#define DEVICE_ID_SIZE 32
+#define BUS_ID_SIZE KOBJ_NAME_LEN
+
+
+struct device;
+struct device_driver;
+struct class;
+struct class_device;
+
+struct bus_type {
+ const char * name;
+
+ struct subsystem subsys;
+ struct kset drivers;
+ struct kset devices;
+ struct klist klist_devices;
+ struct klist klist_drivers;
+
+ struct bus_attribute * bus_attrs;
+ struct device_attribute * dev_attrs;
+ struct driver_attribute * drv_attrs;
+
+ int (*match)(struct device * dev, struct device_driver * drv);
+ int (*uevent)(struct device *dev, char **envp,
+ int num_envp, char *buffer, int buffer_size);
+ int (*probe)(struct device * dev);
+ int (*remove)(struct device * dev);
+ void (*shutdown)(struct device * dev);
+
+ int (*suspend)(struct device * dev, pm_message_t state);
+ int (*suspend_late)(struct device * dev, pm_message_t state);
+ int (*resume_early)(struct device * dev);
+ int (*resume)(struct device * dev);
+};
+
+extern int __must_check bus_register(struct bus_type * bus);
+extern void bus_unregister(struct bus_type * bus);
+
+extern int __must_check bus_rescan_devices(struct bus_type * bus);
+
+/* iterator helpers for buses */
+
+int bus_for_each_dev(struct bus_type * bus, struct device * start, void * data,
+ int (*fn)(struct device *, void *));
+struct device * bus_find_device(struct bus_type *bus, struct device *start,
+ void *data, int (*match)(struct device *, void *));
+
+int __must_check bus_for_each_drv(struct bus_type *bus,
+ struct device_driver *start, void *data,
+ int (*fn)(struct device_driver *, void *));
+
+/* driverfs interface for exporting bus attributes */
+
+struct bus_attribute {
+#ifndef XEN
+ struct attribute attr;
+#endif
+ ssize_t (*show)(struct bus_type *, char * buf);
+ ssize_t (*store)(struct bus_type *, const char * buf, size_t count);
+};
+
+#define BUS_ATTR(_name,_mode,_show,_store) \
+struct bus_attribute bus_attr_##_name = __ATTR(_name,_mode,_show,_store)
+
+extern int __must_check bus_create_file(struct bus_type *,
+ struct bus_attribute *);
+extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
+
+struct device_driver {
+ const char * name;
+ struct bus_type * bus;
+
+ struct completion unloaded;
+ struct kobject kobj;
+ struct klist klist_devices;
+ struct klist_node knode_bus;
+
+ struct module * owner;
+
+ int (*probe) (struct device * dev);
+ int (*remove) (struct device * dev);
+ void (*shutdown) (struct device * dev);
+ int (*suspend) (struct device * dev, pm_message_t state);
+ int (*resume) (struct device * dev);
+
+ unsigned int multithread_probe:1;
+};
+
+
+extern int __must_check driver_register(struct device_driver * drv);
+extern void driver_unregister(struct device_driver * drv);
+
+extern struct device_driver * get_driver(struct device_driver * drv);
+extern void put_driver(struct device_driver * drv);
+extern struct device_driver *driver_find(const char *name, struct bus_type *bus);
+extern int driver_probe_done(void);
+
+/* driverfs interface for exporting driver attributes */
+
+struct driver_attribute {
+#ifndef XEN
+ struct attribute attr;
+#endif
+ ssize_t (*show)(struct device_driver *, char * buf);
+ ssize_t (*store)(struct device_driver *, const char * buf, size_t count);
+};
+
+#define DRIVER_ATTR(_name,_mode,_show,_store) \
+struct driver_attribute driver_attr_##_name = __ATTR(_name,_mode,_show,_store)
+
+extern int __must_check driver_create_file(struct device_driver *,
+ struct driver_attribute *);
+extern void driver_remove_file(struct device_driver *, struct driver_attribute *);
+
+extern int __must_check driver_for_each_device(struct device_driver * drv,
+ struct device *start, void *data,
+ int (*fn)(struct device *, void *));
+struct device * driver_find_device(struct device_driver *drv,
+ struct device *start, void *data,
+ int (*match)(struct device *, void *));
+
+/*
+ * device classes
+ */
+struct class {
+ const char * name;
+ struct module * owner;
+
+ struct subsystem subsys;
+ struct list_head children;
+ struct list_head devices;
+ struct list_head interfaces;
+#ifdef XEN
+ spinlock_t sem;
+#else
+ struct semaphore sem; /* locks both the children and interfaces lists */
+#endif
+
+ struct kobject *virtual_dir;
+
+ struct class_attribute * class_attrs;
+ struct class_device_attribute * class_dev_attrs;
+ struct device_attribute * dev_attrs;
+
+ int (*uevent)(struct class_device *dev, char **envp,
+ int num_envp, char *buffer, int buffer_size);
+ int (*dev_uevent)(struct device *dev, char **envp, int num_envp,
+ char *buffer, int buffer_size);
+
+ void (*release)(struct class_device *dev);
+ void (*class_release)(struct class *class);
+ void (*dev_release)(struct device *dev);
+
+ int (*suspend)(struct device *, pm_message_t state);
+ int (*resume)(struct device *);
+};
+
+extern int __must_check class_register(struct class *);
+extern void class_unregister(struct class *);
+
+
+struct class_attribute {
+#ifndef XEN
+ struct attribute attr;
+#endif
+ ssize_t (*show)(struct class *, char * buf);
+ ssize_t (*store)(struct class *, const char * buf, size_t count);
+};
+
+#define CLASS_ATTR(_name,_mode,_show,_store) \
+struct class_attribute class_attr_##_name = __ATTR(_name,_mode,_show,_store)
+
+extern int __must_check class_create_file(struct class *,
+ const struct class_attribute *);
+extern void class_remove_file(struct class *, const struct class_attribute *);
+
+struct class_device_attribute {
+#ifndef XEN
+ struct attribute attr;
+#endif
+ ssize_t (*show)(struct class_device *, char * buf);
+ ssize_t (*store)(struct class_device *, const char * buf, size_t count);
+};
+
+#define CLASS_DEVICE_ATTR(_name,_mode,_show,_store) \
+struct class_device_attribute class_device_attr_##_name = \
+ __ATTR(_name,_mode,_show,_store)
+
+extern int __must_check class_device_create_file(struct class_device *,
+ const struct class_device_attribute *);
+
+/**
+ * struct class_device - class devices
+ * @class: pointer to the parent class for this class device. This is required.
+ * @devt: for internal use by the driver core only.
+ * @node: for internal use by the driver core only.
+ * @kobj: for internal use by the driver core only.
+ * @devt_attr: for internal use by the driver core only.
+ * @groups: optional additional groups to be created
+ * @dev: if set, a symlink to the struct device is created in the sysfs
+ * directory for this struct class device.
+ * @class_data: pointer to whatever you want to store here for this struct
+ * class_device. Use class_get_devdata() and class_set_devdata() to get and
+ * set this pointer.
+ * @parent: pointer to a struct class_device that is the parent of this struct
+ * class_device. If NULL, this class_device will show up at the root of the
+ * struct class in sysfs (which is probably what you want to have happen.)
+ * @release: pointer to a release function for this struct class_device. If
+ * set, this will be called instead of the class specific release function.
+ * Only use this if you want to override the default release function, like
+ * when you are nesting class_device structures.
+ * @uevent: pointer to a uevent function for this struct class_device. If
+ * set, this will be called instead of the class specific uevent function.
+ * Only use this if you want to override the default uevent function, like
+ * when you are nesting class_device structures.
+ */
+struct class_device {
+ struct list_head node;
+
+ struct kobject kobj;
+ struct class * class; /* required */
+ dev_t devt; /* dev_t, creates the sysfs "dev" */
+ struct class_device_attribute *devt_attr;
+ struct class_device_attribute uevent_attr;
+ struct device * dev; /* not necessary, but nice to have */
+ void * class_data; /* class-specific data */
+ struct class_device *parent; /* parent of this child device, if there is one */
+ struct attribute_group ** groups; /* optional groups */
+
+ void (*release)(struct class_device *dev);
+ int (*uevent)(struct class_device *dev, char **envp,
+ int num_envp, char *buffer, int buffer_size);
+ char class_id[BUS_ID_SIZE]; /* unique to this class */
+};
+
+static inline void *
+class_get_devdata (struct class_device *dev)
+{
+ return dev->class_data;
+}
+
+static inline void
+class_set_devdata (struct class_device *dev, void *data)
+{
+ dev->class_data = data;
+}
+
+
+extern int __must_check class_device_register(struct class_device *);
+extern void class_device_unregister(struct class_device *);
+extern void class_device_initialize(struct class_device *);
+extern int __must_check class_device_add(struct class_device *);
+extern void class_device_del(struct class_device *);
+
+extern int class_device_rename(struct class_device *, char *);
+
+extern struct class_device * class_device_get(struct class_device *);
+extern void class_device_put(struct class_device *);
+
+extern void class_device_remove_file(struct class_device *,
+ const struct class_device_attribute *);
+extern int __must_check class_device_create_bin_file(struct class_device *,
+ struct bin_attribute *);
+extern void class_device_remove_bin_file(struct class_device *,
+ struct bin_attribute *);
+
+struct class_interface {
+ struct list_head node;
+ struct class *class;
+
+ int (*add) (struct class_device *, struct class_interface *);
+ void (*remove) (struct class_device *, struct class_interface *);
+ int (*add_dev) (struct device *, struct class_interface *);
+ void (*remove_dev) (struct device *, struct class_interface *);
+};
+
+extern int __must_check class_interface_register(struct class_interface *);
+extern void class_interface_unregister(struct class_interface *);
+
+extern struct class *class_create(struct module *owner, const char *name);
+extern void class_destroy(struct class *cls);
+extern struct class_device *class_device_create(struct class *cls,
+ struct class_device *parent,
+ dev_t devt,
+ struct device *device,
+ const char *fmt, ...)
+ __attribute__((format(printf,5,6)));
+extern void class_device_destroy(struct class *cls, dev_t devt);
+
+/* interface for exporting device attributes */
+struct device_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct device *dev, struct device_attribute *attr,
+ char *buf);
+ ssize_t (*store)(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count);
+};
+
+#define DEVICE_ATTR(_name,_mode,_show,_store) \
+struct device_attribute dev_attr_##_name = __ATTR(_name,_mode,_show,_store)
+
+extern int __must_check device_create_file(struct device *device,
+ struct device_attribute * entry);
+extern void device_remove_file(struct device * dev, struct device_attribute * attr);
+extern int __must_check device_create_bin_file(struct device *dev,
+ struct bin_attribute *attr);
+extern void device_remove_bin_file(struct device *dev,
+ struct bin_attribute *attr);
+struct device {
+ struct klist klist_children;
+ struct klist_node knode_parent; /* node in sibling list */
+ struct klist_node knode_driver;
+ struct klist_node knode_bus;
+ struct device * parent;
+
+ struct kobject kobj;
+ char bus_id[BUS_ID_SIZE]; /* position on parent bus */
+ unsigned is_registered:1;
+ struct device_attribute uevent_attr;
+ struct device_attribute *devt_attr;
+
+#ifdef XEN
+ spinlock_t sem;
+#else
+ struct semaphore sem; /* semaphore to synchronize calls to
+ * its driver.
+ */
+#endif
+
+ struct bus_type * bus; /* type of bus device is on */
+ struct device_driver *driver; /* which driver has allocated this
+ device */
+ void *driver_data; /* data private to the driver */
+ void *platform_data; /* Platform specific data, device
+ core doesn't touch it */
+ void *firmware_data; /* Firmware specific data (e.g. ACPI,
+ BIOS data),reserved for device core*/
+ struct dev_pm_info power;
+
+ u64 *dma_mask; /* dma mask (if dma'able device) */
+ u64 coherent_dma_mask;/* Like dma_mask, but for
+ alloc_coherent mappings as
+ not all hardware supports
+ 64 bit addresses for consistent
+ allocations such descriptors. */
+
+ struct list_head dma_pools; /* dma pools (if dma'ble) */
+
+ struct dma_coherent_mem *dma_mem; /* internal for coherent mem
+ override */
+
+ /* class_device migration path */
+ struct list_head node;
+ struct class *class; /* optional*/
+ dev_t devt; /* dev_t, creates the sysfs "dev" */
+ struct attribute_group **groups; /* optional groups */
+
+ void (*release)(struct device * dev);
+};
+
+static inline void *
+dev_get_drvdata (struct device *dev)
+{
+ return dev->driver_data;
+}
+
+static inline void
+dev_set_drvdata (struct device *dev, void *data)
+{
+ dev->driver_data = data;
+}
+
+static inline int device_is_registered(struct device *dev)
+{
+ return dev->is_registered;
+}
+
+/*
+ * High level routines for use by the bus drivers
+ */
+extern int __must_check device_register(struct device * dev);
+extern void device_unregister(struct device * dev);
+extern void device_initialize(struct device * dev);
+extern int __must_check device_add(struct device * dev);
+extern void device_del(struct device * dev);
+extern int device_for_each_child(struct device *, void *,
+ int (*fn)(struct device *, void *));
+extern int device_rename(struct device *dev, char *new_name);
+
+/*
+ * Manual binding of a device to driver. See drivers/base/bus.c
+ * for information on use.
+ */
+extern int __must_check device_bind_driver(struct device *dev);
+extern void device_release_driver(struct device * dev);
+extern int __must_check device_attach(struct device * dev);
+extern int __must_check driver_attach(struct device_driver *drv);
+extern int __must_check device_reprobe(struct device *dev);
+
+/*
+ * Easy functions for dynamically creating devices on the fly
+ */
+extern struct device *device_create(struct class *cls, struct device *parent,
+ dev_t devt, const char *fmt, ...)
+ __attribute__((format(printf,4,5)));
+extern void device_destroy(struct class *cls, dev_t devt);
+
+extern int virtual_device_parent(struct device *dev);
+
+/*
+ * Platform "fixup" functions - allow the platform to have their say
+ * about devices and actions that the general device layer doesn't
+ * know about.
+ */
+/* Notify platform of device discovery */
+extern int (*platform_notify)(struct device * dev);
+
+extern int (*platform_notify_remove)(struct device * dev);
+
+
+/**
+ * get_device - atomically increment the reference count for the device.
+ *
+ */
+extern struct device * get_device(struct device * dev);
+extern void put_device(struct device * dev);
+
+
+/* drivers/base/power/shutdown.c */
+extern void device_shutdown(void);
+
+
+/* drivers/base/firmware.c */
+extern int __must_check firmware_register(struct subsystem *);
+extern void firmware_unregister(struct subsystem *);
+
+/* debugging and troubleshooting/diagnostic helpers. */
+extern const char *dev_driver_string(struct device *dev);
+#define dev_printk(level, dev, format, arg...) \
+ printk(level "%s %s: " format , dev_driver_string(dev) , (dev)->bus_id , ## arg)
+
+#ifdef DEBUG
+#define dev_dbg(dev, format, arg...) \
+ dev_printk(KERN_DEBUG , dev , format , ## arg)
+#else
+#define dev_dbg(dev, format, arg...) do { (void)(dev); } while (0)
+#endif
+
+#define dev_err(dev, format, arg...) \
+ dev_printk(KERN_ERR , dev , format , ## arg)
+#define dev_info(dev, format, arg...) \
+ dev_printk(KERN_INFO , dev , format , ## arg)
+#define dev_warn(dev, format, arg...) \
+ dev_printk(KERN_WARNING , dev , format , ## arg)
+#define dev_notice(dev, format, arg...) \
+ dev_printk(KERN_NOTICE , dev , format , ## arg)
+
+/* Create alias, so I can be autoloaded. */
+#define MODULE_ALIAS_CHARDEV(major,minor) \
+ MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
+#define MODULE_ALIAS_CHARDEV_MAJOR(major) \
+ MODULE_ALIAS("char-major-" __stringify(major) "-*")
+#endif /* _DEVICE_H_ */
diff --git a/xen/include/asm-ia64/linux-xen/linux/kobject.h b/xen/include/asm-ia64/linux-xen/linux/kobject.h
new file mode 100644
index 0000000000..d072d2a90c
--- /dev/null
+++ b/xen/include/asm-ia64/linux-xen/linux/kobject.h
@@ -0,0 +1,286 @@
+/*
+ * kobject.h - generic kernel object infrastructure.
+ *
+ * Copyright (c) 2002-2003 Patrick Mochel
+ * Copyright (c) 2002-2003 Open Source Development Labs
+ *
+ * This file is released under the GPLv2.
+ *
+ *
+ * Please read Documentation/kobject.txt before using the kobject
+ * interface, ESPECIALLY the parts about reference counts and object
+ * destructors.
+ */
+
+#ifndef _KOBJECT_H_
+#define _KOBJECT_H_
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/sysfs.h>
+#include <linux/compiler.h>
+#include <linux/spinlock.h>
+#include <linux/rwsem.h>
+#include <linux/kref.h>
+#include <linux/kernel.h>
+#include <linux/wait.h>
+#include <asm/atomic.h>
+
+#define KOBJ_NAME_LEN 20
+#define UEVENT_HELPER_PATH_LEN 256
+
+/* path to the userspace helper executed on an event */
+extern char uevent_helper[];
+
+/* counter to tag the uevent, read only except for the kobject core */
+extern u64 uevent_seqnum;
+
+/* the actions here must match the proper string in lib/kobject_uevent.c */
+typedef int __bitwise kobject_action_t;
+enum kobject_action {
+ KOBJ_ADD = (__force kobject_action_t) 0x01, /* exclusive to core */
+ KOBJ_REMOVE = (__force kobject_action_t) 0x02, /* exclusive to core */
+ KOBJ_CHANGE = (__force kobject_action_t) 0x03, /* device state change */
+ KOBJ_MOUNT = (__force kobject_action_t) 0x04, /* mount event for block devices (broken) */
+ KOBJ_UMOUNT = (__force kobject_action_t) 0x05, /* umount event for block devices (broken) */
+ KOBJ_OFFLINE = (__force kobject_action_t) 0x06, /* device offline */
+ KOBJ_ONLINE = (__force kobject_action_t) 0x07, /* device online */
+};
+
+struct kobject {
+ const char * k_name;
+ char name[KOBJ_NAME_LEN];
+ struct kref kref;
+ struct list_head entry;
+ struct kobject * parent;
+ struct kset * kset;
+ struct kobj_type * ktype;
+ struct dentry * dentry;
+ wait_queue_head_t poll;
+};
+
+extern int kobject_set_name(struct kobject *, const char *, ...)
+ __attribute__((format(printf,2,3)));
+
+static inline const char * kobject_name(const struct kobject * kobj)
+{
+ return kobj->k_name;
+}
+
+extern void kobject_init(struct kobject *);
+extern void kobject_cleanup(struct kobject *);
+
+extern int __must_check kobject_add(struct kobject *);
+extern void kobject_del(struct kobject *);
+
+extern int __must_check kobject_rename(struct kobject *, const char *new_name);
+
+extern int __must_check kobject_register(struct kobject *);
+extern void kobject_unregister(struct kobject *);
+
+extern struct kobject * kobject_get(struct kobject *);
+extern void kobject_put(struct kobject *);
+
+extern struct kobject *kobject_add_dir(struct kobject *, const char *);
+
+extern char * kobject_get_path(struct kobject *, gfp_t);
+
+struct kobj_type {
+ void (*release)(struct kobject *);
+ struct sysfs_ops * sysfs_ops;
+ struct attribute ** default_attrs;
+};
+
+
+/**
+ * kset - a set of kobjects of a specific type, belonging
+ * to a specific subsystem.
+ *
+ * All kobjects of a kset should be embedded in an identical
+ * type. This type may have a descriptor, which the kset points
+ * to. This allows there to exist sets of objects of the same
+ * type in different subsystems.
+ *
+ * A subsystem does not have to be a list of only one type
+ * of object; multiple ksets can belong to one subsystem. All
+ * ksets of a subsystem share the subsystem's lock.
+ *
+ * Each kset can support specific event variables; it can
+ * suppress the event generation or add subsystem specific
+ * variables carried with the event.
+ */
+struct kset_uevent_ops {
+ int (*filter)(struct kset *kset, struct kobject *kobj);
+ const char *(*name)(struct kset *kset, struct kobject *kobj);
+ int (*uevent)(struct kset *kset, struct kobject *kobj, char **envp,
+ int num_envp, char *buffer, int buffer_size);
+};
+
+struct kset {
+ struct subsystem * subsys;
+ struct kobj_type * ktype;
+ struct list_head list;
+ spinlock_t list_lock;
+ struct kobject kobj;
+ struct kset_uevent_ops * uevent_ops;
+};
+
+
+extern void kset_init(struct kset * k);
+extern int __must_check kset_add(struct kset * k);
+extern int __must_check kset_register(struct kset * k);
+extern void kset_unregister(struct kset * k);
+
+static inline struct kset * to_kset(struct kobject * kobj)
+{
+ return kobj ? container_of(kobj,struct kset,kobj) : NULL;
+}
+
+static inline struct kset * kset_get(struct kset * k)
+{
+ return k ? to_kset(kobject_get(&k->kobj)) : NULL;
+}
+
+static inline void kset_put(struct kset * k)
+{
+ kobject_put(&k->kobj);
+}
+
+static inline struct kobj_type * get_ktype(struct kobject * k)
+{
+ if (k->kset && k->kset->ktype)
+ return k->kset->ktype;
+ else
+ return k->ktype;
+}
+
+extern struct kobject * kset_find_obj(struct kset *, const char *);
+
+
+/**
+ * Use this when initializing an embedded kset with no other
+ * fields to initialize.
+ */
+#define set_kset_name(str) .kset = { .kobj = { .name = str } }
+
+
+
+struct subsystem {
+ struct kset kset;
+#ifndef XEN
+ struct rw_semaphore rwsem;
+#endif
+};
+
+#define decl_subsys(_name,_type,_uevent_ops) \
+struct subsystem _name##_subsys = { \
+ .kset = { \
+ .kobj = { .name = __stringify(_name) }, \
+ .ktype = _type, \
+ .uevent_ops =_uevent_ops, \
+ } \
+}
+#define decl_subsys_name(_varname,_name,_type,_uevent_ops) \
+struct subsystem _varname##_subsys = { \
+ .kset = { \
+ .kobj = { .name = __stringify(_name) }, \
+ .ktype = _type, \
+ .uevent_ops =_uevent_ops, \
+ } \
+}
+
+/* The global /sys/kernel/ subsystem for people to chain off of */
+extern struct subsystem kernel_subsys;
+/* The global /sys/hypervisor/ subsystem */
+extern struct subsystem hypervisor_subsys;
+
+/**
+ * Helpers for setting the kset of registered objects.
+ * Often, a registered object belongs to a kset embedded in a
+ * subsystem. These do no magic, just make the resulting code
+ * easier to follow.
+ */
+
+/**
+ * kobj_set_kset_s(obj,subsys) - set kset for embedded kobject.
+ * @obj: ptr to some object type.
+ * @subsys: a subsystem object (not a ptr).
+ *
+ * Can be used for any object type with an embedded ->kobj.
+ */
+
+#define kobj_set_kset_s(obj,subsys) \
+ (obj)->kobj.kset = &(subsys).kset
+
+/**
+ * kset_set_kset_s(obj,subsys) - set kset for embedded kset.
+ * @obj: ptr to some object type.
+ * @subsys: a subsystem object (not a ptr).
+ *
+ * Can be used for any object type with an embedded ->kset.
+ * Sets the kset of @obj's embedded kobject (via its embedded
+ * kset) to @subsys.kset. This makes @obj a member of that
+ * kset.
+ */
+
+#define kset_set_kset_s(obj,subsys) \
+ (obj)->kset.kobj.kset = &(subsys).kset
+
+/**
+ * subsys_set_kset(obj,subsys) - set kset for subsystem
+ * @obj: ptr to some object type.
+ * @subsys: a subsystem object (not a ptr).
+ *
+ * Can be used for any object type with an embedded ->subsys.
+ * Sets the kset of @obj's kobject to @subsys.kset. This makes
+ * the object a member of that kset.
+ */
+
+#define subsys_set_kset(obj,_subsys) \
+ (obj)->subsys.kset.kobj.kset = &(_subsys).kset
+
+extern void subsystem_init(struct subsystem *);
+extern int __must_check subsystem_register(struct subsystem *);
+extern void subsystem_unregister(struct subsystem *);
+
+static inline struct subsystem * subsys_get(struct subsystem * s)
+{
+ return s ? container_of(kset_get(&s->kset),struct subsystem,kset) : NULL;
+}
+
+static inline void subsys_put(struct subsystem * s)
+{
+ kset_put(&s->kset);
+}
+
+struct subsys_attribute {
+#ifndef XEN
+ struct attribute attr;
+#endif
+ ssize_t (*show)(struct subsystem *, char *);
+ ssize_t (*store)(struct subsystem *, const char *, size_t);
+};
+
+extern int __must_check subsys_create_file(struct subsystem * ,
+ struct subsys_attribute *);
+
+#if defined(CONFIG_HOTPLUG)
+void kobject_uevent(struct kobject *kobj, enum kobject_action action);
+
+int add_uevent_var(char **envp, int num_envp, int *cur_index,
+ char *buffer, int buffer_size, int *cur_len,
+ const char *format, ...)
+ __attribute__((format (printf, 7, 8)));
+#else
+static inline void kobject_uevent(struct kobject *kobj, enum kobject_action action) { }
+
+static inline int add_uevent_var(char **envp, int num_envp, int *cur_index,
+ char *buffer, int buffer_size, int *cur_len,
+ const char *format, ...)
+{ return 0; }
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _KOBJECT_H_ */
diff --git a/xen/include/asm-ia64/linux-xen/linux/pci.h b/xen/include/asm-ia64/linux-xen/linux/pci.h
new file mode 100644
index 0000000000..118201a9a7
--- /dev/null
+++ b/xen/include/asm-ia64/linux-xen/linux/pci.h
@@ -0,0 +1,820 @@
+/*
+ * pci.h
+ *
+ * PCI defines and function prototypes
+ * Copyright 1994, Drew Eckhardt
+ * Copyright 1997--1999 Martin Mares <mj@ucw.cz>
+ *
+ * For more information, please consult the following manuals (look at
+ * http://www.pcisig.com/ for how to get them):
+ *
+ * PCI BIOS Specification
+ * PCI Local Bus Specification
+ * PCI to PCI Bridge Specification
+ * PCI System Design Guide
+ */
+
+#ifndef LINUX_PCI_H
+#define LINUX_PCI_H
+
+/* Include the pci register defines */
+#include <linux/pci_regs.h>
+
+/* Include the ID list */
+#include <linux/pci_ids.h>
+#ifdef XEN
+#include <asm/processor.h>
+#endif
+
+/*
+ * The PCI interface treats multi-function devices as independent
+ * devices. The slot/function address of each device is encoded
+ * in a single byte as follows:
+ *
+ * 7:3 = slot
+ * 2:0 = function
+ */
+#define PCI_DEVFN(slot,func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
+#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
+#define PCI_FUNC(devfn) ((devfn) & 0x07)
+
+/* Ioctls for /proc/bus/pci/X/Y nodes. */
+#define PCIIOC_BASE ('P' << 24 | 'C' << 16 | 'I' << 8)
+#define PCIIOC_CONTROLLER (PCIIOC_BASE | 0x00) /* Get controller for PCI device. */
+#define PCIIOC_MMAP_IS_IO (PCIIOC_BASE | 0x01) /* Set mmap state to I/O space. */
+#define PCIIOC_MMAP_IS_MEM (PCIIOC_BASE | 0x02) /* Set mmap state to MEM space. */
+#define PCIIOC_WRITE_COMBINE (PCIIOC_BASE | 0x03) /* Enable/disable write-combining. */
+
+#ifdef __KERNEL__
+
+#include <linux/mod_devicetable.h>
+
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/list.h>
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+
+/* File state for mmap()s on /proc/bus/pci/X/Y */
+enum pci_mmap_state {
+ pci_mmap_io,
+ pci_mmap_mem
+};
+
+/* This defines the direction arg to the DMA mapping routines. */
+#define PCI_DMA_BIDIRECTIONAL 0
+#define PCI_DMA_TODEVICE 1
+#define PCI_DMA_FROMDEVICE 2
+#define PCI_DMA_NONE 3
+
+#define DEVICE_COUNT_COMPATIBLE 4
+#define DEVICE_COUNT_RESOURCE 12
+
+typedef int __bitwise pci_power_t;
+
+#define PCI_D0 ((pci_power_t __force) 0)
+#define PCI_D1 ((pci_power_t __force) 1)
+#define PCI_D2 ((pci_power_t __force) 2)
+#define PCI_D3hot ((pci_power_t __force) 3)
+#define PCI_D3cold ((pci_power_t __force) 4)
+#define PCI_UNKNOWN ((pci_power_t __force) 5)
+#define PCI_POWER_ERROR ((pci_power_t __force) -1)
+
+/** The pci_channel state describes connectivity between the CPU and
+ * the pci device. If some PCI bus between here and the pci device
+ * has crashed or locked up, this info is reflected here.
+ */
+typedef unsigned int __bitwise pci_channel_state_t;
+
+enum pci_channel_state {
+ /* I/O channel is in normal state */
+ pci_channel_io_normal = (__force pci_channel_state_t) 1,
+
+ /* I/O to channel is blocked */
+ pci_channel_io_frozen = (__force pci_channel_state_t) 2,
+
+ /* PCI card is dead */
+ pci_channel_io_perm_failure = (__force pci_channel_state_t) 3,
+};
+
+typedef unsigned short __bitwise pci_bus_flags_t;
+enum pci_bus_flags {
+ PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1,
+};
+
+struct pci_cap_saved_state {
+ struct hlist_node next;
+ char cap_nr;
+ u32 data[0];
+};
+
+/*
+ * The pci_dev structure is used to describe PCI devices.
+ */
+struct pci_dev {
+ struct list_head global_list; /* node in list of all PCI devices */
+ struct list_head bus_list; /* node in per-bus list */
+ struct pci_bus *bus; /* bus this device is on */
+ struct pci_bus *subordinate; /* bus this device bridges to */
+
+ void *sysdata; /* hook for sys-specific extension */
+ struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */
+
+ unsigned int devfn; /* encoded device & function index */
+ unsigned short vendor;
+ unsigned short device;
+ unsigned short subsystem_vendor;
+ unsigned short subsystem_device;
+ unsigned int class; /* 3 bytes: (base,sub,prog-if) */
+ u8 hdr_type; /* PCI header type (`multi' flag masked out) */
+ u8 rom_base_reg; /* which config register controls the ROM */
+ u8 pin; /* which interrupt pin this device uses */
+
+ struct pci_driver *driver; /* which driver has allocated this device */
+ u64 dma_mask; /* Mask of the bits of bus address this
+ device implements. Normally this is
+ 0xffffffff. You only need to change
+ this if your device has broken DMA
+ or supports 64-bit transfers. */
+
+ pci_power_t current_state; /* Current operating state. In ACPI-speak,
+ this is D0-D3, D0 being fully functional,
+ and D3 being off. */
+
+ pci_channel_state_t error_state; /* current connectivity state */
+ struct device dev; /* Generic device interface */
+
+ /* device is compatible with these IDs */
+ unsigned short vendor_compatible[DEVICE_COUNT_COMPATIBLE];
+ unsigned short device_compatible[DEVICE_COUNT_COMPATIBLE];
+
+ int cfg_size; /* Size of configuration space */
+
+ /*
+ * Instead of touching interrupt line and base address registers
+ * directly, use the values stored here. They might be different!
+ */
+ unsigned int irq;
+ struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
+
+ /* These fields are used by common fixups */
+ unsigned int transparent:1; /* Transparent PCI bridge */
+ unsigned int multifunction:1;/* Part of multi-function device */
+ /* keep track of device state */
+ unsigned int is_enabled:1; /* pci_enable_device has been called */
+ unsigned int is_busmaster:1; /* device is busmaster */
+ unsigned int no_msi:1; /* device may not use msi */
+ unsigned int no_d1d2:1; /* only allow d0 or d3 */
+ unsigned int block_ucfg_access:1; /* userspace config space access is blocked */
+ unsigned int broken_parity_status:1; /* Device generates false positive parity */
+ unsigned int msi_enabled:1;
+ unsigned int msix_enabled:1;
+
+ u32 saved_config_space[16]; /* config space saved at suspend time */
+ struct hlist_head saved_cap_space;
+ struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */
+ int rom_attr_enabled; /* has display of the rom attribute been enabled? */
+ struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
+};
+
+#define pci_dev_g(n) list_entry(n, struct pci_dev, global_list)
+#define pci_dev_b(n) list_entry(n, struct pci_dev, bus_list)
+#define to_pci_dev(n) container_of(n, struct pci_dev, dev)
+#define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
+
+static inline struct pci_cap_saved_state *pci_find_saved_cap(
+ struct pci_dev *pci_dev,char cap)
+{
+ struct pci_cap_saved_state *tmp;
+ struct hlist_node *pos;
+
+ hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) {
+ if (tmp->cap_nr == cap)
+ return tmp;
+ }
+ return NULL;
+}
+
+static inline void pci_add_saved_cap(struct pci_dev *pci_dev,
+ struct pci_cap_saved_state *new_cap)
+{
+ hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
+}
+
+static inline void pci_remove_saved_cap(struct pci_cap_saved_state *cap)
+{
+ hlist_del(&cap->next);
+}
+
+/*
+ * For PCI devices, the region numbers are assigned this way:
+ *
+ * 0-5 standard PCI regions
+ * 6 expansion ROM
+ * 7-10 bridges: address space assigned to buses behind the bridge
+ */
+
+#define PCI_ROM_RESOURCE 6
+#define PCI_BRIDGE_RESOURCES 7
+#define PCI_NUM_RESOURCES 11
+
+#ifndef PCI_BUS_NUM_RESOURCES
+#define PCI_BUS_NUM_RESOURCES 8
+#endif
+
+#define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */
+
+struct pci_bus {
+ struct list_head node; /* node in list of buses */
+ struct pci_bus *parent; /* parent bus this bridge is on */
+ struct list_head children; /* list of child buses */
+ struct list_head devices; /* list of devices on this bus */
+ struct pci_dev *self; /* bridge device as seen by parent */
+ struct resource *resource[PCI_BUS_NUM_RESOURCES];
+ /* address space routed to this bus */
+
+ struct pci_ops *ops; /* configuration access functions */
+ void *sysdata; /* hook for sys-specific extension */
+ struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */
+
+ unsigned char number; /* bus number */
+ unsigned char primary; /* number of primary bridge */
+ unsigned char secondary; /* number of secondary bridge */
+ unsigned char subordinate; /* max number of subordinate buses */
+
+ char name[48];
+
+ unsigned short bridge_ctl; /* manage NO_ISA/FBB/et al behaviors */
+ pci_bus_flags_t bus_flags; /* Inherited by child busses */
+ struct device *bridge;
+ struct class_device class_dev;
+ struct bin_attribute *legacy_io; /* legacy I/O for this bus */
+ struct bin_attribute *legacy_mem; /* legacy mem */
+};
+
+#define pci_bus_b(n) list_entry(n, struct pci_bus, node)
+#define to_pci_bus(n) container_of(n, struct pci_bus, class_dev)
+
+/*
+ * Error values that may be returned by PCI functions.
+ */
+#define PCIBIOS_SUCCESSFUL 0x00
+#define PCIBIOS_FUNC_NOT_SUPPORTED 0x81
+#define PCIBIOS_BAD_VENDOR_ID 0x83
+#define PCIBIOS_DEVICE_NOT_FOUND 0x86
+#define PCIBIOS_BAD_REGISTER_NUMBER 0x87
+#define PCIBIOS_SET_FAILED 0x88
+#define PCIBIOS_BUFFER_TOO_SMALL 0x89
+
+/* Low-level architecture-dependent routines */
+
+struct pci_ops {
+ int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
+ int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
+};
+
+struct pci_raw_ops {
+ int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
+ int reg, int len, u32 *val);
+ int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
+ int reg, int len, u32 val);
+};
+
+extern struct pci_raw_ops *raw_pci_ops;
+
+struct pci_bus_region {
+ unsigned long start;
+ unsigned long end;
+};
+
+struct pci_dynids {
+ spinlock_t lock; /* protects list, index */
+ struct list_head list; /* for IDs added at runtime */
+ unsigned int use_driver_data:1; /* pci_driver->driver_data is used */
+};
+
+/* ---------------------------------------------------------------- */
+/** PCI Error Recovery System (PCI-ERS). If a PCI device driver provides
+ * a set of callbacks in struct pci_error_handlers, then that device driver
+ * will be notified of PCI bus errors, and will be driven to recovery
+ * when an error occurs.
+ */
+
+typedef unsigned int __bitwise pci_ers_result_t;
+
+enum pci_ers_result {
+ /* no result/none/not supported in device driver */
+ PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,
+
+ /* Device driver can recover without slot reset */
+ PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,
+
+ /* Device driver wants slot to be reset. */
+ PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,
+
+ /* Device has completely failed, is unrecoverable */
+ PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4,
+
+ /* Device driver is fully recovered and operational */
+ PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5,
+};
+
+/* PCI bus error event callbacks */
+struct pci_error_handlers
+{
+ /* PCI bus error detected on this device */
+ pci_ers_result_t (*error_detected)(struct pci_dev *dev,
+ enum pci_channel_state error);
+
+ /* MMIO has been re-enabled, but not DMA */
+ pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);
+
+ /* PCI Express link has been reset */
+ pci_ers_result_t (*link_reset)(struct pci_dev *dev);
+
+ /* PCI slot has been reset */
+ pci_ers_result_t (*slot_reset)(struct pci_dev *dev);
+
+ /* Device driver may resume normal operations */
+ void (*resume)(struct pci_dev *dev);
+};
+
+/* ---------------------------------------------------------------- */
+
+struct module;
+struct pci_driver {
+ struct list_head node;
+ char *name;
+ const struct pci_device_id *id_table; /* must be non-NULL for probe to be called */
+ int (*probe) (struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */
+ void (*remove) (struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */
+ int (*suspend) (struct pci_dev *dev, pm_message_t state); /* Device suspended */
+ int (*suspend_late) (struct pci_dev *dev, pm_message_t state);
+ int (*resume_early) (struct pci_dev *dev);
+ int (*resume) (struct pci_dev *dev); /* Device woken up */
+ int (*enable_wake) (struct pci_dev *dev, pci_power_t state, int enable); /* Enable wake event */
+ void (*shutdown) (struct pci_dev *dev);
+
+ struct pci_error_handlers *err_handler;
+ struct device_driver driver;
+ struct pci_dynids dynids;
+
+ int multithread_probe;
+};
+
+#define to_pci_driver(drv) container_of(drv,struct pci_driver, driver)
+
+/**
+ * PCI_DEVICE - macro used to describe a specific pci device
+ * @vend: the 16 bit PCI Vendor ID
+ * @dev: the 16 bit PCI Device ID
+ *
+ * This macro is used to create a struct pci_device_id that matches a
+ * specific device. The subvendor and subdevice fields will be set to
+ * PCI_ANY_ID.
+ */
+#define PCI_DEVICE(vend,dev) \
+ .vendor = (vend), .device = (dev), \
+ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
+
+/**
+ * PCI_DEVICE_CLASS - macro used to describe a specific pci device class
+ * @dev_class: the class, subclass, prog-if triple for this device
+ * @dev_class_mask: the class mask for this device
+ *
+ * This macro is used to create a struct pci_device_id that matches a
+ * specific PCI class. The vendor, device, subvendor, and subdevice
+ * fields will be set to PCI_ANY_ID.
+ */
+#define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \
+ .class = (dev_class), .class_mask = (dev_class_mask), \
+ .vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \
+ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
+
+/*
+ * pci_module_init is obsolete, this stays here till we fix up all usages of it
+ * in the tree.
+ */
+#define pci_module_init pci_register_driver
+
+/* these external functions are only available when PCI support is enabled */
+#ifdef CONFIG_PCI
+
+extern struct bus_type pci_bus_type;
+
+/* Do NOT directly access these two variables, unless you are arch specific pci
+ * code, or pci core code. */
+extern struct list_head pci_root_buses; /* list of all known PCI buses */
+extern struct list_head pci_devices; /* list of all devices */
+
+void pcibios_fixup_bus(struct pci_bus *);
+int __must_check pcibios_enable_device(struct pci_dev *, int mask);
+char *pcibios_setup (char *str);
+
+/* Used only when drivers/pci/setup.c is used */
+void pcibios_align_resource(void *, struct resource *, resource_size_t,
+ resource_size_t);
+void pcibios_update_irq(struct pci_dev *, int irq);
+
+/* Generic PCI functions used internally */
+
+extern struct pci_bus *pci_find_bus(int domain, int busnr);
+void pci_bus_add_devices(struct pci_bus *bus);
+struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus, struct pci_ops *ops, void *sysdata);
+static inline struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata)
+{
+ struct pci_bus *root_bus;
+ root_bus = pci_scan_bus_parented(NULL, bus, ops, sysdata);
+ if (root_bus)
+ pci_bus_add_devices(root_bus);
+ return root_bus;
+}
+struct pci_bus *pci_create_bus(struct device *parent, int bus, struct pci_ops *ops, void *sysdata);
+struct pci_bus * pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr);
+int pci_scan_slot(struct pci_bus *bus, int devfn);
+struct pci_dev * pci_scan_single_device(struct pci_bus *bus, int devfn);
+void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
+unsigned int pci_scan_child_bus(struct pci_bus *bus);
+int __must_check pci_bus_add_device(struct pci_dev *dev);
+void pci_read_bridge_bases(struct pci_bus *child);
+struct resource *pci_find_parent_resource(const struct pci_dev *dev, struct resource *res);
+int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
+extern struct pci_dev *pci_dev_get(struct pci_dev *dev);
+extern void pci_dev_put(struct pci_dev *dev);
+extern void pci_remove_bus(struct pci_bus *b);
+extern void pci_remove_bus_device(struct pci_dev *dev);
+extern void pci_stop_bus_device(struct pci_dev *dev);
+void pci_setup_cardbus(struct pci_bus *bus);
+extern void pci_sort_breadthfirst(void);
+
+/* Generic PCI functions exported to card drivers */
+
+struct pci_dev *pci_find_device (unsigned int vendor, unsigned int device, const struct pci_dev *from);
+struct pci_dev *pci_find_device_reverse (unsigned int vendor, unsigned int device, const struct pci_dev *from);
+struct pci_dev *pci_find_slot (unsigned int bus, unsigned int devfn);
+int pci_find_capability (struct pci_dev *dev, int cap);
+int pci_find_next_capability (struct pci_dev *dev, u8 pos, int cap);
+int pci_find_ext_capability (struct pci_dev *dev, int cap);
+struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
+
+struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
+ struct pci_dev *from);
+struct pci_dev *pci_get_device_reverse(unsigned int vendor, unsigned int device,
+ struct pci_dev *from);
+
+struct pci_dev *pci_get_subsys (unsigned int vendor, unsigned int device,
+ unsigned int ss_vendor, unsigned int ss_device,
+ struct pci_dev *from);
+struct pci_dev *pci_get_slot (struct pci_bus *bus, unsigned int devfn);
+struct pci_dev *pci_get_bus_and_slot (unsigned int bus, unsigned int devfn);
+struct pci_dev *pci_get_class (unsigned int class, struct pci_dev *from);
+int pci_dev_present(const struct pci_device_id *ids);
+
+int pci_bus_read_config_byte (struct pci_bus *bus, unsigned int devfn, int where, u8 *val);
+int pci_bus_read_config_word (struct pci_bus *bus, unsigned int devfn, int where, u16 *val);
+int pci_bus_read_config_dword (struct pci_bus *bus, unsigned int devfn, int where, u32 *val);
+int pci_bus_write_config_byte (struct pci_bus *bus, unsigned int devfn, int where, u8 val);
+int pci_bus_write_config_word (struct pci_bus *bus, unsigned int devfn, int where, u16 val);
+int pci_bus_write_config_dword (struct pci_bus *bus, unsigned int devfn, int where, u32 val);
+
+static inline int pci_read_config_byte(struct pci_dev *dev, int where, u8 *val)
+{
+ return pci_bus_read_config_byte (dev->bus, dev->devfn, where, val);
+}
+static inline int pci_read_config_word(struct pci_dev *dev, int where, u16 *val)
+{
+ return pci_bus_read_config_word (dev->bus, dev->devfn, where, val);
+}
+static inline int pci_read_config_dword(struct pci_dev *dev, int where, u32 *val)
+{
+ return pci_bus_read_config_dword (dev->bus, dev->devfn, where, val);
+}
+static inline int pci_write_config_byte(struct pci_dev *dev, int where, u8 val)
+{
+ return pci_bus_write_config_byte (dev->bus, dev->devfn, where, val);
+}
+static inline int pci_write_config_word(struct pci_dev *dev, int where, u16 val)
+{
+ return pci_bus_write_config_word (dev->bus, dev->devfn, where, val);
+}
+static inline int pci_write_config_dword(struct pci_dev *dev, int where, u32 val)
+{
+ return pci_bus_write_config_dword (dev->bus, dev->devfn, where, val);
+}
+
+int __must_check pci_enable_device(struct pci_dev *dev);
+int __must_check pci_enable_device_bars(struct pci_dev *dev, int mask);
+void pci_disable_device(struct pci_dev *dev);
+void pci_set_master(struct pci_dev *dev);
+#define HAVE_PCI_SET_MWI
+int __must_check pci_set_mwi(struct pci_dev *dev);
+void pci_clear_mwi(struct pci_dev *dev);
+void pci_intx(struct pci_dev *dev, int enable);
+int pci_set_dma_mask(struct pci_dev *dev, u64 mask);
+int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask);
+void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno);
+int __must_check pci_assign_resource(struct pci_dev *dev, int i);
+int __must_check pci_assign_resource_fixed(struct pci_dev *dev, int i);
+void pci_restore_bars(struct pci_dev *dev);
+
+/* ROM control related routines */
+void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
+void __iomem __must_check *pci_map_rom_copy(struct pci_dev *pdev, size_t *size);
+void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
+void pci_remove_rom(struct pci_dev *pdev);
+
+/* Power management related routines */
+int pci_save_state(struct pci_dev *dev);
+int pci_restore_state(struct pci_dev *dev);
+int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
+pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
+int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable);
+
+/* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
+void pci_bus_assign_resources(struct pci_bus *bus);
+void pci_bus_size_bridges(struct pci_bus *bus);
+int pci_claim_resource(struct pci_dev *, int);
+void pci_assign_unassigned_resources(void);
+void pdev_enable_device(struct pci_dev *);
+void pdev_sort_resources(struct pci_dev *, struct resource_list *);
+void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *),
+ int (*)(struct pci_dev *, u8, u8));
+#define HAVE_PCI_REQ_REGIONS 2
+int __must_check pci_request_regions(struct pci_dev *, const char *);
+void pci_release_regions(struct pci_dev *);
+int __must_check pci_request_region(struct pci_dev *, int, const char *);
+void pci_release_region(struct pci_dev *, int);
+
+/* drivers/pci/bus.c */
+int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
+ struct resource *res, resource_size_t size,
+ resource_size_t align, resource_size_t min,
+ unsigned int type_mask,
+ void (*alignf)(void *, struct resource *,
+ resource_size_t, resource_size_t),
+ void *alignf_data);
+void pci_enable_bridges(struct pci_bus *bus);
+
+/* Proper probing supporting hot-pluggable devices */
+int __must_check __pci_register_driver(struct pci_driver *, struct module *);
+static inline int __must_check pci_register_driver(struct pci_driver *driver)
+{
+ return __pci_register_driver(driver, THIS_MODULE);
+}
+
+void pci_unregister_driver(struct pci_driver *);
+void pci_remove_behind_bridge(struct pci_dev *);
+struct pci_driver *pci_dev_driver(const struct pci_dev *);
+const struct pci_device_id *pci_match_device(struct pci_driver *drv, struct pci_dev *dev);
+const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, struct pci_dev *dev);
+int pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max, int pass);
+
+void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *),
+ void *userdata);
+int pci_cfg_space_size(struct pci_dev *dev);
+unsigned char pci_bus_max_busnr(struct pci_bus* bus);
+
+/* kmem_cache style wrapper around pci_alloc_consistent() */
+
+#include <linux/dmapool.h>
+
+#define pci_pool dma_pool
+#define pci_pool_create(name, pdev, size, align, allocation) \
+ dma_pool_create(name, &pdev->dev, size, align, allocation)
+#define pci_pool_destroy(pool) dma_pool_destroy(pool)
+#define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)
+#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
+
+enum pci_dma_burst_strategy {
+ PCI_DMA_BURST_INFINITY, /* make bursts as large as possible,
+ strategy_parameter is N/A */
+ PCI_DMA_BURST_BOUNDARY, /* disconnect at every strategy_parameter
+ byte boundaries */
+ PCI_DMA_BURST_MULTIPLE, /* disconnect at some multiple of
+ strategy_parameter byte boundaries */
+};
+
+#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
+extern struct pci_dev *isa_bridge;
+#endif
+
+struct msix_entry {
+ u16 vector; /* kernel uses to write allocated vector */
+ u16 entry; /* driver uses to specify entry, OS writes */
+};
+
+
+#ifndef CONFIG_PCI_MSI
+static inline void pci_scan_msi_device(struct pci_dev *dev) {}
+static inline int pci_enable_msi(struct pci_dev *dev) {return -1;}
+static inline void pci_disable_msi(struct pci_dev *dev) {}
+static inline int pci_enable_msix(struct pci_dev* dev,
+ struct msix_entry *entries, int nvec) {return -1;}
+static inline void pci_disable_msix(struct pci_dev *dev) {}
+static inline void msi_remove_pci_irq_vectors(struct pci_dev *dev) {}
+#else
+extern void pci_scan_msi_device(struct pci_dev *dev);
+extern int pci_enable_msi(struct pci_dev *dev);
+extern void pci_disable_msi(struct pci_dev *dev);
+extern int pci_enable_msix(struct pci_dev* dev,
+ struct msix_entry *entries, int nvec);
+extern void pci_disable_msix(struct pci_dev *dev);
+extern void msi_remove_pci_irq_vectors(struct pci_dev *dev);
+#endif
+
+#ifdef CONFIG_HT_IRQ
+/* The functions a driver should call */
+int ht_create_irq(struct pci_dev *dev, int idx);
+void ht_destroy_irq(unsigned int irq);
+#endif /* CONFIG_HT_IRQ */
+
+extern void pci_block_user_cfg_access(struct pci_dev *dev);
+extern void pci_unblock_user_cfg_access(struct pci_dev *dev);
+
+/*
+ * PCI domain support. Sometimes called PCI segment (eg by ACPI),
+ * a PCI domain is defined to be a set of PCI busses which share
+ * configuration space.
+ */
+#ifndef CONFIG_PCI_DOMAINS
+static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
+static inline int pci_proc_domain(struct pci_bus *bus)
+{
+ return 0;
+}
+#endif
+
+#else /* CONFIG_PCI is not enabled */
+
+/*
+ * If the system does not have PCI, clearly these return errors. Define
+ * these as simple inline functions to avoid hair in drivers.
+ */
+
+#define _PCI_NOP(o,s,t) \
+ static inline int pci_##o##_config_##s (struct pci_dev *dev, int where, t val) \
+ { return PCIBIOS_FUNC_NOT_SUPPORTED; }
+#define _PCI_NOP_ALL(o,x) _PCI_NOP(o,byte,u8 x) \
+ _PCI_NOP(o,word,u16 x) \
+ _PCI_NOP(o,dword,u32 x)
+_PCI_NOP_ALL(read, *)
+_PCI_NOP_ALL(write,)
+
+static inline struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device, const struct pci_dev *from)
+{ return NULL; }
+
+static inline struct pci_dev *pci_find_slot(unsigned int bus, unsigned int devfn)
+{ return NULL; }
+
+static inline struct pci_dev *pci_get_device(unsigned int vendor,
+ unsigned int device, struct pci_dev *from)
+{ return NULL; }
+
+static inline struct pci_dev *pci_get_device_reverse(unsigned int vendor,
+ unsigned int device, struct pci_dev *from)
+{ return NULL; }
+
+static inline struct pci_dev *pci_get_subsys (unsigned int vendor, unsigned int device,
+unsigned int ss_vendor, unsigned int ss_device, struct pci_dev *from)
+{ return NULL; }
+
+static inline struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from)
+{ return NULL; }
+
+#define pci_dev_present(ids) (0)
+#define pci_dev_put(dev) do { } while (0)
+
+static inline void pci_set_master(struct pci_dev *dev) { }
+static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
+static inline void pci_disable_device(struct pci_dev *dev) { }
+static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) { return -EIO; }
+static inline int pci_assign_resource(struct pci_dev *dev, int i) { return -EBUSY;}
+static inline int __pci_register_driver(struct pci_driver *drv, struct module *owner) { return 0;}
+static inline int pci_register_driver(struct pci_driver *drv) { return 0;}
+static inline void pci_unregister_driver(struct pci_driver *drv) { }
+static inline int pci_find_capability (struct pci_dev *dev, int cap) {return 0; }
+static inline int pci_find_next_capability (struct pci_dev *dev, u8 post, int cap) { return 0; }
+static inline int pci_find_ext_capability (struct pci_dev *dev, int cap) {return 0; }
+static inline const struct pci_device_id *pci_match_device(const struct pci_device_id *ids, const struct pci_dev *dev) { return NULL; }
+
+/* Power management related routines */
+static inline int pci_save_state(struct pci_dev *dev) { return 0; }
+static inline int pci_restore_state(struct pci_dev *dev) { return 0; }
+static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state) { return 0; }
+static inline pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state) { return PCI_D0; }
+static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable) { return 0; }
+
+#define isa_bridge ((struct pci_dev *)NULL)
+
+#define pci_dma_burst_advice(pdev, strat, strategy_parameter) do { } while (0)
+
+static inline void pci_block_user_cfg_access(struct pci_dev *dev) { }
+static inline void pci_unblock_user_cfg_access(struct pci_dev *dev) { }
+
+#endif /* CONFIG_PCI */
+
+/* Include architecture-dependent settings and functions */
+
+#include <asm/pci.h>
+
+/* these helpers provide future and backwards compatibility
+ * for accessing popular PCI BAR info */
+#define pci_resource_start(dev,bar) ((dev)->resource[(bar)].start)
+#define pci_resource_end(dev,bar) ((dev)->resource[(bar)].end)
+#define pci_resource_flags(dev,bar) ((dev)->resource[(bar)].flags)
+#define pci_resource_len(dev,bar) \
+ ((pci_resource_start((dev),(bar)) == 0 && \
+ pci_resource_end((dev),(bar)) == \
+ pci_resource_start((dev),(bar))) ? 0 : \
+ \
+ (pci_resource_end((dev),(bar)) - \
+ pci_resource_start((dev),(bar)) + 1))
+
+/* Similar to the helpers above, these manipulate per-pci_dev
+ * driver-specific data. They are really just a wrapper around
+ * the generic device structure functions of these calls.
+ */
+static inline void *pci_get_drvdata (struct pci_dev *pdev)
+{
+ return dev_get_drvdata(&pdev->dev);
+}
+
+static inline void pci_set_drvdata (struct pci_dev *pdev, void *data)
+{
+ dev_set_drvdata(&pdev->dev, data);
+}
+
+/* If you want to know what to call your pci_dev, ask this function.
+ * Again, it's a wrapper around the generic device.
+ */
+static inline char *pci_name(struct pci_dev *pdev)
+{
+ return pdev->dev.bus_id;
+}
+
+
+/* Some archs don't want to expose struct resource to userland as-is
+ * in sysfs and /proc
+ */
+#ifndef HAVE_ARCH_PCI_RESOURCE_TO_USER
+static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
+ const struct resource *rsrc, resource_size_t *start,
+ resource_size_t *end)
+{
+ *start = rsrc->start;
+ *end = rsrc->end;
+}
+#endif /* HAVE_ARCH_PCI_RESOURCE_TO_USER */
+
+
+/*
+ * The world is not perfect and supplies us with broken PCI devices.
+ * For at least a part of these bugs we need a work-around, so both
+ * generic (drivers/pci/quirks.c) and per-architecture code can define
+ * fixup hooks to be called for particular buggy devices.
+ */
+
+struct pci_fixup {
+ u16 vendor, device; /* You can use PCI_ANY_ID here of course */
+ void (*hook)(struct pci_dev *dev);
+};
+
+enum pci_fixup_pass {
+ pci_fixup_early, /* Before probing BARs */
+ pci_fixup_header, /* After reading configuration header */
+ pci_fixup_final, /* Final phase of device fixups */
+ pci_fixup_enable, /* pci_enable_device() time */
+};
+
+/* Anonymous variables would be nice... */
+#define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, hook) \
+ static const struct pci_fixup __pci_fixup_##name __attribute_used__ \
+ __attribute__((__section__(#section))) = { vendor, device, hook };
+#define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \
+ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
+ vendor##device##hook, vendor, device, hook)
+#define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \
+ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
+ vendor##device##hook, vendor, device, hook)
+#define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \
+ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
+ vendor##device##hook, vendor, device, hook)
+#define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \
+ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
+ vendor##device##hook, vendor, device, hook)
+
+
+void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
+
+extern int pci_pci_problems;
+#define PCIPCI_FAIL 1 /* No PCI PCI DMA */
+#define PCIPCI_TRITON 2
+#define PCIPCI_NATOMA 4
+#define PCIPCI_VIAETBF 8
+#define PCIPCI_VSFX 16
+#define PCIPCI_ALIMAGIK 32 /* Need low latency setting */
+#define PCIAGP_FAIL 64 /* No PCI to AGP DMA */
+
+#endif /* __KERNEL__ */
+#endif /* LINUX_PCI_H */
diff --git a/xen/include/asm-ia64/linux/README.origin b/xen/include/asm-ia64/linux/README.origin
index e364b52977..4b1ff51f30 100644
--- a/xen/include/asm-ia64/linux/README.origin
+++ b/xen/include/asm-ia64/linux/README.origin
@@ -24,3 +24,15 @@ time.h -> linux/include/linux/time.h
timex.h -> linux/include/linux/timex.h
topology.h -> linux/include/linux/topology.h
wait.h -> linux/include/linux/wait.h
+
+# The files below are from Linux-2.6.19
+completion.h -> linux/include/linux/completion.h
+ioport.h -> linux/include/linux/ioport.h
+klist.h -> linux/include/linux/klist.h
+kref.h -> linux/include/linux/kref.h
+mod_devicetable.h -> linux/include/linux/mod_devicetable.h
+pci_ids.h -> linux/include/linux/pci_ids.h
+pci_regs.h -> linux/include/linux/pci_regs.h
+pm.h -> linux/include/linux/pm.h
+sysfs.h -> linux/include/linux/sysfs.h
+
diff --git a/xen/include/asm-ia64/linux/asm/README.origin b/xen/include/asm-ia64/linux/asm/README.origin
index 863a538b04..48ca126508 100644
--- a/xen/include/asm-ia64/linux/asm/README.origin
+++ b/xen/include/asm-ia64/linux/asm/README.origin
@@ -21,7 +21,6 @@ intrinsics.h -> linux/include/asm-ia64/intrinsics.h
ioctl.h -> linux/include/asm-ia64/ioctl.h
irq.h -> linux/include/asm-ia64/irq.h
linkage.h -> linux/include/asm-ia64/linkage.h
-machvec.h -> linux/include/asm-ia64/machvec.h
machvec_hpsim.h -> linux/include/asm-ia64/machvec_hpsim.h
mca.h -> linux/include/asm-ia64/mca.h
nodedata.h -> linux/include/asm-ia64/nodedate.h
@@ -41,3 +40,6 @@ topology.h -> linux/include/asm-ia64/topology.h
unaligned.h -> linux/include/asm-ia64/unaligned.h
unistd.h -> linux/include/asm-ia64/unistd.h
unwind.h -> linux/include/asm-ia64/unwind.h
+
+# The files below are from Linux-2.6.19
+machvec_init.h -> linux/include/asm-ia64/machvec_init.h
diff --git a/xen/include/asm-ia64/linux/asm/machvec_init.h b/xen/include/asm-ia64/linux/asm/machvec_init.h
new file mode 100644
index 0000000000..2d36f6840f
--- /dev/null
+++ b/xen/include/asm-ia64/linux/asm/machvec_init.h
@@ -0,0 +1,32 @@
+#include <asm/machvec.h>
+
+extern ia64_mv_send_ipi_t ia64_send_ipi;
+extern ia64_mv_global_tlb_purge_t ia64_global_tlb_purge;
+extern ia64_mv_local_vector_to_irq __ia64_local_vector_to_irq;
+extern ia64_mv_pci_get_legacy_mem_t ia64_pci_get_legacy_mem;
+extern ia64_mv_pci_legacy_read_t ia64_pci_legacy_read;
+extern ia64_mv_pci_legacy_write_t ia64_pci_legacy_write;
+
+extern ia64_mv_inb_t __ia64_inb;
+extern ia64_mv_inw_t __ia64_inw;
+extern ia64_mv_inl_t __ia64_inl;
+extern ia64_mv_outb_t __ia64_outb;
+extern ia64_mv_outw_t __ia64_outw;
+extern ia64_mv_outl_t __ia64_outl;
+extern ia64_mv_mmiowb_t __ia64_mmiowb;
+extern ia64_mv_readb_t __ia64_readb;
+extern ia64_mv_readw_t __ia64_readw;
+extern ia64_mv_readl_t __ia64_readl;
+extern ia64_mv_readq_t __ia64_readq;
+extern ia64_mv_readb_t __ia64_readb_relaxed;
+extern ia64_mv_readw_t __ia64_readw_relaxed;
+extern ia64_mv_readl_t __ia64_readl_relaxed;
+extern ia64_mv_readq_t __ia64_readq_relaxed;
+
+#define MACHVEC_HELPER(name) \
+ struct ia64_machine_vector machvec_##name __attribute__ ((unused, __section__ (".machvec"))) \
+ = MACHVEC_INIT(name);
+
+#define MACHVEC_DEFINE(name) MACHVEC_HELPER(name)
+
+MACHVEC_DEFINE(MACHVEC_PLATFORM_NAME)
diff --git a/xen/include/asm-ia64/linux/asm/sn/README.origin b/xen/include/asm-ia64/linux/asm/sn/README.origin
new file mode 100644
index 0000000000..5d8d7cb671
--- /dev/null
+++ b/xen/include/asm-ia64/linux/asm/sn/README.origin
@@ -0,0 +1,24 @@
+# Source files in this directory are identical copies of linux-2.6.19 files:
+#
+# NOTE: DO NOT commit changes to these files! If a file
+# needs to be changed, move it to ../linux-xen and follow
+# the instructions in the README there.
+
+geo.h -> linux/include/asm-ia64/sn/geo.h
+klconfig.h -> linux/include/asm-ia64/sn/klconfig.h
+l1.h -> linux/include/asm-ia64/sn/l1.h
+leds.h -> linux/include/asm-ia64/sn/leds.h
+module.h -> linux/include/asm-ia64/sn/module.h
+pcibus_provider_defs.h -> linux/include/asm-ia64/sn/pcibus_provider_defs.h
+pcidev.h -> linux/include/asm-ia64/sn/pcidev.h
+pda.h -> linux/include/asm-ia64/sn/pda.h
+pic.h -> linux/include/asm-ia64/sn/pic.h
+shub_mmr.h -> linux/include/asm-ia64/sn/shub_mmr.h
+shubio.h -> linux/include/asm-ia64/sn/shubio.h
+simulator.h -> linux/include/asm-ia64/sn/simulator.h
+sn_cpuid.h -> linux/include/asm-ia64/sn/sn_cpuid.h
+sn_feature_sets.h -> linux/include/asm-ia64/sn/sn_feature_sets.h
+sn_sal.h -> linux/include/asm-ia64/sn/sn_sal.h
+tiocp.h -> linux/include/asm-ia64/sn/tiocp.h
+xbow.h -> linux/arch/ia64/sn/include/xtalk/xbow.h
+xwidgetdev.h -> linux/arch/ia64/sn/include/xtalk/xwidgetdev.h
diff --git a/xen/include/asm-ia64/linux/asm/sn/geo.h b/xen/include/asm-ia64/linux/asm/sn/geo.h
new file mode 100644
index 0000000000..f083c94340
--- /dev/null
+++ b/xen/include/asm-ia64/linux/asm/sn/geo.h
@@ -0,0 +1,132 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_GEO_H
+#define _ASM_IA64_SN_GEO_H
+
+/* The geoid_t implementation below is based loosely on the pcfg_t
+ implementation in sys/SN/promcfg.h. */
+
+/* Type declaractions */
+
+/* Size of a geoid_t structure (must be before decl. of geoid_u) */
+#define GEOID_SIZE 8 /* Would 16 be better? The size can
+ be different on different platforms. */
+
+#define MAX_SLOTS 0xf /* slots per module */
+#define MAX_SLABS 0xf /* slabs per slot */
+
+typedef unsigned char geo_type_t;
+
+/* Fields common to all substructures */
+typedef struct geo_common_s {
+ moduleid_t module; /* The module (box) this h/w lives in */
+ geo_type_t type; /* What type of h/w is named by this geoid_t */
+ slabid_t slab:4; /* slab (ASIC), 0 .. 15 within slot */
+ slotid_t slot:4; /* slot (Blade), 0 .. 15 within module */
+} geo_common_t;
+
+/* Additional fields for particular types of hardware */
+typedef struct geo_node_s {
+ geo_common_t common; /* No additional fields needed */
+} geo_node_t;
+
+typedef struct geo_rtr_s {
+ geo_common_t common; /* No additional fields needed */
+} geo_rtr_t;
+
+typedef struct geo_iocntl_s {
+ geo_common_t common; /* No additional fields needed */
+} geo_iocntl_t;
+
+typedef struct geo_pcicard_s {
+ geo_iocntl_t common;
+ char bus; /* Bus/widget number */
+ char slot; /* PCI slot number */
+} geo_pcicard_t;
+
+/* Subcomponents of a node */
+typedef struct geo_cpu_s {
+ geo_node_t node;
+ char slice; /* Which CPU on the node */
+} geo_cpu_t;
+
+typedef struct geo_mem_s {
+ geo_node_t node;
+ char membus; /* The memory bus on the node */
+ char memslot; /* The memory slot on the bus */
+} geo_mem_t;
+
+
+typedef union geoid_u {
+ geo_common_t common;
+ geo_node_t node;
+ geo_iocntl_t iocntl;
+ geo_pcicard_t pcicard;
+ geo_rtr_t rtr;
+ geo_cpu_t cpu;
+ geo_mem_t mem;
+ char padsize[GEOID_SIZE];
+} geoid_t;
+
+
+/* Preprocessor macros */
+
+#define GEO_MAX_LEN 48 /* max. formatted length, plus some pad:
+ module/001c07/slab/5/node/memory/2/slot/4 */
+
+/* Values for geo_type_t */
+#define GEO_TYPE_INVALID 0
+#define GEO_TYPE_MODULE 1
+#define GEO_TYPE_NODE 2
+#define GEO_TYPE_RTR 3
+#define GEO_TYPE_IOCNTL 4
+#define GEO_TYPE_IOCARD 5
+#define GEO_TYPE_CPU 6
+#define GEO_TYPE_MEM 7
+#define GEO_TYPE_MAX (GEO_TYPE_MEM+1)
+
+/* Parameter for hwcfg_format_geoid_compt() */
+#define GEO_COMPT_MODULE 1
+#define GEO_COMPT_SLAB 2
+#define GEO_COMPT_IOBUS 3
+#define GEO_COMPT_IOSLOT 4
+#define GEO_COMPT_CPU 5
+#define GEO_COMPT_MEMBUS 6
+#define GEO_COMPT_MEMSLOT 7
+
+#define GEO_INVALID_STR "<invalid>"
+
+#define INVALID_NASID ((nasid_t)-1)
+#define INVALID_CNODEID ((cnodeid_t)-1)
+#define INVALID_PNODEID ((pnodeid_t)-1)
+#define INVALID_SLAB (slabid_t)-1
+#define INVALID_SLOT (slotid_t)-1
+#define INVALID_MODULE ((moduleid_t)-1)
+
+static inline slabid_t geo_slab(geoid_t g)
+{
+ return (g.common.type == GEO_TYPE_INVALID) ?
+ INVALID_SLAB : g.common.slab;
+}
+
+static inline slotid_t geo_slot(geoid_t g)
+{
+ return (g.common.type == GEO_TYPE_INVALID) ?
+ INVALID_SLOT : g.common.slot;
+}
+
+static inline moduleid_t geo_module(geoid_t g)
+{
+ return (g.common.type == GEO_TYPE_INVALID) ?
+ INVALID_MODULE : g.common.module;
+}
+
+extern geoid_t cnodeid_get_geoid(cnodeid_t cnode);
+
+#endif /* _ASM_IA64_SN_GEO_H */
diff --git a/xen/include/asm-ia64/linux/asm/sn/klconfig.h b/xen/include/asm-ia64/linux/asm/sn/klconfig.h
new file mode 100644
index 0000000000..bcbf209d63
--- /dev/null
+++ b/xen/include/asm-ia64/linux/asm/sn/klconfig.h
@@ -0,0 +1,246 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Derived from IRIX <sys/SN/klconfig.h>.
+ *
+ * Copyright (C) 1992-1997,1999,2001-2004 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 1999 by Ralf Baechle
+ */
+#ifndef _ASM_IA64_SN_KLCONFIG_H
+#define _ASM_IA64_SN_KLCONFIG_H
+
+/*
+ * The KLCONFIG structures store info about the various BOARDs found
+ * during Hardware Discovery. In addition, it stores info about the
+ * components found on the BOARDs.
+ */
+
+typedef s32 klconf_off_t;
+
+
+/* Functions/macros needed to use this structure */
+
+typedef struct kl_config_hdr {
+ char pad[20];
+ klconf_off_t ch_board_info; /* the link list of boards */
+ char pad0[88];
+} kl_config_hdr_t;
+
+
+#define NODE_OFFSET_TO_LBOARD(nasid,off) (lboard_t*)(GLOBAL_CAC_ADDR((nasid), (off)))
+
+/*
+ * The KLCONFIG area is organized as a LINKED LIST of BOARDs. A BOARD
+ * can be either 'LOCAL' or 'REMOTE'. LOCAL means it is attached to
+ * the LOCAL/current NODE. REMOTE means it is attached to a different
+ * node.(TBD - Need a way to treat ROUTER boards.)
+ *
+ * There are 2 different structures to represent these boards -
+ * lboard - Local board, rboard - remote board. These 2 structures
+ * can be arbitrarily mixed in the LINKED LIST of BOARDs. (Refer
+ * Figure below). The first byte of the rboard or lboard structure
+ * is used to find out its type - no unions are used.
+ * If it is a lboard, then the config info of this board will be found
+ * on the local node. (LOCAL NODE BASE + offset value gives pointer to
+ * the structure.
+ * If it is a rboard, the local structure contains the node number
+ * and the offset of the beginning of the LINKED LIST on the remote node.
+ * The details of the hardware on a remote node can be built locally,
+ * if required, by reading the LINKED LIST on the remote node and
+ * ignoring all the rboards on that node.
+ *
+ * The local node uses the REMOTE NODE NUMBER + OFFSET to point to the
+ * First board info on the remote node. The remote node list is
+ * traversed as the local list, using the REMOTE BASE ADDRESS and not
+ * the local base address and ignoring all rboard values.
+ *
+ *
+ KLCONFIG
+
+ +------------+ +------------+ +------------+ +------------+
+ | lboard | +-->| lboard | +-->| rboard | +-->| lboard |
+ +------------+ | +------------+ | +------------+ | +------------+
+ | board info | | | board info | | |errinfo,bptr| | | board info |
+ +------------+ | +------------+ | +------------+ | +------------+
+ | offset |--+ | offset |--+ | offset |--+ |offset=NULL |
+ +------------+ +------------+ +------------+ +------------+
+
+
+ +------------+
+ | board info |
+ +------------+ +--------------------------------+
+ | compt 1 |------>| type, rev, diaginfo, size ... | (CPU)
+ +------------+ +--------------------------------+
+ | compt 2 |--+
+ +------------+ | +--------------------------------+
+ | ... | +--->| type, rev, diaginfo, size ... | (MEM_BANK)
+ +------------+ +--------------------------------+
+ | errinfo |--+
+ +------------+ | +--------------------------------+
+ +--->|r/l brd errinfo,compt err flags |
+ +--------------------------------+
+
+ *
+ * Each BOARD consists of COMPONENTs and the BOARD structure has
+ * pointers (offsets) to its COMPONENT structure.
+ * The COMPONENT structure has version info, size and speed info, revision,
+ * error info and the NIC info. This structure can accommodate any
+ * BOARD with arbitrary COMPONENT composition.
+ *
+ * The ERRORINFO part of each BOARD has error information
+ * that describes errors about the BOARD itself. It also has flags to
+ * indicate the COMPONENT(s) on the board that have errors. The error
+ * information specific to the COMPONENT is present in the respective
+ * COMPONENT structure.
+ *
+ * The ERRORINFO structure is also treated like a COMPONENT, ie. the
+ * BOARD has pointers(offset) to the ERRORINFO structure. The rboard
+ * structure also has a pointer to the ERRORINFO structure. This is
+ * the place to store ERRORINFO about a REMOTE NODE, if the HUB on
+ * that NODE is not working or if the REMOTE MEMORY is BAD. In cases where
+ * only the CPU of the REMOTE NODE is disabled, the ERRORINFO pointer can
+ * be a NODE NUMBER, REMOTE OFFSET combination, pointing to error info
+ * which is present on the REMOTE NODE.(TBD)
+ * REMOTE ERRINFO can be stored on any of the nearest nodes
+ * or on all the nearest nodes.(TBD)
+ * Like BOARD structures, REMOTE ERRINFO structures can be built locally
+ * using the rboard errinfo pointer.
+ *
+ * In order to get useful information from this Data organization, a set of
+ * interface routines are provided (TBD). The important thing to remember while
+ * manipulating the structures, is that, the NODE number information should
+ * be used. If the NODE is non-zero (remote) then each offset should
+ * be added to the REMOTE BASE ADDR else it should be added to the LOCAL BASE ADDR.
+ * This includes offsets for BOARDS, COMPONENTS and ERRORINFO.
+ *
+ * Note that these structures do not provide much info about connectivity.
+ * That info will be part of HWGRAPH, which is an extension of the cfg_t
+ * data structure. (ref IP27prom/cfg.h) It has to be extended to include
+ * the IO part of the Network(TBD).
+ *
+ * The data structures below define the above concepts.
+ */
+
+
+/*
+ * BOARD classes
+ */
+
+#define KLCLASS_MASK 0xf0
+#define KLCLASS_NONE 0x00
+#define KLCLASS_NODE 0x10 /* CPU, Memory and HUB board */
+#define KLCLASS_CPU KLCLASS_NODE
+#define KLCLASS_IO 0x20 /* BaseIO, 4 ch SCSI, ethernet, FDDI
+ and the non-graphics widget boards */
+#define KLCLASS_ROUTER 0x30 /* Router board */
+#define KLCLASS_MIDPLANE 0x40 /* We need to treat this as a board
+ so that we can record error info */
+#define KLCLASS_IOBRICK 0x70 /* IP35 iobrick */
+#define KLCLASS_MAX 8 /* Bump this if a new CLASS is added */
+
+#define KLCLASS(_x) ((_x) & KLCLASS_MASK)
+
+
+/*
+ * board types
+ */
+
+#define KLTYPE_MASK 0x0f
+#define KLTYPE(_x) ((_x) & KLTYPE_MASK)
+
+#define KLTYPE_SNIA (KLCLASS_CPU | 0x1)
+#define KLTYPE_TIO (KLCLASS_CPU | 0x2)
+
+#define KLTYPE_ROUTER (KLCLASS_ROUTER | 0x1)
+#define KLTYPE_META_ROUTER (KLCLASS_ROUTER | 0x3)
+#define KLTYPE_REPEATER_ROUTER (KLCLASS_ROUTER | 0x4)
+
+#define KLTYPE_IOBRICK_XBOW (KLCLASS_MIDPLANE | 0x2)
+
+#define KLTYPE_IOBRICK (KLCLASS_IOBRICK | 0x0)
+#define KLTYPE_NBRICK (KLCLASS_IOBRICK | 0x4)
+#define KLTYPE_PXBRICK (KLCLASS_IOBRICK | 0x6)
+#define KLTYPE_IXBRICK (KLCLASS_IOBRICK | 0x7)
+#define KLTYPE_CGBRICK (KLCLASS_IOBRICK | 0x8)
+#define KLTYPE_OPUSBRICK (KLCLASS_IOBRICK | 0x9)
+#define KLTYPE_SABRICK (KLCLASS_IOBRICK | 0xa)
+#define KLTYPE_IABRICK (KLCLASS_IOBRICK | 0xb)
+#define KLTYPE_PABRICK (KLCLASS_IOBRICK | 0xc)
+#define KLTYPE_GABRICK (KLCLASS_IOBRICK | 0xd)
+
+
+/*
+ * board structures
+ */
+
+#define MAX_COMPTS_PER_BRD 24
+
+typedef struct lboard_s {
+ klconf_off_t brd_next_any; /* Next BOARD */
+ unsigned char struct_type; /* type of structure, local or remote */
+ unsigned char brd_type; /* type+class */
+ unsigned char brd_sversion; /* version of this structure */
+ unsigned char brd_brevision; /* board revision */
+ unsigned char brd_promver; /* board prom version, if any */
+ unsigned char brd_flags; /* Enabled, Disabled etc */
+ unsigned char brd_slot; /* slot number */
+ unsigned short brd_debugsw; /* Debug switches */
+ geoid_t brd_geoid; /* geo id */
+ partid_t brd_partition; /* Partition number */
+ unsigned short brd_diagval; /* diagnostic value */
+ unsigned short brd_diagparm; /* diagnostic parameter */
+ unsigned char brd_inventory; /* inventory history */
+ unsigned char brd_numcompts; /* Number of components */
+ nic_t brd_nic; /* Number in CAN */
+ nasid_t brd_nasid; /* passed parameter */
+ klconf_off_t brd_compts[MAX_COMPTS_PER_BRD]; /* pointers to COMPONENTS */
+ klconf_off_t brd_errinfo; /* Board's error information */
+ struct lboard_s *brd_parent; /* Logical parent for this brd */
+ char pad0[4];
+ unsigned char brd_confidence; /* confidence that the board is bad */
+ nasid_t brd_owner; /* who owns this board */
+ unsigned char brd_nic_flags; /* To handle 8 more NICs */
+ char pad1[24]; /* future expansion */
+ char brd_name[32];
+ nasid_t brd_next_same_host; /* host of next brd w/same nasid */
+ klconf_off_t brd_next_same; /* Next BOARD with same nasid */
+} lboard_t;
+
+/*
+ * Generic info structure. This stores common info about a
+ * component.
+ */
+
+typedef struct klinfo_s { /* Generic info */
+ unsigned char struct_type; /* type of this structure */
+ unsigned char struct_version; /* version of this structure */
+ unsigned char flags; /* Enabled, disabled etc */
+ unsigned char revision; /* component revision */
+ unsigned short diagval; /* result of diagnostics */
+ unsigned short diagparm; /* diagnostic parameter */
+ unsigned char inventory; /* previous inventory status */
+ unsigned short partid; /* widget part number */
+ nic_t nic; /* MUst be aligned properly */
+ unsigned char physid; /* physical id of component */
+ unsigned int virtid; /* virtual id as seen by system */
+ unsigned char widid; /* Widget id - if applicable */
+ nasid_t nasid; /* node number - from parent */
+ char pad1; /* pad out structure. */
+ char pad2; /* pad out structure. */
+ void *data;
+ klconf_off_t errinfo; /* component specific errors */
+ unsigned short pad3; /* pci fields have moved over to */
+ unsigned short pad4; /* klbri_t */
+} klinfo_t ;
+
+
+static inline lboard_t *find_lboard_next(lboard_t * brd)
+{
+ if (brd && brd->brd_next_any)
+ return NODE_OFFSET_TO_LBOARD(NASID_GET(brd), brd->brd_next_any);
+ return NULL;
+}
+
+#endif /* _ASM_IA64_SN_KLCONFIG_H */
diff --git a/xen/include/asm-ia64/linux/asm/sn/l1.h b/xen/include/asm-ia64/linux/asm/sn/l1.h
new file mode 100644
index 0000000000..344bf44bb3
--- /dev/null
+++ b/xen/include/asm-ia64/linux/asm/sn/l1.h
@@ -0,0 +1,51 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992-1997,2000-2004 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+#ifndef _ASM_IA64_SN_L1_H
+#define _ASM_IA64_SN_L1_H
+
+/* brick type response codes */
+#define L1_BRICKTYPE_PX 0x23 /* # */
+#define L1_BRICKTYPE_PE 0x25 /* % */
+#define L1_BRICKTYPE_N_p0 0x26 /* & */
+#define L1_BRICKTYPE_IP45 0x34 /* 4 */
+#define L1_BRICKTYPE_IP41 0x35 /* 5 */
+#define L1_BRICKTYPE_TWISTER 0x36 /* 6 */ /* IP53 & ROUTER */
+#define L1_BRICKTYPE_IX 0x3d /* = */
+#define L1_BRICKTYPE_IP34 0x61 /* a */
+#define L1_BRICKTYPE_GA 0x62 /* b */
+#define L1_BRICKTYPE_C 0x63 /* c */
+#define L1_BRICKTYPE_OPUS_TIO 0x66 /* f */
+#define L1_BRICKTYPE_I 0x69 /* i */
+#define L1_BRICKTYPE_N 0x6e /* n */
+#define L1_BRICKTYPE_OPUS 0x6f /* o */
+#define L1_BRICKTYPE_P 0x70 /* p */
+#define L1_BRICKTYPE_R 0x72 /* r */
+#define L1_BRICKTYPE_CHI_CG 0x76 /* v */
+#define L1_BRICKTYPE_X 0x78 /* x */
+#define L1_BRICKTYPE_X2 0x79 /* y */
+#define L1_BRICKTYPE_SA 0x5e /* ^ */
+#define L1_BRICKTYPE_PA 0x6a /* j */
+#define L1_BRICKTYPE_IA 0x6b /* k */
+#define L1_BRICKTYPE_ATHENA 0x2b /* + */
+#define L1_BRICKTYPE_DAYTONA 0x7a /* z */
+#define L1_BRICKTYPE_1932 0x2c /* . */
+#define L1_BRICKTYPE_191010 0x2e /* , */
+
+/* board type response codes */
+#define L1_BOARDTYPE_IP69 0x0100 /* CA */
+#define L1_BOARDTYPE_IP63 0x0200 /* CB */
+#define L1_BOARDTYPE_BASEIO 0x0300 /* IB */
+#define L1_BOARDTYPE_PCIE2SLOT 0x0400 /* IC */
+#define L1_BOARDTYPE_PCIX3SLOT 0x0500 /* ID */
+#define L1_BOARDTYPE_PCIXPCIE4SLOT 0x0600 /* IE */
+#define L1_BOARDTYPE_ABACUS 0x0700 /* AB */
+#define L1_BOARDTYPE_DAYTONA 0x0800 /* AD */
+#define L1_BOARDTYPE_INVAL (-1) /* invalid brick type */
+
+#endif /* _ASM_IA64_SN_L1_H */
diff --git a/xen/include/asm-ia64/linux/asm/sn/leds.h b/xen/include/asm-ia64/linux/asm/sn/leds.h
new file mode 100644
index 0000000000..66cf8c4d92
--- /dev/null
+++ b/xen/include/asm-ia64/linux/asm/sn/leds.h
@@ -0,0 +1,33 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_LEDS_H
+#define _ASM_IA64_SN_LEDS_H
+
+#include <asm/sn/addrs.h>
+#include <asm/sn/pda.h>
+#include <asm/sn/shub_mmr.h>
+
+#define LED0 (LOCAL_MMR_ADDR(SH_REAL_JUNK_BUS_LED0))
+#define LED_CPU_SHIFT 16
+
+#define LED_CPU_HEARTBEAT 0x01
+#define LED_CPU_ACTIVITY 0x02
+#define LED_ALWAYS_SET 0x00
+
+/*
+ * Basic macros for flashing the LEDS on an SGI SN.
+ */
+
+static __inline__ void
+set_led_bits(u8 value, u8 mask)
+{
+ pda->led_state = (pda->led_state & ~mask) | (value & mask);
+ *pda->led_address = (short) pda->led_state;
+}
+
+#endif /* _ASM_IA64_SN_LEDS_H */
+
diff --git a/xen/include/asm-ia64/linux/asm/sn/module.h b/xen/include/asm-ia64/linux/asm/sn/module.h
new file mode 100644
index 0000000000..734e980ece
--- /dev/null
+++ b/xen/include/asm-ia64/linux/asm/sn/module.h
@@ -0,0 +1,127 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_MODULE_H
+#define _ASM_IA64_SN_MODULE_H
+
+/* parameter for format_module_id() */
+#define MODULE_FORMAT_BRIEF 1
+#define MODULE_FORMAT_LONG 2
+#define MODULE_FORMAT_LCD 3
+
+/*
+ * Module id format
+ *
+ * 31-16 Rack ID (encoded class, group, number - 16-bit unsigned int)
+ * 15-8 Brick type (8-bit ascii character)
+ * 7-0 Bay (brick position in rack (0-63) - 8-bit unsigned int)
+ *
+ */
+
+/*
+ * Macros for getting the brick type
+ */
+#define MODULE_BTYPE_MASK 0xff00
+#define MODULE_BTYPE_SHFT 8
+#define MODULE_GET_BTYPE(_m) (((_m) & MODULE_BTYPE_MASK) >> MODULE_BTYPE_SHFT)
+#define MODULE_BT_TO_CHAR(_b) ((char)(_b))
+#define MODULE_GET_BTCHAR(_m) (MODULE_BT_TO_CHAR(MODULE_GET_BTYPE(_m)))
+
+/*
+ * Macros for getting the rack ID.
+ */
+#define MODULE_RACK_MASK 0xffff0000
+#define MODULE_RACK_SHFT 16
+#define MODULE_GET_RACK(_m) (((_m) & MODULE_RACK_MASK) >> MODULE_RACK_SHFT)
+
+/*
+ * Macros for getting the brick position
+ */
+#define MODULE_BPOS_MASK 0x00ff
+#define MODULE_BPOS_SHFT 0
+#define MODULE_GET_BPOS(_m) (((_m) & MODULE_BPOS_MASK) >> MODULE_BPOS_SHFT)
+
+/*
+ * Macros for encoding and decoding rack IDs
+ * A rack number consists of three parts:
+ * class (0==CPU/mixed, 1==I/O), group, number
+ *
+ * Rack number is stored just as it is displayed on the screen:
+ * a 3-decimal-digit number.
+ */
+#define RACK_CLASS_DVDR 100
+#define RACK_GROUP_DVDR 10
+#define RACK_NUM_DVDR 1
+
+#define RACK_CREATE_RACKID(_c, _g, _n) ((_c) * RACK_CLASS_DVDR + \
+ (_g) * RACK_GROUP_DVDR + (_n) * RACK_NUM_DVDR)
+
+#define RACK_GET_CLASS(_r) ((_r) / RACK_CLASS_DVDR)
+#define RACK_GET_GROUP(_r) (((_r) - RACK_GET_CLASS(_r) * \
+ RACK_CLASS_DVDR) / RACK_GROUP_DVDR)
+#define RACK_GET_NUM(_r) (((_r) - RACK_GET_CLASS(_r) * \
+ RACK_CLASS_DVDR - RACK_GET_GROUP(_r) * \
+ RACK_GROUP_DVDR) / RACK_NUM_DVDR)
+
+/*
+ * Macros for encoding and decoding rack IDs
+ * A rack number consists of three parts:
+ * class 1 bit, 0==CPU/mixed, 1==I/O
+ * group 2 bits for CPU/mixed, 3 bits for I/O
+ * number 3 bits for CPU/mixed, 2 bits for I/O (1 based)
+ */
+#define RACK_GROUP_BITS(_r) (RACK_GET_CLASS(_r) ? 3 : 2)
+#define RACK_NUM_BITS(_r) (RACK_GET_CLASS(_r) ? 2 : 3)
+
+#define RACK_CLASS_MASK(_r) 0x20
+#define RACK_CLASS_SHFT(_r) 5
+#define RACK_ADD_CLASS(_r, _c) \
+ ((_r) |= (_c) << RACK_CLASS_SHFT(_r) & RACK_CLASS_MASK(_r))
+
+#define RACK_GROUP_SHFT(_r) RACK_NUM_BITS(_r)
+#define RACK_GROUP_MASK(_r) \
+ ( (((unsigned)1<<RACK_GROUP_BITS(_r)) - 1) << RACK_GROUP_SHFT(_r) )
+#define RACK_ADD_GROUP(_r, _g) \
+ ((_r) |= (_g) << RACK_GROUP_SHFT(_r) & RACK_GROUP_MASK(_r))
+
+#define RACK_NUM_SHFT(_r) 0
+#define RACK_NUM_MASK(_r) \
+ ( (((unsigned)1<<RACK_NUM_BITS(_r)) - 1) << RACK_NUM_SHFT(_r) )
+#define RACK_ADD_NUM(_r, _n) \
+ ((_r) |= ((_n) - 1) << RACK_NUM_SHFT(_r) & RACK_NUM_MASK(_r))
+
+
+/*
+ * Brick type definitions
+ */
+#define MAX_BRICK_TYPES 256 /* brick type is stored as uchar */
+
+extern char brick_types[];
+
+#define MODULE_CBRICK 0
+#define MODULE_RBRICK 1
+#define MODULE_IBRICK 2
+#define MODULE_KBRICK 3
+#define MODULE_XBRICK 4
+#define MODULE_DBRICK 5
+#define MODULE_PBRICK 6
+#define MODULE_NBRICK 7
+#define MODULE_PEBRICK 8
+#define MODULE_PXBRICK 9
+#define MODULE_IXBRICK 10
+#define MODULE_CGBRICK 11
+#define MODULE_OPUSBRICK 12
+#define MODULE_SABRICK 13 /* TIO BringUp Brick */
+#define MODULE_IABRICK 14
+#define MODULE_PABRICK 15
+#define MODULE_GABRICK 16
+#define MODULE_OPUS_TIO 17 /* OPUS TIO Riser */
+
+extern char brick_types[];
+extern void format_module_id(char *, moduleid_t, int);
+
+#endif /* _ASM_IA64_SN_MODULE_H */
diff --git a/xen/include/asm-ia64/linux/asm/sn/pcibus_provider_defs.h b/xen/include/asm-ia64/linux/asm/sn/pcibus_provider_defs.h
new file mode 100644
index 0000000000..8f7c83d0f6
--- /dev/null
+++ b/xen/include/asm-ia64/linux/asm/sn/pcibus_provider_defs.h
@@ -0,0 +1,68 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H
+#define _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H
+
+/*
+ * SN pci asic types. Do not ever renumber these or reuse values. The
+ * values must agree with what prom thinks they are.
+ */
+
+#define PCIIO_ASIC_TYPE_UNKNOWN 0
+#define PCIIO_ASIC_TYPE_PPB 1
+#define PCIIO_ASIC_TYPE_PIC 2
+#define PCIIO_ASIC_TYPE_TIOCP 3
+#define PCIIO_ASIC_TYPE_TIOCA 4
+#define PCIIO_ASIC_TYPE_TIOCE 5
+
+#define PCIIO_ASIC_MAX_TYPES 6
+
+/*
+ * Common pciio bus provider data. There should be one of these as the
+ * first field in any pciio based provider soft structure (e.g. pcibr_soft
+ * tioca_soft, etc).
+ */
+
+struct pcibus_bussoft {
+ u32 bs_asic_type; /* chipset type */
+ u32 bs_xid; /* xwidget id */
+ u32 bs_persist_busnum; /* Persistent Bus Number */
+ u32 bs_persist_segment; /* Segment Number */
+ u64 bs_legacy_io; /* legacy io pio addr */
+ u64 bs_legacy_mem; /* legacy mem pio addr */
+ u64 bs_base; /* widget base */
+ struct xwidget_info *bs_xwidget_info;
+};
+
+struct pci_controller;
+/*
+ * SN pci bus indirection
+ */
+
+struct sn_pcibus_provider {
+ dma_addr_t (*dma_map)(struct pci_dev *, unsigned long, size_t, int flags);
+ dma_addr_t (*dma_map_consistent)(struct pci_dev *, unsigned long, size_t, int flags);
+ void (*dma_unmap)(struct pci_dev *, dma_addr_t, int);
+ void * (*bus_fixup)(struct pcibus_bussoft *, struct pci_controller *);
+ void (*force_interrupt)(struct sn_irq_info *);
+ void (*target_interrupt)(struct sn_irq_info *);
+};
+
+/*
+ * Flags used by the map interfaces
+ * bits 3:0 specifies format of passed in address
+ * bit 4 specifies that address is to be used for MSI
+ */
+
+#define SN_DMA_ADDRTYPE(x) ((x) & 0xf)
+#define SN_DMA_ADDR_PHYS 1 /* address is phys memory */
+#define SN_DMA_ADDR_XIO 2 /* address is an xio address. */
+#define SN_DMA_MSI 0x10 /* Bus address is to be used for MSI */
+
+extern struct sn_pcibus_provider *sn_pci_provider[];
+#endif /* _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H */
diff --git a/xen/include/asm-ia64/linux/asm/sn/pcidev.h b/xen/include/asm-ia64/linux/asm/sn/pcidev.h
new file mode 100644
index 0000000000..eac3561574
--- /dev/null
+++ b/xen/include/asm-ia64/linux/asm/sn/pcidev.h
@@ -0,0 +1,83 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_PCI_PCIDEV_H
+#define _ASM_IA64_SN_PCI_PCIDEV_H
+
+#include <linux/pci.h>
+
+/*
+ * In ia64, pci_dev->sysdata must be a *pci_controller. To provide access to
+ * the pcidev_info structs for all devices under a controller, we extend the
+ * definition of pci_controller, via sn_pci_controller, to include a list
+ * of pcidev_info.
+ */
+struct sn_pci_controller {
+ struct pci_controller pci_controller;
+ struct list_head pcidev_info;
+};
+
+#define SN_PCI_CONTROLLER(dev) ((struct sn_pci_controller *) dev->sysdata)
+
+#define SN_PCIDEV_INFO(dev) sn_pcidev_info_get(dev)
+
+#define SN_PCIBUS_BUSSOFT_INFO(pci_bus) \
+ (struct pcibus_info *)((struct pcibus_bussoft *)(PCI_CONTROLLER((pci_bus))->platform_data))
+/*
+ * Given a pci_bus, return the sn pcibus_bussoft struct. Note that
+ * this only works for root busses, not for busses represented by PPB's.
+ */
+
+#define SN_PCIBUS_BUSSOFT(pci_bus) \
+ ((struct pcibus_bussoft *)(PCI_CONTROLLER((pci_bus))->platform_data))
+
+#define SN_PCIBUS_BUSSOFT_INFO(pci_bus) \
+ (struct pcibus_info *)((struct pcibus_bussoft *)(PCI_CONTROLLER((pci_bus))->platform_data))
+/*
+ * Given a struct pci_dev, return the sn pcibus_bussoft struct. Note
+ * that this is not equivalent to SN_PCIBUS_BUSSOFT(pci_dev->bus) due
+ * to possible PPB's in the path.
+ */
+
+#define SN_PCIDEV_BUSSOFT(pci_dev) \
+ (SN_PCIDEV_INFO(pci_dev)->pdi_host_pcidev_info->pdi_pcibus_info)
+
+#define SN_PCIDEV_BUSPROVIDER(pci_dev) \
+ (SN_PCIDEV_INFO(pci_dev)->pdi_provider)
+
+#define PCIIO_BUS_NONE 255 /* bus 255 reserved */
+#define PCIIO_SLOT_NONE 255
+#define PCIIO_FUNC_NONE 255
+#define PCIIO_VENDOR_ID_NONE (-1)
+
+struct pcidev_info {
+ u64 pdi_pio_mapped_addr[7]; /* 6 BARs PLUS 1 ROM */
+ u64 pdi_slot_host_handle; /* Bus and devfn Host pci_dev */
+
+ struct pcibus_bussoft *pdi_pcibus_info; /* Kernel common bus soft */
+ struct pcidev_info *pdi_host_pcidev_info; /* Kernel Host pci_dev */
+ struct pci_dev *pdi_linux_pcidev; /* Kernel pci_dev */
+
+ struct sn_irq_info *pdi_sn_irq_info;
+ struct sn_pcibus_provider *pdi_provider; /* sn pci ops */
+ struct pci_dev *host_pci_dev; /* host bus link */
+ struct list_head pdi_list; /* List of pcidev_info */
+};
+
+extern void sn_irq_fixup(struct pci_dev *pci_dev,
+ struct sn_irq_info *sn_irq_info);
+extern void sn_irq_unfixup(struct pci_dev *pci_dev);
+extern struct pcidev_info * sn_pcidev_info_get(struct pci_dev *);
+extern void sn_pci_controller_fixup(int segment, int busnum,
+ struct pci_bus *bus);
+extern void sn_bus_store_sysdata(struct pci_dev *dev);
+extern void sn_bus_free_sysdata(void);
+extern void sn_generate_path(struct pci_bus *pci_bus, char *address);
+extern void sn_pci_fixup_slot(struct pci_dev *dev);
+extern void sn_pci_unfixup_slot(struct pci_dev *dev);
+extern void sn_irq_lh_init(void);
+#endif /* _ASM_IA64_SN_PCI_PCIDEV_H */
diff --git a/xen/include/asm-ia64/linux/asm/sn/pda.h b/xen/include/asm-ia64/linux/asm/sn/pda.h
new file mode 100644
index 0000000000..1c5108d44d
--- /dev/null
+++ b/xen/include/asm-ia64/linux/asm/sn/pda.h
@@ -0,0 +1,69 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_PDA_H
+#define _ASM_IA64_SN_PDA_H
+
+#include <linux/cache.h>
+#include <asm/percpu.h>
+#include <asm/system.h>
+
+
+/*
+ * CPU-specific data structure.
+ *
+ * One of these structures is allocated for each cpu of a NUMA system.
+ *
+ * This structure provides a convenient way of keeping together
+ * all SN per-cpu data structures.
+ */
+
+typedef struct pda_s {
+
+ /*
+ * Support for SN LEDs
+ */
+ volatile short *led_address;
+ u8 led_state;
+ u8 hb_state; /* supports blinking heartbeat leds */
+ unsigned int hb_count;
+
+ unsigned int idle_flag;
+
+ volatile unsigned long *bedrock_rev_id;
+ volatile unsigned long *pio_write_status_addr;
+ unsigned long pio_write_status_val;
+ volatile unsigned long *pio_shub_war_cam_addr;
+
+ unsigned long sn_in_service_ivecs[4];
+ int sn_lb_int_war_ticks;
+ int sn_last_irq;
+ int sn_first_irq;
+} pda_t;
+
+
+#define CACHE_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
+
+/*
+ * PDA
+ * Per-cpu private data area for each cpu. The PDA is located immediately after
+ * the IA64 cpu_data area. A full page is allocated for the cpu_data area for each
+ * cpu but only a small amount of the page is actually used. We put the SNIA PDA
+ * in the same page as the cpu_data area. Note that there is a check in the setup
+ * code to verify that we don't overflow the page.
+ *
+ * Seems like we should cache-line align the pda so that any changes in the
+ * size of the cpu_data area don't change cache layout. Should we align to 32, 64, 128
+ * or 512 boundary. Each has merits. For now, pick 128 but should be revisited later.
+ */
+DECLARE_PER_CPU(struct pda_s, pda_percpu);
+
+#define pda (&__ia64_per_cpu_var(pda_percpu))
+
+#define pdacpu(cpu) (&per_cpu(pda_percpu, cpu))
+
+#endif /* _ASM_IA64_SN_PDA_H */
diff --git a/xen/include/asm-ia64/linux/asm/sn/pic.h b/xen/include/asm-ia64/linux/asm/sn/pic.h
new file mode 100644
index 0000000000..5f9da5fd6e
--- /dev/null
+++ b/xen/include/asm-ia64/linux/asm/sn/pic.h
@@ -0,0 +1,261 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_PCI_PIC_H
+#define _ASM_IA64_SN_PCI_PIC_H
+
+/*
+ * PIC AS DEVICE ZERO
+ * ------------------
+ *
+ * PIC handles PCI/X busses. PCI/X requires that the 'bridge' (i.e. PIC)
+ * be designated as 'device 0'. That is a departure from earlier SGI
+ * PCI bridges. Because of that we use config space 1 to access the
+ * config space of the first actual PCI device on the bus.
+ * Here's what the PIC manual says:
+ *
+ * The current PCI-X bus specification now defines that the parent
+ * hosts bus bridge (PIC for example) must be device 0 on bus 0. PIC
+ * reduced the total number of devices from 8 to 4 and removed the
+ * device registers and windows, now only supporting devices 0,1,2, and
+ * 3. PIC did leave all 8 configuration space windows. The reason was
+ * there was nothing to gain by removing them. Herein lies the problem.
+ * The device numbering we do using 0 through 3 is unrelated to the device
+ * numbering which PCI-X requires in configuration space. In the past we
+ * correlated config space and our device space 0 <-> 0, 1 <-> 1, etc.
+ * PCI-X requires we start at 1, not 0 and currently the PX brick
+ * does associate our:
+ *
+ * device 0 with configuration space window 1,
+ * device 1 with configuration space window 2,
+ * device 2 with configuration space window 3,
+ * device 3 with configuration space window 4.
+ *
+ * The net effect is that all config space access are off-by-one with
+ * relation to other per-slot accesses on the PIC.
+ * Here is a table that shows some of that:
+ *
+ * Internal Slot#
+ * |
+ * | 0 1 2 3
+ * ----------|---------------------------------------
+ * config | 0x21000 0x22000 0x23000 0x24000
+ * |
+ * even rrb | 0[0] n/a 1[0] n/a [] == implied even/odd
+ * |
+ * odd rrb | n/a 0[1] n/a 1[1]
+ * |
+ * int dev | 00 01 10 11
+ * |
+ * ext slot# | 1 2 3 4
+ * ----------|---------------------------------------
+ */
+
+#define PIC_ATE_TARGETID_SHFT 8
+#define PIC_HOST_INTR_ADDR 0x0000FFFFFFFFFFFFUL
+#define PIC_PCI64_ATTR_TARG_SHFT 60
+
+
+/*****************************************************************************
+ *********************** PIC MMR structure mapping ***************************
+ *****************************************************************************/
+
+/* NOTE: PIC WAR. PV#854697. PIC does not allow writes just to [31:0]
+ * of a 64-bit register. When writing PIC registers, always write the
+ * entire 64 bits.
+ */
+
+struct pic {
+
+ /* 0x000000-0x00FFFF -- Local Registers */
+
+ /* 0x000000-0x000057 -- Standard Widget Configuration */
+ u64 p_wid_id; /* 0x000000 */
+ u64 p_wid_stat; /* 0x000008 */
+ u64 p_wid_err_upper; /* 0x000010 */
+ u64 p_wid_err_lower; /* 0x000018 */
+ #define p_wid_err p_wid_err_lower
+ u64 p_wid_control; /* 0x000020 */
+ u64 p_wid_req_timeout; /* 0x000028 */
+ u64 p_wid_int_upper; /* 0x000030 */
+ u64 p_wid_int_lower; /* 0x000038 */
+ #define p_wid_int p_wid_int_lower
+ u64 p_wid_err_cmdword; /* 0x000040 */
+ u64 p_wid_llp; /* 0x000048 */
+ u64 p_wid_tflush; /* 0x000050 */
+
+ /* 0x000058-0x00007F -- Bridge-specific Widget Configuration */
+ u64 p_wid_aux_err; /* 0x000058 */
+ u64 p_wid_resp_upper; /* 0x000060 */
+ u64 p_wid_resp_lower; /* 0x000068 */
+ #define p_wid_resp p_wid_resp_lower
+ u64 p_wid_tst_pin_ctrl; /* 0x000070 */
+ u64 p_wid_addr_lkerr; /* 0x000078 */
+
+ /* 0x000080-0x00008F -- PMU & MAP */
+ u64 p_dir_map; /* 0x000080 */
+ u64 _pad_000088; /* 0x000088 */
+
+ /* 0x000090-0x00009F -- SSRAM */
+ u64 p_map_fault; /* 0x000090 */
+ u64 _pad_000098; /* 0x000098 */
+
+ /* 0x0000A0-0x0000AF -- Arbitration */
+ u64 p_arb; /* 0x0000A0 */
+ u64 _pad_0000A8; /* 0x0000A8 */
+
+ /* 0x0000B0-0x0000BF -- Number In A Can or ATE Parity Error */
+ u64 p_ate_parity_err; /* 0x0000B0 */
+ u64 _pad_0000B8; /* 0x0000B8 */
+
+ /* 0x0000C0-0x0000FF -- PCI/GIO */
+ u64 p_bus_timeout; /* 0x0000C0 */
+ u64 p_pci_cfg; /* 0x0000C8 */
+ u64 p_pci_err_upper; /* 0x0000D0 */
+ u64 p_pci_err_lower; /* 0x0000D8 */
+ #define p_pci_err p_pci_err_lower
+ u64 _pad_0000E0[4]; /* 0x0000{E0..F8} */
+
+ /* 0x000100-0x0001FF -- Interrupt */
+ u64 p_int_status; /* 0x000100 */
+ u64 p_int_enable; /* 0x000108 */
+ u64 p_int_rst_stat; /* 0x000110 */
+ u64 p_int_mode; /* 0x000118 */
+ u64 p_int_device; /* 0x000120 */
+ u64 p_int_host_err; /* 0x000128 */
+ u64 p_int_addr[8]; /* 0x0001{30,,,68} */
+ u64 p_err_int_view; /* 0x000170 */
+ u64 p_mult_int; /* 0x000178 */
+ u64 p_force_always[8]; /* 0x0001{80,,,B8} */
+ u64 p_force_pin[8]; /* 0x0001{C0,,,F8} */
+
+ /* 0x000200-0x000298 -- Device */
+ u64 p_device[4]; /* 0x0002{00,,,18} */
+ u64 _pad_000220[4]; /* 0x0002{20,,,38} */
+ u64 p_wr_req_buf[4]; /* 0x0002{40,,,58} */
+ u64 _pad_000260[4]; /* 0x0002{60,,,78} */
+ u64 p_rrb_map[2]; /* 0x0002{80,,,88} */
+ #define p_even_resp p_rrb_map[0] /* 0x000280 */
+ #define p_odd_resp p_rrb_map[1] /* 0x000288 */
+ u64 p_resp_status; /* 0x000290 */
+ u64 p_resp_clear; /* 0x000298 */
+
+ u64 _pad_0002A0[12]; /* 0x0002{A0..F8} */
+
+ /* 0x000300-0x0003F8 -- Buffer Address Match Registers */
+ struct {
+ u64 upper; /* 0x0003{00,,,F0} */
+ u64 lower; /* 0x0003{08,,,F8} */
+ } p_buf_addr_match[16];
+
+ /* 0x000400-0x0005FF -- Performance Monitor Registers (even only) */
+ struct {
+ u64 flush_w_touch; /* 0x000{400,,,5C0} */
+ u64 flush_wo_touch; /* 0x000{408,,,5C8} */
+ u64 inflight; /* 0x000{410,,,5D0} */
+ u64 prefetch; /* 0x000{418,,,5D8} */
+ u64 total_pci_retry; /* 0x000{420,,,5E0} */
+ u64 max_pci_retry; /* 0x000{428,,,5E8} */
+ u64 max_latency; /* 0x000{430,,,5F0} */
+ u64 clear_all; /* 0x000{438,,,5F8} */
+ } p_buf_count[8];
+
+
+ /* 0x000600-0x0009FF -- PCI/X registers */
+ u64 p_pcix_bus_err_addr; /* 0x000600 */
+ u64 p_pcix_bus_err_attr; /* 0x000608 */
+ u64 p_pcix_bus_err_data; /* 0x000610 */
+ u64 p_pcix_pio_split_addr; /* 0x000618 */
+ u64 p_pcix_pio_split_attr; /* 0x000620 */
+ u64 p_pcix_dma_req_err_attr; /* 0x000628 */
+ u64 p_pcix_dma_req_err_addr; /* 0x000630 */
+ u64 p_pcix_timeout; /* 0x000638 */
+
+ u64 _pad_000640[120]; /* 0x000{640,,,9F8} */
+
+ /* 0x000A00-0x000BFF -- PCI/X Read&Write Buffer */
+ struct {
+ u64 p_buf_addr; /* 0x000{A00,,,AF0} */
+ u64 p_buf_attr; /* 0X000{A08,,,AF8} */
+ } p_pcix_read_buf_64[16];
+
+ struct {
+ u64 p_buf_addr; /* 0x000{B00,,,BE0} */
+ u64 p_buf_attr; /* 0x000{B08,,,BE8} */
+ u64 p_buf_valid; /* 0x000{B10,,,BF0} */
+ u64 __pad1; /* 0x000{B18,,,BF8} */
+ } p_pcix_write_buf_64[8];
+
+ /* End of Local Registers -- Start of Address Map space */
+
+ char _pad_000c00[0x010000 - 0x000c00];
+
+ /* 0x010000-0x011fff -- Internal ATE RAM (Auto Parity Generation) */
+ u64 p_int_ate_ram[1024]; /* 0x010000-0x011fff */
+
+ /* 0x012000-0x013fff -- Internal ATE RAM (Manual Parity Generation) */
+ u64 p_int_ate_ram_mp[1024]; /* 0x012000-0x013fff */
+
+ char _pad_014000[0x18000 - 0x014000];
+
+ /* 0x18000-0x197F8 -- PIC Write Request Ram */
+ u64 p_wr_req_lower[256]; /* 0x18000 - 0x187F8 */
+ u64 p_wr_req_upper[256]; /* 0x18800 - 0x18FF8 */
+ u64 p_wr_req_parity[256]; /* 0x19000 - 0x197F8 */
+
+ char _pad_019800[0x20000 - 0x019800];
+
+ /* 0x020000-0x027FFF -- PCI Device Configuration Spaces */
+ union {
+ u8 c[0x1000 / 1]; /* 0x02{0000,,,7FFF} */
+ u16 s[0x1000 / 2]; /* 0x02{0000,,,7FFF} */
+ u32 l[0x1000 / 4]; /* 0x02{0000,,,7FFF} */
+ u64 d[0x1000 / 8]; /* 0x02{0000,,,7FFF} */
+ union {
+ u8 c[0x100 / 1];
+ u16 s[0x100 / 2];
+ u32 l[0x100 / 4];
+ u64 d[0x100 / 8];
+ } f[8];
+ } p_type0_cfg_dev[8]; /* 0x02{0000,,,7FFF} */
+
+ /* 0x028000-0x028FFF -- PCI Type 1 Configuration Space */
+ union {
+ u8 c[0x1000 / 1]; /* 0x028000-0x029000 */
+ u16 s[0x1000 / 2]; /* 0x028000-0x029000 */
+ u32 l[0x1000 / 4]; /* 0x028000-0x029000 */
+ u64 d[0x1000 / 8]; /* 0x028000-0x029000 */
+ union {
+ u8 c[0x100 / 1];
+ u16 s[0x100 / 2];
+ u32 l[0x100 / 4];
+ u64 d[0x100 / 8];
+ } f[8];
+ } p_type1_cfg; /* 0x028000-0x029000 */
+
+ char _pad_029000[0x030000-0x029000];
+
+ /* 0x030000-0x030007 -- PCI Interrupt Acknowledge Cycle */
+ union {
+ u8 c[8 / 1];
+ u16 s[8 / 2];
+ u32 l[8 / 4];
+ u64 d[8 / 8];
+ } p_pci_iack; /* 0x030000-0x030007 */
+
+ char _pad_030007[0x040000-0x030008];
+
+ /* 0x040000-0x040007 -- PCIX Special Cycle */
+ union {
+ u8 c[8 / 1];
+ u16 s[8 / 2];
+ u32 l[8 / 4];
+ u64 d[8 / 8];
+ } p_pcix_cycle; /* 0x040000-0x040007 */
+};
+
+#endif /* _ASM_IA64_SN_PCI_PIC_H */
diff --git a/xen/include/asm-ia64/linux/asm/sn/shub_mmr.h b/xen/include/asm-ia64/linux/asm/sn/shub_mmr.h
new file mode 100644
index 0000000000..7de1d1d4b7
--- /dev/null
+++ b/xen/include/asm-ia64/linux/asm/sn/shub_mmr.h
@@ -0,0 +1,502 @@
+/*
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2001-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_SHUB_MMR_H
+#define _ASM_IA64_SN_SHUB_MMR_H
+
+/* ==================================================================== */
+/* Register "SH_IPI_INT" */
+/* SHub Inter-Processor Interrupt Registers */
+/* ==================================================================== */
+#define SH1_IPI_INT __IA64_UL_CONST(0x0000000110000380)
+#define SH2_IPI_INT __IA64_UL_CONST(0x0000000010000380)
+
+/* SH_IPI_INT_TYPE */
+/* Description: Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT */
+#define SH_IPI_INT_TYPE_SHFT 0
+#define SH_IPI_INT_TYPE_MASK __IA64_UL_CONST(0x0000000000000007)
+
+/* SH_IPI_INT_AGT */
+/* Description: Agent, must be 0 for SHub */
+#define SH_IPI_INT_AGT_SHFT 3
+#define SH_IPI_INT_AGT_MASK __IA64_UL_CONST(0x0000000000000008)
+
+/* SH_IPI_INT_PID */
+/* Description: Processor ID, same setting as on targeted McKinley */
+#define SH_IPI_INT_PID_SHFT 4
+#define SH_IPI_INT_PID_MASK __IA64_UL_CONST(0x00000000000ffff0)
+
+/* SH_IPI_INT_BASE */
+/* Description: Optional interrupt vector area, 2MB aligned */
+#define SH_IPI_INT_BASE_SHFT 21
+#define SH_IPI_INT_BASE_MASK __IA64_UL_CONST(0x0003ffffffe00000)
+
+/* SH_IPI_INT_IDX */
+/* Description: Targeted McKinley interrupt vector */
+#define SH_IPI_INT_IDX_SHFT 52
+#define SH_IPI_INT_IDX_MASK __IA64_UL_CONST(0x0ff0000000000000)
+
+/* SH_IPI_INT_SEND */
+/* Description: Send Interrupt Message to PI, This generates a pulse */
+#define SH_IPI_INT_SEND_SHFT 63
+#define SH_IPI_INT_SEND_MASK __IA64_UL_CONST(0x8000000000000000)
+
+/* ==================================================================== */
+/* Register "SH_EVENT_OCCURRED" */
+/* SHub Interrupt Event Occurred */
+/* ==================================================================== */
+#define SH1_EVENT_OCCURRED __IA64_UL_CONST(0x0000000110010000)
+#define SH1_EVENT_OCCURRED_ALIAS __IA64_UL_CONST(0x0000000110010008)
+#define SH2_EVENT_OCCURRED __IA64_UL_CONST(0x0000000010010000)
+#define SH2_EVENT_OCCURRED_ALIAS __IA64_UL_CONST(0x0000000010010008)
+
+/* ==================================================================== */
+/* Register "SH_PI_CAM_CONTROL" */
+/* CRB CAM MMR Access Control */
+/* ==================================================================== */
+#define SH1_PI_CAM_CONTROL __IA64_UL_CONST(0x0000000120050300)
+
+/* ==================================================================== */
+/* Register "SH_SHUB_ID" */
+/* SHub ID Number */
+/* ==================================================================== */
+#define SH1_SHUB_ID __IA64_UL_CONST(0x0000000110060580)
+#define SH1_SHUB_ID_REVISION_SHFT 28
+#define SH1_SHUB_ID_REVISION_MASK __IA64_UL_CONST(0x00000000f0000000)
+
+/* ==================================================================== */
+/* Register "SH_RTC" */
+/* Real-time Clock */
+/* ==================================================================== */
+#define SH1_RTC __IA64_UL_CONST(0x00000001101c0000)
+#define SH2_RTC __IA64_UL_CONST(0x00000002101c0000)
+#define SH_RTC_MASK __IA64_UL_CONST(0x007fffffffffffff)
+
+/* ==================================================================== */
+/* Register "SH_PIO_WRITE_STATUS_0|1" */
+/* PIO Write Status for CPU 0 & 1 */
+/* ==================================================================== */
+#define SH1_PIO_WRITE_STATUS_0 __IA64_UL_CONST(0x0000000120070200)
+#define SH1_PIO_WRITE_STATUS_1 __IA64_UL_CONST(0x0000000120070280)
+#define SH2_PIO_WRITE_STATUS_0 __IA64_UL_CONST(0x0000000020070200)
+#define SH2_PIO_WRITE_STATUS_1 __IA64_UL_CONST(0x0000000020070280)
+#define SH2_PIO_WRITE_STATUS_2 __IA64_UL_CONST(0x0000000020070300)
+#define SH2_PIO_WRITE_STATUS_3 __IA64_UL_CONST(0x0000000020070380)
+
+/* SH_PIO_WRITE_STATUS_0_WRITE_DEADLOCK */
+/* Description: Deadlock response detected */
+#define SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_SHFT 1
+#define SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK \
+ __IA64_UL_CONST(0x0000000000000002)
+
+/* SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT */
+/* Description: Count of currently pending PIO writes */
+#define SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_SHFT 56
+#define SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK \
+ __IA64_UL_CONST(0x3f00000000000000)
+
+/* ==================================================================== */
+/* Register "SH_PIO_WRITE_STATUS_0_ALIAS" */
+/* ==================================================================== */
+#define SH1_PIO_WRITE_STATUS_0_ALIAS __IA64_UL_CONST(0x0000000120070208)
+#define SH2_PIO_WRITE_STATUS_0_ALIAS __IA64_UL_CONST(0x0000000020070208)
+
+/* ==================================================================== */
+/* Register "SH_EVENT_OCCURRED" */
+/* SHub Interrupt Event Occurred */
+/* ==================================================================== */
+/* SH_EVENT_OCCURRED_UART_INT */
+/* Description: Pending Junk Bus UART Interrupt */
+#define SH_EVENT_OCCURRED_UART_INT_SHFT 20
+#define SH_EVENT_OCCURRED_UART_INT_MASK __IA64_UL_CONST(0x0000000000100000)
+
+/* SH_EVENT_OCCURRED_IPI_INT */
+/* Description: Pending IPI Interrupt */
+#define SH_EVENT_OCCURRED_IPI_INT_SHFT 28
+#define SH_EVENT_OCCURRED_IPI_INT_MASK __IA64_UL_CONST(0x0000000010000000)
+
+/* SH_EVENT_OCCURRED_II_INT0 */
+/* Description: Pending II 0 Interrupt */
+#define SH_EVENT_OCCURRED_II_INT0_SHFT 29
+#define SH_EVENT_OCCURRED_II_INT0_MASK __IA64_UL_CONST(0x0000000020000000)
+
+/* SH_EVENT_OCCURRED_II_INT1 */
+/* Description: Pending II 1 Interrupt */
+#define SH_EVENT_OCCURRED_II_INT1_SHFT 30
+#define SH_EVENT_OCCURRED_II_INT1_MASK __IA64_UL_CONST(0x0000000040000000)
+
+/* SH2_EVENT_OCCURRED_EXTIO_INT2 */
+/* Description: Pending SHUB 2 EXT IO INT2 */
+#define SH2_EVENT_OCCURRED_EXTIO_INT2_SHFT 33
+#define SH2_EVENT_OCCURRED_EXTIO_INT2_MASK __IA64_UL_CONST(0x0000000200000000)
+
+/* SH2_EVENT_OCCURRED_EXTIO_INT3 */
+/* Description: Pending SHUB 2 EXT IO INT3 */
+#define SH2_EVENT_OCCURRED_EXTIO_INT3_SHFT 34
+#define SH2_EVENT_OCCURRED_EXTIO_INT3_MASK __IA64_UL_CONST(0x0000000400000000)
+
+#define SH_ALL_INT_MASK \
+ (SH_EVENT_OCCURRED_UART_INT_MASK | SH_EVENT_OCCURRED_IPI_INT_MASK | \
+ SH_EVENT_OCCURRED_II_INT0_MASK | SH_EVENT_OCCURRED_II_INT1_MASK | \
+ SH_EVENT_OCCURRED_II_INT1_MASK | SH2_EVENT_OCCURRED_EXTIO_INT2_MASK | \
+ SH2_EVENT_OCCURRED_EXTIO_INT3_MASK)
+
+
+/* ==================================================================== */
+/* LEDS */
+/* ==================================================================== */
+#define SH1_REAL_JUNK_BUS_LED0 0x7fed00000UL
+#define SH1_REAL_JUNK_BUS_LED1 0x7fed10000UL
+#define SH1_REAL_JUNK_BUS_LED2 0x7fed20000UL
+#define SH1_REAL_JUNK_BUS_LED3 0x7fed30000UL
+
+#define SH2_REAL_JUNK_BUS_LED0 0xf0000000UL
+#define SH2_REAL_JUNK_BUS_LED1 0xf0010000UL
+#define SH2_REAL_JUNK_BUS_LED2 0xf0020000UL
+#define SH2_REAL_JUNK_BUS_LED3 0xf0030000UL
+
+/* ==================================================================== */
+/* Register "SH1_PTC_0" */
+/* Purge Translation Cache Message Configuration Information */
+/* ==================================================================== */
+#define SH1_PTC_0 __IA64_UL_CONST(0x00000001101a0000)
+
+/* SH1_PTC_0_A */
+/* Description: Type */
+#define SH1_PTC_0_A_SHFT 0
+
+/* SH1_PTC_0_PS */
+/* Description: Page Size */
+#define SH1_PTC_0_PS_SHFT 2
+
+/* SH1_PTC_0_RID */
+/* Description: Region ID */
+#define SH1_PTC_0_RID_SHFT 8
+
+/* SH1_PTC_0_START */
+/* Description: Start */
+#define SH1_PTC_0_START_SHFT 63
+
+/* ==================================================================== */
+/* Register "SH1_PTC_1" */
+/* Purge Translation Cache Message Configuration Information */
+/* ==================================================================== */
+#define SH1_PTC_1 __IA64_UL_CONST(0x00000001101a0080)
+
+/* SH1_PTC_1_START */
+/* Description: PTC_1 Start */
+#define SH1_PTC_1_START_SHFT 63
+
+/* ==================================================================== */
+/* Register "SH2_PTC" */
+/* Purge Translation Cache Message Configuration Information */
+/* ==================================================================== */
+#define SH2_PTC __IA64_UL_CONST(0x0000000170000000)
+
+/* SH2_PTC_A */
+/* Description: Type */
+#define SH2_PTC_A_SHFT 0
+
+/* SH2_PTC_PS */
+/* Description: Page Size */
+#define SH2_PTC_PS_SHFT 2
+
+/* SH2_PTC_RID */
+/* Description: Region ID */
+#define SH2_PTC_RID_SHFT 4
+
+/* SH2_PTC_START */
+/* Description: Start */
+#define SH2_PTC_START_SHFT 63
+
+/* SH2_PTC_ADDR_RID */
+/* Description: Region ID */
+#define SH2_PTC_ADDR_SHFT 4
+#define SH2_PTC_ADDR_MASK __IA64_UL_CONST(0x1ffffffffffff000)
+
+/* ==================================================================== */
+/* Register "SH_RTC1_INT_CONFIG" */
+/* SHub RTC 1 Interrupt Config Registers */
+/* ==================================================================== */
+
+#define SH1_RTC1_INT_CONFIG __IA64_UL_CONST(0x0000000110001480)
+#define SH2_RTC1_INT_CONFIG __IA64_UL_CONST(0x0000000010001480)
+#define SH_RTC1_INT_CONFIG_MASK __IA64_UL_CONST(0x0ff3ffffffefffff)
+#define SH_RTC1_INT_CONFIG_INIT __IA64_UL_CONST(0x0000000000000000)
+
+/* SH_RTC1_INT_CONFIG_TYPE */
+/* Description: Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT */
+#define SH_RTC1_INT_CONFIG_TYPE_SHFT 0
+#define SH_RTC1_INT_CONFIG_TYPE_MASK __IA64_UL_CONST(0x0000000000000007)
+
+/* SH_RTC1_INT_CONFIG_AGT */
+/* Description: Agent, must be 0 for SHub */
+#define SH_RTC1_INT_CONFIG_AGT_SHFT 3
+#define SH_RTC1_INT_CONFIG_AGT_MASK __IA64_UL_CONST(0x0000000000000008)
+
+/* SH_RTC1_INT_CONFIG_PID */
+/* Description: Processor ID, same setting as on targeted McKinley */
+#define SH_RTC1_INT_CONFIG_PID_SHFT 4
+#define SH_RTC1_INT_CONFIG_PID_MASK __IA64_UL_CONST(0x00000000000ffff0)
+
+/* SH_RTC1_INT_CONFIG_BASE */
+/* Description: Optional interrupt vector area, 2MB aligned */
+#define SH_RTC1_INT_CONFIG_BASE_SHFT 21
+#define SH_RTC1_INT_CONFIG_BASE_MASK __IA64_UL_CONST(0x0003ffffffe00000)
+
+/* SH_RTC1_INT_CONFIG_IDX */
+/* Description: Targeted McKinley interrupt vector */
+#define SH_RTC1_INT_CONFIG_IDX_SHFT 52
+#define SH_RTC1_INT_CONFIG_IDX_MASK __IA64_UL_CONST(0x0ff0000000000000)
+
+/* ==================================================================== */
+/* Register "SH_RTC1_INT_ENABLE" */
+/* SHub RTC 1 Interrupt Enable Registers */
+/* ==================================================================== */
+
+#define SH1_RTC1_INT_ENABLE __IA64_UL_CONST(0x0000000110001500)
+#define SH2_RTC1_INT_ENABLE __IA64_UL_CONST(0x0000000010001500)
+#define SH_RTC1_INT_ENABLE_MASK __IA64_UL_CONST(0x0000000000000001)
+#define SH_RTC1_INT_ENABLE_INIT __IA64_UL_CONST(0x0000000000000000)
+
+/* SH_RTC1_INT_ENABLE_RTC1_ENABLE */
+/* Description: Enable RTC 1 Interrupt */
+#define SH_RTC1_INT_ENABLE_RTC1_ENABLE_SHFT 0
+#define SH_RTC1_INT_ENABLE_RTC1_ENABLE_MASK \
+ __IA64_UL_CONST(0x0000000000000001)
+
+/* ==================================================================== */
+/* Register "SH_RTC2_INT_CONFIG" */
+/* SHub RTC 2 Interrupt Config Registers */
+/* ==================================================================== */
+
+#define SH1_RTC2_INT_CONFIG __IA64_UL_CONST(0x0000000110001580)
+#define SH2_RTC2_INT_CONFIG __IA64_UL_CONST(0x0000000010001580)
+#define SH_RTC2_INT_CONFIG_MASK __IA64_UL_CONST(0x0ff3ffffffefffff)
+#define SH_RTC2_INT_CONFIG_INIT __IA64_UL_CONST(0x0000000000000000)
+
+/* SH_RTC2_INT_CONFIG_TYPE */
+/* Description: Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT */
+#define SH_RTC2_INT_CONFIG_TYPE_SHFT 0
+#define SH_RTC2_INT_CONFIG_TYPE_MASK __IA64_UL_CONST(0x0000000000000007)
+
+/* SH_RTC2_INT_CONFIG_AGT */
+/* Description: Agent, must be 0 for SHub */
+#define SH_RTC2_INT_CONFIG_AGT_SHFT 3
+#define SH_RTC2_INT_CONFIG_AGT_MASK __IA64_UL_CONST(0x0000000000000008)
+
+/* SH_RTC2_INT_CONFIG_PID */
+/* Description: Processor ID, same setting as on targeted McKinley */
+#define SH_RTC2_INT_CONFIG_PID_SHFT 4
+#define SH_RTC2_INT_CONFIG_PID_MASK __IA64_UL_CONST(0x00000000000ffff0)
+
+/* SH_RTC2_INT_CONFIG_BASE */
+/* Description: Optional interrupt vector area, 2MB aligned */
+#define SH_RTC2_INT_CONFIG_BASE_SHFT 21
+#define SH_RTC2_INT_CONFIG_BASE_MASK __IA64_UL_CONST(0x0003ffffffe00000)
+
+/* SH_RTC2_INT_CONFIG_IDX */
+/* Description: Targeted McKinley interrupt vector */
+#define SH_RTC2_INT_CONFIG_IDX_SHFT 52
+#define SH_RTC2_INT_CONFIG_IDX_MASK __IA64_UL_CONST(0x0ff0000000000000)
+
+/* ==================================================================== */
+/* Register "SH_RTC2_INT_ENABLE" */
+/* SHub RTC 2 Interrupt Enable Registers */
+/* ==================================================================== */
+
+#define SH1_RTC2_INT_ENABLE __IA64_UL_CONST(0x0000000110001600)
+#define SH2_RTC2_INT_ENABLE __IA64_UL_CONST(0x0000000010001600)
+#define SH_RTC2_INT_ENABLE_MASK __IA64_UL_CONST(0x0000000000000001)
+#define SH_RTC2_INT_ENABLE_INIT __IA64_UL_CONST(0x0000000000000000)
+
+/* SH_RTC2_INT_ENABLE_RTC2_ENABLE */
+/* Description: Enable RTC 2 Interrupt */
+#define SH_RTC2_INT_ENABLE_RTC2_ENABLE_SHFT 0
+#define SH_RTC2_INT_ENABLE_RTC2_ENABLE_MASK \
+ __IA64_UL_CONST(0x0000000000000001)
+
+/* ==================================================================== */
+/* Register "SH_RTC3_INT_CONFIG" */
+/* SHub RTC 3 Interrupt Config Registers */
+/* ==================================================================== */
+
+#define SH1_RTC3_INT_CONFIG __IA64_UL_CONST(0x0000000110001680)
+#define SH2_RTC3_INT_CONFIG __IA64_UL_CONST(0x0000000010001680)
+#define SH_RTC3_INT_CONFIG_MASK __IA64_UL_CONST(0x0ff3ffffffefffff)
+#define SH_RTC3_INT_CONFIG_INIT __IA64_UL_CONST(0x0000000000000000)
+
+/* SH_RTC3_INT_CONFIG_TYPE */
+/* Description: Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT */
+#define SH_RTC3_INT_CONFIG_TYPE_SHFT 0
+#define SH_RTC3_INT_CONFIG_TYPE_MASK __IA64_UL_CONST(0x0000000000000007)
+
+/* SH_RTC3_INT_CONFIG_AGT */
+/* Description: Agent, must be 0 for SHub */
+#define SH_RTC3_INT_CONFIG_AGT_SHFT 3
+#define SH_RTC3_INT_CONFIG_AGT_MASK __IA64_UL_CONST(0x0000000000000008)
+
+/* SH_RTC3_INT_CONFIG_PID */
+/* Description: Processor ID, same setting as on targeted McKinley */
+#define SH_RTC3_INT_CONFIG_PID_SHFT 4
+#define SH_RTC3_INT_CONFIG_PID_MASK __IA64_UL_CONST(0x00000000000ffff0)
+
+/* SH_RTC3_INT_CONFIG_BASE */
+/* Description: Optional interrupt vector area, 2MB aligned */
+#define SH_RTC3_INT_CONFIG_BASE_SHFT 21
+#define SH_RTC3_INT_CONFIG_BASE_MASK __IA64_UL_CONST(0x0003ffffffe00000)
+
+/* SH_RTC3_INT_CONFIG_IDX */
+/* Description: Targeted McKinley interrupt vector */
+#define SH_RTC3_INT_CONFIG_IDX_SHFT 52
+#define SH_RTC3_INT_CONFIG_IDX_MASK __IA64_UL_CONST(0x0ff0000000000000)
+
+/* ==================================================================== */
+/* Register "SH_RTC3_INT_ENABLE" */
+/* SHub RTC 3 Interrupt Enable Registers */
+/* ==================================================================== */
+
+#define SH1_RTC3_INT_ENABLE __IA64_UL_CONST(0x0000000110001700)
+#define SH2_RTC3_INT_ENABLE __IA64_UL_CONST(0x0000000010001700)
+#define SH_RTC3_INT_ENABLE_MASK __IA64_UL_CONST(0x0000000000000001)
+#define SH_RTC3_INT_ENABLE_INIT __IA64_UL_CONST(0x0000000000000000)
+
+/* SH_RTC3_INT_ENABLE_RTC3_ENABLE */
+/* Description: Enable RTC 3 Interrupt */
+#define SH_RTC3_INT_ENABLE_RTC3_ENABLE_SHFT 0
+#define SH_RTC3_INT_ENABLE_RTC3_ENABLE_MASK \
+ __IA64_UL_CONST(0x0000000000000001)
+
+/* SH_EVENT_OCCURRED_RTC1_INT */
+/* Description: Pending RTC 1 Interrupt */
+#define SH_EVENT_OCCURRED_RTC1_INT_SHFT 24
+#define SH_EVENT_OCCURRED_RTC1_INT_MASK __IA64_UL_CONST(0x0000000001000000)
+
+/* SH_EVENT_OCCURRED_RTC2_INT */
+/* Description: Pending RTC 2 Interrupt */
+#define SH_EVENT_OCCURRED_RTC2_INT_SHFT 25
+#define SH_EVENT_OCCURRED_RTC2_INT_MASK __IA64_UL_CONST(0x0000000002000000)
+
+/* SH_EVENT_OCCURRED_RTC3_INT */
+/* Description: Pending RTC 3 Interrupt */
+#define SH_EVENT_OCCURRED_RTC3_INT_SHFT 26
+#define SH_EVENT_OCCURRED_RTC3_INT_MASK __IA64_UL_CONST(0x0000000004000000)
+
+/* ==================================================================== */
+/* Register "SH_IPI_ACCESS" */
+/* CPU interrupt Access Permission Bits */
+/* ==================================================================== */
+
+#define SH1_IPI_ACCESS __IA64_UL_CONST(0x0000000110060480)
+#define SH2_IPI_ACCESS0 __IA64_UL_CONST(0x0000000010060c00)
+#define SH2_IPI_ACCESS1 __IA64_UL_CONST(0x0000000010060c80)
+#define SH2_IPI_ACCESS2 __IA64_UL_CONST(0x0000000010060d00)
+#define SH2_IPI_ACCESS3 __IA64_UL_CONST(0x0000000010060d80)
+
+/* ==================================================================== */
+/* Register "SH_INT_CMPB" */
+/* RTC Compare Value for Processor B */
+/* ==================================================================== */
+
+#define SH1_INT_CMPB __IA64_UL_CONST(0x00000001101b0080)
+#define SH2_INT_CMPB __IA64_UL_CONST(0x00000000101b0080)
+#define SH_INT_CMPB_MASK __IA64_UL_CONST(0x007fffffffffffff)
+#define SH_INT_CMPB_INIT __IA64_UL_CONST(0x0000000000000000)
+
+/* SH_INT_CMPB_REAL_TIME_CMPB */
+/* Description: Real Time Clock Compare */
+#define SH_INT_CMPB_REAL_TIME_CMPB_SHFT 0
+#define SH_INT_CMPB_REAL_TIME_CMPB_MASK __IA64_UL_CONST(0x007fffffffffffff)
+
+/* ==================================================================== */
+/* Register "SH_INT_CMPC" */
+/* RTC Compare Value for Processor C */
+/* ==================================================================== */
+
+#define SH1_INT_CMPC __IA64_UL_CONST(0x00000001101b0100)
+#define SH2_INT_CMPC __IA64_UL_CONST(0x00000000101b0100)
+#define SH_INT_CMPC_MASK __IA64_UL_CONST(0x007fffffffffffff)
+#define SH_INT_CMPC_INIT __IA64_UL_CONST(0x0000000000000000)
+
+/* SH_INT_CMPC_REAL_TIME_CMPC */
+/* Description: Real Time Clock Compare */
+#define SH_INT_CMPC_REAL_TIME_CMPC_SHFT 0
+#define SH_INT_CMPC_REAL_TIME_CMPC_MASK __IA64_UL_CONST(0x007fffffffffffff)
+
+/* ==================================================================== */
+/* Register "SH_INT_CMPD" */
+/* RTC Compare Value for Processor D */
+/* ==================================================================== */
+
+#define SH1_INT_CMPD __IA64_UL_CONST(0x00000001101b0180)
+#define SH2_INT_CMPD __IA64_UL_CONST(0x00000000101b0180)
+#define SH_INT_CMPD_MASK __IA64_UL_CONST(0x007fffffffffffff)
+#define SH_INT_CMPD_INIT __IA64_UL_CONST(0x0000000000000000)
+
+/* SH_INT_CMPD_REAL_TIME_CMPD */
+/* Description: Real Time Clock Compare */
+#define SH_INT_CMPD_REAL_TIME_CMPD_SHFT 0
+#define SH_INT_CMPD_REAL_TIME_CMPD_MASK __IA64_UL_CONST(0x007fffffffffffff)
+
+/* ==================================================================== */
+/* Register "SH_MD_DQLP_MMR_DIR_PRIVEC0" */
+/* privilege vector for acc=0 */
+/* ==================================================================== */
+#define SH1_MD_DQLP_MMR_DIR_PRIVEC0 __IA64_UL_CONST(0x0000000100030300)
+
+/* ==================================================================== */
+/* Register "SH_MD_DQRP_MMR_DIR_PRIVEC0" */
+/* privilege vector for acc=0 */
+/* ==================================================================== */
+#define SH1_MD_DQRP_MMR_DIR_PRIVEC0 __IA64_UL_CONST(0x0000000100050300)
+
+/* ==================================================================== */
+/* Some MMRs are functionally identical (or close enough) on both SHUB1 */
+/* and SHUB2 that it makes sense to define a generic name for the MMR. */
+/* It is acceptable to use (for example) SH_IPI_INT to reference the */
+/* IPI MMR. The value of SH_IPI_INT is determined at runtime based */
+/* on the type of the SHUB. Do not use these #defines in performance */
+/* critical code or loops - there is a small performance penalty. */
+/* ==================================================================== */
+#define shubmmr(a,b) (is_shub2() ? a##2_##b : a##1_##b)
+
+#define SH_REAL_JUNK_BUS_LED0 shubmmr(SH, REAL_JUNK_BUS_LED0)
+#define SH_IPI_INT shubmmr(SH, IPI_INT)
+#define SH_EVENT_OCCURRED shubmmr(SH, EVENT_OCCURRED)
+#define SH_EVENT_OCCURRED_ALIAS shubmmr(SH, EVENT_OCCURRED_ALIAS)
+#define SH_RTC shubmmr(SH, RTC)
+#define SH_RTC1_INT_CONFIG shubmmr(SH, RTC1_INT_CONFIG)
+#define SH_RTC1_INT_ENABLE shubmmr(SH, RTC1_INT_ENABLE)
+#define SH_RTC2_INT_CONFIG shubmmr(SH, RTC2_INT_CONFIG)
+#define SH_RTC2_INT_ENABLE shubmmr(SH, RTC2_INT_ENABLE)
+#define SH_RTC3_INT_CONFIG shubmmr(SH, RTC3_INT_CONFIG)
+#define SH_RTC3_INT_ENABLE shubmmr(SH, RTC3_INT_ENABLE)
+#define SH_INT_CMPB shubmmr(SH, INT_CMPB)
+#define SH_INT_CMPC shubmmr(SH, INT_CMPC)
+#define SH_INT_CMPD shubmmr(SH, INT_CMPD)
+
+/* ========================================================================== */
+/* Register "SH2_BT_ENG_CSR_0" */
+/* Engine 0 Control and Status Register */
+/* ========================================================================== */
+
+#define SH2_BT_ENG_CSR_0 __IA64_UL_CONST(0x0000000030040000)
+#define SH2_BT_ENG_SRC_ADDR_0 __IA64_UL_CONST(0x0000000030040080)
+#define SH2_BT_ENG_DEST_ADDR_0 __IA64_UL_CONST(0x0000000030040100)
+#define SH2_BT_ENG_NOTIF_ADDR_0 __IA64_UL_CONST(0x0000000030040180)
+
+/* ========================================================================== */
+/* BTE interfaces 1-3 */
+/* ========================================================================== */
+
+#define SH2_BT_ENG_CSR_1 __IA64_UL_CONST(0x0000000030050000)
+#define SH2_BT_ENG_CSR_2 __IA64_UL_CONST(0x0000000030060000)
+#define SH2_BT_ENG_CSR_3 __IA64_UL_CONST(0x0000000030070000)
+
+#endif /* _ASM_IA64_SN_SHUB_MMR_H */
diff --git a/xen/include/asm-ia64/linux/asm/sn/shubio.h b/xen/include/asm-ia64/linux/asm/sn/shubio.h
new file mode 100644
index 0000000000..22a6f18a53
--- /dev/null
+++ b/xen/include/asm-ia64/linux/asm/sn/shubio.h
@@ -0,0 +1,3358 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_SHUBIO_H
+#define _ASM_IA64_SN_SHUBIO_H
+
+#define HUB_WIDGET_ID_MAX 0xf
+#define IIO_NUM_ITTES 7
+#define HUB_NUM_BIG_WINDOW (IIO_NUM_ITTES - 1)
+
+#define IIO_WID 0x00400000 /* Crosstalk Widget Identification */
+ /* This register is also accessible from
+ * Crosstalk at address 0x0. */
+#define IIO_WSTAT 0x00400008 /* Crosstalk Widget Status */
+#define IIO_WCR 0x00400020 /* Crosstalk Widget Control Register */
+#define IIO_ILAPR 0x00400100 /* IO Local Access Protection Register */
+#define IIO_ILAPO 0x00400108 /* IO Local Access Protection Override */
+#define IIO_IOWA 0x00400110 /* IO Outbound Widget Access */
+#define IIO_IIWA 0x00400118 /* IO Inbound Widget Access */
+#define IIO_IIDEM 0x00400120 /* IO Inbound Device Error Mask */
+#define IIO_ILCSR 0x00400128 /* IO LLP Control and Status Register */
+#define IIO_ILLR 0x00400130 /* IO LLP Log Register */
+#define IIO_IIDSR 0x00400138 /* IO Interrupt Destination */
+
+#define IIO_IGFX0 0x00400140 /* IO Graphics Node-Widget Map 0 */
+#define IIO_IGFX1 0x00400148 /* IO Graphics Node-Widget Map 1 */
+
+#define IIO_ISCR0 0x00400150 /* IO Scratch Register 0 */
+#define IIO_ISCR1 0x00400158 /* IO Scratch Register 1 */
+
+#define IIO_ITTE1 0x00400160 /* IO Translation Table Entry 1 */
+#define IIO_ITTE2 0x00400168 /* IO Translation Table Entry 2 */
+#define IIO_ITTE3 0x00400170 /* IO Translation Table Entry 3 */
+#define IIO_ITTE4 0x00400178 /* IO Translation Table Entry 4 */
+#define IIO_ITTE5 0x00400180 /* IO Translation Table Entry 5 */
+#define IIO_ITTE6 0x00400188 /* IO Translation Table Entry 6 */
+#define IIO_ITTE7 0x00400190 /* IO Translation Table Entry 7 */
+
+#define IIO_IPRB0 0x00400198 /* IO PRB Entry 0 */
+#define IIO_IPRB8 0x004001A0 /* IO PRB Entry 8 */
+#define IIO_IPRB9 0x004001A8 /* IO PRB Entry 9 */
+#define IIO_IPRBA 0x004001B0 /* IO PRB Entry A */
+#define IIO_IPRBB 0x004001B8 /* IO PRB Entry B */
+#define IIO_IPRBC 0x004001C0 /* IO PRB Entry C */
+#define IIO_IPRBD 0x004001C8 /* IO PRB Entry D */
+#define IIO_IPRBE 0x004001D0 /* IO PRB Entry E */
+#define IIO_IPRBF 0x004001D8 /* IO PRB Entry F */
+
+#define IIO_IXCC 0x004001E0 /* IO Crosstalk Credit Count Timeout */
+#define IIO_IMEM 0x004001E8 /* IO Miscellaneous Error Mask */
+#define IIO_IXTT 0x004001F0 /* IO Crosstalk Timeout Threshold */
+#define IIO_IECLR 0x004001F8 /* IO Error Clear Register */
+#define IIO_IBCR 0x00400200 /* IO BTE Control Register */
+
+#define IIO_IXSM 0x00400208 /* IO Crosstalk Spurious Message */
+#define IIO_IXSS 0x00400210 /* IO Crosstalk Spurious Sideband */
+
+#define IIO_ILCT 0x00400218 /* IO LLP Channel Test */
+
+#define IIO_IIEPH1 0x00400220 /* IO Incoming Error Packet Header, Part 1 */
+#define IIO_IIEPH2 0x00400228 /* IO Incoming Error Packet Header, Part 2 */
+
+#define IIO_ISLAPR 0x00400230 /* IO SXB Local Access Protection Register */
+#define IIO_ISLAPO 0x00400238 /* IO SXB Local Access Protection Override */
+
+#define IIO_IWI 0x00400240 /* IO Wrapper Interrupt Register */
+#define IIO_IWEL 0x00400248 /* IO Wrapper Error Log Register */
+#define IIO_IWC 0x00400250 /* IO Wrapper Control Register */
+#define IIO_IWS 0x00400258 /* IO Wrapper Status Register */
+#define IIO_IWEIM 0x00400260 /* IO Wrapper Error Interrupt Masking Register */
+
+#define IIO_IPCA 0x00400300 /* IO PRB Counter Adjust */
+
+#define IIO_IPRTE0_A 0x00400308 /* IO PIO Read Address Table Entry 0, Part A */
+#define IIO_IPRTE1_A 0x00400310 /* IO PIO Read Address Table Entry 1, Part A */
+#define IIO_IPRTE2_A 0x00400318 /* IO PIO Read Address Table Entry 2, Part A */
+#define IIO_IPRTE3_A 0x00400320 /* IO PIO Read Address Table Entry 3, Part A */
+#define IIO_IPRTE4_A 0x00400328 /* IO PIO Read Address Table Entry 4, Part A */
+#define IIO_IPRTE5_A 0x00400330 /* IO PIO Read Address Table Entry 5, Part A */
+#define IIO_IPRTE6_A 0x00400338 /* IO PIO Read Address Table Entry 6, Part A */
+#define IIO_IPRTE7_A 0x00400340 /* IO PIO Read Address Table Entry 7, Part A */
+
+#define IIO_IPRTE0_B 0x00400348 /* IO PIO Read Address Table Entry 0, Part B */
+#define IIO_IPRTE1_B 0x00400350 /* IO PIO Read Address Table Entry 1, Part B */
+#define IIO_IPRTE2_B 0x00400358 /* IO PIO Read Address Table Entry 2, Part B */
+#define IIO_IPRTE3_B 0x00400360 /* IO PIO Read Address Table Entry 3, Part B */
+#define IIO_IPRTE4_B 0x00400368 /* IO PIO Read Address Table Entry 4, Part B */
+#define IIO_IPRTE5_B 0x00400370 /* IO PIO Read Address Table Entry 5, Part B */
+#define IIO_IPRTE6_B 0x00400378 /* IO PIO Read Address Table Entry 6, Part B */
+#define IIO_IPRTE7_B 0x00400380 /* IO PIO Read Address Table Entry 7, Part B */
+
+#define IIO_IPDR 0x00400388 /* IO PIO Deallocation Register */
+#define IIO_ICDR 0x00400390 /* IO CRB Entry Deallocation Register */
+#define IIO_IFDR 0x00400398 /* IO IOQ FIFO Depth Register */
+#define IIO_IIAP 0x004003A0 /* IO IIQ Arbitration Parameters */
+#define IIO_ICMR 0x004003A8 /* IO CRB Management Register */
+#define IIO_ICCR 0x004003B0 /* IO CRB Control Register */
+#define IIO_ICTO 0x004003B8 /* IO CRB Timeout */
+#define IIO_ICTP 0x004003C0 /* IO CRB Timeout Prescalar */
+
+#define IIO_ICRB0_A 0x00400400 /* IO CRB Entry 0_A */
+#define IIO_ICRB0_B 0x00400408 /* IO CRB Entry 0_B */
+#define IIO_ICRB0_C 0x00400410 /* IO CRB Entry 0_C */
+#define IIO_ICRB0_D 0x00400418 /* IO CRB Entry 0_D */
+#define IIO_ICRB0_E 0x00400420 /* IO CRB Entry 0_E */
+
+#define IIO_ICRB1_A 0x00400430 /* IO CRB Entry 1_A */
+#define IIO_ICRB1_B 0x00400438 /* IO CRB Entry 1_B */
+#define IIO_ICRB1_C 0x00400440 /* IO CRB Entry 1_C */
+#define IIO_ICRB1_D 0x00400448 /* IO CRB Entry 1_D */
+#define IIO_ICRB1_E 0x00400450 /* IO CRB Entry 1_E */
+
+#define IIO_ICRB2_A 0x00400460 /* IO CRB Entry 2_A */
+#define IIO_ICRB2_B 0x00400468 /* IO CRB Entry 2_B */
+#define IIO_ICRB2_C 0x00400470 /* IO CRB Entry 2_C */
+#define IIO_ICRB2_D 0x00400478 /* IO CRB Entry 2_D */
+#define IIO_ICRB2_E 0x00400480 /* IO CRB Entry 2_E */
+
+#define IIO_ICRB3_A 0x00400490 /* IO CRB Entry 3_A */
+#define IIO_ICRB3_B 0x00400498 /* IO CRB Entry 3_B */
+#define IIO_ICRB3_C 0x004004a0 /* IO CRB Entry 3_C */
+#define IIO_ICRB3_D 0x004004a8 /* IO CRB Entry 3_D */
+#define IIO_ICRB3_E 0x004004b0 /* IO CRB Entry 3_E */
+
+#define IIO_ICRB4_A 0x004004c0 /* IO CRB Entry 4_A */
+#define IIO_ICRB4_B 0x004004c8 /* IO CRB Entry 4_B */
+#define IIO_ICRB4_C 0x004004d0 /* IO CRB Entry 4_C */
+#define IIO_ICRB4_D 0x004004d8 /* IO CRB Entry 4_D */
+#define IIO_ICRB4_E 0x004004e0 /* IO CRB Entry 4_E */
+
+#define IIO_ICRB5_A 0x004004f0 /* IO CRB Entry 5_A */
+#define IIO_ICRB5_B 0x004004f8 /* IO CRB Entry 5_B */
+#define IIO_ICRB5_C 0x00400500 /* IO CRB Entry 5_C */
+#define IIO_ICRB5_D 0x00400508 /* IO CRB Entry 5_D */
+#define IIO_ICRB5_E 0x00400510 /* IO CRB Entry 5_E */
+
+#define IIO_ICRB6_A 0x00400520 /* IO CRB Entry 6_A */
+#define IIO_ICRB6_B 0x00400528 /* IO CRB Entry 6_B */
+#define IIO_ICRB6_C 0x00400530 /* IO CRB Entry 6_C */
+#define IIO_ICRB6_D 0x00400538 /* IO CRB Entry 6_D */
+#define IIO_ICRB6_E 0x00400540 /* IO CRB Entry 6_E */
+
+#define IIO_ICRB7_A 0x00400550 /* IO CRB Entry 7_A */
+#define IIO_ICRB7_B 0x00400558 /* IO CRB Entry 7_B */
+#define IIO_ICRB7_C 0x00400560 /* IO CRB Entry 7_C */
+#define IIO_ICRB7_D 0x00400568 /* IO CRB Entry 7_D */
+#define IIO_ICRB7_E 0x00400570 /* IO CRB Entry 7_E */
+
+#define IIO_ICRB8_A 0x00400580 /* IO CRB Entry 8_A */
+#define IIO_ICRB8_B 0x00400588 /* IO CRB Entry 8_B */
+#define IIO_ICRB8_C 0x00400590 /* IO CRB Entry 8_C */
+#define IIO_ICRB8_D 0x00400598 /* IO CRB Entry 8_D */
+#define IIO_ICRB8_E 0x004005a0 /* IO CRB Entry 8_E */
+
+#define IIO_ICRB9_A 0x004005b0 /* IO CRB Entry 9_A */
+#define IIO_ICRB9_B 0x004005b8 /* IO CRB Entry 9_B */
+#define IIO_ICRB9_C 0x004005c0 /* IO CRB Entry 9_C */
+#define IIO_ICRB9_D 0x004005c8 /* IO CRB Entry 9_D */
+#define IIO_ICRB9_E 0x004005d0 /* IO CRB Entry 9_E */
+
+#define IIO_ICRBA_A 0x004005e0 /* IO CRB Entry A_A */
+#define IIO_ICRBA_B 0x004005e8 /* IO CRB Entry A_B */
+#define IIO_ICRBA_C 0x004005f0 /* IO CRB Entry A_C */
+#define IIO_ICRBA_D 0x004005f8 /* IO CRB Entry A_D */
+#define IIO_ICRBA_E 0x00400600 /* IO CRB Entry A_E */
+
+#define IIO_ICRBB_A 0x00400610 /* IO CRB Entry B_A */
+#define IIO_ICRBB_B 0x00400618 /* IO CRB Entry B_B */
+#define IIO_ICRBB_C 0x00400620 /* IO CRB Entry B_C */
+#define IIO_ICRBB_D 0x00400628 /* IO CRB Entry B_D */
+#define IIO_ICRBB_E 0x00400630 /* IO CRB Entry B_E */
+
+#define IIO_ICRBC_A 0x00400640 /* IO CRB Entry C_A */
+#define IIO_ICRBC_B 0x00400648 /* IO CRB Entry C_B */
+#define IIO_ICRBC_C 0x00400650 /* IO CRB Entry C_C */
+#define IIO_ICRBC_D 0x00400658 /* IO CRB Entry C_D */
+#define IIO_ICRBC_E 0x00400660 /* IO CRB Entry C_E */
+
+#define IIO_ICRBD_A 0x00400670 /* IO CRB Entry D_A */
+#define IIO_ICRBD_B 0x00400678 /* IO CRB Entry D_B */
+#define IIO_ICRBD_C 0x00400680 /* IO CRB Entry D_C */
+#define IIO_ICRBD_D 0x00400688 /* IO CRB Entry D_D */
+#define IIO_ICRBD_E 0x00400690 /* IO CRB Entry D_E */
+
+#define IIO_ICRBE_A 0x004006a0 /* IO CRB Entry E_A */
+#define IIO_ICRBE_B 0x004006a8 /* IO CRB Entry E_B */
+#define IIO_ICRBE_C 0x004006b0 /* IO CRB Entry E_C */
+#define IIO_ICRBE_D 0x004006b8 /* IO CRB Entry E_D */
+#define IIO_ICRBE_E 0x004006c0 /* IO CRB Entry E_E */
+
+#define IIO_ICSML 0x00400700 /* IO CRB Spurious Message Low */
+#define IIO_ICSMM 0x00400708 /* IO CRB Spurious Message Middle */
+#define IIO_ICSMH 0x00400710 /* IO CRB Spurious Message High */
+
+#define IIO_IDBSS 0x00400718 /* IO Debug Submenu Select */
+
+#define IIO_IBLS0 0x00410000 /* IO BTE Length Status 0 */
+#define IIO_IBSA0 0x00410008 /* IO BTE Source Address 0 */
+#define IIO_IBDA0 0x00410010 /* IO BTE Destination Address 0 */
+#define IIO_IBCT0 0x00410018 /* IO BTE Control Terminate 0 */
+#define IIO_IBNA0 0x00410020 /* IO BTE Notification Address 0 */
+#define IIO_IBIA0 0x00410028 /* IO BTE Interrupt Address 0 */
+#define IIO_IBLS1 0x00420000 /* IO BTE Length Status 1 */
+#define IIO_IBSA1 0x00420008 /* IO BTE Source Address 1 */
+#define IIO_IBDA1 0x00420010 /* IO BTE Destination Address 1 */
+#define IIO_IBCT1 0x00420018 /* IO BTE Control Terminate 1 */
+#define IIO_IBNA1 0x00420020 /* IO BTE Notification Address 1 */
+#define IIO_IBIA1 0x00420028 /* IO BTE Interrupt Address 1 */
+
+#define IIO_IPCR 0x00430000 /* IO Performance Control */
+#define IIO_IPPR 0x00430008 /* IO Performance Profiling */
+
+/************************************************************************
+ * *
+ * Description: This register echoes some information from the *
+ * LB_REV_ID register. It is available through Crosstalk as described *
+ * above. The REV_NUM and MFG_NUM fields receive their values from *
+ * the REVISION and MANUFACTURER fields in the LB_REV_ID register. *
+ * The PART_NUM field's value is the Crosstalk device ID number that *
+ * Steve Miller assigned to the SHub chip. *
+ * *
+ ************************************************************************/
+
+typedef union ii_wid_u {
+ u64 ii_wid_regval;
+ struct {
+ u64 w_rsvd_1:1;
+ u64 w_mfg_num:11;
+ u64 w_part_num:16;
+ u64 w_rev_num:4;
+ u64 w_rsvd:32;
+ } ii_wid_fld_s;
+} ii_wid_u_t;
+
+/************************************************************************
+ * *
+ * The fields in this register are set upon detection of an error *
+ * and cleared by various mechanisms, as explained in the *
+ * description. *
+ * *
+ ************************************************************************/
+
+typedef union ii_wstat_u {
+ u64 ii_wstat_regval;
+ struct {
+ u64 w_pending:4;
+ u64 w_xt_crd_to:1;
+ u64 w_xt_tail_to:1;
+ u64 w_rsvd_3:3;
+ u64 w_tx_mx_rty:1;
+ u64 w_rsvd_2:6;
+ u64 w_llp_tx_cnt:8;
+ u64 w_rsvd_1:8;
+ u64 w_crazy:1;
+ u64 w_rsvd:31;
+ } ii_wstat_fld_s;
+} ii_wstat_u_t;
+
+/************************************************************************
+ * *
+ * Description: This is a read-write enabled register. It controls *
+ * various aspects of the Crosstalk flow control. *
+ * *
+ ************************************************************************/
+
+typedef union ii_wcr_u {
+ u64 ii_wcr_regval;
+ struct {
+ u64 w_wid:4;
+ u64 w_tag:1;
+ u64 w_rsvd_1:8;
+ u64 w_dst_crd:3;
+ u64 w_f_bad_pkt:1;
+ u64 w_dir_con:1;
+ u64 w_e_thresh:5;
+ u64 w_rsvd:41;
+ } ii_wcr_fld_s;
+} ii_wcr_u_t;
+
+/************************************************************************
+ * *
+ * Description: This register's value is a bit vector that guards *
+ * access to local registers within the II as well as to external *
+ * Crosstalk widgets. Each bit in the register corresponds to a *
+ * particular region in the system; a region consists of one, two or *
+ * four nodes (depending on the value of the REGION_SIZE field in the *
+ * LB_REV_ID register, which is documented in Section 8.3.1.1). The *
+ * protection provided by this register applies to PIO read *
+ * operations as well as PIO write operations. The II will perform a *
+ * PIO read or write request only if the bit for the requestor's *
+ * region is set; otherwise, the II will not perform the requested *
+ * operation and will return an error response. When a PIO read or *
+ * write request targets an external Crosstalk widget, then not only *
+ * must the bit for the requestor's region be set in the ILAPR, but *
+ * also the target widget's bit in the IOWA register must be set in *
+ * order for the II to perform the requested operation; otherwise, *
+ * the II will return an error response. Hence, the protection *
+ * provided by the IOWA register supplements the protection provided *
+ * by the ILAPR for requests that target external Crosstalk widgets. *
+ * This register itself can be accessed only by the nodes whose *
+ * region ID bits are enabled in this same register. It can also be *
+ * accessed through the IAlias space by the local processors. *
+ * The reset value of this register allows access by all nodes. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ilapr_u {
+ u64 ii_ilapr_regval;
+ struct {
+ u64 i_region:64;
+ } ii_ilapr_fld_s;
+} ii_ilapr_u_t;
+
+/************************************************************************
+ * *
+ * Description: A write to this register of the 64-bit value *
+ * "SGIrules" in ASCII, will cause the bit in the ILAPR register *
+ * corresponding to the region of the requestor to be set (allow *
+ * access). A write of any other value will be ignored. Access *
+ * protection for this register is "SGIrules". *
+ * This register can also be accessed through the IAlias space. *
+ * However, this access will not change the access permissions in the *
+ * ILAPR. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ilapo_u {
+ u64 ii_ilapo_regval;
+ struct {
+ u64 i_io_ovrride:64;
+ } ii_ilapo_fld_s;
+} ii_ilapo_u_t;
+
+/************************************************************************
+ * *
+ * This register qualifies all the PIO and Graphics writes launched *
+ * from the SHUB towards a widget. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iowa_u {
+ u64 ii_iowa_regval;
+ struct {
+ u64 i_w0_oac:1;
+ u64 i_rsvd_1:7;
+ u64 i_wx_oac:8;
+ u64 i_rsvd:48;
+ } ii_iowa_fld_s;
+} ii_iowa_u_t;
+
+/************************************************************************
+ * *
+ * Description: This register qualifies all the requests launched *
+ * from a widget towards the Shub. This register is intended to be *
+ * used by software in case of misbehaving widgets. *
+ * *
+ * *
+ ************************************************************************/
+
+typedef union ii_iiwa_u {
+ u64 ii_iiwa_regval;
+ struct {
+ u64 i_w0_iac:1;
+ u64 i_rsvd_1:7;
+ u64 i_wx_iac:8;
+ u64 i_rsvd:48;
+ } ii_iiwa_fld_s;
+} ii_iiwa_u_t;
+
+/************************************************************************
+ * *
+ * Description: This register qualifies all the operations launched *
+ * from a widget towards the SHub. It allows individual access *
+ * control for up to 8 devices per widget. A device refers to *
+ * individual DMA master hosted by a widget. *
+ * The bits in each field of this register are cleared by the Shub *
+ * upon detection of an error which requires the device to be *
+ * disabled. These fields assume that 0=TNUM=7 (i.e., Bridge-centric *
+ * Crosstalk). Whether or not a device has access rights to this *
+ * Shub is determined by an AND of the device enable bit in the *
+ * appropriate field of this register and the corresponding bit in *
+ * the Wx_IAC field (for the widget which this device belongs to). *
+ * The bits in this field are set by writing a 1 to them. Incoming *
+ * replies from Crosstalk are not subject to this access control *
+ * mechanism. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iidem_u {
+ u64 ii_iidem_regval;
+ struct {
+ u64 i_w8_dxs:8;
+ u64 i_w9_dxs:8;
+ u64 i_wa_dxs:8;
+ u64 i_wb_dxs:8;
+ u64 i_wc_dxs:8;
+ u64 i_wd_dxs:8;
+ u64 i_we_dxs:8;
+ u64 i_wf_dxs:8;
+ } ii_iidem_fld_s;
+} ii_iidem_u_t;
+
+/************************************************************************
+ * *
+ * This register contains the various programmable fields necessary *
+ * for controlling and observing the LLP signals. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ilcsr_u {
+ u64 ii_ilcsr_regval;
+ struct {
+ u64 i_nullto:6;
+ u64 i_rsvd_4:2;
+ u64 i_wrmrst:1;
+ u64 i_rsvd_3:1;
+ u64 i_llp_en:1;
+ u64 i_bm8:1;
+ u64 i_llp_stat:2;
+ u64 i_remote_power:1;
+ u64 i_rsvd_2:1;
+ u64 i_maxrtry:10;
+ u64 i_d_avail_sel:2;
+ u64 i_rsvd_1:4;
+ u64 i_maxbrst:10;
+ u64 i_rsvd:22;
+
+ } ii_ilcsr_fld_s;
+} ii_ilcsr_u_t;
+
+/************************************************************************
+ * *
+ * This is simply a status register that monitors the LLP error *
+ * rate. *
+ * *
+ ************************************************************************/
+
+typedef union ii_illr_u {
+ u64 ii_illr_regval;
+ struct {
+ u64 i_sn_cnt:16;
+ u64 i_cb_cnt:16;
+ u64 i_rsvd:32;
+ } ii_illr_fld_s;
+} ii_illr_u_t;
+
+/************************************************************************
+ * *
+ * Description: All II-detected non-BTE error interrupts are *
+ * specified via this register. *
+ * NOTE: The PI interrupt register address is hardcoded in the II. If *
+ * PI_ID==0, then the II sends an interrupt request (Duplonet PWRI *
+ * packet) to address offset 0x0180_0090 within the local register *
+ * address space of PI0 on the node specified by the NODE field. If *
+ * PI_ID==1, then the II sends the interrupt request to address *
+ * offset 0x01A0_0090 within the local register address space of PI1 *
+ * on the node specified by the NODE field. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iidsr_u {
+ u64 ii_iidsr_regval;
+ struct {
+ u64 i_level:8;
+ u64 i_pi_id:1;
+ u64 i_node:11;
+ u64 i_rsvd_3:4;
+ u64 i_enable:1;
+ u64 i_rsvd_2:3;
+ u64 i_int_sent:2;
+ u64 i_rsvd_1:2;
+ u64 i_pi0_forward_int:1;
+ u64 i_pi1_forward_int:1;
+ u64 i_rsvd:30;
+ } ii_iidsr_fld_s;
+} ii_iidsr_u_t;
+
+/************************************************************************
+ * *
+ * There are two instances of this register. This register is used *
+ * for matching up the incoming responses from the graphics widget to *
+ * the processor that initiated the graphics operation. The *
+ * write-responses are converted to graphics credits and returned to *
+ * the processor so that the processor interface can manage the flow *
+ * control. *
+ * *
+ ************************************************************************/
+
+typedef union ii_igfx0_u {
+ u64 ii_igfx0_regval;
+ struct {
+ u64 i_w_num:4;
+ u64 i_pi_id:1;
+ u64 i_n_num:12;
+ u64 i_p_num:1;
+ u64 i_rsvd:46;
+ } ii_igfx0_fld_s;
+} ii_igfx0_u_t;
+
+/************************************************************************
+ * *
+ * There are two instances of this register. This register is used *
+ * for matching up the incoming responses from the graphics widget to *
+ * the processor that initiated the graphics operation. The *
+ * write-responses are converted to graphics credits and returned to *
+ * the processor so that the processor interface can manage the flow *
+ * control. *
+ * *
+ ************************************************************************/
+
+typedef union ii_igfx1_u {
+ u64 ii_igfx1_regval;
+ struct {
+ u64 i_w_num:4;
+ u64 i_pi_id:1;
+ u64 i_n_num:12;
+ u64 i_p_num:1;
+ u64 i_rsvd:46;
+ } ii_igfx1_fld_s;
+} ii_igfx1_u_t;
+
+/************************************************************************
+ * *
+ * There are two instances of these registers. These registers are *
+ * used as scratch registers for software use. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iscr0_u {
+ u64 ii_iscr0_regval;
+ struct {
+ u64 i_scratch:64;
+ } ii_iscr0_fld_s;
+} ii_iscr0_u_t;
+
+/************************************************************************
+ * *
+ * There are two instances of these registers. These registers are *
+ * used as scratch registers for software use. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iscr1_u {
+ u64 ii_iscr1_regval;
+ struct {
+ u64 i_scratch:64;
+ } ii_iscr1_fld_s;
+} ii_iscr1_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are seven instances of translation table entry *
+ * registers. Each register maps a Shub Big Window to a 48-bit *
+ * address on Crosstalk. *
+ * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
+ * number) are used to select one of these 7 registers. The Widget *
+ * number field is then derived from the W_NUM field for synthesizing *
+ * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
+ * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
+ * are padded with zeros. Although the maximum Crosstalk space *
+ * addressable by the SHub is thus the lower 16 GBytes per widget *
+ * (M-mode), however only <SUP >7</SUP>/<SUB >32nds</SUB> of this *
+ * space can be accessed. *
+ * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
+ * Window number) are used to select one of these 7 registers. The *
+ * Widget number field is then derived from the W_NUM field for *
+ * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
+ * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
+ * field is used as Crosstalk[47], and remainder of the Crosstalk *
+ * address bits (Crosstalk[46:34]) are always zero. While the maximum *
+ * Crosstalk space addressable by the Shub is thus the lower *
+ * 8-GBytes per widget (N-mode), only <SUP >7</SUP>/<SUB >32nds</SUB> *
+ * of this space can be accessed. *
+ * *
+ ************************************************************************/
+
+typedef union ii_itte1_u {
+ u64 ii_itte1_regval;
+ struct {
+ u64 i_offset:5;
+ u64 i_rsvd_1:3;
+ u64 i_w_num:4;
+ u64 i_iosp:1;
+ u64 i_rsvd:51;
+ } ii_itte1_fld_s;
+} ii_itte1_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are seven instances of translation table entry *
+ * registers. Each register maps a Shub Big Window to a 48-bit *
+ * address on Crosstalk. *
+ * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
+ * number) are used to select one of these 7 registers. The Widget *
+ * number field is then derived from the W_NUM field for synthesizing *
+ * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
+ * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
+ * are padded with zeros. Although the maximum Crosstalk space *
+ * addressable by the Shub is thus the lower 16 GBytes per widget *
+ * (M-mode), however only <SUP >7</SUP>/<SUB >32nds</SUB> of this *
+ * space can be accessed. *
+ * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
+ * Window number) are used to select one of these 7 registers. The *
+ * Widget number field is then derived from the W_NUM field for *
+ * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
+ * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
+ * field is used as Crosstalk[47], and remainder of the Crosstalk *
+ * address bits (Crosstalk[46:34]) are always zero. While the maximum *
+ * Crosstalk space addressable by the Shub is thus the lower *
+ * 8-GBytes per widget (N-mode), only <SUP >7</SUP>/<SUB >32nds</SUB> *
+ * of this space can be accessed. *
+ * *
+ ************************************************************************/
+
+typedef union ii_itte2_u {
+ u64 ii_itte2_regval;
+ struct {
+ u64 i_offset:5;
+ u64 i_rsvd_1:3;
+ u64 i_w_num:4;
+ u64 i_iosp:1;
+ u64 i_rsvd:51;
+ } ii_itte2_fld_s;
+} ii_itte2_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are seven instances of translation table entry *
+ * registers. Each register maps a Shub Big Window to a 48-bit *
+ * address on Crosstalk. *
+ * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
+ * number) are used to select one of these 7 registers. The Widget *
+ * number field is then derived from the W_NUM field for synthesizing *
+ * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
+ * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
+ * are padded with zeros. Although the maximum Crosstalk space *
+ * addressable by the Shub is thus the lower 16 GBytes per widget *
+ * (M-mode), however only <SUP >7</SUP>/<SUB >32nds</SUB> of this *
+ * space can be accessed. *
+ * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
+ * Window number) are used to select one of these 7 registers. The *
+ * Widget number field is then derived from the W_NUM field for *
+ * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
+ * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
+ * field is used as Crosstalk[47], and remainder of the Crosstalk *
+ * address bits (Crosstalk[46:34]) are always zero. While the maximum *
+ * Crosstalk space addressable by the SHub is thus the lower *
+ * 8-GBytes per widget (N-mode), only <SUP >7</SUP>/<SUB >32nds</SUB> *
+ * of this space can be accessed. *
+ * *
+ ************************************************************************/
+
+typedef union ii_itte3_u {
+ u64 ii_itte3_regval;
+ struct {
+ u64 i_offset:5;
+ u64 i_rsvd_1:3;
+ u64 i_w_num:4;
+ u64 i_iosp:1;
+ u64 i_rsvd:51;
+ } ii_itte3_fld_s;
+} ii_itte3_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are seven instances of translation table entry *
+ * registers. Each register maps a SHub Big Window to a 48-bit *
+ * address on Crosstalk. *
+ * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
+ * number) are used to select one of these 7 registers. The Widget *
+ * number field is then derived from the W_NUM field for synthesizing *
+ * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
+ * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
+ * are padded with zeros. Although the maximum Crosstalk space *
+ * addressable by the SHub is thus the lower 16 GBytes per widget *
+ * (M-mode), however only <SUP >7</SUP>/<SUB >32nds</SUB> of this *
+ * space can be accessed. *
+ * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
+ * Window number) are used to select one of these 7 registers. The *
+ * Widget number field is then derived from the W_NUM field for *
+ * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
+ * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
+ * field is used as Crosstalk[47], and remainder of the Crosstalk *
+ * address bits (Crosstalk[46:34]) are always zero. While the maximum *
+ * Crosstalk space addressable by the SHub is thus the lower *
+ * 8-GBytes per widget (N-mode), only <SUP >7</SUP>/<SUB >32nds</SUB> *
+ * of this space can be accessed. *
+ * *
+ ************************************************************************/
+
+typedef union ii_itte4_u {
+ u64 ii_itte4_regval;
+ struct {
+ u64 i_offset:5;
+ u64 i_rsvd_1:3;
+ u64 i_w_num:4;
+ u64 i_iosp:1;
+ u64 i_rsvd:51;
+ } ii_itte4_fld_s;
+} ii_itte4_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are seven instances of translation table entry *
+ * registers. Each register maps a SHub Big Window to a 48-bit *
+ * address on Crosstalk. *
+ * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
+ * number) are used to select one of these 7 registers. The Widget *
+ * number field is then derived from the W_NUM field for synthesizing *
+ * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
+ * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
+ * are padded with zeros. Although the maximum Crosstalk space *
+ * addressable by the Shub is thus the lower 16 GBytes per widget *
+ * (M-mode), however only <SUP >7</SUP>/<SUB >32nds</SUB> of this *
+ * space can be accessed. *
+ * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
+ * Window number) are used to select one of these 7 registers. The *
+ * Widget number field is then derived from the W_NUM field for *
+ * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
+ * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
+ * field is used as Crosstalk[47], and remainder of the Crosstalk *
+ * address bits (Crosstalk[46:34]) are always zero. While the maximum *
+ * Crosstalk space addressable by the Shub is thus the lower *
+ * 8-GBytes per widget (N-mode), only <SUP >7</SUP>/<SUB >32nds</SUB> *
+ * of this space can be accessed. *
+ * *
+ ************************************************************************/
+
+typedef union ii_itte5_u {
+ u64 ii_itte5_regval;
+ struct {
+ u64 i_offset:5;
+ u64 i_rsvd_1:3;
+ u64 i_w_num:4;
+ u64 i_iosp:1;
+ u64 i_rsvd:51;
+ } ii_itte5_fld_s;
+} ii_itte5_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are seven instances of translation table entry *
+ * registers. Each register maps a Shub Big Window to a 48-bit *
+ * address on Crosstalk. *
+ * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
+ * number) are used to select one of these 7 registers. The Widget *
+ * number field is then derived from the W_NUM field for synthesizing *
+ * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
+ * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
+ * are padded with zeros. Although the maximum Crosstalk space *
+ * addressable by the Shub is thus the lower 16 GBytes per widget *
+ * (M-mode), however only <SUP >7</SUP>/<SUB >32nds</SUB> of this *
+ * space can be accessed. *
+ * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
+ * Window number) are used to select one of these 7 registers. The *
+ * Widget number field is then derived from the W_NUM field for *
+ * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
+ * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
+ * field is used as Crosstalk[47], and remainder of the Crosstalk *
+ * address bits (Crosstalk[46:34]) are always zero. While the maximum *
+ * Crosstalk space addressable by the Shub is thus the lower *
+ * 8-GBytes per widget (N-mode), only <SUP >7</SUP>/<SUB >32nds</SUB> *
+ * of this space can be accessed. *
+ * *
+ ************************************************************************/
+
+typedef union ii_itte6_u {
+ u64 ii_itte6_regval;
+ struct {
+ u64 i_offset:5;
+ u64 i_rsvd_1:3;
+ u64 i_w_num:4;
+ u64 i_iosp:1;
+ u64 i_rsvd:51;
+ } ii_itte6_fld_s;
+} ii_itte6_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are seven instances of translation table entry *
+ * registers. Each register maps a Shub Big Window to a 48-bit *
+ * address on Crosstalk. *
+ * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
+ * number) are used to select one of these 7 registers. The Widget *
+ * number field is then derived from the W_NUM field for synthesizing *
+ * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
+ * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
+ * are padded with zeros. Although the maximum Crosstalk space *
+ * addressable by the Shub is thus the lower 16 GBytes per widget *
+ * (M-mode), however only <SUP >7</SUP>/<SUB >32nds</SUB> of this *
+ * space can be accessed. *
+ * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
+ * Window number) are used to select one of these 7 registers. The *
+ * Widget number field is then derived from the W_NUM field for *
+ * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
+ * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
+ * field is used as Crosstalk[47], and remainder of the Crosstalk *
+ * address bits (Crosstalk[46:34]) are always zero. While the maximum *
+ * Crosstalk space addressable by the SHub is thus the lower *
+ * 8-GBytes per widget (N-mode), only <SUP >7</SUP>/<SUB >32nds</SUB> *
+ * of this space can be accessed. *
+ * *
+ ************************************************************************/
+
+typedef union ii_itte7_u {
+ u64 ii_itte7_regval;
+ struct {
+ u64 i_offset:5;
+ u64 i_rsvd_1:3;
+ u64 i_w_num:4;
+ u64 i_iosp:1;
+ u64 i_rsvd:51;
+ } ii_itte7_fld_s;
+} ii_itte7_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are 9 instances of this register, one per *
+ * actual widget in this implementation of SHub and Crossbow. *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
+ * refers to Crossbow's internal space. *
+ * This register contains the state elements per widget that are *
+ * necessary to manage the PIO flow control on Crosstalk and on the *
+ * Router Network. See the PIO Flow Control chapter for a complete *
+ * description of this register *
+ * The SPUR_WR bit requires some explanation. When this register is *
+ * written, the new value of the C field is captured in an internal *
+ * register so the hardware can remember what the programmer wrote *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field *
+ * increments above this stored value, which indicates that there *
+ * have been more responses received than requests sent. The SPUR_WR *
+ * bit cannot be cleared until a value is written to the IPRBx *
+ * register; the write will correct the C field and capture its new *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
+ * . *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprb0_u {
+ u64 ii_iprb0_regval;
+ struct {
+ u64 i_c:8;
+ u64 i_na:14;
+ u64 i_rsvd_2:2;
+ u64 i_nb:14;
+ u64 i_rsvd_1:2;
+ u64 i_m:2;
+ u64 i_f:1;
+ u64 i_of_cnt:5;
+ u64 i_error:1;
+ u64 i_rd_to:1;
+ u64 i_spur_wr:1;
+ u64 i_spur_rd:1;
+ u64 i_rsvd:11;
+ u64 i_mult_err:1;
+ } ii_iprb0_fld_s;
+} ii_iprb0_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are 9 instances of this register, one per *
+ * actual widget in this implementation of SHub and Crossbow. *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
+ * refers to Crossbow's internal space. *
+ * This register contains the state elements per widget that are *
+ * necessary to manage the PIO flow control on Crosstalk and on the *
+ * Router Network. See the PIO Flow Control chapter for a complete *
+ * description of this register *
+ * The SPUR_WR bit requires some explanation. When this register is *
+ * written, the new value of the C field is captured in an internal *
+ * register so the hardware can remember what the programmer wrote *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field *
+ * increments above this stored value, which indicates that there *
+ * have been more responses received than requests sent. The SPUR_WR *
+ * bit cannot be cleared until a value is written to the IPRBx *
+ * register; the write will correct the C field and capture its new *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
+ * . *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprb8_u {
+ u64 ii_iprb8_regval;
+ struct {
+ u64 i_c:8;
+ u64 i_na:14;
+ u64 i_rsvd_2:2;
+ u64 i_nb:14;
+ u64 i_rsvd_1:2;
+ u64 i_m:2;
+ u64 i_f:1;
+ u64 i_of_cnt:5;
+ u64 i_error:1;
+ u64 i_rd_to:1;
+ u64 i_spur_wr:1;
+ u64 i_spur_rd:1;
+ u64 i_rsvd:11;
+ u64 i_mult_err:1;
+ } ii_iprb8_fld_s;
+} ii_iprb8_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are 9 instances of this register, one per *
+ * actual widget in this implementation of SHub and Crossbow. *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
+ * refers to Crossbow's internal space. *
+ * This register contains the state elements per widget that are *
+ * necessary to manage the PIO flow control on Crosstalk and on the *
+ * Router Network. See the PIO Flow Control chapter for a complete *
+ * description of this register *
+ * The SPUR_WR bit requires some explanation. When this register is *
+ * written, the new value of the C field is captured in an internal *
+ * register so the hardware can remember what the programmer wrote *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field *
+ * increments above this stored value, which indicates that there *
+ * have been more responses received than requests sent. The SPUR_WR *
+ * bit cannot be cleared until a value is written to the IPRBx *
+ * register; the write will correct the C field and capture its new *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
+ * . *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprb9_u {
+ u64 ii_iprb9_regval;
+ struct {
+ u64 i_c:8;
+ u64 i_na:14;
+ u64 i_rsvd_2:2;
+ u64 i_nb:14;
+ u64 i_rsvd_1:2;
+ u64 i_m:2;
+ u64 i_f:1;
+ u64 i_of_cnt:5;
+ u64 i_error:1;
+ u64 i_rd_to:1;
+ u64 i_spur_wr:1;
+ u64 i_spur_rd:1;
+ u64 i_rsvd:11;
+ u64 i_mult_err:1;
+ } ii_iprb9_fld_s;
+} ii_iprb9_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are 9 instances of this register, one per *
+ * actual widget in this implementation of SHub and Crossbow. *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
+ * refers to Crossbow's internal space. *
+ * This register contains the state elements per widget that are *
+ * necessary to manage the PIO flow control on Crosstalk and on the *
+ * Router Network. See the PIO Flow Control chapter for a complete *
+ * description of this register *
+ * The SPUR_WR bit requires some explanation. When this register is *
+ * written, the new value of the C field is captured in an internal *
+ * register so the hardware can remember what the programmer wrote *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field *
+ * increments above this stored value, which indicates that there *
+ * have been more responses received than requests sent. The SPUR_WR *
+ * bit cannot be cleared until a value is written to the IPRBx *
+ * register; the write will correct the C field and capture its new *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
+ * *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprba_u {
+ u64 ii_iprba_regval;
+ struct {
+ u64 i_c:8;
+ u64 i_na:14;
+ u64 i_rsvd_2:2;
+ u64 i_nb:14;
+ u64 i_rsvd_1:2;
+ u64 i_m:2;
+ u64 i_f:1;
+ u64 i_of_cnt:5;
+ u64 i_error:1;
+ u64 i_rd_to:1;
+ u64 i_spur_wr:1;
+ u64 i_spur_rd:1;
+ u64 i_rsvd:11;
+ u64 i_mult_err:1;
+ } ii_iprba_fld_s;
+} ii_iprba_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are 9 instances of this register, one per *
+ * actual widget in this implementation of SHub and Crossbow. *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
+ * refers to Crossbow's internal space. *
+ * This register contains the state elements per widget that are *
+ * necessary to manage the PIO flow control on Crosstalk and on the *
+ * Router Network. See the PIO Flow Control chapter for a complete *
+ * description of this register *
+ * The SPUR_WR bit requires some explanation. When this register is *
+ * written, the new value of the C field is captured in an internal *
+ * register so the hardware can remember what the programmer wrote *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field *
+ * increments above this stored value, which indicates that there *
+ * have been more responses received than requests sent. The SPUR_WR *
+ * bit cannot be cleared until a value is written to the IPRBx *
+ * register; the write will correct the C field and capture its new *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
+ * . *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprbb_u {
+ u64 ii_iprbb_regval;
+ struct {
+ u64 i_c:8;
+ u64 i_na:14;
+ u64 i_rsvd_2:2;
+ u64 i_nb:14;
+ u64 i_rsvd_1:2;
+ u64 i_m:2;
+ u64 i_f:1;
+ u64 i_of_cnt:5;
+ u64 i_error:1;
+ u64 i_rd_to:1;
+ u64 i_spur_wr:1;
+ u64 i_spur_rd:1;
+ u64 i_rsvd:11;
+ u64 i_mult_err:1;
+ } ii_iprbb_fld_s;
+} ii_iprbb_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are 9 instances of this register, one per *
+ * actual widget in this implementation of SHub and Crossbow. *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
+ * refers to Crossbow's internal space. *
+ * This register contains the state elements per widget that are *
+ * necessary to manage the PIO flow control on Crosstalk and on the *
+ * Router Network. See the PIO Flow Control chapter for a complete *
+ * description of this register *
+ * The SPUR_WR bit requires some explanation. When this register is *
+ * written, the new value of the C field is captured in an internal *
+ * register so the hardware can remember what the programmer wrote *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field *
+ * increments above this stored value, which indicates that there *
+ * have been more responses received than requests sent. The SPUR_WR *
+ * bit cannot be cleared until a value is written to the IPRBx *
+ * register; the write will correct the C field and capture its new *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
+ * . *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprbc_u {
+ u64 ii_iprbc_regval;
+ struct {
+ u64 i_c:8;
+ u64 i_na:14;
+ u64 i_rsvd_2:2;
+ u64 i_nb:14;
+ u64 i_rsvd_1:2;
+ u64 i_m:2;
+ u64 i_f:1;
+ u64 i_of_cnt:5;
+ u64 i_error:1;
+ u64 i_rd_to:1;
+ u64 i_spur_wr:1;
+ u64 i_spur_rd:1;
+ u64 i_rsvd:11;
+ u64 i_mult_err:1;
+ } ii_iprbc_fld_s;
+} ii_iprbc_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are 9 instances of this register, one per *
+ * actual widget in this implementation of SHub and Crossbow. *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
+ * refers to Crossbow's internal space. *
+ * This register contains the state elements per widget that are *
+ * necessary to manage the PIO flow control on Crosstalk and on the *
+ * Router Network. See the PIO Flow Control chapter for a complete *
+ * description of this register *
+ * The SPUR_WR bit requires some explanation. When this register is *
+ * written, the new value of the C field is captured in an internal *
+ * register so the hardware can remember what the programmer wrote *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field *
+ * increments above this stored value, which indicates that there *
+ * have been more responses received than requests sent. The SPUR_WR *
+ * bit cannot be cleared until a value is written to the IPRBx *
+ * register; the write will correct the C field and capture its new *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
+ * . *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprbd_u {
+ u64 ii_iprbd_regval;
+ struct {
+ u64 i_c:8;
+ u64 i_na:14;
+ u64 i_rsvd_2:2;
+ u64 i_nb:14;
+ u64 i_rsvd_1:2;
+ u64 i_m:2;
+ u64 i_f:1;
+ u64 i_of_cnt:5;
+ u64 i_error:1;
+ u64 i_rd_to:1;
+ u64 i_spur_wr:1;
+ u64 i_spur_rd:1;
+ u64 i_rsvd:11;
+ u64 i_mult_err:1;
+ } ii_iprbd_fld_s;
+} ii_iprbd_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are 9 instances of this register, one per *
+ * actual widget in this implementation of SHub and Crossbow. *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
+ * refers to Crossbow's internal space. *
+ * This register contains the state elements per widget that are *
+ * necessary to manage the PIO flow control on Crosstalk and on the *
+ * Router Network. See the PIO Flow Control chapter for a complete *
+ * description of this register *
+ * The SPUR_WR bit requires some explanation. When this register is *
+ * written, the new value of the C field is captured in an internal *
+ * register so the hardware can remember what the programmer wrote *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field *
+ * increments above this stored value, which indicates that there *
+ * have been more responses received than requests sent. The SPUR_WR *
+ * bit cannot be cleared until a value is written to the IPRBx *
+ * register; the write will correct the C field and capture its new *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
+ * . *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprbe_u {
+ u64 ii_iprbe_regval;
+ struct {
+ u64 i_c:8;
+ u64 i_na:14;
+ u64 i_rsvd_2:2;
+ u64 i_nb:14;
+ u64 i_rsvd_1:2;
+ u64 i_m:2;
+ u64 i_f:1;
+ u64 i_of_cnt:5;
+ u64 i_error:1;
+ u64 i_rd_to:1;
+ u64 i_spur_wr:1;
+ u64 i_spur_rd:1;
+ u64 i_rsvd:11;
+ u64 i_mult_err:1;
+ } ii_iprbe_fld_s;
+} ii_iprbe_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are 9 instances of this register, one per *
+ * actual widget in this implementation of Shub and Crossbow. *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
+ * refers to Crossbow's internal space. *
+ * This register contains the state elements per widget that are *
+ * necessary to manage the PIO flow control on Crosstalk and on the *
+ * Router Network. See the PIO Flow Control chapter for a complete *
+ * description of this register *
+ * The SPUR_WR bit requires some explanation. When this register is *
+ * written, the new value of the C field is captured in an internal *
+ * register so the hardware can remember what the programmer wrote *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field *
+ * increments above this stored value, which indicates that there *
+ * have been more responses received than requests sent. The SPUR_WR *
+ * bit cannot be cleared until a value is written to the IPRBx *
+ * register; the write will correct the C field and capture its new *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
+ * . *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprbf_u {
+ u64 ii_iprbf_regval;
+ struct {
+ u64 i_c:8;
+ u64 i_na:14;
+ u64 i_rsvd_2:2;
+ u64 i_nb:14;
+ u64 i_rsvd_1:2;
+ u64 i_m:2;
+ u64 i_f:1;
+ u64 i_of_cnt:5;
+ u64 i_error:1;
+ u64 i_rd_to:1;
+ u64 i_spur_wr:1;
+ u64 i_spur_rd:1;
+ u64 i_rsvd:11;
+ u64 i_mult_err:1;
+ } ii_iprbf_fld_s; /* was ii_iprbe_fld_s: copy-paste typo */
+} ii_iprbf_u_t;
+
+/************************************************************************
+ * *
+ * This register specifies the timeout value to use for monitoring *
+ * Crosstalk credits which are used outbound to Crosstalk. An *
+ * internal counter called the Crosstalk Credit Timeout Counter *
+ * increments every 128 II clocks. The counter starts counting *
+ * anytime the credit count drops below a threshold, and resets to *
+ * zero (stops counting) anytime the credit count is at or above the *
+ * threshold. The threshold is 1 credit in direct connect mode and 2 *
+ * in Crossbow connect mode. When the internal Crosstalk Credit *
+ * Timeout Counter reaches the value programmed in this register, a *
+ * Crosstalk Credit Timeout has occurred. The internal counter is not *
+ * readable from software, and stops counting at its maximum value, *
+ * so it cannot cause more than one interrupt. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ixcc_u {
+ u64 ii_ixcc_regval;
+ struct {
+ u64 i_time_out:26;
+ u64 i_rsvd:38;
+ } ii_ixcc_fld_s;
+} ii_ixcc_u_t;
+
+/************************************************************************
+ * *
+ * Description: This register qualifies all the PIO and DMA *
+ * operations launched from widget 0 towards the SHub. In *
+ * addition, it also qualifies accesses by the BTE streams. *
+ * The bits in each field of this register are cleared by the SHub *
+ * upon detection of an error which requires widget 0 or the BTE *
+ * streams to be terminated. Whether or not widget x has access *
+ * rights to this SHub is determined by an AND of the device *
+ * enable bit in the appropriate field of this register and bit 0 in *
+ * the Wx_IAC field. The bits in this field are set by writing a 1 to *
+ * them. Incoming replies from Crosstalk are not subject to this *
+ * access control mechanism. *
+ * *
+ ************************************************************************/
+
+typedef union ii_imem_u {
+ u64 ii_imem_regval;
+ struct {
+ u64 i_w0_esd:1;
+ u64 i_rsvd_3:3;
+ u64 i_b0_esd:1;
+ u64 i_rsvd_2:3;
+ u64 i_b1_esd:1;
+ u64 i_rsvd_1:3;
+ u64 i_clr_precise:1;
+ u64 i_rsvd:51;
+ } ii_imem_fld_s;
+} ii_imem_u_t;
+
+/************************************************************************
+ * *
+ * Description: This register specifies the timeout value to use for *
+ * monitoring Crosstalk tail flits coming into the Shub in the *
+ * TAIL_TO field. An internal counter associated with this register *
+ * is incremented every 128 II internal clocks (7 bits). The counter *
+ * starts counting anytime a header micropacket is received and stops *
+ * counting (and resets to zero) any time a micropacket with a Tail *
+ * bit is received. Once the counter reaches the threshold value *
+ * programmed in this register, it generates an interrupt to the *
+ * processor that is programmed into the IIDSR. The counter saturates *
+ * (does not roll over) at its maximum value, so it cannot cause *
+ * another interrupt until after it is cleared. *
+ * The register also contains the Read Response Timeout values. The *
+ * Prescalar is 23 bits, and counts II clocks. An internal counter *
+ * increments on every II clock and when it reaches the value in the *
+ * Prescalar field, all IPRTE registers with their valid bits set *
+ * have their Read Response timers bumped. Whenever any of them match *
+ * the value in the RRSP_TO field, a Read Response Timeout has *
+ * occurred, and error handling occurs as described in the Error *
+ * Handling section of this document. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ixtt_u {
+ u64 ii_ixtt_regval;
+ struct {
+ u64 i_tail_to:26;
+ u64 i_rsvd_1:6;
+ u64 i_rrsp_ps:23;
+ u64 i_rrsp_to:5;
+ u64 i_rsvd:4;
+ } ii_ixtt_fld_s;
+} ii_ixtt_u_t;
+
+/************************************************************************
+ * *
+ * Writing a 1 to the fields of this register clears the appropriate *
+ * error bits in other areas of SHub. Note that when the *
+ * E_PRB_x bits are used to clear error bits in PRB registers, *
+ * SPUR_RD and SPUR_WR may persist, because they require additional *
+ * action to clear them. See the IPRBx and IXSS Register *
+ * specifications. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ieclr_u {
+ u64 ii_ieclr_regval;
+ struct {
+ u64 i_e_prb_0:1;
+ u64 i_rsvd:7;
+ u64 i_e_prb_8:1;
+ u64 i_e_prb_9:1;
+ u64 i_e_prb_a:1;
+ u64 i_e_prb_b:1;
+ u64 i_e_prb_c:1;
+ u64 i_e_prb_d:1;
+ u64 i_e_prb_e:1;
+ u64 i_e_prb_f:1;
+ u64 i_e_crazy:1;
+ u64 i_e_bte_0:1;
+ u64 i_e_bte_1:1;
+ u64 i_reserved_1:10;
+ u64 i_spur_rd_hdr:1;
+ u64 i_cam_intr_to:1;
+ u64 i_cam_overflow:1;
+ u64 i_cam_read_miss:1;
+ u64 i_ioq_rep_underflow:1;
+ u64 i_ioq_req_underflow:1;
+ u64 i_ioq_rep_overflow:1;
+ u64 i_ioq_req_overflow:1;
+ u64 i_iiq_rep_overflow:1;
+ u64 i_iiq_req_overflow:1;
+ u64 i_ii_xn_rep_cred_overflow:1;
+ u64 i_ii_xn_req_cred_overflow:1;
+ u64 i_ii_xn_invalid_cmd:1;
+ u64 i_xn_ii_invalid_cmd:1;
+ u64 i_reserved_2:21;
+ } ii_ieclr_fld_s;
+} ii_ieclr_u_t;
+
+/************************************************************************
+ * *
+ * This register controls both BTEs. SOFT_RESET is intended for *
+ * recovery after an error. COUNT controls the total number of CRBs *
+ * that both BTEs (combined) can use, which affects total BTE *
+ * bandwidth. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ibcr_u {
+ u64 ii_ibcr_regval;
+ struct {
+ u64 i_count:4;
+ u64 i_rsvd_1:4;
+ u64 i_soft_reset:1;
+ u64 i_rsvd:55;
+ } ii_ibcr_fld_s;
+} ii_ibcr_u_t;
+
+/************************************************************************
+ * *
+ * This register contains the header of a spurious read response *
+ * received from Crosstalk. A spurious read response is defined as a *
+ * read response received by II from a widget for which (1) the SIDN *
+ * has a value between 1 and 7, inclusive (II never sends requests to *
+ * these widgets), (2) there is no valid IPRTE register which *
+ * corresponds to the TNUM, or (3) the widget indicated in SIDN is *
+ * not the same as the widget recorded in the IPRTE register *
+ * referenced by the TNUM. If this condition is true, and if the *
+ * IXSS[VALID] bit is clear, then the header of the spurious read *
+ * response is capture in IXSM and IXSS, and IXSS[VALID] is set. The *
+ * errant header is thereby captured, and no further spurious read *
+ * responses are captured until IXSS[VALID] is cleared by setting *
+ * the appropriate bit in IECLR. Every time a spurious read response *
+ * is detected, the SPUR_RD bit of the PRB corresponding to the *
+ * incoming message's SIDN field is set. This happens regardless of *
+ * whether a header is captured. The programmer should check *
+ * IXSM[SIDN] to determine which widget sent the spurious response, *
+ * because there may be more than one SPUR_RD bit set in the PRB *
+ * registers. The widget indicated by IXSM[SIDN] was the first *
+ * spurious read response to be received since the last time *
+ * IXSS[VALID] was clear. The SPUR_RD bit of the corresponding PRB *
+ * will be set. Any SPUR_RD bits in any other PRB registers indicate *
+ * spurious messages from other widgets which were detected after *
+ * the header was captured. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ixsm_u {
+ u64 ii_ixsm_regval;
+ struct {
+ u64 i_byte_en:32;
+ u64 i_reserved:1;
+ u64 i_tag:3;
+ u64 i_alt_pactyp:4;
+ u64 i_bo:1;
+ u64 i_error:1;
+ u64 i_vbpm:1;
+ u64 i_gbr:1;
+ u64 i_ds:2;
+ u64 i_ct:1;
+ u64 i_tnum:5;
+ u64 i_pactyp:4;
+ u64 i_sidn:4;
+ u64 i_didn:4;
+ } ii_ixsm_fld_s;
+} ii_ixsm_u_t;
+
+/************************************************************************
+ * *
+ * This register contains the sideband bits of a spurious read *
+ * response received from Crosstalk. *
+ * *
+ ************************************************************************/
+
+/* IXSS: sideband bits of the captured spurious read response; i_valid is
+ * the IXSS[VALID] capture latch described above. Widths sum to 64 (8+55+1). */
+typedef union ii_ixss_u {
+ u64 ii_ixss_regval;
+ struct {
+ u64 i_sideband:8;
+ u64 i_rsvd:55;
+ u64 i_valid:1;
+ } ii_ixss_fld_s;
+} ii_ixss_u_t;
+
+/************************************************************************
+ * *
+ * This register enables software to access the II LLP's test port. *
+ * Refer to the LLP 2.5 documentation for an explanation of the test *
+ * port. Software can write to this register to program the values *
+ * for the control fields (TestErrCapture, TestClear, TestFlit, *
+ * TestMask and TestSeed). Similarly, software can read from this *
+ * register to obtain the values of the test port's status outputs *
+ * (TestCBerr, TestValid and TestData). *
+ * *
+ ************************************************************************/
+
+/* ILCT: software access to the II LLP test port -- writable control
+ * fields (seed/mask/flit/clear/err-capture) plus read-back status bits
+ * (data/valid/cberr); see comment above. */
+typedef union ii_ilct_u {
+ u64 ii_ilct_regval;
+ struct {
+ u64 i_test_seed:20;
+ u64 i_test_mask:8;
+ u64 i_test_data:20;
+ u64 i_test_valid:1;
+ u64 i_test_cberr:1;
+ u64 i_test_flit:3;
+ u64 i_test_clear:1;
+ u64 i_test_err_capture:1;
+ u64 i_rsvd:9;
+ } ii_ilct_fld_s;
+} ii_ilct_u_t;
+
+/************************************************************************
+ * *
+ * If the II detects an illegal incoming Duplonet packet (request or *
+ * reply) when VALID==0 in the IIEPH1 register, then it saves the *
+ * contents of the packet's header flit in the IIEPH1 and IIEPH2 *
+ * registers, sets the VALID bit in IIEPH1, clears the OVERRUN bit, *
+ * and assigns a value to the ERR_TYPE field which indicates the *
+ * specific nature of the error. The II recognizes four different *
+ * types of errors: short request packets (ERR_TYPE==2), short reply *
+ * packets (ERR_TYPE==3), long request packets (ERR_TYPE==4) and long *
+ * reply packets (ERR_TYPE==5). The encodings for these types of *
+ * errors were chosen to be consistent with the same types of errors *
+ * indicated by the ERR_TYPE field in the LB_ERROR_HDR1 register (in *
+ * the LB unit). If the II detects an illegal incoming Duplonet *
+ * packet when VALID==1 in the IIEPH1 register, then it merely sets *
+ * the OVERRUN bit to indicate that a subsequent error has happened, *
+ * and does nothing further. *
+ * *
+ ************************************************************************/
+
+/* IIEPH1: first 64 bits of a captured erroneous Duplonet header flit;
+ * i_err_type encodes the error class (2..5) and i_overrun/i_valid behave
+ * as described in the comment above. Continued in IIEPH2. */
+typedef union ii_iieph1_u {
+ u64 ii_iieph1_regval;
+ struct {
+ u64 i_command:7;
+ u64 i_rsvd_5:1;
+ u64 i_suppl:14;
+ u64 i_rsvd_4:1;
+ u64 i_source:14;
+ u64 i_rsvd_3:1;
+ u64 i_err_type:4;
+ u64 i_rsvd_2:4;
+ u64 i_overrun:1;
+ u64 i_rsvd_1:3;
+ u64 i_valid:1;
+ u64 i_rsvd:13;
+ } ii_iieph1_fld_s;
+} ii_iieph1_u_t;
+
+/************************************************************************
+ * *
+ * This register holds the Address field from the header flit of an *
+ * incoming erroneous Duplonet packet, along with the tail bit which *
+ * accompanied this header flit. This register is essentially an *
+ * extension of IIEPH1. Two registers were necessary because the 64 *
+ * bits available in only a single register were insufficient to *
+ * capture the entire header flit of an erroneous packet. *
+ * *
+ ************************************************************************/
+
+/* IIEPH2: extension of IIEPH1 -- address field and tail bit of the
+ * captured erroneous header flit. */
+typedef union ii_iieph2_u {
+ u64 ii_iieph2_regval;
+ struct {
+ u64 i_rsvd_0:3;
+ u64 i_address:47;
+ u64 i_rsvd_1:10;
+ u64 i_tail:1;
+ u64 i_rsvd:3;
+ } ii_iieph2_fld_s;
+} ii_iieph2_u_t;
+
+/******************************/
+
+/************************************************************************
+ * *
+ * This register's value is a bit vector that guards access from SXBs *
+ * to local registers within the II as well as to external Crosstalk *
+ * widgets *
+ * *
+ ************************************************************************/
+
+/* ISLAPR: 64-bit per-region access-guard vector for SXB accesses to II
+ * registers and external widgets (one bit per region). */
+typedef union ii_islapr_u {
+ u64 ii_islapr_regval;
+ struct {
+ u64 i_region:64;
+ } ii_islapr_fld_s;
+} ii_islapr_u_t;
+
+/************************************************************************
+ * *
+ * A write to this register of the 56-bit value "Pup+Bun" will cause *
+ * the bit in the ISLAPR register corresponding to the region of the *
+ * requestor to be set (access allowed).                                *
+ *                                                                      *
+ ************************************************************************/
+
+/* ISLAPO: writing the 56-bit override value ("Pup+Bun", see above) sets
+ * the requestor's region bit in ISLAPR, granting access. */
+typedef union ii_islapo_u {
+ u64 ii_islapo_regval;
+ struct {
+ u64 i_io_sbx_ovrride:56;
+ u64 i_rsvd:8;
+ } ii_islapo_fld_s;
+} ii_islapo_u_t;
+
+/************************************************************************
+ * *
+ * Determines how long the wrapper will wait after an interrupt is      *
+ * initially issued from the II before it times out the outstanding *
+ * interrupt and drops it from the interrupt queue. *
+ * *
+ ************************************************************************/
+
+/* IWI: II-wrapper interrupt timeout tuning -- prescale, timeout and
+ * retry period for dropping stale interrupts from the queue. */
+typedef union ii_iwi_u {
+ u64 ii_iwi_regval;
+ struct {
+ u64 i_prescale:24;
+ u64 i_rsvd:8;
+ u64 i_timeout:8;
+ u64 i_rsvd1:8;
+ u64 i_intrpt_retry_period:8;
+ u64 i_rsvd2:8;
+ } ii_iwi_fld_s;
+} ii_iwi_u_t;
+
+/************************************************************************
+ * *
+ * Log errors which have occurred in the II wrapper. The errors are *
+ * cleared by writing to the IECLR register. *
+ * *
+ ************************************************************************/
+
+/* IWEL: II-wrapper error log; bits are cleared through IECLR (see
+ * above).  Masked by the parallel IWEIM register. */
+typedef union ii_iwel_u {
+ u64 ii_iwel_regval;
+ struct {
+ u64 i_intr_timed_out:1;
+ u64 i_rsvd:7;
+ u64 i_cam_overflow:1;
+ u64 i_cam_read_miss:1;
+ u64 i_rsvd1:2;
+ u64 i_ioq_rep_underflow:1;
+ u64 i_ioq_req_underflow:1;
+ u64 i_ioq_rep_overflow:1;
+ u64 i_ioq_req_overflow:1;
+ u64 i_iiq_rep_overflow:1;
+ u64 i_iiq_req_overflow:1;
+ u64 i_rsvd2:6;
+ u64 i_ii_xn_rep_cred_over_under:1;
+ u64 i_ii_xn_req_cred_over_under:1;
+ u64 i_rsvd3:6;
+ u64 i_ii_xn_invalid_cmd:1;
+ u64 i_xn_ii_invalid_cmd:1;
+ u64 i_rsvd4:30;
+ } ii_iwel_fld_s;
+} ii_iwel_u_t;
+
+/************************************************************************
+ * *
+ * Controls the II wrapper. *
+ * *
+ ************************************************************************/
+
+/* IWC: II-wrapper control -- DMA byte swap, CAM reset, credit logging,
+ * inbound-queue depths and credit-forcing bits. */
+typedef union ii_iwc_u {
+ u64 ii_iwc_regval;
+ struct {
+ u64 i_dma_byte_swap:1;
+ u64 i_rsvd:3;
+ u64 i_cam_read_lines_reset:1;
+ u64 i_rsvd1:3;
+ u64 i_ii_xn_cred_over_under_log:1;
+ u64 i_rsvd2:19;
+ u64 i_xn_rep_iq_depth:5;
+ u64 i_rsvd3:3;
+ u64 i_xn_req_iq_depth:5;
+ u64 i_rsvd4:3;
+ u64 i_iiq_depth:6;
+ u64 i_rsvd5:12;
+ u64 i_force_rep_cred:1;
+ u64 i_force_req_cred:1;
+ } ii_iwc_fld_s;
+} ii_iwc_u_t;
+
+/************************************************************************
+ * *
+ * Status in the II wrapper. *
+ * *
+ ************************************************************************/
+
+/* IWS: II-wrapper status -- instantaneous reply/request IQ credit counts. */
+typedef union ii_iws_u {
+ u64 ii_iws_regval;
+ struct {
+ u64 i_xn_rep_iq_credits:5;
+ u64 i_rsvd:3;
+ u64 i_xn_req_iq_credits:5;
+ u64 i_rsvd1:51;
+ } ii_iws_fld_s;
+} ii_iws_u_t;
+
+/************************************************************************
+ * *
+ * Masks errors in the IWEL register. *
+ * *
+ ************************************************************************/
+
+/* IWEIM: per-bit masks for the IWEL error-log bits; the layout parallels
+ * ii_iwel_u (two credit-field names differ slightly from IWEL's). */
+typedef union ii_iweim_u {
+ u64 ii_iweim_regval;
+ struct {
+ u64 i_intr_timed_out:1;
+ u64 i_rsvd:7;
+ u64 i_cam_overflow:1;
+ u64 i_cam_read_miss:1;
+ u64 i_rsvd1:2;
+ u64 i_ioq_rep_underflow:1;
+ u64 i_ioq_req_underflow:1;
+ u64 i_ioq_rep_overflow:1;
+ u64 i_ioq_req_overflow:1;
+ u64 i_iiq_rep_overflow:1;
+ u64 i_iiq_req_overflow:1;
+ u64 i_rsvd2:6;
+ u64 i_ii_xn_rep_cred_overflow:1;
+ u64 i_ii_xn_req_cred_overflow:1;
+ u64 i_rsvd3:6;
+ u64 i_ii_xn_invalid_cmd:1;
+ u64 i_xn_ii_invalid_cmd:1;
+ u64 i_rsvd4:30;
+ } ii_iweim_fld_s;
+} ii_iweim_u_t;
+
+/************************************************************************
+ * *
+ * A write to this register causes a particular field in the *
+ * corresponding widget's PRB entry to be adjusted up or down by 1. *
+ * This counter should be used when recovering from error and reset *
+ * conditions. Note that software would be capable of causing *
+ * inadvertent overflow or underflow of these counters. *
+ * *
+ ************************************************************************/
+
+/* IPCA: write-only adjust of a PRB counter field (+/-1) for widget
+ * i_wid; see the overflow/underflow caveat above. */
+typedef union ii_ipca_u {
+ u64 ii_ipca_regval;
+ struct {
+ u64 i_wid:4;
+ u64 i_adjust:1;
+ u64 i_rsvd_1:3;
+ u64 i_field:2;
+ u64 i_rsvd:54;
+ } ii_ipca_fld_s;
+} ii_ipca_u_t;
+
+/************************************************************************
+ * *
+ * There are 8 instances of this register. This register contains *
+ * the information that the II has to remember once it has launched a *
+ * PIO Read operation. The contents are used to form the correct *
+ * Router Network packet and direct the Crosstalk reply to the *
+ * appropriate processor. *
+ * *
+ ************************************************************************/
+
+/* IPRTE0A: PIO Read Table Entry 0, word A (i_widget, i_to_cnt, i_vld). */
+typedef union ii_iprte0a_u {
+ u64 ii_iprte0a_regval;
+ struct {
+ u64 i_rsvd_1:54;
+ u64 i_widget:4;
+ u64 i_to_cnt:5;
+ u64 i_vld:1;
+ } ii_iprte0a_fld_s;
+} ii_iprte0a_u_t;
+
+/************************************************************************
+ * *
+ * There are 8 instances of this register. This register contains *
+ * the information that the II has to remember once it has launched a *
+ * PIO Read operation. The contents are used to form the correct *
+ * Router Network packet and direct the Crosstalk reply to the *
+ * appropriate processor. *
+ * *
+ ************************************************************************/
+
+/* IPRTE1A: PIO Read Table Entry 1, word A -- same layout as IPRTE0A. */
+typedef union ii_iprte1a_u {
+ u64 ii_iprte1a_regval;
+ struct {
+ u64 i_rsvd_1:54;
+ u64 i_widget:4;
+ u64 i_to_cnt:5;
+ u64 i_vld:1;
+ } ii_iprte1a_fld_s;
+} ii_iprte1a_u_t;
+
+/************************************************************************
+ * *
+ * There are 8 instances of this register. This register contains *
+ * the information that the II has to remember once it has launched a *
+ * PIO Read operation. The contents are used to form the correct *
+ * Router Network packet and direct the Crosstalk reply to the *
+ * appropriate processor. *
+ * *
+ ************************************************************************/
+
+/* IPRTE2A: PIO Read Table Entry 2, word A -- same layout as IPRTE0A. */
+typedef union ii_iprte2a_u {
+ u64 ii_iprte2a_regval;
+ struct {
+ u64 i_rsvd_1:54;
+ u64 i_widget:4;
+ u64 i_to_cnt:5;
+ u64 i_vld:1;
+ } ii_iprte2a_fld_s;
+} ii_iprte2a_u_t;
+
+/************************************************************************
+ * *
+ * There are 8 instances of this register. This register contains *
+ * the information that the II has to remember once it has launched a *
+ * PIO Read operation. The contents are used to form the correct *
+ * Router Network packet and direct the Crosstalk reply to the *
+ * appropriate processor. *
+ * *
+ ************************************************************************/
+
+/* IPRTE3A: PIO Read Table Entry 3, word A -- same layout as IPRTE0A. */
+typedef union ii_iprte3a_u {
+ u64 ii_iprte3a_regval;
+ struct {
+ u64 i_rsvd_1:54;
+ u64 i_widget:4;
+ u64 i_to_cnt:5;
+ u64 i_vld:1;
+ } ii_iprte3a_fld_s;
+} ii_iprte3a_u_t;
+
+/************************************************************************
+ * *
+ * There are 8 instances of this register. This register contains *
+ * the information that the II has to remember once it has launched a *
+ * PIO Read operation. The contents are used to form the correct *
+ * Router Network packet and direct the Crosstalk reply to the *
+ * appropriate processor. *
+ * *
+ ************************************************************************/
+
+/* IPRTE4A: PIO Read Table Entry 4, word A -- same layout as IPRTE0A. */
+typedef union ii_iprte4a_u {
+ u64 ii_iprte4a_regval;
+ struct {
+ u64 i_rsvd_1:54;
+ u64 i_widget:4;
+ u64 i_to_cnt:5;
+ u64 i_vld:1;
+ } ii_iprte4a_fld_s;
+} ii_iprte4a_u_t;
+
+/************************************************************************
+ * *
+ * There are 8 instances of this register. This register contains *
+ * the information that the II has to remember once it has launched a *
+ * PIO Read operation. The contents are used to form the correct *
+ * Router Network packet and direct the Crosstalk reply to the *
+ * appropriate processor. *
+ * *
+ ************************************************************************/
+
+/* IPRTE5A: PIO Read Table Entry 5, word A -- same layout as IPRTE0A. */
+typedef union ii_iprte5a_u {
+ u64 ii_iprte5a_regval;
+ struct {
+ u64 i_rsvd_1:54;
+ u64 i_widget:4;
+ u64 i_to_cnt:5;
+ u64 i_vld:1;
+ } ii_iprte5a_fld_s;
+} ii_iprte5a_u_t;
+
+/************************************************************************
+ * *
+ * There are 8 instances of this register. This register contains *
+ * the information that the II has to remember once it has launched a *
+ * PIO Read operation. The contents are used to form the correct *
+ * Router Network packet and direct the Crosstalk reply to the *
+ * appropriate processor. *
+ * *
+ ************************************************************************/
+
+/* IPRTE6A: PIO Read Table Entry 6, word A -- same layout as IPRTE0A. */
+typedef union ii_iprte6a_u {
+ u64 ii_iprte6a_regval;
+ struct {
+ u64 i_rsvd_1:54;
+ u64 i_widget:4;
+ u64 i_to_cnt:5;
+ u64 i_vld:1;
+ } ii_iprte6a_fld_s;
+} ii_iprte6a_u_t;
+
+/************************************************************************
+ * *
+ * There are 8 instances of this register. This register contains *
+ * the information that the II has to remember once it has launched a *
+ * PIO Read operation. The contents are used to form the correct *
+ * Router Network packet and direct the Crosstalk reply to the *
+ * appropriate processor. *
+ * *
+ ************************************************************************/
+
+/* IPRTE7A: PIO Read Table Entry 7, word A -- same layout as IPRTE0A.
+ * NOTE(review): the field-struct name "ii_iprtea7_fld_s" breaks the
+ * "ii_iprteNa_fld_s" pattern of its siblings; kept as-is because
+ * external code may reference this name. */
+typedef union ii_iprte7a_u {
+ u64 ii_iprte7a_regval;
+ struct {
+ u64 i_rsvd_1:54;
+ u64 i_widget:4;
+ u64 i_to_cnt:5;
+ u64 i_vld:1;
+ } ii_iprtea7_fld_s;
+} ii_iprte7a_u_t;
+
+/************************************************************************
+ * *
+ * There are 8 instances of this register. This register contains *
+ * the information that the II has to remember once it has launched a *
+ * PIO Read operation. The contents are used to form the correct *
+ * Router Network packet and direct the Crosstalk reply to the *
+ * appropriate processor. *
+ * *
+ ************************************************************************/
+
+/* IPRTE0B: PIO Read Table Entry 0, word B (i_address, i_init, i_source). */
+typedef union ii_iprte0b_u {
+ u64 ii_iprte0b_regval;
+ struct {
+ u64 i_rsvd_1:3;
+ u64 i_address:47;
+ u64 i_init:3;
+ u64 i_source:11;
+ } ii_iprte0b_fld_s;
+} ii_iprte0b_u_t;
+
+/************************************************************************
+ * *
+ * There are 8 instances of this register. This register contains *
+ * the information that the II has to remember once it has launched a *
+ * PIO Read operation. The contents are used to form the correct *
+ * Router Network packet and direct the Crosstalk reply to the *
+ * appropriate processor. *
+ * *
+ ************************************************************************/
+
+/* IPRTE1B: PIO Read Table Entry 1, word B -- same layout as IPRTE0B. */
+typedef union ii_iprte1b_u {
+ u64 ii_iprte1b_regval;
+ struct {
+ u64 i_rsvd_1:3;
+ u64 i_address:47;
+ u64 i_init:3;
+ u64 i_source:11;
+ } ii_iprte1b_fld_s;
+} ii_iprte1b_u_t;
+
+/************************************************************************
+ * *
+ * There are 8 instances of this register. This register contains *
+ * the information that the II has to remember once it has launched a *
+ * PIO Read operation. The contents are used to form the correct *
+ * Router Network packet and direct the Crosstalk reply to the *
+ * appropriate processor. *
+ * *
+ ************************************************************************/
+
+/* IPRTE2B: PIO Read Table Entry 2, word B -- same layout as IPRTE0B. */
+typedef union ii_iprte2b_u {
+ u64 ii_iprte2b_regval;
+ struct {
+ u64 i_rsvd_1:3;
+ u64 i_address:47;
+ u64 i_init:3;
+ u64 i_source:11;
+ } ii_iprte2b_fld_s;
+} ii_iprte2b_u_t;
+
+/************************************************************************
+ * *
+ * There are 8 instances of this register. This register contains *
+ * the information that the II has to remember once it has launched a *
+ * PIO Read operation. The contents are used to form the correct *
+ * Router Network packet and direct the Crosstalk reply to the *
+ * appropriate processor. *
+ * *
+ ************************************************************************/
+
+/* IPRTE3B: PIO Read Table Entry 3, word B -- same layout as IPRTE0B. */
+typedef union ii_iprte3b_u {
+ u64 ii_iprte3b_regval;
+ struct {
+ u64 i_rsvd_1:3;
+ u64 i_address:47;
+ u64 i_init:3;
+ u64 i_source:11;
+ } ii_iprte3b_fld_s;
+} ii_iprte3b_u_t;
+
+/************************************************************************
+ * *
+ * There are 8 instances of this register. This register contains *
+ * the information that the II has to remember once it has launched a *
+ * PIO Read operation. The contents are used to form the correct *
+ * Router Network packet and direct the Crosstalk reply to the *
+ * appropriate processor. *
+ * *
+ ************************************************************************/
+
+/* IPRTE4B: PIO Read Table Entry 4, word B -- same layout as IPRTE0B. */
+typedef union ii_iprte4b_u {
+ u64 ii_iprte4b_regval;
+ struct {
+ u64 i_rsvd_1:3;
+ u64 i_address:47;
+ u64 i_init:3;
+ u64 i_source:11;
+ } ii_iprte4b_fld_s;
+} ii_iprte4b_u_t;
+
+/************************************************************************
+ * *
+ * There are 8 instances of this register. This register contains *
+ * the information that the II has to remember once it has launched a *
+ * PIO Read operation. The contents are used to form the correct *
+ * Router Network packet and direct the Crosstalk reply to the *
+ * appropriate processor. *
+ * *
+ ************************************************************************/
+
+/* IPRTE5B: PIO Read Table Entry 5, word B -- same layout as IPRTE0B. */
+typedef union ii_iprte5b_u {
+ u64 ii_iprte5b_regval;
+ struct {
+ u64 i_rsvd_1:3;
+ u64 i_address:47;
+ u64 i_init:3;
+ u64 i_source:11;
+ } ii_iprte5b_fld_s;
+} ii_iprte5b_u_t;
+
+/************************************************************************
+ * *
+ * There are 8 instances of this register. This register contains *
+ * the information that the II has to remember once it has launched a *
+ * PIO Read operation. The contents are used to form the correct *
+ * Router Network packet and direct the Crosstalk reply to the *
+ * appropriate processor. *
+ * *
+ ************************************************************************/
+
+/* IPRTE6B: PIO Read Table Entry 6, word B -- same layout as IPRTE0B. */
+typedef union ii_iprte6b_u {
+ u64 ii_iprte6b_regval;
+ struct {
+ u64 i_rsvd_1:3;
+ u64 i_address:47;
+ u64 i_init:3;
+ u64 i_source:11;
+
+ } ii_iprte6b_fld_s;
+} ii_iprte6b_u_t;
+
+/************************************************************************
+ * *
+ * There are 8 instances of this register. This register contains *
+ * the information that the II has to remember once it has launched a *
+ * PIO Read operation. The contents are used to form the correct *
+ * Router Network packet and direct the Crosstalk reply to the *
+ * appropriate processor. *
+ * *
+ ************************************************************************/
+
+/* IPRTE7B: PIO Read Table Entry 7, word B -- same layout as IPRTE0B. */
+typedef union ii_iprte7b_u {
+ u64 ii_iprte7b_regval;
+ struct {
+ u64 i_rsvd_1:3;
+ u64 i_address:47;
+ u64 i_init:3;
+ u64 i_source:11;
+ } ii_iprte7b_fld_s;
+} ii_iprte7b_u_t;
+
+/************************************************************************
+ * *
+ * Description: SHub II contains a feature which did not exist in *
+ * the Hub which automatically cleans up after a Read Response *
+ * timeout, including deallocation of the IPRTE and recovery of IBuf *
+ * space. The inclusion of this register in SHub is for backward *
+ * compatibility.                                                       *
+ * A write to this register causes an entry from the table of *
+ * outstanding PIO Read Requests to be freed and returned to the *
+ * stack of free entries. This register is used in handling the *
+ * timeout errors that result in a PIO Reply never returning from *
+ * Crosstalk. *
+ * Note that this register does not affect the contents of the IPRTE *
+ * registers. The Valid bits in those registers have to be *
+ * specifically turned off by software. *
+ * *
+ ************************************************************************/
+
+/* IPDR: frees PIO-read-table entry i_te back to the free stack; retained
+ * for Hub backward compatibility (see above).  Does not clear the IPRTE
+ * valid bits -- software must do that separately. */
+typedef union ii_ipdr_u {
+ u64 ii_ipdr_regval;
+ struct {
+ u64 i_te:3;
+ u64 i_rsvd_1:1;
+ u64 i_pnd:1;
+ u64 i_init_rpcnt:1;
+ u64 i_rsvd:58;
+ } ii_ipdr_fld_s;
+} ii_ipdr_u_t;
+
+/************************************************************************
+ * *
+ * A write to this register causes a CRB entry to be returned to the *
+ * queue of free CRBs. The entry should have previously been cleared *
+ * (mark bit) via backdoor access to the pertinent CRB entry. This *
+ * register is used in the last step of handling the errors that are *
+ * captured and marked in CRB entries. Briefly: 1) first error for *
+ * DMA write from a particular device, and first error for a *
+ * particular BTE stream, lead to a marked CRB entry, and processor *
+ * interrupt, 2) software reads the error information captured in the *
+ * CRB entry, and presumably takes some corrective action, 3) *
+ * software clears the mark bit, and finally 4) software writes to *
+ * the ICDR register to return the CRB entry to the list of free CRB *
+ * entries. *
+ * *
+ ************************************************************************/
+
+/* ICDR: returns CRB entry i_crb_num to the free-CRB queue; last step of
+ * the CRB error-recovery sequence described above. */
+typedef union ii_icdr_u {
+ u64 ii_icdr_regval;
+ struct {
+ u64 i_crb_num:4;
+ u64 i_pnd:1;
+ u64 i_rsvd:59;
+ } ii_icdr_fld_s;
+} ii_icdr_u_t;
+
+/************************************************************************
+ * *
+ * This register provides debug access to two FIFOs inside of II. *
+ * Both IOQ_MAX* fields of this register contain the instantaneous *
+ * depth (in units of the number of available entries) of the *
+ * associated IOQ FIFO. A read of this register will return the *
+ * number of free entries on each FIFO at the time of the read. So *
+ * when a FIFO is idle, the associated field contains the maximum *
+ * depth of the FIFO. This register is writable for debug reasons *
+ * and is intended to be written with the maximum desired FIFO depth *
+ * while the FIFO is idle. Software must assure that II is idle when *
+ * this register is written. If there are any active entries in any *
+ * of these FIFOs when this register is written, the results are *
+ * undefined. *
+ * *
+ ************************************************************************/
+
+/* IFDR: debug access to the IOQ FIFO free-entry counts; writable only
+ * while II is idle -- otherwise results are undefined (see above). */
+typedef union ii_ifdr_u {
+ u64 ii_ifdr_regval;
+ struct {
+ u64 i_ioq_max_rq:7;
+ u64 i_set_ioq_rq:1;
+ u64 i_ioq_max_rp:7;
+ u64 i_set_ioq_rp:1;
+ u64 i_rsvd:48;
+ } ii_ifdr_fld_s;
+} ii_ifdr_u_t;
+
+/************************************************************************
+ * *
+ * This register allows the II to become sluggish in removing *
+ * messages from its inbound queue (IIQ). This will cause messages to *
+ * back up in either virtual channel. Disabling the "molasses" mode *
+ * subsequently allows the II to be tested under stress. In the *
+ * sluggish ("Molasses") mode, the localized effects of congestion *
+ * can be observed. *
+ * *
+ ************************************************************************/
+
+/* IIAP: "molasses" mode -- per-channel sluggishness controls for the
+ * inbound queue (IIQ), used to induce congestion for testing. */
+typedef union ii_iiap_u {
+ u64 ii_iiap_regval;
+ struct {
+ u64 i_rq_mls:6;
+ u64 i_rsvd_1:2;
+ u64 i_rp_mls:6;
+ u64 i_rsvd:50;
+ } ii_iiap_fld_s;
+} ii_iiap_u_t;
+
+/************************************************************************
+ * *
+ * This register allows several parameters of CRB operation to be *
+ * set. Note that writing to this register can have catastrophic side *
+ * effects, if the CRB is not quiescent, i.e. if the CRB is *
+ * processing protocol messages when the write occurs. *
+ * *
+ ************************************************************************/
+
+/* ICMR: CRB operating parameters, including the 15-bit valid/mark
+ * vectors; writing while the CRB is processing protocol messages is
+ * unsafe (see above).  SP_MSG gates the ICSML/ICSMM/ICSMH captures. */
+typedef union ii_icmr_u {
+ u64 ii_icmr_regval;
+ struct {
+ u64 i_sp_msg:1;
+ u64 i_rd_hdr:1;
+ u64 i_rsvd_4:2;
+ u64 i_c_cnt:4;
+ u64 i_rsvd_3:4;
+ u64 i_clr_rqpd:1;
+ u64 i_clr_rppd:1;
+ u64 i_rsvd_2:2;
+ u64 i_fc_cnt:4;
+ u64 i_crb_vld:15;
+ u64 i_crb_mark:15;
+ u64 i_rsvd_1:2;
+ u64 i_precise:1;
+ u64 i_rsvd:11;
+ } ii_icmr_fld_s;
+} ii_icmr_u_t;
+
+/************************************************************************
+ * *
+ * This register allows control of the table portion of the CRB *
+ * logic via software. Control operations from this register have *
+ * priority over all incoming Crosstalk or BTE requests. *
+ * *
+ ************************************************************************/
+
+/* ICCR: software control of the CRB table (entry number + command);
+ * such operations take priority over incoming Crosstalk/BTE requests. */
+typedef union ii_iccr_u {
+ u64 ii_iccr_regval;
+ struct {
+ u64 i_crb_num:4;
+ u64 i_rsvd_1:4;
+ u64 i_cmd:8;
+ u64 i_pending:1;
+ u64 i_rsvd:47;
+ } ii_iccr_fld_s;
+} ii_iccr_u_t;
+
+/************************************************************************
+ * *
+ * This register allows the maximum timeout value to be programmed. *
+ * *
+ ************************************************************************/
+
+/* ICTO: programmable maximum CRB timeout value. */
+typedef union ii_icto_u {
+ u64 ii_icto_regval;
+ struct {
+ u64 i_timeout:8;
+ u64 i_rsvd:56;
+ } ii_icto_fld_s;
+} ii_icto_u_t;
+
+/************************************************************************
+ * *
+ * This register allows the timeout prescalar to be programmed. An *
+ * internal counter is associated with this register. When the *
+ * internal counter reaches the value of the PRESCALE field, the *
+ * timer registers in all valid CRBs are incremented (CRBx_D[TIMEOUT] *
+ * field). The internal counter resets to zero, and then continues *
+ * counting. *
+ * *
+ ************************************************************************/
+
+/* ICTP: CRB timeout prescalar -- when the internal counter reaches
+ * i_prescale, every valid CRB's CRBx_D[TIMEOUT] is incremented. */
+typedef union ii_ictp_u {
+ u64 ii_ictp_regval;
+ struct {
+ u64 i_prescale:24;
+ u64 i_rsvd:40;
+ } ii_ictp_fld_s;
+} ii_ictp_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are *
+ * used for Crosstalk operations (both cacheline and partial *
+ * operations) or BTE/IO. Because the CRB entries are very wide, five *
+ * registers (_A to _E) are required to read and write each entry. *
+ * The CRB Entry registers can be conceptualized as rows and columns *
+ * (illustrated in the table above). Each row contains the 4 *
+ * registers required for a single CRB Entry. The first doubleword *
+ * (column) for each entry is labeled A, and the second doubleword *
+ * (higher address) is labeled B, the third doubleword is labeled C, *
+ * the fourth doubleword is labeled D and the fifth doubleword is *
+ * labeled E. All CRB entries have their addresses on a quarter *
+ * cacheline aligned boundary. *
+ * Upon reset, only the following fields are initialized: valid *
+ * (VLD), priority count, timeout, timeout valid, and context valid. *
+ * All other bits should be cleared by software before use (after *
+ * recovering any potential error state from before the reset). *
+ * The following four tables summarize the format for the four *
+ * registers that are used for each ICRB# Entry. *
+ * *
+ ************************************************************************/
+
+/* ICRB0_A: CRB entry, doubleword A -- address, TNUM, SIDN and valid/IO
+ * flags (see the CRB format description above). */
+typedef union ii_icrb0_a_u {
+ u64 ii_icrb0_a_regval;
+ struct {
+ u64 ia_iow:1;
+ u64 ia_vld:1;
+ u64 ia_addr:47;
+ u64 ia_tnum:5;
+ u64 ia_sidn:4;
+ u64 ia_rsvd:6;
+ } ii_icrb0_a_fld_s;
+} ii_icrb0_a_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are *
+ * used for Crosstalk operations (both cacheline and partial *
+ * operations) or BTE/IO. Because the CRB entries are very wide, five *
+ * registers (_A to _E) are required to read and write each entry. *
+ * *
+ ************************************************************************/
+
+/* ICRB0_B: CRB entry, doubleword B -- error, mark, stall and protocol
+ * state bits (see the CRB format description above). */
+typedef union ii_icrb0_b_u {
+ u64 ii_icrb0_b_regval;
+ struct {
+ u64 ib_xt_err:1;
+ u64 ib_mark:1;
+ u64 ib_ln_uce:1;
+ u64 ib_errcode:3;
+ u64 ib_error:1;
+ u64 ib_stall__bte_1:1;
+ u64 ib_stall__bte_0:1;
+ u64 ib_stall__intr:1;
+ u64 ib_stall_ib:1;
+ u64 ib_intvn:1;
+ u64 ib_wb:1;
+ u64 ib_hold:1;
+ u64 ib_ack:1;
+ u64 ib_resp:1;
+ u64 ib_ack_cnt:11;
+ u64 ib_rsvd:7;
+ u64 ib_exc:5;
+ u64 ib_init:3;
+ u64 ib_imsg:8;
+ u64 ib_imsgtype:2;
+ u64 ib_use_old:1;
+ u64 ib_rsvd_1:11;
+ } ii_icrb0_b_fld_s;
+} ii_icrb0_b_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are *
+ * used for Crosstalk operations (both cacheline and partial *
+ * operations) or BTE/IO. Because the CRB entries are very wide, five *
+ * registers (_A to _E) are required to read and write each entry. *
+ * *
+ ************************************************************************/
+
+/* ICRB0_C: CRB entry, doubleword C -- source, size and supplemental
+ * fields (see the CRB format description above). */
+typedef union ii_icrb0_c_u {
+ u64 ii_icrb0_c_regval;
+ struct {
+ u64 ic_source:15;
+ u64 ic_size:2;
+ u64 ic_ct:1;
+ u64 ic_bte_num:1;
+ u64 ic_gbr:1;
+ u64 ic_resprqd:1;
+ u64 ic_bo:1;
+ u64 ic_suppl:15;
+ u64 ic_rsvd:27;
+ } ii_icrb0_c_fld_s;
+} ii_icrb0_c_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are *
+ * used for Crosstalk operations (both cacheline and partial *
+ * operations) or BTE/IO. Because the CRB entries are very wide, five *
+ * registers (_A to _E) are required to read and write each entry. *
+ * *
+ ************************************************************************/
+
+/* ICRB0_D: CRB entry, doubleword D -- byte enables / address, BTE-op
+ * flag and priority counters (see the CRB format description above). */
+typedef union ii_icrb0_d_u {
+ u64 ii_icrb0_d_regval;
+ struct {
+ u64 id_pa_be:43;
+ u64 id_bte_op:1;
+ u64 id_pr_psc:4;
+ u64 id_pr_cnt:4;
+ u64 id_sleep:1;
+ u64 id_rsvd:11;
+ } ii_icrb0_d_fld_s;
+} ii_icrb0_d_u_t;
+
+/************************************************************************
+ * *
+ * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are *
+ * used for Crosstalk operations (both cacheline and partial *
+ * operations) or BTE/IO. Because the CRB entries are very wide, five *
+ * registers (_A to _E) are required to read and write each entry. *
+ * *
+ ************************************************************************/
+
+/* ICRB0_E: CRB entry, doubleword E -- timeout, context and their valid
+ * bits; these are the only fields initialized at reset (see above). */
+typedef union ii_icrb0_e_u {
+ u64 ii_icrb0_e_regval;
+ struct {
+ u64 ie_timeout:8;
+ u64 ie_context:15;
+ u64 ie_rsvd:1;
+ u64 ie_tvld:1;
+ u64 ie_cvld:1;
+ u64 ie_rsvd_0:38;
+ } ii_icrb0_e_fld_s;
+} ii_icrb0_e_u_t;
+
+/************************************************************************
+ * *
+ * This register contains the lower 64 bits of the header of the *
+ * spurious message captured by II. Valid when the SP_MSG bit in ICMR *
+ * register is set. *
+ * *
+ ************************************************************************/
+
+/* ICSML: lower 64 bits of the captured spurious-message header; valid
+ * only when ICMR[SP_MSG] is set. */
+typedef union ii_icsml_u {
+ u64 ii_icsml_regval;
+ struct {
+ u64 i_tt_addr:47;
+ u64 i_newsuppl_ex:14;
+ u64 i_reserved:2;
+ u64 i_overflow:1;
+ } ii_icsml_fld_s;
+} ii_icsml_u_t;
+
+/************************************************************************
+ * *
+ * This register contains the middle 64 bits of the header of the *
+ * spurious message captured by II. Valid when the SP_MSG bit in ICMR *
+ * register is set. *
+ * *
+ ************************************************************************/
+
+/* ICSMM: middle bits (ack count) of the captured spurious-message
+ * header; valid only when ICMR[SP_MSG] is set. */
+typedef union ii_icsmm_u {
+ u64 ii_icsmm_regval;
+ struct {
+ u64 i_tt_ack_cnt:11;
+ u64 i_reserved:53;
+ } ii_icsmm_fld_s;
+} ii_icsmm_u_t;
+
+/************************************************************************
+ * *
+ * This register contains the microscopic state, all the inputs to *
+ * the protocol table, captured with the spurious message. Valid when *
+ * the SP_MSG bit in the ICMR register is set. *
+ * *
+ ************************************************************************/
+
+/* ICSMH: microscopic state -- the protocol-table inputs captured with
+ * the spurious message; valid only when ICMR[SP_MSG] is set. */
+typedef union ii_icsmh_u {
+ u64 ii_icsmh_regval;
+ struct {
+ u64 i_tt_vld:1;
+ u64 i_xerr:1;
+ u64 i_ft_cwact_o:1;
+ u64 i_ft_wact_o:1;
+ u64 i_ft_active_o:1;
+ u64 i_sync:1;
+ u64 i_mnusg:1;
+ u64 i_mnusz:1;
+ u64 i_plusz:1;
+ u64 i_plusg:1;
+ u64 i_tt_exc:5;
+ u64 i_tt_wb:1;
+ u64 i_tt_hold:1;
+ u64 i_tt_ack:1;
+ u64 i_tt_resp:1;
+ u64 i_tt_intvn:1;
+ u64 i_g_stall_bte1:1;
+ u64 i_g_stall_bte0:1;
+ u64 i_g_stall_il:1;
+ u64 i_g_stall_ib:1;
+ u64 i_tt_imsg:8;
+ u64 i_tt_imsgtype:2;
+ u64 i_tt_use_old:1;
+ u64 i_tt_respreqd:1;
+ u64 i_tt_bte_num:1;
+ u64 i_cbn:1;
+ u64 i_match:1;
+ u64 i_rpcnt_lt_34:1;
+ u64 i_rpcnt_ge_34:1;
+ u64 i_rpcnt_lt_18:1;
+ u64 i_rpcnt_ge_18:1;
+ u64 i_rpcnt_lt_2:1;
+ u64 i_rpcnt_ge_2:1;
+ u64 i_rqcnt_lt_18:1;
+ u64 i_rqcnt_ge_18:1;
+ u64 i_rqcnt_lt_2:1;
+ u64 i_rqcnt_ge_2:1;
+ u64 i_tt_device:7;
+ u64 i_tt_init:3;
+ u64 i_reserved:5;
+ } ii_icsmh_fld_s;
+} ii_icsmh_u_t;
+
+/************************************************************************
+ * *
+ * The Shub DEBUG unit provides a 3-bit selection signal to the *
+ * II core and a 3-bit selection signal to the fsbclk domain in the II *
+ * wrapper. *
+ * *
+ ************************************************************************/
+
+typedef union ii_idbss_u {
+ u64 ii_idbss_regval;
+ struct {
+ u64 i_iioclk_core_submenu:3;
+ u64 i_rsvd:5;
+ u64 i_fsbclk_wrapper_submenu:3;
+ u64 i_rsvd_1:5;
+ u64 i_iioclk_menu:5;
+ u64 i_rsvd_2:43;
+ } ii_idbss_fld_s;
+} ii_idbss_u_t;
+
+/************************************************************************
+ * *
+ * Description: This register is used to set up the length for a *
+ * transfer and then to monitor the progress of that transfer. This *
+ * register needs to be initialized before a transfer is started. A *
+ * legitimate write to this register will set the Busy bit, clear the *
+ * Error bit, and initialize the length to the value desired. *
+ * While the transfer is in progress, hardware will decrement the *
+ * length field with each successful block that is copied. Once the *
+ * transfer completes, hardware will clear the Busy bit. The length *
+ * field will also contain the number of cache lines left to be *
+ * transferred. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ibls0_u {
+ u64 ii_ibls0_regval;
+ struct {
+ u64 i_length:16;
+ u64 i_error:1;
+ u64 i_rsvd_1:3;
+ u64 i_busy:1;
+ u64 i_rsvd:43;
+ } ii_ibls0_fld_s;
+} ii_ibls0_u_t;
+
+/************************************************************************
+ * *
+ * This register should be loaded before a transfer is started. The *
+ * address to be loaded in bits 39:0 is the 40-bit TRex+ physical *
+ * address as described in Section 1.3, Figure2 and Figure3. Since *
+ * the bottom 7 bits of the address are always taken to be zero, BTE *
+ * transfers are always cacheline-aligned. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ibsa0_u {
+ u64 ii_ibsa0_regval;
+ struct {
+ u64 i_rsvd_1:7;
+ u64 i_addr:42;
+ u64 i_rsvd:15;
+ } ii_ibsa0_fld_s;
+} ii_ibsa0_u_t;
+
+/************************************************************************
+ * *
+ * This register should be loaded before a transfer is started. The *
+ * address to be loaded in bits 39:0 is the 40-bit TRex+ physical *
+ * address as described in Section 1.3, Figure2 and Figure3. Since *
+ * the bottom 7 bits of the address are always taken to be zero, BTE *
+ * transfers are always cacheline-aligned. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ibda0_u {
+ u64 ii_ibda0_regval;
+ struct {
+ u64 i_rsvd_1:7;
+ u64 i_addr:42;
+ u64 i_rsvd:15;
+ } ii_ibda0_fld_s;
+} ii_ibda0_u_t;
+
+/************************************************************************
+ * *
+ * Writing to this register sets up the attributes of the transfer *
+ * and initiates the transfer operation. Reading this register has *
+ * the side effect of terminating any transfer in progress. Note: *
+ * stopping a transfer midstream could have an adverse impact on the *
+ * other BTE. If a BTE stream has to be stopped (due to error *
+ * handling for example), both BTE streams should be stopped and *
+ * their transfers discarded. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ibct0_u {
+ u64 ii_ibct0_regval;
+ struct {
+ u64 i_zerofill:1;
+ u64 i_rsvd_2:3;
+ u64 i_notify:1;
+ u64 i_rsvd_1:3;
+ u64 i_poison:1;
+ u64 i_rsvd:55;
+ } ii_ibct0_fld_s;
+} ii_ibct0_u_t;
+
+/************************************************************************
+ * *
+ * This register contains the address to which the WINV is sent. *
+ * This address has to be cache line aligned. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ibna0_u {
+ u64 ii_ibna0_regval;
+ struct {
+ u64 i_rsvd_1:7;
+ u64 i_addr:42;
+ u64 i_rsvd:15;
+ } ii_ibna0_fld_s;
+} ii_ibna0_u_t;
+
+/************************************************************************
+ * *
+ * This register contains the programmable level as well as the node *
+ * ID and PI unit of the processor to which the interrupt will be *
+ * sent. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ibia0_u {
+ u64 ii_ibia0_regval;
+ struct {
+ u64 i_rsvd_2:1;
+ u64 i_node_id:11;
+ u64 i_rsvd_1:4;
+ u64 i_level:7;
+ u64 i_rsvd:41;
+ } ii_ibia0_fld_s;
+} ii_ibia0_u_t;
+
+/************************************************************************
+ * *
+ * Description: This register is used to set up the length for a *
+ * transfer and then to monitor the progress of that transfer. This *
+ * register needs to be initialized before a transfer is started. A *
+ * legitimate write to this register will set the Busy bit, clear the *
+ * Error bit, and initialize the length to the value desired. *
+ * While the transfer is in progress, hardware will decrement the *
+ * length field with each successful block that is copied. Once the *
+ * transfer completes, hardware will clear the Busy bit. The length *
+ * field will also contain the number of cache lines left to be *
+ * transferred. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ibls1_u {
+ u64 ii_ibls1_regval;
+ struct {
+ u64 i_length:16;
+ u64 i_error:1;
+ u64 i_rsvd_1:3;
+ u64 i_busy:1;
+ u64 i_rsvd:43;
+ } ii_ibls1_fld_s;
+} ii_ibls1_u_t;
+
+/************************************************************************
+ * *
+ * This register should be loaded before a transfer is started. The *
+ * address to be loaded in bits 39:0 is the 40-bit TRex+ physical *
+ * address as described in Section 1.3, Figure2 and Figure3. Since *
+ * the bottom 7 bits of the address are always taken to be zero, BTE *
+ * transfers are always cacheline-aligned. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ibsa1_u {
+ u64 ii_ibsa1_regval;
+ struct {
+ u64 i_rsvd_1:7;
+ u64 i_addr:33; /* NOTE(review): 33 bits here vs 42 in ii_ibsa0_u — confirm vs SHub spec */
+ u64 i_rsvd:24;
+ } ii_ibsa1_fld_s;
+} ii_ibsa1_u_t;
+
+/************************************************************************
+ * *
+ * This register should be loaded before a transfer is started. The *
+ * address to be loaded in bits 39:0 is the 40-bit TRex+ physical *
+ * address as described in Section 1.3, Figure2 and Figure3. Since *
+ * the bottom 7 bits of the address are always taken to be zero, BTE *
+ * transfers are always cacheline-aligned. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ibda1_u {
+ u64 ii_ibda1_regval;
+ struct {
+ u64 i_rsvd_1:7;
+ u64 i_addr:33;
+ u64 i_rsvd:24;
+ } ii_ibda1_fld_s;
+} ii_ibda1_u_t;
+
+/************************************************************************
+ * *
+ * Writing to this register sets up the attributes of the transfer *
+ * and initiates the transfer operation. Reading this register has *
+ * the side effect of terminating any transfer in progress. Note: *
+ * stopping a transfer midstream could have an adverse impact on the *
+ * other BTE. If a BTE stream has to be stopped (due to error *
+ * handling for example), both BTE streams should be stopped and *
+ * their transfers discarded. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ibct1_u {
+ u64 ii_ibct1_regval;
+ struct {
+ u64 i_zerofill:1;
+ u64 i_rsvd_2:3;
+ u64 i_notify:1;
+ u64 i_rsvd_1:3;
+ u64 i_poison:1;
+ u64 i_rsvd:55;
+ } ii_ibct1_fld_s;
+} ii_ibct1_u_t;
+
+/************************************************************************
+ * *
+ * This register contains the address to which the WINV is sent. *
+ * This address has to be cache line aligned. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ibna1_u {
+ u64 ii_ibna1_regval;
+ struct {
+ u64 i_rsvd_1:7;
+ u64 i_addr:33;
+ u64 i_rsvd:24;
+ } ii_ibna1_fld_s;
+} ii_ibna1_u_t;
+
+/************************************************************************
+ * *
+ * This register contains the programmable level as well as the node *
+ * ID and PI unit of the processor to which the interrupt will be *
+ * sent. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ibia1_u {
+ u64 ii_ibia1_regval;
+ struct {
+ u64 i_pi_id:1; /* NOTE(review): layout differs from ii_ibia0_u (pi_id/node_id widths) — confirm vs SHub spec */
+ u64 i_node_id:8;
+ u64 i_rsvd_1:7;
+ u64 i_level:7;
+ u64 i_rsvd:41;
+ } ii_ibia1_fld_s;
+} ii_ibia1_u_t;
+
+/************************************************************************
+ * *
+ * This register defines the resources that feed information into *
+ * the two performance counters located in the IO Performance *
+ * Profiling Register. There are 17 different quantities that can be *
+ * measured. Given these 17 different options, the two performance *
+ * counters have 15 of them in common; menu selections 0 through 0xE *
+ * are identical for each performance counter. As for the other two *
+ * options, one is available from one performance counter and the *
+ * other is available from the other performance counter. Hence, the *
+ * II supports all 17*16=272 possible combinations of quantities to *
+ * measure. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ipcr_u {
+ u64 ii_ipcr_regval;
+ struct {
+ u64 i_ippr0_c:4;
+ u64 i_ippr1_c:4;
+ u64 i_icct:8;
+ u64 i_rsvd:48;
+ } ii_ipcr_fld_s;
+} ii_ipcr_u_t;
+
+/************************************************************************
+ * *
+ * *
+ * *
+ ************************************************************************/
+
+typedef union ii_ippr_u {
+ u64 ii_ippr_regval;
+ struct {
+ u64 i_ippr0:32;
+ u64 i_ippr1:32;
+ } ii_ippr_fld_s;
+} ii_ippr_u_t;
+
+/************************************************************************
+ * *
+ * The following defines, which were not formed into structures, are *
+ * probably identical to another register; the name of that *
+ * register is listed against each of them. This *
+ * information needs to be checked carefully *
+ * *
+ * IIO_ICRB1_A IIO_ICRB0_A *
+ * IIO_ICRB1_B IIO_ICRB0_B *
+ * IIO_ICRB1_C IIO_ICRB0_C *
+ * IIO_ICRB1_D IIO_ICRB0_D *
+ * IIO_ICRB1_E IIO_ICRB0_E *
+ * IIO_ICRB2_A IIO_ICRB0_A *
+ * IIO_ICRB2_B IIO_ICRB0_B *
+ * IIO_ICRB2_C IIO_ICRB0_C *
+ * IIO_ICRB2_D IIO_ICRB0_D *
+ * IIO_ICRB2_E IIO_ICRB0_E *
+ * IIO_ICRB3_A IIO_ICRB0_A *
+ * IIO_ICRB3_B IIO_ICRB0_B *
+ * IIO_ICRB3_C IIO_ICRB0_C *
+ * IIO_ICRB3_D IIO_ICRB0_D *
+ * IIO_ICRB3_E IIO_ICRB0_E *
+ * IIO_ICRB4_A IIO_ICRB0_A *
+ * IIO_ICRB4_B IIO_ICRB0_B *
+ * IIO_ICRB4_C IIO_ICRB0_C *
+ * IIO_ICRB4_D IIO_ICRB0_D *
+ * IIO_ICRB4_E IIO_ICRB0_E *
+ * IIO_ICRB5_A IIO_ICRB0_A *
+ * IIO_ICRB5_B IIO_ICRB0_B *
+ * IIO_ICRB5_C IIO_ICRB0_C *
+ * IIO_ICRB5_D IIO_ICRB0_D *
+ * IIO_ICRB5_E IIO_ICRB0_E *
+ * IIO_ICRB6_A IIO_ICRB0_A *
+ * IIO_ICRB6_B IIO_ICRB0_B *
+ * IIO_ICRB6_C IIO_ICRB0_C *
+ * IIO_ICRB6_D IIO_ICRB0_D *
+ * IIO_ICRB6_E IIO_ICRB0_E *
+ * IIO_ICRB7_A IIO_ICRB0_A *
+ * IIO_ICRB7_B IIO_ICRB0_B *
+ * IIO_ICRB7_C IIO_ICRB0_C *
+ * IIO_ICRB7_D IIO_ICRB0_D *
+ * IIO_ICRB7_E IIO_ICRB0_E *
+ * IIO_ICRB8_A IIO_ICRB0_A *
+ * IIO_ICRB8_B IIO_ICRB0_B *
+ * IIO_ICRB8_C IIO_ICRB0_C *
+ * IIO_ICRB8_D IIO_ICRB0_D *
+ * IIO_ICRB8_E IIO_ICRB0_E *
+ * IIO_ICRB9_A IIO_ICRB0_A *
+ * IIO_ICRB9_B IIO_ICRB0_B *
+ * IIO_ICRB9_C IIO_ICRB0_C *
+ * IIO_ICRB9_D IIO_ICRB0_D *
+ * IIO_ICRB9_E IIO_ICRB0_E *
+ * IIO_ICRBA_A IIO_ICRB0_A *
+ * IIO_ICRBA_B IIO_ICRB0_B *
+ * IIO_ICRBA_C IIO_ICRB0_C *
+ * IIO_ICRBA_D IIO_ICRB0_D *
+ * IIO_ICRBA_E IIO_ICRB0_E *
+ * IIO_ICRBB_A IIO_ICRB0_A *
+ * IIO_ICRBB_B IIO_ICRB0_B *
+ * IIO_ICRBB_C IIO_ICRB0_C *
+ * IIO_ICRBB_D IIO_ICRB0_D *
+ * IIO_ICRBB_E IIO_ICRB0_E *
+ * IIO_ICRBC_A IIO_ICRB0_A *
+ * IIO_ICRBC_B IIO_ICRB0_B *
+ * IIO_ICRBC_C IIO_ICRB0_C *
+ * IIO_ICRBC_D IIO_ICRB0_D *
+ * IIO_ICRBC_E IIO_ICRB0_E *
+ * IIO_ICRBD_A IIO_ICRB0_A *
+ * IIO_ICRBD_B IIO_ICRB0_B *
+ * IIO_ICRBD_C IIO_ICRB0_C *
+ * IIO_ICRBD_D IIO_ICRB0_D *
+ * IIO_ICRBD_E IIO_ICRB0_E *
+ * IIO_ICRBE_A IIO_ICRB0_A *
+ * IIO_ICRBE_B IIO_ICRB0_B *
+ * IIO_ICRBE_C IIO_ICRB0_C *
+ * IIO_ICRBE_D IIO_ICRB0_D *
+ * IIO_ICRBE_E IIO_ICRB0_E *
+ * *
+ ************************************************************************/
+
+/*
+ * Slightly friendlier names for some common registers.
+ */
+#define IIO_WIDGET IIO_WID /* Widget identification */
+#define IIO_WIDGET_STAT IIO_WSTAT /* Widget status register */
+#define IIO_WIDGET_CTRL IIO_WCR /* Widget control register */
+#define IIO_PROTECT IIO_ILAPR /* IO interface protection */
+#define IIO_PROTECT_OVRRD IIO_ILAPO /* IO protect override */
+#define IIO_OUTWIDGET_ACCESS IIO_IOWA /* Outbound widget access */
+#define IIO_INWIDGET_ACCESS IIO_IIWA /* Inbound widget access */
+#define IIO_INDEV_ERR_MASK IIO_IIDEM /* Inbound device error mask */
+#define IIO_LLP_CSR IIO_ILCSR /* LLP control and status */
+#define IIO_LLP_LOG IIO_ILLR /* LLP log */
+#define IIO_XTALKCC_TOUT IIO_IXCC /* Xtalk credit count timeout */
+#define IIO_XTALKTT_TOUT IIO_IXTT /* Xtalk tail timeout */
+#define IIO_IO_ERR_CLR IIO_IECLR /* IO error clear */
+#define IIO_IGFX_0 IIO_IGFX0
+#define IIO_IGFX_1 IIO_IGFX1
+#define IIO_IBCT_0 IIO_IBCT0
+#define IIO_IBCT_1 IIO_IBCT1
+#define IIO_IBLS_0 IIO_IBLS0
+#define IIO_IBLS_1 IIO_IBLS1
+#define IIO_IBSA_0 IIO_IBSA0
+#define IIO_IBSA_1 IIO_IBSA1
+#define IIO_IBDA_0 IIO_IBDA0
+#define IIO_IBDA_1 IIO_IBDA1
+#define IIO_IBNA_0 IIO_IBNA0
+#define IIO_IBNA_1 IIO_IBNA1
+#define IIO_IBIA_0 IIO_IBIA0
+#define IIO_IBIA_1 IIO_IBIA1
+#define IIO_IOPRB_0 IIO_IPRB0
+
+#define IIO_PRTE_A(_x) (IIO_IPRTE0_A + (8 * (_x)))
+#define IIO_PRTE_B(_x) (IIO_IPRTE0_B + (8 * (_x)))
+#define IIO_NUM_PRTES 8 /* Total number of PRB table entries */
+#define IIO_WIDPRTE_A(x) IIO_PRTE_A(((x) - 8)) /* widget ID to its PRTE num */
+#define IIO_WIDPRTE_B(x) IIO_PRTE_B(((x) - 8)) /* widget ID to its PRTE num */
+
+#define IIO_NUM_IPRBS 9
+
+#define IIO_LLP_CSR_IS_UP 0x00002000
+#define IIO_LLP_CSR_LLP_STAT_MASK 0x00003000
+#define IIO_LLP_CSR_LLP_STAT_SHFT 12
+
+#define IIO_LLP_CB_MAX 0xffff /* in ILLR CB_CNT, Max Check Bit errors */
+#define IIO_LLP_SN_MAX 0xffff /* in ILLR SN_CNT, Max Sequence Number errors */
+
+/* key to IIO_PROTECT_OVRRD */
+#define IIO_PROTECT_OVRRD_KEY 0x53474972756c6573ull /* "SGIrules" */
+
+/* BTE register names */
+#define IIO_BTE_STAT_0 IIO_IBLS_0 /* Also BTE length/status 0 */
+#define IIO_BTE_SRC_0 IIO_IBSA_0 /* Also BTE source address 0 */
+#define IIO_BTE_DEST_0 IIO_IBDA_0 /* Also BTE dest. address 0 */
+#define IIO_BTE_CTRL_0 IIO_IBCT_0 /* Also BTE control/terminate 0 */
+#define IIO_BTE_NOTIFY_0 IIO_IBNA_0 /* Also BTE notification 0 */
+#define IIO_BTE_INT_0 IIO_IBIA_0 /* Also BTE interrupt 0 */
+#define IIO_BTE_OFF_0 0 /* Base offset from BTE 0 regs. */
+#define IIO_BTE_OFF_1 (IIO_IBLS_1 - IIO_IBLS_0) /* Offset from base to BTE 1 */
+
+/* BTE register offsets from base */
+#define BTEOFF_STAT 0
+#define BTEOFF_SRC (IIO_BTE_SRC_0 - IIO_BTE_STAT_0)
+#define BTEOFF_DEST (IIO_BTE_DEST_0 - IIO_BTE_STAT_0)
+#define BTEOFF_CTRL (IIO_BTE_CTRL_0 - IIO_BTE_STAT_0)
+#define BTEOFF_NOTIFY (IIO_BTE_NOTIFY_0 - IIO_BTE_STAT_0)
+#define BTEOFF_INT (IIO_BTE_INT_0 - IIO_BTE_STAT_0)
+
+/* names used in shub diags */
+#define IIO_BASE_BTE0 IIO_IBLS_0
+#define IIO_BASE_BTE1 IIO_IBLS_1
+
+/*
+ * Macro which takes the widget number, and returns the
+ * IO PRB address of that widget.
+ * value _x is expected to be a widget number in the range
+ * 0, 8 - 0xF
+ */
+#define IIO_IOPRB(_x) (IIO_IOPRB_0 + ( ( (_x) < HUB_WIDGET_ID_MIN ? \
+ (_x) : \
+ (_x) - (HUB_WIDGET_ID_MIN-1)) << 3) )
+
+/* GFX Flow Control Node/Widget Register */
+#define IIO_IGFX_W_NUM_BITS 4 /* size of widget num field */
+#define IIO_IGFX_W_NUM_MASK ((1<<IIO_IGFX_W_NUM_BITS)-1)
+#define IIO_IGFX_W_NUM_SHIFT 0
+#define IIO_IGFX_PI_NUM_BITS 1 /* size of PI num field */
+#define IIO_IGFX_PI_NUM_MASK ((1<<IIO_IGFX_PI_NUM_BITS)-1)
+#define IIO_IGFX_PI_NUM_SHIFT 4
+#define IIO_IGFX_N_NUM_BITS 8 /* size of node num field */
+#define IIO_IGFX_N_NUM_MASK ((1<<IIO_IGFX_N_NUM_BITS)-1)
+#define IIO_IGFX_N_NUM_SHIFT 5
+#define IIO_IGFX_P_NUM_BITS 1 /* size of processor num field */
+#define IIO_IGFX_P_NUM_MASK ((1<<IIO_IGFX_P_NUM_BITS)-1)
+#define IIO_IGFX_P_NUM_SHIFT 16
+#define IIO_IGFX_INIT(widget, pi, node, cpu) (\
+ (((widget) & IIO_IGFX_W_NUM_MASK) << IIO_IGFX_W_NUM_SHIFT) | \
+ (((pi) & IIO_IGFX_PI_NUM_MASK)<< IIO_IGFX_PI_NUM_SHIFT)| \
+ (((node) & IIO_IGFX_N_NUM_MASK) << IIO_IGFX_N_NUM_SHIFT) | \
+ (((cpu) & IIO_IGFX_P_NUM_MASK) << IIO_IGFX_P_NUM_SHIFT))
+
+/* Scratch registers (all bits available) */
+#define IIO_SCRATCH_REG0 IIO_ISCR0
+#define IIO_SCRATCH_REG1 IIO_ISCR1
+#define IIO_SCRATCH_MASK 0xffffffffffffffffUL
+
+#define IIO_SCRATCH_BIT0_0 0x0000000000000001UL
+#define IIO_SCRATCH_BIT0_1 0x0000000000000002UL
+#define IIO_SCRATCH_BIT0_2 0x0000000000000004UL
+#define IIO_SCRATCH_BIT0_3 0x0000000000000008UL
+#define IIO_SCRATCH_BIT0_4 0x0000000000000010UL
+#define IIO_SCRATCH_BIT0_5 0x0000000000000020UL
+#define IIO_SCRATCH_BIT0_6 0x0000000000000040UL
+#define IIO_SCRATCH_BIT0_7 0x0000000000000080UL
+#define IIO_SCRATCH_BIT0_8 0x0000000000000100UL
+#define IIO_SCRATCH_BIT0_9 0x0000000000000200UL
+#define IIO_SCRATCH_BIT0_A 0x0000000000000400UL
+
+#define IIO_SCRATCH_BIT1_0 0x0000000000000001UL
+#define IIO_SCRATCH_BIT1_1 0x0000000000000002UL
+/* IO Translation Table Entries */
+#define IIO_NUM_ITTES 7 /* ITTEs numbered 0..6 */
+ /* Hw manuals number them 1..7! */
+/*
+ * IIO_IMEM Register fields.
+ */
+#define IIO_IMEM_W0ESD 0x1UL /* Widget 0 shut down due to error */
+#define IIO_IMEM_B0ESD (1UL << 4) /* BTE 0 shut down due to error */
+#define IIO_IMEM_B1ESD (1UL << 8) /* BTE 1 Shut down due to error */
+
+/*
+ * As a permanent workaround for a bug in the PI side of the shub, we've
+ * redefined big window 7 as small window 0.
+ XXX does this still apply for SN1??
+ */
+#define HUB_NUM_BIG_WINDOW (IIO_NUM_ITTES - 1)
+
+/*
+ * Use the top big window as a surrogate for the first small window
+ */
+#define SWIN0_BIGWIN HUB_NUM_BIG_WINDOW
+
+#define ILCSR_WARM_RESET 0x100
+
+/*
+ * CRB manipulation macros
+ * The CRB macros are slightly complicated, since there are up to
+ * four registers associated with each CRB entry.
+ */
+#define IIO_NUM_CRBS 15 /* Number of CRBs */
+#define IIO_NUM_PC_CRBS 4 /* Number of partial cache CRBs */
+#define IIO_ICRB_OFFSET 8
+#define IIO_ICRB_0 IIO_ICRB0_A
+#define IIO_ICRB_ADDR_SHFT 2 /* Shift to get proper address */
+/* XXX - This is now tuneable:
+ #define IIO_FIRST_PC_ENTRY 12
+ */
+
+#define IIO_ICRB_A(_x) ((u64)(IIO_ICRB_0 + (6 * IIO_ICRB_OFFSET * (_x))))
+#define IIO_ICRB_B(_x) ((u64)((char *)IIO_ICRB_A(_x) + 1*IIO_ICRB_OFFSET))
+#define IIO_ICRB_C(_x) ((u64)((char *)IIO_ICRB_A(_x) + 2*IIO_ICRB_OFFSET))
+#define IIO_ICRB_D(_x) ((u64)((char *)IIO_ICRB_A(_x) + 3*IIO_ICRB_OFFSET))
+#define IIO_ICRB_E(_x) ((u64)((char *)IIO_ICRB_A(_x) + 4*IIO_ICRB_OFFSET))
+
+#define TNUM_TO_WIDGET_DEV(_tnum) (_tnum & 0x7)
+
+/*
+ * values for "ecode" field
+ */
+#define IIO_ICRB_ECODE_DERR 0 /* Directory error due to IIO access */
+#define IIO_ICRB_ECODE_PERR 1 /* Poison error on IO access */
+#define IIO_ICRB_ECODE_WERR 2 /* Write error by IIO access
+ * e.g. WINV to a Read only line. */
+#define IIO_ICRB_ECODE_AERR 3 /* Access error caused by IIO access */
+#define IIO_ICRB_ECODE_PWERR 4 /* Error on partial write */
+#define IIO_ICRB_ECODE_PRERR 5 /* Error on partial read */
+#define IIO_ICRB_ECODE_TOUT 6 /* CRB timeout before deallocating */
+#define IIO_ICRB_ECODE_XTERR 7 /* Incoming xtalk pkt had error bit */
+
+/*
+ * Values for field imsgtype
+ */
+#define IIO_ICRB_IMSGT_XTALK 0 /* Incoming message from Xtalk */
+#define IIO_ICRB_IMSGT_BTE 1 /* Incoming message from BTE */
+#define IIO_ICRB_IMSGT_SN1NET 2 /* Incoming message from SN1 net */
+#define IIO_ICRB_IMSGT_CRB 3 /* Incoming message from CRB ??? */
+
+/*
+ * values for field initiator.
+ */
+#define IIO_ICRB_INIT_XTALK 0 /* Message originated in xtalk */
+#define IIO_ICRB_INIT_BTE0 0x1 /* Message originated in BTE 0 */
+#define IIO_ICRB_INIT_SN1NET 0x2 /* Message originated in SN1net */
+#define IIO_ICRB_INIT_CRB 0x3 /* Message originated in CRB ? */
+#define IIO_ICRB_INIT_BTE1 0x5 /* Message originated in BTE 1 */
+
+/*
+ * Number of credits Hub widget has while sending req/response to
+ * xbow.
+ * Value of 3 is required by Xbow 1.1
+ * We may be able to increase this to 4 with Xbow 1.2.
+ */
+#define HUBII_XBOW_CREDIT 3
+#define HUBII_XBOW_REV2_CREDIT 4
+
+/*
+ * Number of credits that xtalk devices should use when communicating
+ * with a SHub (depth of SHub's queue).
+ */
+#define HUB_CREDIT 4
+
+/*
+ * Some IIO_PRB fields
+ */
+#define IIO_PRB_MULTI_ERR (1LL << 63)
+#define IIO_PRB_SPUR_RD (1LL << 51)
+#define IIO_PRB_SPUR_WR (1LL << 50)
+#define IIO_PRB_RD_TO (1LL << 49)
+#define IIO_PRB_ERROR (1LL << 48)
+
+/*************************************************************************
+
+ Some of the IIO field masks and shifts are defined here.
+ This is in order to maintain compatibility in SN0 and SN1 code
+
+**************************************************************************/
+
+/*
+ * ICMR register fields
+ * (Note: the IIO_ICMR_P_CNT and IIO_ICMR_PC_VLD from Hub are not
+ * present in SHub)
+ */
+
+#define IIO_ICMR_CRB_VLD_SHFT 20
+#define IIO_ICMR_CRB_VLD_MASK (0x7fffUL << IIO_ICMR_CRB_VLD_SHFT)
+
+#define IIO_ICMR_FC_CNT_SHFT 16
+#define IIO_ICMR_FC_CNT_MASK (0xf << IIO_ICMR_FC_CNT_SHFT)
+
+#define IIO_ICMR_C_CNT_SHFT 4
+#define IIO_ICMR_C_CNT_MASK (0xf << IIO_ICMR_C_CNT_SHFT)
+
+#define IIO_ICMR_PRECISE (1UL << 52)
+#define IIO_ICMR_CLR_RPPD (1UL << 13)
+#define IIO_ICMR_CLR_RQPD (1UL << 12)
+
+/*
+ * IIO PIO Deallocation register field masks : (IIO_IPDR)
+ XXX present but not needed in bedrock? See the manual.
+ */
+#define IIO_IPDR_PND (1 << 4)
+
+/*
+ * IIO CRB deallocation register field masks: (IIO_ICDR)
+ */
+#define IIO_ICDR_PND (1 << 4)
+
+/*
+ * IO BTE Length/Status (IIO_IBLS) register bit field definitions
+ */
+#define IBLS_BUSY (0x1UL << 20)
+#define IBLS_ERROR_SHFT 16
+#define IBLS_ERROR (0x1UL << IBLS_ERROR_SHFT)
+#define IBLS_LENGTH_MASK 0xffff
+
+/*
+ * IO BTE Control/Terminate register (IBCT) register bit field definitions
+ */
+#define IBCT_POISON (0x1UL << 8)
+#define IBCT_NOTIFY (0x1UL << 4)
+#define IBCT_ZFIL_MODE (0x1UL << 0)
+
+/*
+ * IIO Incoming Error Packet Header (IIO_IIEPH1/IIO_IIEPH2)
+ */
+#define IIEPH1_VALID (1UL << 44)
+#define IIEPH1_OVERRUN (1UL << 40)
+#define IIEPH1_ERR_TYPE_SHFT 32
+#define IIEPH1_ERR_TYPE_MASK 0xf
+#define IIEPH1_SOURCE_SHFT 20
+#define IIEPH1_SOURCE_MASK 11
+#define IIEPH1_SUPPL_SHFT 8
+#define IIEPH1_SUPPL_MASK 11
+#define IIEPH1_CMD_SHFT 0
+#define IIEPH1_CMD_MASK 7
+
+#define IIEPH2_TAIL (1UL << 40)
+#define IIEPH2_ADDRESS_SHFT 0
+#define IIEPH2_ADDRESS_MASK 38
+
+#define IIEPH1_ERR_SHORT_REQ 2
+#define IIEPH1_ERR_SHORT_REPLY 3
+#define IIEPH1_ERR_LONG_REQ 4
+#define IIEPH1_ERR_LONG_REPLY 5
+
+/*
+ * IO Error Clear register bit field definitions
+ */
+#define IECLR_PI1_FWD_INT (1UL << 31) /* clear PI1_FORWARD_INT in iidsr */
+#define IECLR_PI0_FWD_INT (1UL << 30) /* clear PI0_FORWARD_INT in iidsr */
+#define IECLR_SPUR_RD_HDR (1UL << 29) /* clear valid bit in ixss reg */
+#define IECLR_BTE1 (1UL << 18) /* clear bte error 1 */
+#define IECLR_BTE0 (1UL << 17) /* clear bte error 0 */
+#define IECLR_CRAZY (1UL << 16) /* clear crazy bit in wstat reg */
+#define IECLR_PRB_F (1UL << 15) /* clear err bit in PRB_F reg */
+#define IECLR_PRB_E (1UL << 14) /* clear err bit in PRB_E reg */
+#define IECLR_PRB_D (1UL << 13) /* clear err bit in PRB_D reg */
+#define IECLR_PRB_C (1UL << 12) /* clear err bit in PRB_C reg */
+#define IECLR_PRB_B (1UL << 11) /* clear err bit in PRB_B reg */
+#define IECLR_PRB_A (1UL << 10) /* clear err bit in PRB_A reg */
+#define IECLR_PRB_9 (1UL << 9) /* clear err bit in PRB_9 reg */
+#define IECLR_PRB_8 (1UL << 8) /* clear err bit in PRB_8 reg */
+#define IECLR_PRB_0 (1UL << 0) /* clear err bit in PRB_0 reg */
+
+/*
+ * IIO CRB control register Fields: IIO_ICCR
+ */
+#define IIO_ICCR_PENDING 0x10000
+#define IIO_ICCR_CMD_MASK 0xFF
+#define IIO_ICCR_CMD_SHFT 7
+#define IIO_ICCR_CMD_NOP 0x0 /* No Op */
+#define IIO_ICCR_CMD_WAKE 0x100 /* Reactivate CRB entry and process */
+#define IIO_ICCR_CMD_TIMEOUT 0x200 /* Make CRB timeout & mark invalid */
+#define IIO_ICCR_CMD_EJECT 0x400 /* Contents of entry written to memory
+ * via a WB
+ */
+#define IIO_ICCR_CMD_FLUSH 0x800
+
+/*
+ *
+ * CRB Register description.
+ *
+ * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
+ * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
+ * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
+ * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
+ * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
+ *
+ * Many of the fields in CRB are status bits used by hardware
+ * for implementation of the protocol. It's very dangerous to
+ * mess around with the CRB registers.
+ *
+ * It's OK to read the CRB registers and try to make sense out of the
+ * fields in CRB.
+ *
+ * Updating CRB requires all activities in Hub IIO to be quiesced.
+ * otherwise, a write to CRB could corrupt other CRB entries.
+ * CRBs are here only as a back door peek to shub IIO's status.
+ * Quiescing implies no dmas no PIOs
+ * either directly from the cpu or from sn0net.
+ * this is not something that can be done easily. So, AVOID updating
+ * CRBs.
+ */
+
+/*
+ * Easy access macros for CRBs, all 5 registers (A-E)
+ */
+typedef ii_icrb0_a_u_t icrba_t;
+#define a_sidn ii_icrb0_a_fld_s.ia_sidn
+#define a_tnum ii_icrb0_a_fld_s.ia_tnum
+#define a_addr ii_icrb0_a_fld_s.ia_addr
+#define a_valid ii_icrb0_a_fld_s.ia_vld
+#define a_iow ii_icrb0_a_fld_s.ia_iow
+#define a_regvalue ii_icrb0_a_regval
+
+typedef ii_icrb0_b_u_t icrbb_t;
+#define b_use_old ii_icrb0_b_fld_s.ib_use_old
+#define b_imsgtype ii_icrb0_b_fld_s.ib_imsgtype
+#define b_imsg ii_icrb0_b_fld_s.ib_imsg
+#define b_initiator ii_icrb0_b_fld_s.ib_init
+#define b_exc ii_icrb0_b_fld_s.ib_exc
+#define b_ackcnt ii_icrb0_b_fld_s.ib_ack_cnt
+#define b_resp ii_icrb0_b_fld_s.ib_resp
+#define b_ack ii_icrb0_b_fld_s.ib_ack
+#define b_hold ii_icrb0_b_fld_s.ib_hold
+#define b_wb ii_icrb0_b_fld_s.ib_wb
+#define b_intvn ii_icrb0_b_fld_s.ib_intvn
+#define b_stall_ib ii_icrb0_b_fld_s.ib_stall_ib
+#define b_stall_int ii_icrb0_b_fld_s.ib_stall__intr
+#define b_stall_bte_0 ii_icrb0_b_fld_s.ib_stall__bte_0
+#define b_stall_bte_1 ii_icrb0_b_fld_s.ib_stall__bte_1
+#define b_error ii_icrb0_b_fld_s.ib_error
+#define b_ecode ii_icrb0_b_fld_s.ib_errcode
+#define b_lnetuce ii_icrb0_b_fld_s.ib_ln_uce
+#define b_mark ii_icrb0_b_fld_s.ib_mark
+#define b_xerr ii_icrb0_b_fld_s.ib_xt_err
+#define b_regvalue ii_icrb0_b_regval
+
+typedef ii_icrb0_c_u_t icrbc_t;
+#define c_suppl ii_icrb0_c_fld_s.ic_suppl
+#define c_barrop ii_icrb0_c_fld_s.ic_bo
+#define c_doresp ii_icrb0_c_fld_s.ic_resprqd
+#define c_gbr ii_icrb0_c_fld_s.ic_gbr
+#define c_btenum ii_icrb0_c_fld_s.ic_bte_num
+#define c_cohtrans ii_icrb0_c_fld_s.ic_ct
+#define c_xtsize ii_icrb0_c_fld_s.ic_size
+#define c_source ii_icrb0_c_fld_s.ic_source
+#define c_regvalue ii_icrb0_c_regval
+
+typedef ii_icrb0_d_u_t icrbd_t;
+#define d_sleep ii_icrb0_d_fld_s.id_sleep
+#define d_pricnt ii_icrb0_d_fld_s.id_pr_cnt
+#define d_pripsc ii_icrb0_d_fld_s.id_pr_psc
+#define d_bteop ii_icrb0_d_fld_s.id_bte_op
+#define d_bteaddr ii_icrb0_d_fld_s.id_pa_be /* id_pa_be fld has 2 names */
+#define d_benable ii_icrb0_d_fld_s.id_pa_be /* id_pa_be fld has 2 names */
+#define d_regvalue ii_icrb0_d_regval
+
+typedef ii_icrb0_e_u_t icrbe_t;
+#define icrbe_ctxtvld ii_icrb0_e_fld_s.ie_cvld
+#define icrbe_toutvld ii_icrb0_e_fld_s.ie_tvld
+#define icrbe_context ii_icrb0_e_fld_s.ie_context
+#define icrbe_timeout ii_icrb0_e_fld_s.ie_timeout
+#define e_regvalue ii_icrb0_e_regval
+
+/* Number of widgets supported by shub */
+#define HUB_NUM_WIDGET 9
+#define HUB_WIDGET_ID_MIN 0x8
+#define HUB_WIDGET_ID_MAX 0xf
+
+#define HUB_WIDGET_PART_NUM 0xc120
+#define MAX_HUBS_PER_XBOW 2
+
+/* A few more #defines for backwards compatibility */
+#define iprb_t ii_iprb0_u_t
+#define iprb_regval ii_iprb0_regval
+#define iprb_mult_err ii_iprb0_fld_s.i_mult_err
+#define iprb_spur_rd ii_iprb0_fld_s.i_spur_rd
+#define iprb_spur_wr ii_iprb0_fld_s.i_spur_wr
+#define iprb_rd_to ii_iprb0_fld_s.i_rd_to
+#define iprb_ovflow ii_iprb0_fld_s.i_of_cnt
+#define iprb_error ii_iprb0_fld_s.i_error
+#define iprb_ff ii_iprb0_fld_s.i_f
+#define iprb_mode ii_iprb0_fld_s.i_m
+#define iprb_bnakctr ii_iprb0_fld_s.i_nb
+#define iprb_anakctr ii_iprb0_fld_s.i_na
+#define iprb_xtalkctr ii_iprb0_fld_s.i_c
+
+#define LNK_STAT_WORKING 0x2 /* LLP is working */
+
+#define IIO_WSTAT_ECRAZY (1ULL << 32) /* Hub gone crazy */
+#define IIO_WSTAT_TXRETRY (1ULL << 9) /* Hub Tx Retry timeout */
+#define IIO_WSTAT_TXRETRY_MASK 0x7F /* should be 0xFF?? */
+#define IIO_WSTAT_TXRETRY_SHFT 16
+#define IIO_WSTAT_TXRETRY_CNT(w) (((w) >> IIO_WSTAT_TXRETRY_SHFT) & \
+ IIO_WSTAT_TXRETRY_MASK)
+
+/* Number of II perf. counters we can multiplex at once */
+
+#define IO_PERF_SETS 32
+
+/* Bit for the widget in inbound access register */
+#define IIO_IIWA_WIDGET(_w) ((u64)(1ULL << _w))
+/* Bit for the widget in outbound access register */
+#define IIO_IOWA_WIDGET(_w) ((u64)(1ULL << _w))
+
+/* NOTE: The following define assumes that we are going to get
+ * widget numbers from 8 thru F and the device numbers within
+ * widget from 0 thru 7.
+ */
+#define IIO_IIDEM_WIDGETDEV_MASK(w, d) ((u64)(1ULL << (8 * ((w) - 8) + (d))))
+
+/* IO Interrupt Destination Register */
+#define IIO_IIDSR_SENT_SHIFT 28
+#define IIO_IIDSR_SENT_MASK 0x30000000
+#define IIO_IIDSR_ENB_SHIFT 24
+#define IIO_IIDSR_ENB_MASK 0x01000000
+#define IIO_IIDSR_NODE_SHIFT 9
+#define IIO_IIDSR_NODE_MASK 0x000ff700
+#define IIO_IIDSR_PI_ID_SHIFT 8
+#define IIO_IIDSR_PI_ID_MASK 0x00000100
+#define IIO_IIDSR_LVL_SHIFT 0
+#define IIO_IIDSR_LVL_MASK 0x000000ff
+
+/* Xtalk timeout threshold register (IIO_IXTT) */
+#define IXTT_RRSP_TO_SHFT 55 /* read response timeout */
+#define IXTT_RRSP_TO_MASK (0x1FULL << IXTT_RRSP_TO_SHFT)
+#define IXTT_RRSP_PS_SHFT 32 /* read response timeout prescaler */
+#define IXTT_RRSP_PS_MASK (0x7FFFFFULL << IXTT_RRSP_PS_SHFT)
+#define IXTT_TAIL_TO_SHFT 0 /* tail timeout counter threshold */
+#define IXTT_TAIL_TO_MASK (0x3FFFFFFULL << IXTT_TAIL_TO_SHFT)
+
+/*
+ * The IO LLP control status register and widget control register
+ */
+
+typedef union hubii_wcr_u {
+ u64 wcr_reg_value;
+ struct {
+ u64 wcr_widget_id:4, /* Widget identification */
+ wcr_tag_mode:1, /* Tag mode */
+ wcr_rsvd1:8, /* Reserved */
+ wcr_xbar_crd:3, /* LLP crossbar credit */
+ wcr_f_bad_pkt:1, /* Force bad llp pkt enable */
+ wcr_dir_con:1, /* widget direct connect */
+ wcr_e_thresh:5, /* elasticity threshold */
+ wcr_rsvd:41; /* unused */
+ } wcr_fields_s;
+} hubii_wcr_t;
+
+#define iwcr_dir_con wcr_fields_s.wcr_dir_con
+
+/* The structures below are defined to extract and modify the ii
+performance registers */
+
+/* io_perf_sel allows the caller to specify what tests will be
+ performed */
+
+typedef union io_perf_sel {
+ u64 perf_sel_reg;
+ struct {
+ u64 perf_ippr0:4, perf_ippr1:4, perf_icct:8, perf_rsvd:48; /* same field layout as ii_ipcr_u (IIO_IPCR) */
+ } perf_sel_bits;
+} io_perf_sel_t;
+
+/* io_perf_cnt is to extract the count from the shub registers. Due to
+ hardware problems there is only one counter, not two. */
+
+typedef union io_perf_cnt {
+ u64 perf_cnt;
+ struct {
+ u64 perf_cnt:20, perf_rsvd2:12, perf_rsvd1:32;
+ } perf_cnt_bits;
+
+} io_perf_cnt_t;
+
+typedef union iprte_a { /* presumably a PIO Read Table Entry (cf. IIO_PRTE_A/_B) — verify against SHub spec */
+ u64 entry;
+ struct {
+ u64 i_rsvd_1:3;
+ u64 i_addr:38;
+ u64 i_init:3;
+ u64 i_source:8;
+ u64 i_rsvd:2;
+ u64 i_widget:4;
+ u64 i_to_cnt:5;
+ u64 i_vld:1;
+ } iprte_fields;
+} iprte_a_t;
+
+#endif /* _ASM_IA64_SN_SHUBIO_H */
diff --git a/xen/include/asm-ia64/linux/asm/sn/simulator.h b/xen/include/asm-ia64/linux/asm/sn/simulator.h
new file mode 100644
index 0000000000..c3fd3eb257
--- /dev/null
+++ b/xen/include/asm-ia64/linux/asm/sn/simulator.h
@@ -0,0 +1,20 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_SIMULATOR_H
+#define _ASM_IA64_SN_SIMULATOR_H
+
+
+#define SNMAGIC 0xaeeeeeee8badbeefL
+#define IS_MEDUSA() ({long sn; asm("mov %0=cpuid[%1]" : "=r"(sn) : "r"(2)); sn == SNMAGIC;})
+
+#define SIMULATOR_SLEEP() asm("nop.i 0x8beef")
+#define IS_RUNNING_ON_SIMULATOR() (sn_prom_type)
+#define IS_RUNNING_ON_FAKE_PROM() (sn_prom_type == 2)
+extern int sn_prom_type; /* 0=hardware, 1=medusa/realprom, 2=medusa/fakeprom */
+
+#endif /* _ASM_IA64_SN_SIMULATOR_H */
diff --git a/xen/include/asm-ia64/linux/asm/sn/sn_cpuid.h b/xen/include/asm-ia64/linux/asm/sn/sn_cpuid.h
new file mode 100644
index 0000000000..a676dd9ace
--- /dev/null
+++ b/xen/include/asm-ia64/linux/asm/sn/sn_cpuid.h
@@ -0,0 +1,132 @@
+/*
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+
+
+#ifndef _ASM_IA64_SN_SN_CPUID_H
+#define _ASM_IA64_SN_SN_CPUID_H
+
+#include <linux/smp.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/pda.h>
+#include <asm/intrinsics.h>
+
+
+/*
+ * Functions for converting between cpuids, nodeids and NASIDs.
+ *
+ * These are for SGI platforms only.
+ *
+ */
+
+
+
+
+/*
+ * Definitions of terms (these definitions are for IA64 ONLY. Other architectures
+ * use cpuid/cpunum quite defferently):
+ *
+ * CPUID - a number in range of 0..NR_CPUS-1 that uniquely identifies
+ * the cpu. The value cpuid has no significance on IA64 other than
+ * the boot cpu is 0.
+ * smp_processor_id() returns the cpuid of the current cpu.
+ *
+ * CPU_PHYSICAL_ID (also known as HARD_PROCESSOR_ID)
+ * This is the same as 31:24 of the processor LID register
+ * hard_smp_processor_id()- cpu_physical_id of current processor
+ * cpu_physical_id(cpuid) - convert a <cpuid> to a <physical_cpuid>
+ * cpu_logical_id(phy_id) - convert a <physical_cpuid> to a <cpuid>
+ * * not real efficient - don't use in perf critical code
+ *
+ * SLICE - a number in the range of 0 - 3 (typically) that represents the
+ * cpu number on a brick.
+ *
+ * SUBNODE - (almost obsolete) the number of the FSB that a cpu is
+ * connected to. This is also the same as the PI number. Usually 0 or 1.
+ *
+ * NOTE!!!: the value of the bits in the cpu physical id (SAPICid or LID) of a cpu has no
+ * significance. The SAPIC id (LID) is a 16-bit cookie that has meaning only to the PROM.
+ *
+ *
+ * The macros convert between cpu physical ids & slice/nasid/cnodeid.
+ * These terms are described below:
+ *
+ *
+ * Brick
+ * ----- ----- ----- ----- CPU
+ * | 0 | | 1 | | 0 | | 1 | SLICE
+ * ----- ----- ----- -----
+ * | | | |
+ * | | | |
+ * 0 | | 2 0 | | 2 FSB SLOT
+ * ------- -------
+ * | |
+ * | |
+ * | |
+ * ------------ -------------
+ * | | | |
+ * | SHUB | | SHUB | NASID (0..MAX_NASIDS)
+ * | |----- | | CNODEID (0..num_compact_nodes-1)
+ * | | | |
+ * | | | |
+ * ------------ -------------
+ * | |
+ *
+ *
+ */
+
+#define get_node_number(addr) NASID_GET(addr)
+
+/*
+ * NOTE: on non-MP systems, only cpuid 0 exists
+ */
+
+extern short physical_node_map[]; /* indexed by nasid to get cnode */
+
+/*
+ * Macros for retrieving info about current cpu
+ */
+#define get_nasid() (sn_nodepda->phys_cpuid[smp_processor_id()].nasid)
+#define get_subnode() (sn_nodepda->phys_cpuid[smp_processor_id()].subnode)
+#define get_slice() (sn_nodepda->phys_cpuid[smp_processor_id()].slice)
+#define get_cnode() (sn_nodepda->phys_cpuid[smp_processor_id()].cnode)
+#define get_sapicid() ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff)
+
+/*
+ * Macros for retrieving info about an arbitrary cpu
+ * cpuid - logical cpu id
+ */
+#define cpuid_to_nasid(cpuid) (sn_nodepda->phys_cpuid[cpuid].nasid)
+#define cpuid_to_subnode(cpuid) (sn_nodepda->phys_cpuid[cpuid].subnode)
+#define cpuid_to_slice(cpuid) (sn_nodepda->phys_cpuid[cpuid].slice)
+
+
+/*
+ * Dont use the following in performance critical code. They require scans
+ * of potentially large tables.
+ */
+extern int nasid_slice_to_cpuid(int, int);
+
+/*
+ * cnodeid_to_nasid - convert a cnodeid to a NASID
+ */
+#define cnodeid_to_nasid(cnodeid) (sn_cnodeid_to_nasid[cnodeid])
+
+/*
+ * nasid_to_cnodeid - convert a NASID to a cnodeid
+ */
+#define nasid_to_cnodeid(nasid) (physical_node_map[nasid])
+
+/*
+ * partition_coherence_id - get the coherence ID of the current partition
+ */
+extern u8 sn_coherency_id;
+#define partition_coherence_id() (sn_coherency_id)
+
+#endif /* _ASM_IA64_SN_SN_CPUID_H */
+
diff --git a/xen/include/asm-ia64/linux/asm/sn/sn_feature_sets.h b/xen/include/asm-ia64/linux/asm/sn/sn_feature_sets.h
new file mode 100644
index 0000000000..30dcfa442e
--- /dev/null
+++ b/xen/include/asm-ia64/linux/asm/sn/sn_feature_sets.h
@@ -0,0 +1,51 @@
+#ifndef _ASM_IA64_SN_FEATURE_SETS_H
+#define _ASM_IA64_SN_FEATURE_SETS_H
+
+/*
+ * SN PROM Features
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2005-2006 Silicon Graphics, Inc. All rights reserved.
+ */
+
+
+/* --------------------- PROM Features -----------------------------*/
+extern int sn_prom_feature_available(int id);
+
+#define MAX_PROM_FEATURE_SETS 2
+
+/*
+ * The following defines features that may or may not be supported by the
+ * current PROM. The OS uses sn_prom_feature_available(feature) to test for
+ * the presence of a PROM feature. Down rev (old) PROMs will always test
+ * "false" for new features.
+ *
+ * Use:
+ * if (sn_prom_feature_available(PRF_XXX))
+ * ...
+ */
+
+#define PRF_PAL_CACHE_FLUSH_SAFE 0
+#define PRF_DEVICE_FLUSH_LIST 1
+#define PRF_HOTPLUG_SUPPORT 2
+
+/* --------------------- OS Features -------------------------------*/
+
+/*
+ * The following defines OS features that are optionally present in
+ * the operating system.
+ * During boot, PROM is notified of these features via a series of calls:
+ *
+ * ia64_sn_set_os_feature(feature1);
+ *
+ * Once enabled, a feature cannot be disabled.
+ *
+ * By default, features are disabled unless explicitly enabled.
+ */
+#define OSF_MCA_SLV_TO_OS_INIT_SLV 0
+#define OSF_FEAT_LOG_SBES 1
+
+#endif /* _ASM_IA64_SN_FEATURE_SETS_H */
diff --git a/xen/include/asm-ia64/linux-xen/asm/sn/sn_sal.h b/xen/include/asm-ia64/linux/asm/sn/sn_sal.h
index 34f669ad55..ba826b3f75 100644
--- a/xen/include/asm-ia64/linux-xen/asm/sn/sn_sal.h
+++ b/xen/include/asm-ia64/linux/asm/sn/sn_sal.h
@@ -8,16 +8,16 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc. All rights reserved.
*/
-#include <linux/config.h>
#include <asm/sal.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/arch.h>
#include <asm/sn/geo.h>
#include <asm/sn/nodepda.h>
+#include <asm/sn/shub_mmr.h>
// SGI Specific Calls
#define SN_SAL_POD_MODE 0x02000001
@@ -34,8 +34,8 @@
#define SN_SAL_PRINT_ERROR 0x02000012
#define SN_SAL_SET_ERROR_HANDLING_FEATURES 0x0200001a // reentrant
#define SN_SAL_GET_FIT_COMPT 0x0200001b // reentrant
-#define SN_SAL_GET_HUB_INFO 0x0200001c
#define SN_SAL_GET_SAPIC_INFO 0x0200001d
+#define SN_SAL_GET_SN_INFO 0x0200001e
#define SN_SAL_CONSOLE_PUTC 0x02000021
#define SN_SAL_CONSOLE_GETC 0x02000022
#define SN_SAL_CONSOLE_PUTS 0x02000023
@@ -46,6 +46,7 @@
#define SN_SAL_CONSOLE_PUTB 0x02000028
#define SN_SAL_CONSOLE_XMIT_CHARS 0x0200002a
#define SN_SAL_CONSOLE_READC 0x0200002b
+#define SN_SAL_SYSCTL_OP 0x02000030
#define SN_SAL_SYSCTL_MODID_GET 0x02000031
#define SN_SAL_SYSCTL_GET 0x02000032
#define SN_SAL_SYSCTL_IOBRICK_MODULE_GET 0x02000033
@@ -54,7 +55,7 @@
#define SN_SAL_BUS_CONFIG 0x02000037
#define SN_SAL_SYS_SERIAL_GET 0x02000038
#define SN_SAL_PARTITION_SERIAL_GET 0x02000039
-#define SN_SAL_SYSCTL_PARTITION_GET 0x0200003a
+#define SN_SAL_SYSCTL_PARTITION_GET 0x0200003a
#define SN_SAL_SYSTEM_POWER_DOWN 0x0200003b
#define SN_SAL_GET_MASTER_BASEIO_NASID 0x0200003c
#define SN_SAL_COHERENCE 0x0200003d
@@ -63,19 +64,28 @@
#define SN_SAL_SYSCTL_IOBRICK_PCI_OP 0x02000042 // reentrant
#define SN_SAL_IROUTER_OP 0x02000043
+#define SN_SAL_SYSCTL_EVENT 0x02000044
#define SN_SAL_IOIF_INTERRUPT 0x0200004a
#define SN_SAL_HWPERF_OP 0x02000050 // lock
#define SN_SAL_IOIF_ERROR_INTERRUPT 0x02000051
-
+#define SN_SAL_IOIF_PCI_SAFE 0x02000052
#define SN_SAL_IOIF_SLOT_ENABLE 0x02000053
#define SN_SAL_IOIF_SLOT_DISABLE 0x02000054
#define SN_SAL_IOIF_GET_HUBDEV_INFO 0x02000055
#define SN_SAL_IOIF_GET_PCIBUS_INFO 0x02000056
#define SN_SAL_IOIF_GET_PCIDEV_INFO 0x02000057
-#define SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST 0x02000058
+#define SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST 0x02000058 // deprecated
+#define SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST 0x0200005a
#define SN_SAL_HUB_ERROR_INTERRUPT 0x02000060
+#define SN_SAL_BTE_RECOVER 0x02000061
+#define SN_SAL_RESERVED_DO_NOT_USE 0x02000062
+#define SN_SAL_IOIF_GET_PCI_TOPOLOGY 0x02000064
+#define SN_SAL_GET_PROM_FEATURE_SET 0x02000065
+#define SN_SAL_SET_OS_FEATURE_SET 0x02000066
+#define SN_SAL_INJECT_ERROR 0x02000067
+#define SN_SAL_SET_CPU_NUMBER 0x02000068
/*
* Service-specific constants
@@ -95,6 +105,13 @@
#define SAL_INTR_FREE 2
/*
+ * operations available on the generic SN_SAL_SYSCTL_OP
+ * runtime service
+ */
+#define SAL_SYSCTL_OP_IOBOARD 0x0001 /* retrieve board type */
+#define SAL_SYSCTL_OP_TIO_JLCK_RST 0x0002 /* issue TIO clock reset */
+
+/*
* IRouter (i.e. generalized system controller) operations
*/
#define SAL_IROUTER_OPEN 0 /* open a subchannel */
@@ -112,6 +129,13 @@
#define SAL_IROUTER_INTR_XMIT SAL_CONSOLE_INTR_XMIT
#define SAL_IROUTER_INTR_RECV SAL_CONSOLE_INTR_RECV
+/*
+ * Error Handling Features
+ */
+#define SAL_ERR_FEAT_MCA_SLV_TO_OS_INIT_SLV 0x1 // obsolete
+#define SAL_ERR_FEAT_LOG_SBES 0x2 // obsolete
+#define SAL_ERR_FEAT_MFR_OVERRIDE 0x4
+#define SAL_ERR_FEAT_SBE_THRESHOLD 0xffff0000
/*
* SAL Error Codes
@@ -122,46 +146,26 @@
#define SALRET_INVALID_ARG (-2)
#define SALRET_ERROR (-3)
+#define SN_SAL_FAKE_PROM 0x02009999
-#ifndef XEN
/**
- * sn_sal_rev_major - get the major SGI SAL revision number
- *
- * The SGI PROM stores its version in sal_[ab]_rev_(major|minor).
- * This routine simply extracts the major value from the
- * @ia64_sal_systab structure constructed by ia64_sal_init().
- */
-static inline int
-sn_sal_rev_major(void)
+ * sn_sal_revision - get the SGI SAL revision number
+ *
+ * The SGI PROM stores its version in the sal_[ab]_rev_(major|minor).
+ * This routine simply extracts the major and minor values and
+ * presents them in a u32 format.
+ *
+ * For example, version 4.05 would be represented at 0x0405.
+ */
+static inline u32
+sn_sal_rev(void)
{
- struct ia64_sal_systab *systab = efi.sal_systab;
-
- return (int)systab->sal_b_rev_major;
-}
+ struct ia64_sal_systab *systab = __va(efi.sal_systab);
-/**
- * sn_sal_rev_minor - get the minor SGI SAL revision number
- *
- * The SGI PROM stores its version in sal_[ab]_rev_(major|minor).
- * This routine simply extracts the minor value from the
- * @ia64_sal_systab structure constructed by ia64_sal_init().
- */
-static inline int
-sn_sal_rev_minor(void)
-{
- struct ia64_sal_systab *systab = efi.sal_systab;
-
- return (int)systab->sal_b_rev_minor;
+ return (u32)(systab->sal_b_rev_major << 8 | systab->sal_b_rev_minor);
}
/*
- * Specify the minimum PROM revsion required for this kernel.
- * Note that they're stored in hex format...
- */
-#define SN_SAL_MIN_MAJOR 0x4 /* SN2 kernels need at least PROM 4.0 */
-#define SN_SAL_MIN_MINOR 0x0
-
-/*
* Returns the master console nasid, if the call fails, return an illegal
* value.
*/
@@ -205,29 +209,18 @@ ia64_sn_get_master_baseio_nasid(void)
return ret_stuff.v0;
}
-static inline char *
+static inline void *
ia64_sn_get_klconfig_addr(nasid_t nasid)
{
struct ia64_sal_retval ret_stuff;
- int cnodeid;
- cnodeid = nasid_to_cnodeid(nasid);
ret_stuff.status = 0;
ret_stuff.v0 = 0;
ret_stuff.v1 = 0;
ret_stuff.v2 = 0;
SAL_CALL(ret_stuff, SN_SAL_GET_KLCONFIG_ADDR, (u64)nasid, 0, 0, 0, 0, 0, 0);
-
- /*
- * We should panic if a valid cnode nasid does not produce
- * a klconfig address.
- */
- if (ret_stuff.status != 0) {
- panic("ia64_sn_get_klconfig_addr: Returned error %lx\n", ret_stuff.status);
- }
return ret_stuff.v0 ? __va(ret_stuff.v0) : NULL;
}
-#endif /* !XEN */
/*
* Returns the next console character.
@@ -281,7 +274,7 @@ ia64_sn_console_putc(char ch)
ret_stuff.v0 = 0;
ret_stuff.v1 = 0;
ret_stuff.v2 = 0;
- SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_PUTC, (uint64_t)ch, 0, 0, 0, 0, 0, 0);
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_PUTC, (u64)ch, 0, 0, 0, 0, 0, 0);
return ret_stuff.status;
}
@@ -298,7 +291,7 @@ ia64_sn_console_putb(const char *buf, int len)
ret_stuff.v0 = 0;
ret_stuff.v1 = 0;
ret_stuff.v2 = 0;
- SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_PUTB, (uint64_t)buf, (uint64_t)len, 0, 0, 0, 0, 0);
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_PUTB, (u64)buf, (u64)len, 0, 0, 0, 0, 0);
if ( ret_stuff.status == 0 ) {
return ret_stuff.v0;
@@ -306,7 +299,6 @@ ia64_sn_console_putb(const char *buf, int len)
return (u64)0;
}
-#ifndef XEN
/*
* Print a platform error record
*/
@@ -319,7 +311,7 @@ ia64_sn_plat_specific_err_print(int (*hook)(const char*, ...), char *rec)
ret_stuff.v0 = 0;
ret_stuff.v1 = 0;
ret_stuff.v2 = 0;
- SAL_CALL_REENTRANT(ret_stuff, SN_SAL_PRINT_ERROR, (uint64_t)hook, (uint64_t)rec, 0, 0, 0, 0, 0);
+ SAL_CALL_REENTRANT(ret_stuff, SN_SAL_PRINT_ERROR, (u64)hook, (u64)rec, 0, 0, 0, 0, 0);
return ret_stuff.status;
}
@@ -342,6 +334,25 @@ ia64_sn_plat_cpei_handler(void)
}
/*
+ * Set Error Handling Features (Obsolete)
+ */
+static inline u64
+ia64_sn_plat_set_error_handling_features(void)
+{
+ struct ia64_sal_retval ret_stuff;
+
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+ ret_stuff.v1 = 0;
+ ret_stuff.v2 = 0;
+ SAL_CALL_REENTRANT(ret_stuff, SN_SAL_SET_ERROR_HANDLING_FEATURES,
+ SAL_ERR_FEAT_LOG_SBES,
+ 0, 0, 0, 0, 0, 0);
+
+ return ret_stuff.status;
+}
+
+/*
* Checks for console input.
*/
static inline u64
@@ -388,7 +399,7 @@ ia64_sn_console_intr_status(void)
* Enable an interrupt on the SAL console device.
*/
static inline void
-ia64_sn_console_intr_enable(uint64_t intr)
+ia64_sn_console_intr_enable(u64 intr)
{
struct ia64_sal_retval ret_stuff;
@@ -405,7 +416,7 @@ ia64_sn_console_intr_enable(uint64_t intr)
* Disable an interrupt on the SAL console device.
*/
static inline void
-ia64_sn_console_intr_disable(uint64_t intr)
+ia64_sn_console_intr_disable(u64 intr)
{
struct ia64_sal_retval ret_stuff;
@@ -431,7 +442,7 @@ ia64_sn_console_xmit_chars(char *buf, int len)
ret_stuff.v1 = 0;
ret_stuff.v2 = 0;
SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_XMIT_CHARS,
- (uint64_t)buf, (uint64_t)len,
+ (u64)buf, (u64)len,
0, 0, 0, 0, 0);
if (ret_stuff.status == 0) {
@@ -472,7 +483,7 @@ static inline u64
ia64_sn_pod_mode(void)
{
struct ia64_sal_retval isrv;
- SAL_CALL(isrv, SN_SAL_POD_MODE, 0, 0, 0, 0, 0, 0, 0);
+ SAL_CALL_REENTRANT(isrv, SN_SAL_POD_MODE, 0, 0, 0, 0, 0, 0, 0);
if (isrv.status)
return 0;
return isrv.v0;
@@ -557,7 +568,8 @@ static inline u64
ia64_sn_partition_serial_get(void)
{
struct ia64_sal_retval ret_stuff;
- SAL_CALL(ret_stuff, SN_SAL_PARTITION_SERIAL_GET, 0, 0, 0, 0, 0, 0, 0);
+ ia64_sal_oemcall_reentrant(&ret_stuff, SN_SAL_PARTITION_SERIAL_GET, 0,
+ 0, 0, 0, 0, 0, 0);
if (ret_stuff.status != 0)
return 0;
return ret_stuff.v0;
@@ -565,11 +577,10 @@ ia64_sn_partition_serial_get(void)
static inline u64
sn_partition_serial_number_val(void) {
- if (sn_partition_serial_number) {
- return(sn_partition_serial_number);
- } else {
- return(sn_partition_serial_number = ia64_sn_partition_serial_get());
+ if (unlikely(sn_partition_serial_number == 0)) {
+ sn_partition_serial_number = ia64_sn_partition_serial_get();
}
+ return sn_partition_serial_number;
}
/*
@@ -581,25 +592,38 @@ ia64_sn_sysctl_partition_get(nasid_t nasid)
{
struct ia64_sal_retval ret_stuff;
SAL_CALL(ret_stuff, SN_SAL_SYSCTL_PARTITION_GET, nasid,
- 0, 0, 0, 0, 0, 0);
+ 0, 0, 0, 0, 0, 0);
if (ret_stuff.status != 0)
- return INVALID_PARTID;
+ return -1;
return ((partid_t)ret_stuff.v0);
}
/*
- * Returns the partition id of the current processor.
+ * Returns the physical address of the partition's reserved page through
+ * an iterative number of calls.
+ *
+ * On first call, 'cookie' and 'len' should be set to 0, and 'addr'
+ * set to the nasid of the partition whose reserved page's address is
+ * being sought.
+ * On subsequent calls, pass the values, that were passed back on the
+ * previous call.
+ *
+ * While the return status equals SALRET_MORE_PASSES, keep calling
+ * this function after first copying 'len' bytes starting at 'addr'
+ * into 'buf'. Once the return status equals SALRET_OK, 'addr' will
+ * be the physical address of the partition's reserved page. If the
+ * return status equals neither of these, an error as occurred.
*/
-
-extern partid_t sn_partid;
-
-static inline partid_t
-sn_local_partid(void) {
- if (sn_partid < 0) {
- return (sn_partid = ia64_sn_sysctl_partition_get(cpuid_to_nasid(smp_processor_id())));
- } else {
- return sn_partid;
- }
+static inline s64
+sn_partition_reserved_page_pa(u64 buf, u64 *cookie, u64 *addr, u64 *len)
+{
+ struct ia64_sal_retval rv;
+ ia64_sal_oemcall_reentrant(&rv, SN_SAL_GET_PARTITION_ADDR, *cookie,
+ *addr, buf, *len, 0, 0, 0);
+ *cookie = rv.v0;
+ *addr = rv.v1;
+ *len = rv.v2;
+ return rv.status;
}
/*
@@ -621,8 +645,8 @@ static inline int
sn_register_xp_addr_region(u64 paddr, u64 len, int operation)
{
struct ia64_sal_retval ret_stuff;
- SAL_CALL(ret_stuff, SN_SAL_XP_ADDR_REGION, paddr, len, (u64)operation,
- 0, 0, 0, 0);
+ ia64_sal_oemcall(&ret_stuff, SN_SAL_XP_ADDR_REGION, paddr, len,
+ (u64)operation, 0, 0, 0, 0);
return ret_stuff.status;
}
@@ -646,8 +670,8 @@ sn_register_nofault_code(u64 start_addr, u64 end_addr, u64 return_addr,
} else {
call = SN_SAL_NO_FAULT_ZONE_PHYSICAL;
}
- SAL_CALL(ret_stuff, call, start_addr, end_addr, return_addr, (u64)1,
- 0, 0, 0);
+ ia64_sal_oemcall(&ret_stuff, call, start_addr, end_addr, return_addr,
+ (u64)1, 0, 0, 0);
return ret_stuff.status;
}
@@ -668,8 +692,8 @@ static inline int
sn_change_coherence(u64 *new_domain, u64 *old_domain)
{
struct ia64_sal_retval ret_stuff;
- SAL_CALL(ret_stuff, SN_SAL_COHERENCE, new_domain, old_domain, 0, 0,
- 0, 0, 0);
+ ia64_sal_oemcall(&ret_stuff, SN_SAL_COHERENCE, (u64)new_domain,
+ (u64)old_domain, 0, 0, 0, 0, 0);
return ret_stuff.status;
}
@@ -682,16 +706,9 @@ static inline int
sn_change_memprotect(u64 paddr, u64 len, u64 perms, u64 *nasid_array)
{
struct ia64_sal_retval ret_stuff;
- int cnodeid;
- unsigned long irq_flags;
-
- cnodeid = nasid_to_cnodeid(get_node_number(paddr));
- // spin_lock(&NODEPDA(cnodeid)->bist_lock);
- local_irq_save(irq_flags);
- SAL_CALL_NOLOCK(ret_stuff, SN_SAL_MEMPROTECT, paddr, len, nasid_array,
- perms, 0, 0, 0);
- local_irq_restore(irq_flags);
- // spin_unlock(&NODEPDA(cnodeid)->bist_lock);
+
+ ia64_sal_oemcall_nolock(&ret_stuff, SN_SAL_MEMPROTECT, paddr, len,
+ (u64)nasid_array, perms, 0, 0, 0);
return ret_stuff.status;
}
#define SN_MEMPROT_ACCESS_CLASS_0 0x14a080
@@ -709,7 +726,8 @@ ia64_sn_power_down(void)
{
struct ia64_sal_retval ret_stuff;
SAL_CALL(ret_stuff, SN_SAL_SYSTEM_POWER_DOWN, 0, 0, 0, 0, 0, 0, 0);
- while(1);
+ while(1)
+ cpu_relax();
/* never returns */
}
@@ -851,6 +869,58 @@ ia64_sn_irtr_intr_disable(nasid_t nasid, int subch, u64 intr)
return (int) rv.v0;
}
+/*
+ * Set up a node as the point of contact for system controller
+ * environmental event delivery.
+ */
+static inline int
+ia64_sn_sysctl_event_init(nasid_t nasid)
+{
+ struct ia64_sal_retval rv;
+ SAL_CALL_REENTRANT(rv, SN_SAL_SYSCTL_EVENT, (u64) nasid,
+ 0, 0, 0, 0, 0, 0);
+ return (int) rv.v0;
+}
+
+/*
+ * Ask the system controller on the specified nasid to reset
+ * the CX corelet clock. Only valid on TIO nodes.
+ */
+static inline int
+ia64_sn_sysctl_tio_clock_reset(nasid_t nasid)
+{
+ struct ia64_sal_retval rv;
+ SAL_CALL_REENTRANT(rv, SN_SAL_SYSCTL_OP, SAL_SYSCTL_OP_TIO_JLCK_RST,
+ nasid, 0, 0, 0, 0, 0);
+ if (rv.status != 0)
+ return (int)rv.status;
+ if (rv.v0 != 0)
+ return (int)rv.v0;
+
+ return 0;
+}
+
+/*
+ * Get the associated ioboard type for a given nasid.
+ */
+static inline s64
+ia64_sn_sysctl_ioboard_get(nasid_t nasid, u16 *ioboard)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL_REENTRANT(isrv, SN_SAL_SYSCTL_OP, SAL_SYSCTL_OP_IOBOARD,
+ nasid, 0, 0, 0, 0, 0);
+ if (isrv.v0 != 0) {
+ *ioboard = isrv.v0;
+ return isrv.status;
+ }
+ if (isrv.v1 != 0) {
+ *ioboard = isrv.v1;
+ return isrv.status;
+ }
+
+ return isrv.status;
+}
+
/**
* ia64_sn_get_fit_compt - read a FIT entry from the PROM header
* @nasid: NASID of node to read
@@ -938,15 +1008,24 @@ ia64_sn_get_sapic_info(int sapicid, int *nasid, int *subnode, int *slice)
/*
* Returns information about the HUB/SHUB.
* In:
- * arg0 - SN_SAL_GET_HUB_INFO
+ * arg0 - SN_SAL_GET_SN_INFO
* arg1 - 0 (other values reserved for future use)
* Out:
- * v0 - shub type (0=shub1, 1=shub2)
- * v1 - masid mask (ex., 0x7ff for 11 bit nasid)
- * v2 - bit position of low nasid bit
+ * v0
+ * [7:0] - shub type (0=shub1, 1=shub2)
+ * [15:8] - Log2 max number of nodes in entire system (includes
+ * C-bricks, I-bricks, etc)
+ * [23:16] - Log2 of nodes per sharing domain
+ * [31:24] - partition ID
+ * [39:32] - coherency_id
+ * [47:40] - regionsize
+ * v1
+ * [15:0] - nasid mask (ex., 0x7ff for 11 bit nasid)
+ * [23:15] - bit position of low nasid bit
*/
static inline u64
-ia64_sn_get_hub_info(int fc, u64 *arg1, u64 *arg2, u64 *arg3)
+ia64_sn_get_sn_info(int fc, u8 *shubtype, u16 *nasid_bitmask, u8 *nasid_shift,
+ u8 *systemsize, u8 *sharing_domain_size, u8 *partid, u8 *coher, u8 *reg)
{
struct ia64_sal_retval ret_stuff;
@@ -954,13 +1033,22 @@ ia64_sn_get_hub_info(int fc, u64 *arg1, u64 *arg2, u64 *arg3)
ret_stuff.v0 = 0;
ret_stuff.v1 = 0;
ret_stuff.v2 = 0;
- SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_HUB_INFO, fc, 0, 0, 0, 0, 0, 0);
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_SN_INFO, fc, 0, 0, 0, 0, 0, 0);
/***** BEGIN HACK - temp til old proms no longer supported ********/
if (ret_stuff.status == SALRET_NOT_IMPLEMENTED) {
- if (arg1) *arg1 = 0;
- if (arg2) *arg2 = 0x7ff;
- if (arg3) *arg3 = 38;
+ int nasid = get_sapicid() & 0xfff;
+#define SH_SHUB_ID_NODES_PER_BIT_MASK 0x001f000000000000UL
+#define SH_SHUB_ID_NODES_PER_BIT_SHFT 48
+ if (shubtype) *shubtype = 0;
+ if (nasid_bitmask) *nasid_bitmask = 0x7ff;
+ if (nasid_shift) *nasid_shift = 38;
+ if (systemsize) *systemsize = 10;
+ if (sharing_domain_size) *sharing_domain_size = 8;
+ if (partid) *partid = ia64_sn_sysctl_partition_get(nasid);
+ if (coher) *coher = nasid >> 9;
+ if (reg) *reg = (HUB_L((u64 *) LOCAL_MMR_ADDR(SH1_SHUB_ID)) & SH_SHUB_ID_NODES_PER_BIT_MASK) >>
+ SH_SHUB_ID_NODES_PER_BIT_SHFT;
return 0;
}
/***** END HACK *******/
@@ -968,9 +1056,14 @@ ia64_sn_get_hub_info(int fc, u64 *arg1, u64 *arg2, u64 *arg3)
if (ret_stuff.status < 0)
return ret_stuff.status;
- if (arg1) *arg1 = ret_stuff.v0;
- if (arg2) *arg2 = ret_stuff.v1;
- if (arg3) *arg3 = ret_stuff.v2;
+ if (shubtype) *shubtype = ret_stuff.v0 & 0xff;
+ if (systemsize) *systemsize = (ret_stuff.v0 >> 8) & 0xff;
+ if (sharing_domain_size) *sharing_domain_size = (ret_stuff.v0 >> 16) & 0xff;
+ if (partid) *partid = (ret_stuff.v0 >> 24) & 0xff;
+ if (coher) *coher = (ret_stuff.v0 >> 32) & 0xff;
+ if (reg) *reg = (ret_stuff.v0 >> 40) & 0xff;
+ if (nasid_bitmask) *nasid_bitmask = (ret_stuff.v1 & 0xffff);
+ if (nasid_shift) *nasid_shift = (ret_stuff.v1 >> 16) & 0xff;
return 0;
}
@@ -990,5 +1083,75 @@ ia64_sn_hwperf_op(nasid_t nasid, u64 opcode, u64 a0, u64 a1, u64 a2,
*v0 = (int) rv.v0;
return (int) rv.status;
}
-#endif /* !XEN */
+
+static inline int
+ia64_sn_ioif_get_pci_topology(u64 buf, u64 len)
+{
+ struct ia64_sal_retval rv;
+ SAL_CALL_NOLOCK(rv, SN_SAL_IOIF_GET_PCI_TOPOLOGY, buf, len, 0, 0, 0, 0, 0);
+ return (int) rv.status;
+}
+
+/*
+ * BTE error recovery is implemented in SAL
+ */
+static inline int
+ia64_sn_bte_recovery(nasid_t nasid)
+{
+ struct ia64_sal_retval rv;
+
+ rv.status = 0;
+ SAL_CALL_NOLOCK(rv, SN_SAL_BTE_RECOVER, (u64)nasid, 0, 0, 0, 0, 0, 0);
+ if (rv.status == SALRET_NOT_IMPLEMENTED)
+ return 0;
+ return (int) rv.status;
+}
+
+static inline int
+ia64_sn_is_fake_prom(void)
+{
+ struct ia64_sal_retval rv;
+ SAL_CALL_NOLOCK(rv, SN_SAL_FAKE_PROM, 0, 0, 0, 0, 0, 0, 0);
+ return (rv.status == 0);
+}
+
+static inline int
+ia64_sn_get_prom_feature_set(int set, unsigned long *feature_set)
+{
+ struct ia64_sal_retval rv;
+
+ SAL_CALL_NOLOCK(rv, SN_SAL_GET_PROM_FEATURE_SET, set, 0, 0, 0, 0, 0, 0);
+ if (rv.status != 0)
+ return rv.status;
+ *feature_set = rv.v0;
+ return 0;
+}
+
+static inline int
+ia64_sn_set_os_feature(int feature)
+{
+ struct ia64_sal_retval rv;
+
+ SAL_CALL_NOLOCK(rv, SN_SAL_SET_OS_FEATURE_SET, feature, 0, 0, 0, 0, 0, 0);
+ return rv.status;
+}
+
+static inline int
+sn_inject_error(u64 paddr, u64 *data, u64 *ecc)
+{
+ struct ia64_sal_retval ret_stuff;
+
+ ia64_sal_oemcall_nolock(&ret_stuff, SN_SAL_INJECT_ERROR, paddr, (u64)data,
+ (u64)ecc, 0, 0, 0, 0);
+ return ret_stuff.status;
+}
+
+static inline int
+ia64_sn_set_cpu_number(int cpu)
+{
+ struct ia64_sal_retval rv;
+
+ SAL_CALL_NOLOCK(rv, SN_SAL_SET_CPU_NUMBER, cpu, 0, 0, 0, 0, 0, 0);
+ return rv.status;
+}
#endif /* _ASM_IA64_SN_SN_SAL_H */
diff --git a/xen/include/asm-ia64/linux/asm/sn/tiocp.h b/xen/include/asm-ia64/linux/asm/sn/tiocp.h
new file mode 100644
index 0000000000..e8ad0bb5b6
--- /dev/null
+++ b/xen/include/asm-ia64/linux/asm/sn/tiocp.h
@@ -0,0 +1,257 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_PCI_TIOCP_H
+#define _ASM_IA64_SN_PCI_TIOCP_H
+
+#define TIOCP_HOST_INTR_ADDR 0x003FFFFFFFFFFFFFUL
+#define TIOCP_PCI64_CMDTYPE_MEM (0x1ull << 60)
+#define TIOCP_PCI64_CMDTYPE_MSI (0x3ull << 60)
+
+
+/*****************************************************************************
+ *********************** TIOCP MMR structure mapping ***************************
+ *****************************************************************************/
+
+struct tiocp{
+
+ /* 0x000000-0x00FFFF -- Local Registers */
+
+ /* 0x000000-0x000057 -- (Legacy Widget Space) Configuration */
+ u64 cp_id; /* 0x000000 */
+ u64 cp_stat; /* 0x000008 */
+ u64 cp_err_upper; /* 0x000010 */
+ u64 cp_err_lower; /* 0x000018 */
+ #define cp_err cp_err_lower
+ u64 cp_control; /* 0x000020 */
+ u64 cp_req_timeout; /* 0x000028 */
+ u64 cp_intr_upper; /* 0x000030 */
+ u64 cp_intr_lower; /* 0x000038 */
+ #define cp_intr cp_intr_lower
+ u64 cp_err_cmdword; /* 0x000040 */
+ u64 _pad_000048; /* 0x000048 */
+ u64 cp_tflush; /* 0x000050 */
+
+ /* 0x000058-0x00007F -- Bridge-specific Configuration */
+ u64 cp_aux_err; /* 0x000058 */
+ u64 cp_resp_upper; /* 0x000060 */
+ u64 cp_resp_lower; /* 0x000068 */
+ #define cp_resp cp_resp_lower
+ u64 cp_tst_pin_ctrl; /* 0x000070 */
+ u64 cp_addr_lkerr; /* 0x000078 */
+
+ /* 0x000080-0x00008F -- PMU & MAP */
+ u64 cp_dir_map; /* 0x000080 */
+ u64 _pad_000088; /* 0x000088 */
+
+ /* 0x000090-0x00009F -- SSRAM */
+ u64 cp_map_fault; /* 0x000090 */
+ u64 _pad_000098; /* 0x000098 */
+
+ /* 0x0000A0-0x0000AF -- Arbitration */
+ u64 cp_arb; /* 0x0000A0 */
+ u64 _pad_0000A8; /* 0x0000A8 */
+
+ /* 0x0000B0-0x0000BF -- Number In A Can or ATE Parity Error */
+ u64 cp_ate_parity_err; /* 0x0000B0 */
+ u64 _pad_0000B8; /* 0x0000B8 */
+
+ /* 0x0000C0-0x0000FF -- PCI/GIO */
+ u64 cp_bus_timeout; /* 0x0000C0 */
+ u64 cp_pci_cfg; /* 0x0000C8 */
+ u64 cp_pci_err_upper; /* 0x0000D0 */
+ u64 cp_pci_err_lower; /* 0x0000D8 */
+ #define cp_pci_err cp_pci_err_lower
+ u64 _pad_0000E0[4]; /* 0x0000{E0..F8} */
+
+ /* 0x000100-0x0001FF -- Interrupt */
+ u64 cp_int_status; /* 0x000100 */
+ u64 cp_int_enable; /* 0x000108 */
+ u64 cp_int_rst_stat; /* 0x000110 */
+ u64 cp_int_mode; /* 0x000118 */
+ u64 cp_int_device; /* 0x000120 */
+ u64 cp_int_host_err; /* 0x000128 */
+ u64 cp_int_addr[8]; /* 0x0001{30,,,68} */
+ u64 cp_err_int_view; /* 0x000170 */
+ u64 cp_mult_int; /* 0x000178 */
+ u64 cp_force_always[8]; /* 0x0001{80,,,B8} */
+ u64 cp_force_pin[8]; /* 0x0001{C0,,,F8} */
+
+ /* 0x000200-0x000298 -- Device */
+ u64 cp_device[4]; /* 0x0002{00,,,18} */
+ u64 _pad_000220[4]; /* 0x0002{20,,,38} */
+ u64 cp_wr_req_buf[4]; /* 0x0002{40,,,58} */
+ u64 _pad_000260[4]; /* 0x0002{60,,,78} */
+ u64 cp_rrb_map[2]; /* 0x0002{80,,,88} */
+ #define cp_even_resp cp_rrb_map[0] /* 0x000280 */
+ #define cp_odd_resp cp_rrb_map[1] /* 0x000288 */
+ u64 cp_resp_status; /* 0x000290 */
+ u64 cp_resp_clear; /* 0x000298 */
+
+ u64 _pad_0002A0[12]; /* 0x0002{A0..F8} */
+
+ /* 0x000300-0x0003F8 -- Buffer Address Match Registers */
+ struct {
+ u64 upper; /* 0x0003{00,,,F0} */
+ u64 lower; /* 0x0003{08,,,F8} */
+ } cp_buf_addr_match[16];
+
+ /* 0x000400-0x0005FF -- Performance Monitor Registers (even only) */
+ struct {
+ u64 flush_w_touch; /* 0x000{400,,,5C0} */
+ u64 flush_wo_touch; /* 0x000{408,,,5C8} */
+ u64 inflight; /* 0x000{410,,,5D0} */
+ u64 prefetch; /* 0x000{418,,,5D8} */
+ u64 total_pci_retry; /* 0x000{420,,,5E0} */
+ u64 max_pci_retry; /* 0x000{428,,,5E8} */
+ u64 max_latency; /* 0x000{430,,,5F0} */
+ u64 clear_all; /* 0x000{438,,,5F8} */
+ } cp_buf_count[8];
+
+
+ /* 0x000600-0x0009FF -- PCI/X registers */
+ u64 cp_pcix_bus_err_addr; /* 0x000600 */
+ u64 cp_pcix_bus_err_attr; /* 0x000608 */
+ u64 cp_pcix_bus_err_data; /* 0x000610 */
+ u64 cp_pcix_pio_split_addr; /* 0x000618 */
+ u64 cp_pcix_pio_split_attr; /* 0x000620 */
+ u64 cp_pcix_dma_req_err_attr; /* 0x000628 */
+ u64 cp_pcix_dma_req_err_addr; /* 0x000630 */
+ u64 cp_pcix_timeout; /* 0x000638 */
+
+ u64 _pad_000640[24]; /* 0x000{640,,,6F8} */
+
+ /* 0x000700-0x000737 -- Debug Registers */
+ u64 cp_ct_debug_ctl; /* 0x000700 */
+ u64 cp_br_debug_ctl; /* 0x000708 */
+ u64 cp_mux3_debug_ctl; /* 0x000710 */
+ u64 cp_mux4_debug_ctl; /* 0x000718 */
+ u64 cp_mux5_debug_ctl; /* 0x000720 */
+ u64 cp_mux6_debug_ctl; /* 0x000728 */
+ u64 cp_mux7_debug_ctl; /* 0x000730 */
+
+ u64 _pad_000738[89]; /* 0x000{738,,,9F8} */
+
+ /* 0x000A00-0x000BFF -- PCI/X Read&Write Buffer */
+ struct {
+ u64 cp_buf_addr; /* 0x000{A00,,,AF0} */
+ u64 cp_buf_attr; /* 0X000{A08,,,AF8} */
+ } cp_pcix_read_buf_64[16];
+
+ struct {
+ u64 cp_buf_addr; /* 0x000{B00,,,BE0} */
+ u64 cp_buf_attr; /* 0x000{B08,,,BE8} */
+ u64 cp_buf_valid; /* 0x000{B10,,,BF0} */
+ u64 __pad1; /* 0x000{B18,,,BF8} */
+ } cp_pcix_write_buf_64[8];
+
+ /* End of Local Registers -- Start of Address Map space */
+
+ char _pad_000c00[0x010000 - 0x000c00];
+
+ /* 0x010000-0x011FF8 -- Internal ATE RAM (Auto Parity Generation) */
+ u64 cp_int_ate_ram[1024]; /* 0x010000-0x011FF8 */
+
+ char _pad_012000[0x14000 - 0x012000];
+
+ /* 0x014000-0x015FF8 -- Internal ATE RAM (Manual Parity Generation) */
+ u64 cp_int_ate_ram_mp[1024]; /* 0x014000-0x015FF8 */
+
+ char _pad_016000[0x18000 - 0x016000];
+
+ /* 0x18000-0x197F8 -- TIOCP Write Request Ram */
+ u64 cp_wr_req_lower[256]; /* 0x18000 - 0x187F8 */
+ u64 cp_wr_req_upper[256]; /* 0x18800 - 0x18FF8 */
+ u64 cp_wr_req_parity[256]; /* 0x19000 - 0x197F8 */
+
+ char _pad_019800[0x1C000 - 0x019800];
+
+ /* 0x1C000-0x1EFF8 -- TIOCP Read Response Ram */
+ u64 cp_rd_resp_lower[512]; /* 0x1C000 - 0x1CFF8 */
+ u64 cp_rd_resp_upper[512]; /* 0x1D000 - 0x1DFF8 */
+ u64 cp_rd_resp_parity[512]; /* 0x1E000 - 0x1EFF8 */
+
+ char _pad_01F000[0x20000 - 0x01F000];
+
+ /* 0x020000-0x021FFF -- Host Device (CP) Configuration Space (not used) */
+ char _pad_020000[0x021000 - 0x20000];
+
+ /* 0x021000-0x027FFF -- PCI Device Configuration Spaces */
+ union {
+ u8 c[0x1000 / 1]; /* 0x02{0000,,,7FFF} */
+ u16 s[0x1000 / 2]; /* 0x02{0000,,,7FFF} */
+ u32 l[0x1000 / 4]; /* 0x02{0000,,,7FFF} */
+ u64 d[0x1000 / 8]; /* 0x02{0000,,,7FFF} */
+ union {
+ u8 c[0x100 / 1];
+ u16 s[0x100 / 2];
+ u32 l[0x100 / 4];
+ u64 d[0x100 / 8];
+ } f[8];
+ } cp_type0_cfg_dev[7]; /* 0x02{1000,,,7FFF} */
+
+ /* 0x028000-0x028FFF -- PCI Type 1 Configuration Space */
+ union {
+ u8 c[0x1000 / 1]; /* 0x028000-0x029000 */
+ u16 s[0x1000 / 2]; /* 0x028000-0x029000 */
+ u32 l[0x1000 / 4]; /* 0x028000-0x029000 */
+ u64 d[0x1000 / 8]; /* 0x028000-0x029000 */
+ union {
+ u8 c[0x100 / 1];
+ u16 s[0x100 / 2];
+ u32 l[0x100 / 4];
+ u64 d[0x100 / 8];
+ } f[8];
+ } cp_type1_cfg; /* 0x028000-0x029000 */
+
+ char _pad_029000[0x030000-0x029000];
+
+ /* 0x030000-0x030007 -- PCI Interrupt Acknowledge Cycle */
+ union {
+ u8 c[8 / 1];
+ u16 s[8 / 2];
+ u32 l[8 / 4];
+ u64 d[8 / 8];
+ } cp_pci_iack; /* 0x030000-0x030007 */
+
+ char _pad_030007[0x040000-0x030008];
+
+ /* 0x040000-0x040007 -- PCIX Special Cycle */
+ union {
+ u8 c[8 / 1];
+ u16 s[8 / 2];
+ u32 l[8 / 4];
+ u64 d[8 / 8];
+ } cp_pcix_cycle; /* 0x040000-0x040007 */
+
+ char _pad_040007[0x200000-0x040008];
+
+ /* 0x200000-0x7FFFFF -- PCI/GIO Device Spaces */
+ union {
+ u8 c[0x100000 / 1];
+ u16 s[0x100000 / 2];
+ u32 l[0x100000 / 4];
+ u64 d[0x100000 / 8];
+ } cp_devio_raw[6]; /* 0x200000-0x7FFFFF */
+
+ #define cp_devio(n) cp_devio_raw[((n)<2)?(n*2):(n+2)]
+
+ char _pad_800000[0xA00000-0x800000];
+
+ /* 0xA00000-0xBFFFFF -- PCI/GIO Device Spaces w/flush */
+ union {
+ u8 c[0x100000 / 1];
+ u16 s[0x100000 / 2];
+ u32 l[0x100000 / 4];
+ u64 d[0x100000 / 8];
+ } cp_devio_raw_flush[6]; /* 0xA00000-0xBFFFFF */
+
+ #define cp_devio_flush(n) cp_devio_raw_flush[((n)<2)?(n*2):(n+2)]
+
+};
+
+#endif /* _ASM_IA64_SN_PCI_TIOCP_H */
diff --git a/xen/include/asm-ia64/linux/asm/sn/xbow.h b/xen/include/asm-ia64/linux/asm/sn/xbow.h
new file mode 100644
index 0000000000..90f37a4133
--- /dev/null
+++ b/xen/include/asm-ia64/linux/asm/sn/xbow.h
@@ -0,0 +1,301 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992-1997,2000-2006 Silicon Graphics, Inc. All Rights
+ * Reserved.
+ */
+#ifndef _ASM_IA64_SN_XTALK_XBOW_H
+#define _ASM_IA64_SN_XTALK_XBOW_H
+
+#define XBOW_PORT_8 0x8
+#define XBOW_PORT_C 0xc
+#define XBOW_PORT_F 0xf
+
+#define MAX_XBOW_PORTS 8 /* number of ports on xbow chip */
+#define BASE_XBOW_PORT XBOW_PORT_8 /* Lowest external port */
+
+#define XBOW_CREDIT 4
+
+#define MAX_XBOW_NAME 16
+
+/* Register set for each xbow link */
+typedef volatile struct xb_linkregs_s {
+/*
+ * we access these through synergy unswizzled space, so the address
+ * gets twiddled (i.e. references to 0x4 actually go to 0x0 and vv.)
+ * That's why we put the register first and filler second.
+ */
+ u32 link_ibf;
+ u32 filler0; /* filler for proper alignment */
+ u32 link_control;
+ u32 filler1;
+ u32 link_status;
+ u32 filler2;
+ u32 link_arb_upper;
+ u32 filler3;
+ u32 link_arb_lower;
+ u32 filler4;
+ u32 link_status_clr;
+ u32 filler5;
+ u32 link_reset;
+ u32 filler6;
+ u32 link_aux_status;
+ u32 filler7;
+} xb_linkregs_t;
+
+typedef volatile struct xbow_s {
+ /* standard widget configuration 0x000000-0x000057 */
+ struct widget_cfg xb_widget; /* 0x000000 */
+
+ /* helper fieldnames for accessing bridge widget */
+
+#define xb_wid_id xb_widget.w_id
+#define xb_wid_stat xb_widget.w_status
+#define xb_wid_err_upper xb_widget.w_err_upper_addr
+#define xb_wid_err_lower xb_widget.w_err_lower_addr
+#define xb_wid_control xb_widget.w_control
+#define xb_wid_req_timeout xb_widget.w_req_timeout
+#define xb_wid_int_upper xb_widget.w_intdest_upper_addr
+#define xb_wid_int_lower xb_widget.w_intdest_lower_addr
+#define xb_wid_err_cmdword xb_widget.w_err_cmd_word
+#define xb_wid_llp xb_widget.w_llp_cfg
+#define xb_wid_stat_clr xb_widget.w_tflush
+
+/*
+ * we access these through synergy unswizzled space, so the address
+ * gets twiddled (i.e. references to 0x4 actually go to 0x0 and vv.)
+ * That's why we put the register first and filler second.
+ */
+ /* xbow-specific widget configuration 0x000058-0x0000FF */
+ u32 xb_wid_arb_reload; /* 0x00005C */
+ u32 _pad_000058;
+ u32 xb_perf_ctr_a; /* 0x000064 */
+ u32 _pad_000060;
+ u32 xb_perf_ctr_b; /* 0x00006c */
+ u32 _pad_000068;
+ u32 xb_nic; /* 0x000074 */
+ u32 _pad_000070;
+
+ /* Xbridge only */
+ u32 xb_w0_rst_fnc; /* 0x00007C */
+ u32 _pad_000078;
+ u32 xb_l8_rst_fnc; /* 0x000084 */
+ u32 _pad_000080;
+ u32 xb_l9_rst_fnc; /* 0x00008c */
+ u32 _pad_000088;
+ u32 xb_la_rst_fnc; /* 0x000094 */
+ u32 _pad_000090;
+ u32 xb_lb_rst_fnc; /* 0x00009c */
+ u32 _pad_000098;
+ u32 xb_lc_rst_fnc; /* 0x0000a4 */
+ u32 _pad_0000a0;
+ u32 xb_ld_rst_fnc; /* 0x0000ac */
+ u32 _pad_0000a8;
+ u32 xb_le_rst_fnc; /* 0x0000b4 */
+ u32 _pad_0000b0;
+ u32 xb_lf_rst_fnc; /* 0x0000bc */
+ u32 _pad_0000b8;
+ u32 xb_lock; /* 0x0000c4 */
+ u32 _pad_0000c0;
+ u32 xb_lock_clr; /* 0x0000cc */
+ u32 _pad_0000c8;
+ /* end of Xbridge only */
+ u32 _pad_0000d0[12];
+
+ /* Link Specific Registers, port 8..15 0x000100-0x000300 */
+ xb_linkregs_t xb_link_raw[MAX_XBOW_PORTS];
+} xbow_t;
+
+#define xb_link(p) xb_link_raw[(p) & (MAX_XBOW_PORTS - 1)]
+
+#define XB_FLAGS_EXISTS 0x1 /* device exists */
+#define XB_FLAGS_MASTER 0x2
+#define XB_FLAGS_SLAVE 0x0
+#define XB_FLAGS_GBR 0x4
+#define XB_FLAGS_16BIT 0x8
+#define XB_FLAGS_8BIT 0x0
+
+/* is widget port number valid? (based on version 7.0 of xbow spec) */
+#define XBOW_WIDGET_IS_VALID(wid) ((wid) >= XBOW_PORT_8 && (wid) <= XBOW_PORT_F)
+
+/* whether to use upper or lower arbitration register, given source widget id */
+#define XBOW_ARB_IS_UPPER(wid) ((wid) >= XBOW_PORT_8 && (wid) <= XBOW_PORT_B)
+#define XBOW_ARB_IS_LOWER(wid) ((wid) >= XBOW_PORT_C && (wid) <= XBOW_PORT_F)
+
+/* offset of arbitration register, given source widget id */
+#define XBOW_ARB_OFF(wid) (XBOW_ARB_IS_UPPER(wid) ? 0x1c : 0x24)
+
+#define XBOW_WID_ID WIDGET_ID
+#define XBOW_WID_STAT WIDGET_STATUS
+#define XBOW_WID_ERR_UPPER WIDGET_ERR_UPPER_ADDR
+#define XBOW_WID_ERR_LOWER WIDGET_ERR_LOWER_ADDR
+#define XBOW_WID_CONTROL WIDGET_CONTROL
+#define XBOW_WID_REQ_TO WIDGET_REQ_TIMEOUT
+#define XBOW_WID_INT_UPPER WIDGET_INTDEST_UPPER_ADDR
+#define XBOW_WID_INT_LOWER WIDGET_INTDEST_LOWER_ADDR
+#define XBOW_WID_ERR_CMDWORD WIDGET_ERR_CMD_WORD
+#define XBOW_WID_LLP WIDGET_LLP_CFG
+#define XBOW_WID_STAT_CLR WIDGET_TFLUSH
+#define XBOW_WID_ARB_RELOAD 0x5c
+#define XBOW_WID_PERF_CTR_A 0x64
+#define XBOW_WID_PERF_CTR_B 0x6c
+#define XBOW_WID_NIC 0x74
+
+/* Xbridge only */
+#define XBOW_W0_RST_FNC 0x00007C
+#define XBOW_L8_RST_FNC 0x000084
+#define XBOW_L9_RST_FNC 0x00008c
+#define XBOW_LA_RST_FNC 0x000094
+#define XBOW_LB_RST_FNC 0x00009c
+#define XBOW_LC_RST_FNC 0x0000a4
+#define XBOW_LD_RST_FNC 0x0000ac
+#define XBOW_LE_RST_FNC 0x0000b4
+#define XBOW_LF_RST_FNC 0x0000bc
+#define XBOW_RESET_FENCE(x) ((x) > 7 && (x) < 16) ? \
+ (XBOW_W0_RST_FNC + ((x) - 7) * 8) : \
+ ((x) == 0) ? XBOW_W0_RST_FNC : 0
+#define XBOW_LOCK 0x0000c4
+#define XBOW_LOCK_CLR 0x0000cc
+/* End of Xbridge only */
+
+/* used only in ide, but defined here within the reserved portion */
+/* of the widget0 address space (before 0xf4) */
+#define XBOW_WID_UNDEF 0xe4
+
+/* xbow link register set base, legal value for x is 0x8..0xf */
+#define XB_LINK_BASE 0x100
+#define XB_LINK_OFFSET 0x40
+#define XB_LINK_REG_BASE(x) (XB_LINK_BASE + ((x) & (MAX_XBOW_PORTS - 1)) * XB_LINK_OFFSET)
+
+#define XB_LINK_IBUF_FLUSH(x) (XB_LINK_REG_BASE(x) + 0x4)
+#define XB_LINK_CTRL(x) (XB_LINK_REG_BASE(x) + 0xc)
+#define XB_LINK_STATUS(x) (XB_LINK_REG_BASE(x) + 0x14)
+#define XB_LINK_ARB_UPPER(x) (XB_LINK_REG_BASE(x) + 0x1c)
+#define XB_LINK_ARB_LOWER(x) (XB_LINK_REG_BASE(x) + 0x24)
+#define XB_LINK_STATUS_CLR(x) (XB_LINK_REG_BASE(x) + 0x2c)
+#define XB_LINK_RESET(x) (XB_LINK_REG_BASE(x) + 0x34)
+#define XB_LINK_AUX_STATUS(x) (XB_LINK_REG_BASE(x) + 0x3c)
+
+/* link_control(x) */
+#define XB_CTRL_LINKALIVE_IE 0x80000000 /* link comes alive */
+/* reserved: 0x40000000 */
+#define XB_CTRL_PERF_CTR_MODE_MSK 0x30000000 /* perf counter mode */
+#define XB_CTRL_IBUF_LEVEL_MSK 0x0e000000 /* input packet buffer
+ level */
+#define XB_CTRL_8BIT_MODE 0x01000000 /* force link into 8
+ bit mode */
+#define XB_CTRL_BAD_LLP_PKT 0x00800000 /* force bad LLP
+ packet */
+#define XB_CTRL_WIDGET_CR_MSK 0x007c0000 /* LLP widget credit
+ mask */
+#define XB_CTRL_WIDGET_CR_SHFT 18 /* LLP widget credit
+ shift */
+#define XB_CTRL_ILLEGAL_DST_IE 0x00020000 /* illegal destination
+ */
+#define XB_CTRL_OALLOC_IBUF_IE 0x00010000 /* overallocated input
+ buffer */
+/* reserved: 0x0000fe00 */
+#define XB_CTRL_BNDWDTH_ALLOC_IE 0x00000100 /* bandwidth alloc */
+#define XB_CTRL_RCV_CNT_OFLOW_IE 0x00000080 /* rcv retry overflow */
+#define XB_CTRL_XMT_CNT_OFLOW_IE 0x00000040 /* xmt retry overflow */
+#define XB_CTRL_XMT_MAX_RTRY_IE 0x00000020 /* max transmit retry */
+#define XB_CTRL_RCV_IE 0x00000010 /* receive */
+#define XB_CTRL_XMT_RTRY_IE 0x00000008 /* transmit retry */
+/* reserved: 0x00000004 */
+#define XB_CTRL_MAXREQ_TOUT_IE 0x00000002 /* maximum request
+ timeout */
+#define XB_CTRL_SRC_TOUT_IE 0x00000001 /* source timeout */
+
+/* link_status(x) */
+#define XB_STAT_LINKALIVE XB_CTRL_LINKALIVE_IE
+/* reserved: 0x7ff80000 */
+#define XB_STAT_MULTI_ERR 0x00040000 /* multi error */
+#define XB_STAT_ILLEGAL_DST_ERR XB_CTRL_ILLEGAL_DST_IE
+#define XB_STAT_OALLOC_IBUF_ERR XB_CTRL_OALLOC_IBUF_IE
+#define XB_STAT_BNDWDTH_ALLOC_ID_MSK 0x0000ff00 /* port bitmask */
+#define XB_STAT_RCV_CNT_OFLOW_ERR XB_CTRL_RCV_CNT_OFLOW_IE
+#define XB_STAT_XMT_CNT_OFLOW_ERR XB_CTRL_XMT_CNT_OFLOW_IE
+#define XB_STAT_XMT_MAX_RTRY_ERR XB_CTRL_XMT_MAX_RTRY_IE
+#define XB_STAT_RCV_ERR XB_CTRL_RCV_IE
+#define XB_STAT_XMT_RTRY_ERR XB_CTRL_XMT_RTRY_IE
+/* reserved: 0x00000004 */
+#define XB_STAT_MAXREQ_TOUT_ERR XB_CTRL_MAXREQ_TOUT_IE
+#define XB_STAT_SRC_TOUT_ERR XB_CTRL_SRC_TOUT_IE
+
+/* link_aux_status(x) */
+#define XB_AUX_STAT_RCV_CNT 0xff000000
+#define XB_AUX_STAT_XMT_CNT 0x00ff0000
+#define XB_AUX_STAT_TOUT_DST 0x0000ff00
+#define XB_AUX_LINKFAIL_RST_BAD 0x00000040
+#define XB_AUX_STAT_PRESENT 0x00000020
+#define XB_AUX_STAT_PORT_WIDTH 0x00000010
+/* reserved: 0x0000000f */
+
+/*
+ * link_arb_upper/link_arb_lower(x), (reg) should be the link_arb_upper
+ * register if (x) is 0x8..0xb, link_arb_lower if (x) is 0xc..0xf
+ */
+#define XB_ARB_GBR_MSK 0x1f
+#define XB_ARB_RR_MSK 0x7
+#define XB_ARB_GBR_SHFT(x) (((x) & 0x3) * 8)
+#define XB_ARB_RR_SHFT(x) (((x) & 0x3) * 8 + 5)
+#define XB_ARB_GBR_CNT(reg,x) ((reg) >> XB_ARB_GBR_SHFT(x) & XB_ARB_GBR_MSK)
+#define XB_ARB_RR_CNT(reg,x) ((reg) >> XB_ARB_RR_SHFT(x) & XB_ARB_RR_MSK)
+
+/* XBOW_WID_STAT */
+#define XB_WID_STAT_LINK_INTR_SHFT (24)
+#define XB_WID_STAT_LINK_INTR_MASK (0xFF << XB_WID_STAT_LINK_INTR_SHFT)
+#define XB_WID_STAT_LINK_INTR(x) \
+ (0x1 << (((x)&7) + XB_WID_STAT_LINK_INTR_SHFT))
+#define XB_WID_STAT_WIDGET0_INTR 0x00800000
+#define XB_WID_STAT_SRCID_MASK 0x000003c0 /* Xbridge only */
+#define XB_WID_STAT_REG_ACC_ERR 0x00000020
+#define XB_WID_STAT_RECV_TOUT 0x00000010 /* Xbridge only */
+#define XB_WID_STAT_ARB_TOUT 0x00000008 /* Xbridge only */
+#define XB_WID_STAT_XTALK_ERR 0x00000004
+#define XB_WID_STAT_DST_TOUT 0x00000002 /* Xbridge only */
+#define XB_WID_STAT_MULTI_ERR 0x00000001
+
+#define XB_WID_STAT_SRCID_SHFT 6
+
+/* XBOW_WID_CONTROL */
+#define XB_WID_CTRL_REG_ACC_IE XB_WID_STAT_REG_ACC_ERR
+#define XB_WID_CTRL_RECV_TOUT XB_WID_STAT_RECV_TOUT
+#define XB_WID_CTRL_ARB_TOUT XB_WID_STAT_ARB_TOUT
+#define XB_WID_CTRL_XTALK_IE XB_WID_STAT_XTALK_ERR
+
+/* XBOW_WID_INT_UPPER */
+/* defined in xwidget.h for WIDGET_INTDEST_UPPER_ADDR */
+
+/* XBOW WIDGET part number, in the ID register */
+#define XBOW_WIDGET_PART_NUM 0x0 /* crossbow */
+#define XXBOW_WIDGET_PART_NUM 0xd000 /* Xbridge */
+#define XBOW_WIDGET_MFGR_NUM 0x0
+#define XXBOW_WIDGET_MFGR_NUM 0x0
+#define PXBOW_WIDGET_PART_NUM 0xd100 /* PIC */
+
+#define XBOW_REV_1_0 0x1 /* xbow rev 1.0 is "1" */
+#define XBOW_REV_1_1 0x2 /* xbow rev 1.1 is "2" */
+#define XBOW_REV_1_2 0x3 /* xbow rev 1.2 is "3" */
+#define XBOW_REV_1_3 0x4 /* xbow rev 1.3 is "4" */
+#define XBOW_REV_2_0 0x5 /* xbow rev 2.0 is "5" */
+
+#define XXBOW_PART_REV_1_0 (XXBOW_WIDGET_PART_NUM << 4 | 0x1 )
+#define XXBOW_PART_REV_2_0 (XXBOW_WIDGET_PART_NUM << 4 | 0x2 )
+
+/* XBOW_WID_ARB_RELOAD */
+#define XBOW_WID_ARB_RELOAD_INT 0x3f /* GBR reload interval */
+
+#define IS_XBRIDGE_XBOW(wid) \
+ (XWIDGET_PART_NUM(wid) == XXBOW_WIDGET_PART_NUM && \
+ XWIDGET_MFG_NUM(wid) == XXBOW_WIDGET_MFGR_NUM)
+
+#define IS_PIC_XBOW(wid) \
+ (XWIDGET_PART_NUM(wid) == PXBOW_WIDGET_PART_NUM && \
+ XWIDGET_MFG_NUM(wid) == XXBOW_WIDGET_MFGR_NUM)
+
+#define XBOW_WAR_ENABLED(pv, widid) ((1 << XWIDGET_REV_NUM(widid)) & pv)
+
+#endif /* _ASM_IA64_SN_XTALK_XBOW_H */
diff --git a/xen/include/asm-ia64/linux/asm/sn/xwidgetdev.h b/xen/include/asm-ia64/linux/asm/sn/xwidgetdev.h
new file mode 100644
index 0000000000..2800eda0fd
--- /dev/null
+++ b/xen/include/asm-ia64/linux/asm/sn/xwidgetdev.h
@@ -0,0 +1,70 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All Rights Reserved.
+ */
+#ifndef _ASM_IA64_SN_XTALK_XWIDGET_H
+#define _ASM_IA64_SN_XTALK_XWIDGET_H
+
+/* WIDGET_ID */
+#define WIDGET_REV_NUM 0xf0000000
+#define WIDGET_PART_NUM 0x0ffff000
+#define WIDGET_MFG_NUM 0x00000ffe
+#define WIDGET_REV_NUM_SHFT 28
+#define WIDGET_PART_NUM_SHFT 12
+#define WIDGET_MFG_NUM_SHFT 1
+
+#define XWIDGET_PART_NUM(widgetid) (((widgetid) & WIDGET_PART_NUM) >> WIDGET_PART_NUM_SHFT)
+#define XWIDGET_REV_NUM(widgetid) (((widgetid) & WIDGET_REV_NUM) >> WIDGET_REV_NUM_SHFT)
+#define XWIDGET_MFG_NUM(widgetid) (((widgetid) & WIDGET_MFG_NUM) >> WIDGET_MFG_NUM_SHFT)
+#define XWIDGET_PART_REV_NUM(widgetid) ((XWIDGET_PART_NUM(widgetid) << 4) | \
+ XWIDGET_REV_NUM(widgetid))
+#define XWIDGET_PART_REV_NUM_REV(partrev) (partrev & 0xf)
+
+/* widget configuration registers */
+struct widget_cfg{
+ u32 w_id; /* 0x04 */
+ u32 w_pad_0; /* 0x00 */
+ u32 w_status; /* 0x0c */
+ u32 w_pad_1; /* 0x08 */
+ u32 w_err_upper_addr; /* 0x14 */
+ u32 w_pad_2; /* 0x10 */
+ u32 w_err_lower_addr; /* 0x1c */
+ u32 w_pad_3; /* 0x18 */
+ u32 w_control; /* 0x24 */
+ u32 w_pad_4; /* 0x20 */
+ u32 w_req_timeout; /* 0x2c */
+ u32 w_pad_5; /* 0x28 */
+ u32 w_intdest_upper_addr; /* 0x34 */
+ u32 w_pad_6; /* 0x30 */
+ u32 w_intdest_lower_addr; /* 0x3c */
+ u32 w_pad_7; /* 0x38 */
+ u32 w_err_cmd_word; /* 0x44 */
+ u32 w_pad_8; /* 0x40 */
+ u32 w_llp_cfg; /* 0x4c */
+ u32 w_pad_9; /* 0x48 */
+ u32 w_tflush; /* 0x54 */
+ u32 w_pad_10; /* 0x50 */
+};
+
+/*
+ * Crosstalk Widget Hardware Identification, as defined in the Crosstalk spec.
+ */
+struct xwidget_hwid{
+ int mfg_num;
+ int rev_num;
+ int part_num;
+};
+
+struct xwidget_info{
+
+ struct xwidget_hwid xwi_hwid; /* Widget Identification */
+ char xwi_masterxid; /* Hub's Widget Port Number */
+ void *xwi_hubinfo; /* Hub's provider private info */
+ u64 *xwi_hub_provider; /* prom provider functions */
+ void *xwi_vertex;
+};
+
+#endif /* _ASM_IA64_SN_XTALK_XWIDGET_H */
diff --git a/xen/include/asm-ia64/linux/completion.h b/xen/include/asm-ia64/linux/completion.h
new file mode 100644
index 0000000000..268c5a4a2b
--- /dev/null
+++ b/xen/include/asm-ia64/linux/completion.h
@@ -0,0 +1,57 @@
+#ifndef __LINUX_COMPLETION_H
+#define __LINUX_COMPLETION_H
+
+/*
+ * (C) Copyright 2001 Linus Torvalds
+ *
+ * Atomic wait-for-completion handler data structures.
+ * See kernel/sched.c for details.
+ */
+
+#include <linux/wait.h>
+
+struct completion {
+ unsigned int done;
+ wait_queue_head_t wait;
+};
+
+#define COMPLETION_INITIALIZER(work) \
+ { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
+
+#define COMPLETION_INITIALIZER_ONSTACK(work) \
+ ({ init_completion(&work); work; })
+
+#define DECLARE_COMPLETION(work) \
+ struct completion work = COMPLETION_INITIALIZER(work)
+
+/*
+ * Lockdep needs to run a non-constant initializer for on-stack
+ * completions - so we use the _ONSTACK() variant for those that
+ * are on the kernel stack:
+ */
+#ifdef CONFIG_LOCKDEP
+# define DECLARE_COMPLETION_ONSTACK(work) \
+ struct completion work = COMPLETION_INITIALIZER_ONSTACK(work)
+#else
+# define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work)
+#endif
+
+static inline void init_completion(struct completion *x)
+{
+ x->done = 0;
+ init_waitqueue_head(&x->wait);
+}
+
+extern void FASTCALL(wait_for_completion(struct completion *));
+extern int FASTCALL(wait_for_completion_interruptible(struct completion *x));
+extern unsigned long FASTCALL(wait_for_completion_timeout(struct completion *x,
+ unsigned long timeout));
+extern unsigned long FASTCALL(wait_for_completion_interruptible_timeout(
+ struct completion *x, unsigned long timeout));
+
+extern void FASTCALL(complete(struct completion *));
+extern void FASTCALL(complete_all(struct completion *));
+
+#define INIT_COMPLETION(x) ((x).done = 0)
+
+#endif
diff --git a/xen/include/asm-ia64/linux/ioport.h b/xen/include/asm-ia64/linux/ioport.h
new file mode 100644
index 0000000000..d42c833990
--- /dev/null
+++ b/xen/include/asm-ia64/linux/ioport.h
@@ -0,0 +1,136 @@
+/*
+ * ioport.h Definitions of routines for detecting, reserving and
+ * allocating system resources.
+ *
+ * Authors: Linus Torvalds
+ */
+
+#ifndef _LINUX_IOPORT_H
+#define _LINUX_IOPORT_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+/*
+ * Resources are tree-like, allowing
+ * nesting etc..
+ */
+struct resource {
+ resource_size_t start;
+ resource_size_t end;
+ const char *name;
+ unsigned long flags;
+ struct resource *parent, *sibling, *child;
+};
+
+struct resource_list {
+ struct resource_list *next;
+ struct resource *res;
+ struct pci_dev *dev;
+};
+
+/*
+ * IO resources have these defined flags.
+ */
+#define IORESOURCE_BITS 0x000000ff /* Bus-specific bits */
+
+#define IORESOURCE_IO 0x00000100 /* Resource type */
+#define IORESOURCE_MEM 0x00000200
+#define IORESOURCE_IRQ 0x00000400
+#define IORESOURCE_DMA 0x00000800
+
+#define IORESOURCE_PREFETCH 0x00001000 /* No side effects */
+#define IORESOURCE_READONLY 0x00002000
+#define IORESOURCE_CACHEABLE 0x00004000
+#define IORESOURCE_RANGELENGTH 0x00008000
+#define IORESOURCE_SHADOWABLE 0x00010000
+#define IORESOURCE_BUS_HAS_VGA 0x00080000
+
+#define IORESOURCE_DISABLED 0x10000000
+#define IORESOURCE_UNSET 0x20000000
+#define IORESOURCE_AUTO 0x40000000
+#define IORESOURCE_BUSY 0x80000000 /* Driver has marked this resource busy */
+
+/* ISA PnP IRQ specific bits (IORESOURCE_BITS) */
+#define IORESOURCE_IRQ_HIGHEDGE (1<<0)
+#define IORESOURCE_IRQ_LOWEDGE (1<<1)
+#define IORESOURCE_IRQ_HIGHLEVEL (1<<2)
+#define IORESOURCE_IRQ_LOWLEVEL (1<<3)
+#define IORESOURCE_IRQ_SHAREABLE (1<<4)
+
+/* ISA PnP DMA specific bits (IORESOURCE_BITS) */
+#define IORESOURCE_DMA_TYPE_MASK (3<<0)
+#define IORESOURCE_DMA_8BIT (0<<0)
+#define IORESOURCE_DMA_8AND16BIT (1<<0)
+#define IORESOURCE_DMA_16BIT (2<<0)
+
+#define IORESOURCE_DMA_MASTER (1<<2)
+#define IORESOURCE_DMA_BYTE (1<<3)
+#define IORESOURCE_DMA_WORD (1<<4)
+
+#define IORESOURCE_DMA_SPEED_MASK (3<<6)
+#define IORESOURCE_DMA_COMPATIBLE (0<<6)
+#define IORESOURCE_DMA_TYPEA (1<<6)
+#define IORESOURCE_DMA_TYPEB (2<<6)
+#define IORESOURCE_DMA_TYPEF (3<<6)
+
+/* ISA PnP memory I/O specific bits (IORESOURCE_BITS) */
+#define IORESOURCE_MEM_WRITEABLE (1<<0) /* dup: IORESOURCE_READONLY */
+#define IORESOURCE_MEM_CACHEABLE (1<<1) /* dup: IORESOURCE_CACHEABLE */
+#define IORESOURCE_MEM_RANGELENGTH (1<<2) /* dup: IORESOURCE_RANGELENGTH */
+#define IORESOURCE_MEM_TYPE_MASK (3<<3)
+#define IORESOURCE_MEM_8BIT (0<<3)
+#define IORESOURCE_MEM_16BIT (1<<3)
+#define IORESOURCE_MEM_8AND16BIT (2<<3)
+#define IORESOURCE_MEM_32BIT (3<<3)
+#define IORESOURCE_MEM_SHADOWABLE (1<<5) /* dup: IORESOURCE_SHADOWABLE */
+#define IORESOURCE_MEM_EXPANSIONROM (1<<6)
+
+/* PCI ROM control bits (IORESOURCE_BITS) */
+#define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */
+#define IORESOURCE_ROM_SHADOW (1<<1) /* ROM is copy at C000:0 */
+#define IORESOURCE_ROM_COPY (1<<2) /* ROM is alloc'd copy, resource field overlaid */
+
+/* PC/ISA/whatever - the normal PC address spaces: IO and memory */
+extern struct resource ioport_resource;
+extern struct resource iomem_resource;
+
+extern int request_resource(struct resource *root, struct resource *new);
+extern struct resource * ____request_resource(struct resource *root, struct resource *new);
+extern int release_resource(struct resource *new);
+extern int insert_resource(struct resource *parent, struct resource *new);
+extern int allocate_resource(struct resource *root, struct resource *new,
+ resource_size_t size, resource_size_t min,
+ resource_size_t max, resource_size_t align,
+ void (*alignf)(void *, struct resource *,
+ resource_size_t, resource_size_t),
+ void *alignf_data);
+int adjust_resource(struct resource *res, resource_size_t start,
+ resource_size_t size);
+
+/* get registered SYSTEM_RAM resources in specified area */
+extern int find_next_system_ram(struct resource *res);
+
+/* Convenience shorthand with allocation */
+#define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name))
+#define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name))
+#define rename_region(region, newname) do { (region)->name = (newname); } while (0)
+
+extern struct resource * __request_region(struct resource *,
+ resource_size_t start,
+ resource_size_t n, const char *name);
+
+/* Compatibility cruft */
+#define release_region(start,n) __release_region(&ioport_resource, (start), (n))
+#define check_mem_region(start,n) __check_region(&iomem_resource, (start), (n))
+#define release_mem_region(start,n) __release_region(&iomem_resource, (start), (n))
+
+extern int __check_region(struct resource *, resource_size_t, resource_size_t);
+extern void __release_region(struct resource *, resource_size_t,
+ resource_size_t);
+
+static inline int __deprecated check_region(resource_size_t s,
+ resource_size_t n)
+{
+ return __check_region(&ioport_resource, s, n);
+}
+#endif /* _LINUX_IOPORT_H */
diff --git a/xen/include/asm-ia64/linux/klist.h b/xen/include/asm-ia64/linux/klist.h
new file mode 100644
index 0000000000..74071254c9
--- /dev/null
+++ b/xen/include/asm-ia64/linux/klist.h
@@ -0,0 +1,61 @@
+/*
+ * klist.h - Some generic list helpers, extending struct list_head a bit.
+ *
+ * Implementations are found in lib/klist.c
+ *
+ *
+ * Copyright (C) 2005 Patrick Mochel
+ *
+ * This file is released under the GPL v2.
+ */
+
+#ifndef _LINUX_KLIST_H
+#define _LINUX_KLIST_H
+
+#include <linux/spinlock.h>
+#include <linux/completion.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+
+struct klist_node;
+struct klist {
+ spinlock_t k_lock;
+ struct list_head k_list;
+ void (*get)(struct klist_node *);
+ void (*put)(struct klist_node *);
+};
+
+
+extern void klist_init(struct klist * k, void (*get)(struct klist_node *),
+ void (*put)(struct klist_node *));
+
+struct klist_node {
+ struct klist * n_klist;
+ struct list_head n_node;
+ struct kref n_ref;
+ struct completion n_removed;
+};
+
+extern void klist_add_tail(struct klist_node * n, struct klist * k);
+extern void klist_add_head(struct klist_node * n, struct klist * k);
+
+extern void klist_del(struct klist_node * n);
+extern void klist_remove(struct klist_node * n);
+
+extern int klist_node_attached(struct klist_node * n);
+
+
+struct klist_iter {
+ struct klist * i_klist;
+ struct list_head * i_head;
+ struct klist_node * i_cur;
+};
+
+
+extern void klist_iter_init(struct klist * k, struct klist_iter * i);
+extern void klist_iter_init_node(struct klist * k, struct klist_iter * i,
+ struct klist_node * n);
+extern void klist_iter_exit(struct klist_iter * i);
+extern struct klist_node * klist_next(struct klist_iter * i);
+
+#endif
diff --git a/xen/include/asm-ia64/linux/kref.h b/xen/include/asm-ia64/linux/kref.h
new file mode 100644
index 0000000000..6fee353989
--- /dev/null
+++ b/xen/include/asm-ia64/linux/kref.h
@@ -0,0 +1,32 @@
+/*
+ * kref.c - library routines for handling generic reference counted objects
+ *
+ * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (C) 2004 IBM Corp.
+ *
+ * based on kobject.h which was:
+ * Copyright (C) 2002-2003 Patrick Mochel <mochel@osdl.org>
+ * Copyright (C) 2002-2003 Open Source Development Labs
+ *
+ * This file is released under the GPLv2.
+ *
+ */
+
+#ifndef _KREF_H_
+#define _KREF_H_
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <asm/atomic.h>
+
+struct kref {
+ atomic_t refcount;
+};
+
+void kref_init(struct kref *kref);
+void kref_get(struct kref *kref);
+int kref_put(struct kref *kref, void (*release) (struct kref *kref));
+
+#endif /* __KERNEL__ */
+#endif /* _KREF_H_ */
diff --git a/xen/include/asm-ia64/linux/mod_devicetable.h b/xen/include/asm-ia64/linux/mod_devicetable.h
new file mode 100644
index 0000000000..e0c393cc72
--- /dev/null
+++ b/xen/include/asm-ia64/linux/mod_devicetable.h
@@ -0,0 +1,323 @@
+/*
+ * Device tables which are exported to userspace via
+ * scripts/mod/file2alias.c. You must keep that file in sync with this
+ * header.
+ */
+
+#ifndef LINUX_MOD_DEVICETABLE_H
+#define LINUX_MOD_DEVICETABLE_H
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+typedef unsigned long kernel_ulong_t;
+#endif
+
+#define PCI_ANY_ID (~0)
+
+struct pci_device_id {
+ __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
+ __u32 subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */
+ __u32 class, class_mask; /* (class,subclass,prog-if) triplet */
+ kernel_ulong_t driver_data; /* Data private to the driver */
+};
+
+
+#define IEEE1394_MATCH_VENDOR_ID 0x0001
+#define IEEE1394_MATCH_MODEL_ID 0x0002
+#define IEEE1394_MATCH_SPECIFIER_ID 0x0004
+#define IEEE1394_MATCH_VERSION 0x0008
+
+struct ieee1394_device_id {
+ __u32 match_flags;
+ __u32 vendor_id;
+ __u32 model_id;
+ __u32 specifier_id;
+ __u32 version;
+ kernel_ulong_t driver_data
+ __attribute__((aligned(sizeof(kernel_ulong_t))));
+};
+
+
+/*
+ * Device table entry for "new style" table-driven USB drivers.
+ * User mode code can read these tables to choose which modules to load.
+ * Declare the table as a MODULE_DEVICE_TABLE.
+ *
+ * A probe() parameter will point to a matching entry from this table.
+ * Use the driver_info field for each match to hold information tied
+ * to that match: device quirks, etc.
+ *
+ * Terminate the driver's table with an all-zeroes entry.
+ * Use the flag values to control which fields are compared.
+ */
+
+/**
+ * struct usb_device_id - identifies USB devices for probing and hotplugging
+ * @match_flags: Bit mask controlling which of the other fields are used to match
+ * against new devices. Any field except for driver_info may be used,
+ * although some only make sense in conjunction with other fields.
+ * This is usually set by a USB_DEVICE_*() macro, which sets all
+ * other fields in this structure except for driver_info.
+ * @idVendor: USB vendor ID for a device; numbers are assigned
+ * by the USB forum to its members.
+ * @idProduct: Vendor-assigned product ID.
+ * @bcdDevice_lo: Low end of range of vendor-assigned product version numbers.
+ * This is also used to identify individual product versions, for
+ * a range consisting of a single device.
+ * @bcdDevice_hi: High end of version number range. The range of product
+ * versions is inclusive.
+ * @bDeviceClass: Class of device; numbers are assigned
+ * by the USB forum. Products may choose to implement classes,
+ * or be vendor-specific. Device classes specify behavior of all
+ *	the interfaces on a device.
+ * @bDeviceSubClass: Subclass of device; associated with bDeviceClass.
+ * @bDeviceProtocol: Protocol of device; associated with bDeviceClass.
+ * @bInterfaceClass: Class of interface; numbers are assigned
+ * by the USB forum. Products may choose to implement classes,
+ * or be vendor-specific. Interface classes specify behavior only
+ * of a given interface; other interfaces may support other classes.
+ * @bInterfaceSubClass: Subclass of interface; associated with bInterfaceClass.
+ * @bInterfaceProtocol: Protocol of interface; associated with bInterfaceClass.
+ * @driver_info: Holds information used by the driver. Usually it holds
+ * a pointer to a descriptor understood by the driver, or perhaps
+ * device flags.
+ *
+ * In most cases, drivers will create a table of device IDs by using
+ * USB_DEVICE(), or similar macros designed for that purpose.
+ * They will then export it to userspace using MODULE_DEVICE_TABLE(),
+ * and provide it to the USB core through their usb_driver structure.
+ *
+ * See the usb_match_id() function for information about how matches are
+ * performed. Briefly, you will normally use one of several macros to help
+ * construct these entries. Each entry you provide will either identify
+ * one or more specific products, or will identify a class of products
+ * which have agreed to behave the same. You should put the more specific
+ * matches towards the beginning of your table, so that driver_info can
+ * record quirks of specific products.
+ */
+struct usb_device_id {
+ /* which fields to match against? */
+ __u16 match_flags;
+
+ /* Used for product specific matches; range is inclusive */
+ __u16 idVendor;
+ __u16 idProduct;
+ __u16 bcdDevice_lo;
+ __u16 bcdDevice_hi;
+
+ /* Used for device class matches */
+ __u8 bDeviceClass;
+ __u8 bDeviceSubClass;
+ __u8 bDeviceProtocol;
+
+ /* Used for interface class matches */
+ __u8 bInterfaceClass;
+ __u8 bInterfaceSubClass;
+ __u8 bInterfaceProtocol;
+
+ /* not matched against */
+ kernel_ulong_t driver_info;
+};
+
+/* Some useful macros to use to create struct usb_device_id */
+#define USB_DEVICE_ID_MATCH_VENDOR 0x0001
+#define USB_DEVICE_ID_MATCH_PRODUCT 0x0002
+#define USB_DEVICE_ID_MATCH_DEV_LO 0x0004
+#define USB_DEVICE_ID_MATCH_DEV_HI 0x0008
+#define USB_DEVICE_ID_MATCH_DEV_CLASS 0x0010
+#define USB_DEVICE_ID_MATCH_DEV_SUBCLASS 0x0020
+#define USB_DEVICE_ID_MATCH_DEV_PROTOCOL 0x0040
+#define USB_DEVICE_ID_MATCH_INT_CLASS 0x0080
+#define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
+#define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
+
+/* s390 CCW devices */
+struct ccw_device_id {
+ __u16 match_flags; /* which fields to match against */
+
+ __u16 cu_type; /* control unit type */
+ __u16 dev_type; /* device type */
+ __u8 cu_model; /* control unit model */
+ __u8 dev_model; /* device model */
+
+ kernel_ulong_t driver_info;
+};
+
+#define CCW_DEVICE_ID_MATCH_CU_TYPE 0x01
+#define CCW_DEVICE_ID_MATCH_CU_MODEL 0x02
+#define CCW_DEVICE_ID_MATCH_DEVICE_TYPE 0x04
+#define CCW_DEVICE_ID_MATCH_DEVICE_MODEL 0x08
+
+/* s390 AP bus devices */
+struct ap_device_id {
+ __u16 match_flags; /* which fields to match against */
+ __u8 dev_type; /* device type */
+ __u8 pad1;
+ __u32 pad2;
+ kernel_ulong_t driver_info;
+};
+
+#define AP_DEVICE_ID_MATCH_DEVICE_TYPE 0x01
+
+
+#define PNP_ID_LEN 8
+#define PNP_MAX_DEVICES 8
+
+struct pnp_device_id {
+ __u8 id[PNP_ID_LEN];
+ kernel_ulong_t driver_data;
+};
+
+struct pnp_card_device_id {
+ __u8 id[PNP_ID_LEN];
+ kernel_ulong_t driver_data;
+ struct {
+ __u8 id[PNP_ID_LEN];
+ } devs[PNP_MAX_DEVICES];
+};
+
+
+#define SERIO_ANY 0xff
+
+struct serio_device_id {
+ __u8 type;
+ __u8 extra;
+ __u8 id;
+ __u8 proto;
+};
+
+/*
+ * Struct used for matching a device
+ */
+struct of_device_id
+{
+ char name[32];
+ char type[32];
+ char compatible[128];
+#ifdef __KERNEL__
+ void *data;
+#else
+ kernel_ulong_t data;
+#endif
+};
+
+/* VIO */
+struct vio_device_id {
+ char type[32];
+ char compat[32];
+};
+
+/* PCMCIA */
+
+struct pcmcia_device_id {
+ __u16 match_flags;
+
+ __u16 manf_id;
+ __u16 card_id;
+
+ __u8 func_id;
+
+ /* for real multi-function devices */
+ __u8 function;
+
+ /* for pseudo multi-function devices */
+ __u8 device_no;
+
+ __u32 prod_id_hash[4]
+ __attribute__((aligned(sizeof(__u32))));
+
+ /* not matched against in kernelspace*/
+#ifdef __KERNEL__
+ const char * prod_id[4];
+#else
+ kernel_ulong_t prod_id[4]
+ __attribute__((aligned(sizeof(kernel_ulong_t))));
+#endif
+
+ /* not matched against */
+ kernel_ulong_t driver_info;
+#ifdef __KERNEL__
+ char * cisfile;
+#else
+ kernel_ulong_t cisfile;
+#endif
+};
+
+#define PCMCIA_DEV_ID_MATCH_MANF_ID 0x0001
+#define PCMCIA_DEV_ID_MATCH_CARD_ID 0x0002
+#define PCMCIA_DEV_ID_MATCH_FUNC_ID 0x0004
+#define PCMCIA_DEV_ID_MATCH_FUNCTION 0x0008
+#define PCMCIA_DEV_ID_MATCH_PROD_ID1 0x0010
+#define PCMCIA_DEV_ID_MATCH_PROD_ID2 0x0020
+#define PCMCIA_DEV_ID_MATCH_PROD_ID3 0x0040
+#define PCMCIA_DEV_ID_MATCH_PROD_ID4 0x0080
+#define PCMCIA_DEV_ID_MATCH_DEVICE_NO 0x0100
+#define PCMCIA_DEV_ID_MATCH_FAKE_CIS 0x0200
+#define PCMCIA_DEV_ID_MATCH_ANONYMOUS 0x0400
+
+/* I2C */
+struct i2c_device_id {
+ __u16 id;
+};
+
+/* Input */
+#define INPUT_DEVICE_ID_EV_MAX 0x1f
+#define INPUT_DEVICE_ID_KEY_MAX 0x1ff
+#define INPUT_DEVICE_ID_REL_MAX 0x0f
+#define INPUT_DEVICE_ID_ABS_MAX 0x3f
+#define INPUT_DEVICE_ID_MSC_MAX 0x07
+#define INPUT_DEVICE_ID_LED_MAX 0x0f
+#define INPUT_DEVICE_ID_SND_MAX 0x07
+#define INPUT_DEVICE_ID_FF_MAX 0x7f
+#define INPUT_DEVICE_ID_SW_MAX 0x0f
+
+#define INPUT_DEVICE_ID_MATCH_BUS 1
+#define INPUT_DEVICE_ID_MATCH_VENDOR 2
+#define INPUT_DEVICE_ID_MATCH_PRODUCT 4
+#define INPUT_DEVICE_ID_MATCH_VERSION 8
+
+#define INPUT_DEVICE_ID_MATCH_EVBIT 0x0010
+#define INPUT_DEVICE_ID_MATCH_KEYBIT 0x0020
+#define INPUT_DEVICE_ID_MATCH_RELBIT 0x0040
+#define INPUT_DEVICE_ID_MATCH_ABSBIT 0x0080
+#define INPUT_DEVICE_ID_MATCH_MSCIT 0x0100
+#define INPUT_DEVICE_ID_MATCH_LEDBIT 0x0200
+#define INPUT_DEVICE_ID_MATCH_SNDBIT 0x0400
+#define INPUT_DEVICE_ID_MATCH_FFBIT 0x0800
+#define INPUT_DEVICE_ID_MATCH_SWBIT 0x1000
+
+struct input_device_id {
+
+ kernel_ulong_t flags;
+
+ __u16 bustype;
+ __u16 vendor;
+ __u16 product;
+ __u16 version;
+
+ kernel_ulong_t evbit[INPUT_DEVICE_ID_EV_MAX / BITS_PER_LONG + 1];
+ kernel_ulong_t keybit[INPUT_DEVICE_ID_KEY_MAX / BITS_PER_LONG + 1];
+ kernel_ulong_t relbit[INPUT_DEVICE_ID_REL_MAX / BITS_PER_LONG + 1];
+ kernel_ulong_t absbit[INPUT_DEVICE_ID_ABS_MAX / BITS_PER_LONG + 1];
+ kernel_ulong_t mscbit[INPUT_DEVICE_ID_MSC_MAX / BITS_PER_LONG + 1];
+ kernel_ulong_t ledbit[INPUT_DEVICE_ID_LED_MAX / BITS_PER_LONG + 1];
+ kernel_ulong_t sndbit[INPUT_DEVICE_ID_SND_MAX / BITS_PER_LONG + 1];
+ kernel_ulong_t ffbit[INPUT_DEVICE_ID_FF_MAX / BITS_PER_LONG + 1];
+ kernel_ulong_t swbit[INPUT_DEVICE_ID_SW_MAX / BITS_PER_LONG + 1];
+
+ kernel_ulong_t driver_info;
+};
+
+/* EISA */
+
+#define EISA_SIG_LEN 8
+
+/* The EISA signature, in ASCII form, null terminated */
+struct eisa_device_id {
+ char sig[EISA_SIG_LEN];
+ kernel_ulong_t driver_data;
+};
+
+#define EISA_DEVICE_MODALIAS_FMT "eisa:s%s"
+
+#endif /* LINUX_MOD_DEVICETABLE_H */
diff --git a/xen/include/asm-ia64/linux/pci_ids.h b/xen/include/asm-ia64/linux/pci_ids.h
new file mode 100644
index 0000000000..fa4e1d7997
--- /dev/null
+++ b/xen/include/asm-ia64/linux/pci_ids.h
@@ -0,0 +1,2356 @@
+/*
+ * PCI Class, Vendor and Device IDs
+ *
+ * Please keep sorted.
+ */
+
+/* Device classes and subclasses */
+
+#define PCI_CLASS_NOT_DEFINED 0x0000
+#define PCI_CLASS_NOT_DEFINED_VGA 0x0001
+
+#define PCI_BASE_CLASS_STORAGE 0x01
+#define PCI_CLASS_STORAGE_SCSI 0x0100
+#define PCI_CLASS_STORAGE_IDE 0x0101
+#define PCI_CLASS_STORAGE_FLOPPY 0x0102
+#define PCI_CLASS_STORAGE_IPI 0x0103
+#define PCI_CLASS_STORAGE_RAID 0x0104
+#define PCI_CLASS_STORAGE_SAS 0x0107
+#define PCI_CLASS_STORAGE_OTHER 0x0180
+
+#define PCI_BASE_CLASS_NETWORK 0x02
+#define PCI_CLASS_NETWORK_ETHERNET 0x0200
+#define PCI_CLASS_NETWORK_TOKEN_RING 0x0201
+#define PCI_CLASS_NETWORK_FDDI 0x0202
+#define PCI_CLASS_NETWORK_ATM 0x0203
+#define PCI_CLASS_NETWORK_OTHER 0x0280
+
+#define PCI_BASE_CLASS_DISPLAY 0x03
+#define PCI_CLASS_DISPLAY_VGA 0x0300
+#define PCI_CLASS_DISPLAY_XGA 0x0301
+#define PCI_CLASS_DISPLAY_3D 0x0302
+#define PCI_CLASS_DISPLAY_OTHER 0x0380
+
+#define PCI_BASE_CLASS_MULTIMEDIA 0x04
+#define PCI_CLASS_MULTIMEDIA_VIDEO 0x0400
+#define PCI_CLASS_MULTIMEDIA_AUDIO 0x0401
+#define PCI_CLASS_MULTIMEDIA_PHONE 0x0402
+#define PCI_CLASS_MULTIMEDIA_OTHER 0x0480
+
+#define PCI_BASE_CLASS_MEMORY 0x05
+#define PCI_CLASS_MEMORY_RAM 0x0500
+#define PCI_CLASS_MEMORY_FLASH 0x0501
+#define PCI_CLASS_MEMORY_OTHER 0x0580
+
+#define PCI_BASE_CLASS_BRIDGE 0x06
+#define PCI_CLASS_BRIDGE_HOST 0x0600
+#define PCI_CLASS_BRIDGE_ISA 0x0601
+#define PCI_CLASS_BRIDGE_EISA 0x0602
+#define PCI_CLASS_BRIDGE_MC 0x0603
+#define PCI_CLASS_BRIDGE_PCI 0x0604
+#define PCI_CLASS_BRIDGE_PCMCIA 0x0605
+#define PCI_CLASS_BRIDGE_NUBUS 0x0606
+#define PCI_CLASS_BRIDGE_CARDBUS 0x0607
+#define PCI_CLASS_BRIDGE_RACEWAY 0x0608
+#define PCI_CLASS_BRIDGE_OTHER 0x0680
+
+#define PCI_BASE_CLASS_COMMUNICATION 0x07
+#define PCI_CLASS_COMMUNICATION_SERIAL 0x0700
+#define PCI_CLASS_COMMUNICATION_PARALLEL 0x0701
+#define PCI_CLASS_COMMUNICATION_MULTISERIAL 0x0702
+#define PCI_CLASS_COMMUNICATION_MODEM 0x0703
+#define PCI_CLASS_COMMUNICATION_OTHER 0x0780
+
+#define PCI_BASE_CLASS_SYSTEM 0x08
+#define PCI_CLASS_SYSTEM_PIC 0x0800
+#define PCI_CLASS_SYSTEM_PIC_IOAPIC 0x080010
+#define PCI_CLASS_SYSTEM_PIC_IOXAPIC 0x080020
+#define PCI_CLASS_SYSTEM_DMA 0x0801
+#define PCI_CLASS_SYSTEM_TIMER 0x0802
+#define PCI_CLASS_SYSTEM_RTC 0x0803
+#define PCI_CLASS_SYSTEM_PCI_HOTPLUG 0x0804
+#define PCI_CLASS_SYSTEM_SDHCI 0x0805
+#define PCI_CLASS_SYSTEM_OTHER 0x0880
+
+#define PCI_BASE_CLASS_INPUT 0x09
+#define PCI_CLASS_INPUT_KEYBOARD 0x0900
+#define PCI_CLASS_INPUT_PEN 0x0901
+#define PCI_CLASS_INPUT_MOUSE 0x0902
+#define PCI_CLASS_INPUT_SCANNER 0x0903
+#define PCI_CLASS_INPUT_GAMEPORT 0x0904
+#define PCI_CLASS_INPUT_OTHER 0x0980
+
+#define PCI_BASE_CLASS_DOCKING 0x0a
+#define PCI_CLASS_DOCKING_GENERIC 0x0a00
+#define PCI_CLASS_DOCKING_OTHER 0x0a80
+
+#define PCI_BASE_CLASS_PROCESSOR 0x0b
+#define PCI_CLASS_PROCESSOR_386 0x0b00
+#define PCI_CLASS_PROCESSOR_486 0x0b01
+#define PCI_CLASS_PROCESSOR_PENTIUM 0x0b02
+#define PCI_CLASS_PROCESSOR_ALPHA 0x0b10
+#define PCI_CLASS_PROCESSOR_POWERPC 0x0b20
+#define PCI_CLASS_PROCESSOR_MIPS 0x0b30
+#define PCI_CLASS_PROCESSOR_CO 0x0b40
+
+#define PCI_BASE_CLASS_SERIAL 0x0c
+#define PCI_CLASS_SERIAL_FIREWIRE 0x0c00
+#define PCI_CLASS_SERIAL_ACCESS 0x0c01
+#define PCI_CLASS_SERIAL_SSA 0x0c02
+#define PCI_CLASS_SERIAL_USB 0x0c03
+#define PCI_CLASS_SERIAL_USB_UHCI 0x0c0300
+#define PCI_CLASS_SERIAL_USB_OHCI 0x0c0310
+#define PCI_CLASS_SERIAL_USB_EHCI 0x0c0320
+#define PCI_CLASS_SERIAL_FIBER 0x0c04
+#define PCI_CLASS_SERIAL_SMBUS 0x0c05
+
+#define PCI_BASE_CLASS_INTELLIGENT 0x0e
+#define PCI_CLASS_INTELLIGENT_I2O 0x0e00
+
+#define PCI_BASE_CLASS_SATELLITE 0x0f
+#define PCI_CLASS_SATELLITE_TV 0x0f00
+#define PCI_CLASS_SATELLITE_AUDIO 0x0f01
+#define PCI_CLASS_SATELLITE_VOICE 0x0f03
+#define PCI_CLASS_SATELLITE_DATA 0x0f04
+
+#define PCI_BASE_CLASS_CRYPT 0x10
+#define PCI_CLASS_CRYPT_NETWORK 0x1000
+#define PCI_CLASS_CRYPT_ENTERTAINMENT 0x1001
+#define PCI_CLASS_CRYPT_OTHER 0x1080
+
+#define PCI_BASE_CLASS_SIGNAL_PROCESSING 0x11
+#define PCI_CLASS_SP_DPIO 0x1100
+#define PCI_CLASS_SP_OTHER 0x1180
+
+#define PCI_CLASS_OTHERS 0xff
+
+/* Vendors and devices. Sort key: vendor first, device next. */
+
+#define PCI_VENDOR_ID_DYNALINK 0x0675
+#define PCI_DEVICE_ID_DYNALINK_IS64PH 0x1702
+
+#define PCI_VENDOR_ID_BERKOM 0x0871
+#define PCI_DEVICE_ID_BERKOM_A1T 0xffa1
+#define PCI_DEVICE_ID_BERKOM_T_CONCEPT 0xffa2
+#define PCI_DEVICE_ID_BERKOM_A4T 0xffa4
+#define PCI_DEVICE_ID_BERKOM_SCITEL_QUADRO 0xffa8
+
+#define PCI_VENDOR_ID_COMPAQ 0x0e11
+#define PCI_DEVICE_ID_COMPAQ_TOKENRING 0x0508
+#define PCI_DEVICE_ID_COMPAQ_TACHYON 0xa0fc
+#define PCI_DEVICE_ID_COMPAQ_SMART2P 0xae10
+#define PCI_DEVICE_ID_COMPAQ_NETEL100 0xae32
+#define PCI_DEVICE_ID_COMPAQ_NETEL10 0xae34
+#define PCI_DEVICE_ID_COMPAQ_TRIFLEX_IDE 0xae33
+#define PCI_DEVICE_ID_COMPAQ_NETFLEX3I 0xae35
+#define PCI_DEVICE_ID_COMPAQ_NETEL100D 0xae40
+#define PCI_DEVICE_ID_COMPAQ_NETEL100PI 0xae43
+#define PCI_DEVICE_ID_COMPAQ_NETEL100I 0xb011
+#define PCI_DEVICE_ID_COMPAQ_CISS 0xb060
+#define PCI_DEVICE_ID_COMPAQ_CISSB 0xb178
+#define PCI_DEVICE_ID_COMPAQ_CISSC 0x46
+#define PCI_DEVICE_ID_COMPAQ_THUNDER 0xf130
+#define PCI_DEVICE_ID_COMPAQ_NETFLEX3B 0xf150
+
+#define PCI_VENDOR_ID_NCR 0x1000
+#define PCI_VENDOR_ID_LSI_LOGIC 0x1000
+#define PCI_DEVICE_ID_NCR_53C810 0x0001
+#define PCI_DEVICE_ID_NCR_53C820 0x0002
+#define PCI_DEVICE_ID_NCR_53C825 0x0003
+#define PCI_DEVICE_ID_NCR_53C815 0x0004
+#define PCI_DEVICE_ID_LSI_53C810AP 0x0005
+#define PCI_DEVICE_ID_NCR_53C860 0x0006
+#define PCI_DEVICE_ID_LSI_53C1510 0x000a
+#define PCI_DEVICE_ID_NCR_53C896 0x000b
+#define PCI_DEVICE_ID_NCR_53C895 0x000c
+#define PCI_DEVICE_ID_NCR_53C885 0x000d
+#define PCI_DEVICE_ID_NCR_53C875 0x000f
+#define PCI_DEVICE_ID_NCR_53C1510 0x0010
+#define PCI_DEVICE_ID_LSI_53C895A 0x0012
+#define PCI_DEVICE_ID_LSI_53C875A 0x0013
+#define PCI_DEVICE_ID_LSI_53C1010_33 0x0020
+#define PCI_DEVICE_ID_LSI_53C1010_66 0x0021
+#define PCI_DEVICE_ID_LSI_53C1030 0x0030
+#define PCI_DEVICE_ID_LSI_1030_53C1035 0x0032
+#define PCI_DEVICE_ID_LSI_53C1035 0x0040
+#define PCI_DEVICE_ID_NCR_53C875J 0x008f
+#define PCI_DEVICE_ID_LSI_FC909 0x0621
+#define PCI_DEVICE_ID_LSI_FC929 0x0622
+#define PCI_DEVICE_ID_LSI_FC929_LAN 0x0623
+#define PCI_DEVICE_ID_LSI_FC919 0x0624
+#define PCI_DEVICE_ID_LSI_FC919_LAN 0x0625
+#define PCI_DEVICE_ID_LSI_FC929X 0x0626
+#define PCI_DEVICE_ID_LSI_FC939X 0x0642
+#define PCI_DEVICE_ID_LSI_FC949X 0x0640
+#define PCI_DEVICE_ID_LSI_FC949ES 0x0646
+#define PCI_DEVICE_ID_LSI_FC919X 0x0628
+#define PCI_DEVICE_ID_NCR_YELLOWFIN 0x0701
+#define PCI_DEVICE_ID_LSI_61C102 0x0901
+#define PCI_DEVICE_ID_LSI_63C815 0x1000
+#define PCI_DEVICE_ID_LSI_SAS1064 0x0050
+#define PCI_DEVICE_ID_LSI_SAS1064R 0x0411
+#define PCI_DEVICE_ID_LSI_SAS1066 0x005E
+#define PCI_DEVICE_ID_LSI_SAS1068 0x0054
+#define PCI_DEVICE_ID_LSI_SAS1064A 0x005C
+#define PCI_DEVICE_ID_LSI_SAS1064E 0x0056
+#define PCI_DEVICE_ID_LSI_SAS1066E 0x005A
+#define PCI_DEVICE_ID_LSI_SAS1068E 0x0058
+#define PCI_DEVICE_ID_LSI_SAS1078 0x0060
+
+#define PCI_VENDOR_ID_ATI 0x1002
+/* Mach64 */
+#define PCI_DEVICE_ID_ATI_68800 0x4158
+#define PCI_DEVICE_ID_ATI_215CT222 0x4354
+#define PCI_DEVICE_ID_ATI_210888CX 0x4358
+#define PCI_DEVICE_ID_ATI_215ET222 0x4554
+/* Mach64 / Rage */
+#define PCI_DEVICE_ID_ATI_215GB 0x4742
+#define PCI_DEVICE_ID_ATI_215GD 0x4744
+#define PCI_DEVICE_ID_ATI_215GI 0x4749
+#define PCI_DEVICE_ID_ATI_215GP 0x4750
+#define PCI_DEVICE_ID_ATI_215GQ 0x4751
+#define PCI_DEVICE_ID_ATI_215XL 0x4752
+#define PCI_DEVICE_ID_ATI_215GT 0x4754
+#define PCI_DEVICE_ID_ATI_215GTB 0x4755
+#define PCI_DEVICE_ID_ATI_215_IV 0x4756
+#define PCI_DEVICE_ID_ATI_215_IW 0x4757
+#define PCI_DEVICE_ID_ATI_215_IZ 0x475A
+#define PCI_DEVICE_ID_ATI_210888GX 0x4758
+#define PCI_DEVICE_ID_ATI_215_LB 0x4c42
+#define PCI_DEVICE_ID_ATI_215_LD 0x4c44
+#define PCI_DEVICE_ID_ATI_215_LG 0x4c47
+#define PCI_DEVICE_ID_ATI_215_LI 0x4c49
+#define PCI_DEVICE_ID_ATI_215_LM 0x4c4D
+#define PCI_DEVICE_ID_ATI_215_LN 0x4c4E
+#define PCI_DEVICE_ID_ATI_215_LR 0x4c52
+#define PCI_DEVICE_ID_ATI_215_LS 0x4c53
+#define PCI_DEVICE_ID_ATI_264_LT 0x4c54
+/* Mach64 VT */
+#define PCI_DEVICE_ID_ATI_264VT 0x5654
+#define PCI_DEVICE_ID_ATI_264VU 0x5655
+#define PCI_DEVICE_ID_ATI_264VV 0x5656
+/* Rage128 GL */
+#define PCI_DEVICE_ID_ATI_RAGE128_RE 0x5245
+#define PCI_DEVICE_ID_ATI_RAGE128_RF 0x5246
+#define PCI_DEVICE_ID_ATI_RAGE128_RG 0x5247
+/* Rage128 VR */
+#define PCI_DEVICE_ID_ATI_RAGE128_RK 0x524b
+#define PCI_DEVICE_ID_ATI_RAGE128_RL 0x524c
+#define PCI_DEVICE_ID_ATI_RAGE128_SE 0x5345
+#define PCI_DEVICE_ID_ATI_RAGE128_SF 0x5346
+#define PCI_DEVICE_ID_ATI_RAGE128_SG 0x5347
+#define PCI_DEVICE_ID_ATI_RAGE128_SH 0x5348
+#define PCI_DEVICE_ID_ATI_RAGE128_SK 0x534b
+#define PCI_DEVICE_ID_ATI_RAGE128_SL 0x534c
+#define PCI_DEVICE_ID_ATI_RAGE128_SM 0x534d
+#define PCI_DEVICE_ID_ATI_RAGE128_SN 0x534e
+/* Rage128 Ultra */
+#define PCI_DEVICE_ID_ATI_RAGE128_TF 0x5446
+#define PCI_DEVICE_ID_ATI_RAGE128_TL 0x544c
+#define PCI_DEVICE_ID_ATI_RAGE128_TR 0x5452
+#define PCI_DEVICE_ID_ATI_RAGE128_TS 0x5453
+#define PCI_DEVICE_ID_ATI_RAGE128_TT 0x5454
+#define PCI_DEVICE_ID_ATI_RAGE128_TU 0x5455
+/* Rage128 M3 */
+#define PCI_DEVICE_ID_ATI_RAGE128_LE 0x4c45
+#define PCI_DEVICE_ID_ATI_RAGE128_LF 0x4c46
+/* Rage128 M4 */
+#define PCI_DEVICE_ID_ATI_RAGE128_MF 0x4d46
+#define PCI_DEVICE_ID_ATI_RAGE128_ML 0x4d4c
+/* Rage128 Pro GL */
+#define PCI_DEVICE_ID_ATI_RAGE128_PA 0x5041
+#define PCI_DEVICE_ID_ATI_RAGE128_PB 0x5042
+#define PCI_DEVICE_ID_ATI_RAGE128_PC 0x5043
+#define PCI_DEVICE_ID_ATI_RAGE128_PD 0x5044
+#define PCI_DEVICE_ID_ATI_RAGE128_PE 0x5045
+#define PCI_DEVICE_ID_ATI_RAGE128_PF 0x5046
+/* Rage128 Pro VR */
+#define PCI_DEVICE_ID_ATI_RAGE128_PG 0x5047
+#define PCI_DEVICE_ID_ATI_RAGE128_PH 0x5048
+#define PCI_DEVICE_ID_ATI_RAGE128_PI 0x5049
+#define PCI_DEVICE_ID_ATI_RAGE128_PJ 0x504A
+#define PCI_DEVICE_ID_ATI_RAGE128_PK 0x504B
+#define PCI_DEVICE_ID_ATI_RAGE128_PL 0x504C
+#define PCI_DEVICE_ID_ATI_RAGE128_PM 0x504D
+#define PCI_DEVICE_ID_ATI_RAGE128_PN 0x504E
+#define PCI_DEVICE_ID_ATI_RAGE128_PO 0x504F
+#define PCI_DEVICE_ID_ATI_RAGE128_PP 0x5050
+#define PCI_DEVICE_ID_ATI_RAGE128_PQ 0x5051
+#define PCI_DEVICE_ID_ATI_RAGE128_PR 0x5052
+#define PCI_DEVICE_ID_ATI_RAGE128_PS 0x5053
+#define PCI_DEVICE_ID_ATI_RAGE128_PT 0x5054
+#define PCI_DEVICE_ID_ATI_RAGE128_PU 0x5055
+#define PCI_DEVICE_ID_ATI_RAGE128_PV 0x5056
+#define PCI_DEVICE_ID_ATI_RAGE128_PW 0x5057
+#define PCI_DEVICE_ID_ATI_RAGE128_PX 0x5058
+/* Rage128 M4 */
+/* Radeon R100 */
+#define PCI_DEVICE_ID_ATI_RADEON_QD 0x5144
+#define PCI_DEVICE_ID_ATI_RADEON_QE 0x5145
+#define PCI_DEVICE_ID_ATI_RADEON_QF 0x5146
+#define PCI_DEVICE_ID_ATI_RADEON_QG 0x5147
+/* Radeon RV100 (VE) */
+#define PCI_DEVICE_ID_ATI_RADEON_QY 0x5159
+#define PCI_DEVICE_ID_ATI_RADEON_QZ 0x515a
+/* Radeon R200 (8500) */
+#define PCI_DEVICE_ID_ATI_RADEON_QL 0x514c
+#define PCI_DEVICE_ID_ATI_RADEON_QN 0x514e
+#define PCI_DEVICE_ID_ATI_RADEON_QO 0x514f
+#define PCI_DEVICE_ID_ATI_RADEON_Ql 0x516c
+#define PCI_DEVICE_ID_ATI_RADEON_BB 0x4242
+/* Radeon R200 (9100) */
+#define PCI_DEVICE_ID_ATI_RADEON_QM 0x514d
+/* Radeon RV200 (7500) */
+#define PCI_DEVICE_ID_ATI_RADEON_QW 0x5157
+#define PCI_DEVICE_ID_ATI_RADEON_QX 0x5158
+/* Radeon NV-100 */
+/* Radeon RV250 (9000) */
+#define PCI_DEVICE_ID_ATI_RADEON_Id 0x4964
+#define PCI_DEVICE_ID_ATI_RADEON_Ie 0x4965
+#define PCI_DEVICE_ID_ATI_RADEON_If 0x4966
+#define PCI_DEVICE_ID_ATI_RADEON_Ig 0x4967
+/* Radeon RV280 (9200) */
+#define PCI_DEVICE_ID_ATI_RADEON_Ya 0x5961
+#define PCI_DEVICE_ID_ATI_RADEON_Yd 0x5964
+/* Radeon R300 (9500) */
+/* Radeon R300 (9700) */
+#define PCI_DEVICE_ID_ATI_RADEON_ND 0x4e44
+#define PCI_DEVICE_ID_ATI_RADEON_NE 0x4e45
+#define PCI_DEVICE_ID_ATI_RADEON_NF 0x4e46
+#define PCI_DEVICE_ID_ATI_RADEON_NG 0x4e47
+/* Radeon R350 (9800) */
+/* Radeon RV350 (9600) */
+/* Radeon M6 */
+#define PCI_DEVICE_ID_ATI_RADEON_LY 0x4c59
+#define PCI_DEVICE_ID_ATI_RADEON_LZ 0x4c5a
+/* Radeon M7 */
+#define PCI_DEVICE_ID_ATI_RADEON_LW 0x4c57
+#define PCI_DEVICE_ID_ATI_RADEON_LX 0x4c58
+/* Radeon M9 */
+#define PCI_DEVICE_ID_ATI_RADEON_Ld 0x4c64
+#define PCI_DEVICE_ID_ATI_RADEON_Le 0x4c65
+#define PCI_DEVICE_ID_ATI_RADEON_Lf 0x4c66
+#define PCI_DEVICE_ID_ATI_RADEON_Lg 0x4c67
+/* Radeon */
+/* RadeonIGP */
+#define PCI_DEVICE_ID_ATI_RS100 0xcab0
+#define PCI_DEVICE_ID_ATI_RS200 0xcab2
+#define PCI_DEVICE_ID_ATI_RS200_B 0xcbb2
+#define PCI_DEVICE_ID_ATI_RS250 0xcab3
+#define PCI_DEVICE_ID_ATI_RS300_100 0x5830
+#define PCI_DEVICE_ID_ATI_RS300_133 0x5831
+#define PCI_DEVICE_ID_ATI_RS300_166 0x5832
+#define PCI_DEVICE_ID_ATI_RS300_200 0x5833
+#define PCI_DEVICE_ID_ATI_RS350_100 0x7830
+#define PCI_DEVICE_ID_ATI_RS350_133 0x7831
+#define PCI_DEVICE_ID_ATI_RS350_166 0x7832
+#define PCI_DEVICE_ID_ATI_RS350_200 0x7833
+#define PCI_DEVICE_ID_ATI_RS400_100 0x5a30
+#define PCI_DEVICE_ID_ATI_RS400_133 0x5a31
+#define PCI_DEVICE_ID_ATI_RS400_166 0x5a32
+#define PCI_DEVICE_ID_ATI_RS400_200 0x5a33
+#define PCI_DEVICE_ID_ATI_RS480 0x5950
+/* ATI IXP Chipset */
+#define PCI_DEVICE_ID_ATI_IXP200_IDE 0x4349
+#define PCI_DEVICE_ID_ATI_IXP200_SMBUS 0x4353
+#define PCI_DEVICE_ID_ATI_IXP300_SMBUS 0x4363
+#define PCI_DEVICE_ID_ATI_IXP300_IDE 0x4369
+#define PCI_DEVICE_ID_ATI_IXP300_SATA 0x436e
+#define PCI_DEVICE_ID_ATI_IXP400_SMBUS 0x4372
+#define PCI_DEVICE_ID_ATI_IXP400_IDE 0x4376
+#define PCI_DEVICE_ID_ATI_IXP400_SATA 0x4379
+#define PCI_DEVICE_ID_ATI_IXP400_SATA2 0x437a
+#define PCI_DEVICE_ID_ATI_IXP600_SATA 0x4380
+#define PCI_DEVICE_ID_ATI_IXP600_SRAID 0x4381
+#define PCI_DEVICE_ID_ATI_IXP600_IDE 0x438c
+
+#define PCI_VENDOR_ID_VLSI 0x1004
+#define PCI_DEVICE_ID_VLSI_82C592 0x0005
+#define PCI_DEVICE_ID_VLSI_82C593 0x0006
+#define PCI_DEVICE_ID_VLSI_82C594 0x0007
+#define PCI_DEVICE_ID_VLSI_82C597 0x0009
+#define PCI_DEVICE_ID_VLSI_82C541 0x000c
+#define PCI_DEVICE_ID_VLSI_82C543 0x000d
+#define PCI_DEVICE_ID_VLSI_82C532 0x0101
+#define PCI_DEVICE_ID_VLSI_82C534 0x0102
+#define PCI_DEVICE_ID_VLSI_82C535 0x0104
+#define PCI_DEVICE_ID_VLSI_82C147 0x0105
+#define PCI_DEVICE_ID_VLSI_VAS96011 0x0702
+
+#define PCI_VENDOR_ID_ADL 0x1005
+#define PCI_DEVICE_ID_ADL_2301 0x2301
+
+#define PCI_VENDOR_ID_NS 0x100b
+#define PCI_DEVICE_ID_NS_87415 0x0002
+#define PCI_DEVICE_ID_NS_87560_LIO 0x000e
+#define PCI_DEVICE_ID_NS_87560_USB 0x0012
+#define PCI_DEVICE_ID_NS_83815 0x0020
+#define PCI_DEVICE_ID_NS_83820 0x0022
+#define PCI_DEVICE_ID_NS_CS5535_ISA 0x002b
+#define PCI_DEVICE_ID_NS_CS5535_IDE 0x002d
+#define PCI_DEVICE_ID_NS_CS5535_AUDIO 0x002e
+#define PCI_DEVICE_ID_NS_CS5535_USB 0x002f
+#define PCI_DEVICE_ID_NS_CS5535_VIDEO 0x0030
+#define PCI_DEVICE_ID_NS_SATURN 0x0035
+#define PCI_DEVICE_ID_NS_SCx200_BRIDGE 0x0500
+#define PCI_DEVICE_ID_NS_SCx200_SMI 0x0501
+#define PCI_DEVICE_ID_NS_SCx200_IDE 0x0502
+#define PCI_DEVICE_ID_NS_SCx200_AUDIO 0x0503
+#define PCI_DEVICE_ID_NS_SCx200_VIDEO 0x0504
+#define PCI_DEVICE_ID_NS_SCx200_XBUS 0x0505
+#define PCI_DEVICE_ID_NS_SC1100_BRIDGE 0x0510
+#define PCI_DEVICE_ID_NS_SC1100_SMI 0x0511
+#define PCI_DEVICE_ID_NS_SC1100_XBUS 0x0515
+#define PCI_DEVICE_ID_NS_87410 0xd001
+
+#define PCI_DEVICE_ID_NS_CS5535_HOST_BRIDGE 0x0028
+#define PCI_DEVICE_ID_NS_CS5535_ISA_BRIDGE 0x002b
+
+#define PCI_VENDOR_ID_TSENG 0x100c
+#define PCI_DEVICE_ID_TSENG_W32P_2 0x3202
+#define PCI_DEVICE_ID_TSENG_W32P_b 0x3205
+#define PCI_DEVICE_ID_TSENG_W32P_c 0x3206
+#define PCI_DEVICE_ID_TSENG_W32P_d 0x3207
+#define PCI_DEVICE_ID_TSENG_ET6000 0x3208
+
+#define PCI_VENDOR_ID_WEITEK 0x100e
+#define PCI_DEVICE_ID_WEITEK_P9000 0x9001
+#define PCI_DEVICE_ID_WEITEK_P9100 0x9100
+
+#define PCI_VENDOR_ID_DEC 0x1011
+#define PCI_DEVICE_ID_DEC_BRD 0x0001
+#define PCI_DEVICE_ID_DEC_TULIP 0x0002
+#define PCI_DEVICE_ID_DEC_TGA 0x0004
+#define PCI_DEVICE_ID_DEC_TULIP_FAST 0x0009
+#define PCI_DEVICE_ID_DEC_TGA2 0x000D
+#define PCI_DEVICE_ID_DEC_FDDI 0x000F
+#define PCI_DEVICE_ID_DEC_TULIP_PLUS 0x0014
+#define PCI_DEVICE_ID_DEC_21142 0x0019
+#define PCI_DEVICE_ID_DEC_21052 0x0021
+#define PCI_DEVICE_ID_DEC_21150 0x0022
+#define PCI_DEVICE_ID_DEC_21152 0x0024
+#define PCI_DEVICE_ID_DEC_21153 0x0025
+#define PCI_DEVICE_ID_DEC_21154 0x0026
+#define PCI_DEVICE_ID_DEC_21285 0x1065
+#define PCI_DEVICE_ID_COMPAQ_42XX 0x0046
+
+#define PCI_VENDOR_ID_CIRRUS 0x1013
+#define PCI_DEVICE_ID_CIRRUS_7548 0x0038
+#define PCI_DEVICE_ID_CIRRUS_5430 0x00a0
+#define PCI_DEVICE_ID_CIRRUS_5434_4 0x00a4
+#define PCI_DEVICE_ID_CIRRUS_5434_8 0x00a8
+#define PCI_DEVICE_ID_CIRRUS_5436 0x00ac
+#define PCI_DEVICE_ID_CIRRUS_5446 0x00b8
+#define PCI_DEVICE_ID_CIRRUS_5480 0x00bc
+#define PCI_DEVICE_ID_CIRRUS_5462 0x00d0
+#define PCI_DEVICE_ID_CIRRUS_5464 0x00d4
+#define PCI_DEVICE_ID_CIRRUS_5465 0x00d6
+#define PCI_DEVICE_ID_CIRRUS_6729 0x1100
+#define PCI_DEVICE_ID_CIRRUS_6832 0x1110
+#define PCI_DEVICE_ID_CIRRUS_7543 0x1202
+#define PCI_DEVICE_ID_CIRRUS_4610 0x6001
+#define PCI_DEVICE_ID_CIRRUS_4612 0x6003
+#define PCI_DEVICE_ID_CIRRUS_4615 0x6004
+
+#define PCI_VENDOR_ID_IBM 0x1014
+#define PCI_DEVICE_ID_IBM_TR 0x0018
+#define PCI_DEVICE_ID_IBM_TR_WAKE 0x003e
+#define PCI_DEVICE_ID_IBM_CPC710_PCI64 0x00fc
+#define PCI_DEVICE_ID_IBM_SNIPE 0x0180
+#define PCI_DEVICE_ID_IBM_CITRINE 0x028C
+#define PCI_DEVICE_ID_IBM_GEMSTONE 0xB166
+#define PCI_DEVICE_ID_IBM_OBSIDIAN 0x02BD
+#define PCI_DEVICE_ID_IBM_ICOM_DEV_ID_1 0x0031
+#define PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2 0x0219
+#define PCI_DEVICE_ID_IBM_ICOM_V2_TWO_PORTS_RVX 0x021A
+#define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM 0x0251
+#define PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL 0x252
+
+#define PCI_VENDOR_ID_COMPEX2 0x101a /* pci.ids says "AT&T GIS (NCR)" */
+#define PCI_DEVICE_ID_COMPEX2_100VG 0x0005
+
+#define PCI_VENDOR_ID_WD 0x101c
+#define PCI_DEVICE_ID_WD_90C 0xc24a
+
+#define PCI_VENDOR_ID_AMI 0x101e
+#define PCI_DEVICE_ID_AMI_MEGARAID3 0x1960
+#define PCI_DEVICE_ID_AMI_MEGARAID 0x9010
+#define PCI_DEVICE_ID_AMI_MEGARAID2 0x9060
+
+#define PCI_VENDOR_ID_AMD 0x1022
+#define PCI_DEVICE_ID_AMD_K8_NB 0x1100
+#define PCI_DEVICE_ID_AMD_K8_NB_MISC 0x1103
+#define PCI_DEVICE_ID_AMD_LANCE 0x2000
+#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
+#define PCI_DEVICE_ID_AMD_SCSI 0x2020
+#define PCI_DEVICE_ID_AMD_SERENADE 0x36c0
+#define PCI_DEVICE_ID_AMD_FE_GATE_7006 0x7006
+#define PCI_DEVICE_ID_AMD_FE_GATE_7007 0x7007
+#define PCI_DEVICE_ID_AMD_FE_GATE_700C 0x700C
+#define PCI_DEVICE_ID_AMD_FE_GATE_700E 0x700E
+#define PCI_DEVICE_ID_AMD_COBRA_7401 0x7401
+#define PCI_DEVICE_ID_AMD_VIPER_7409 0x7409
+#define PCI_DEVICE_ID_AMD_VIPER_740B 0x740B
+#define PCI_DEVICE_ID_AMD_VIPER_7410 0x7410
+#define PCI_DEVICE_ID_AMD_VIPER_7411 0x7411
+#define PCI_DEVICE_ID_AMD_VIPER_7413 0x7413
+#define PCI_DEVICE_ID_AMD_VIPER_7440 0x7440
+#define PCI_DEVICE_ID_AMD_OPUS_7441 0x7441
+#define PCI_DEVICE_ID_AMD_OPUS_7443 0x7443
+#define PCI_DEVICE_ID_AMD_VIPER_7443 0x7443
+#define PCI_DEVICE_ID_AMD_OPUS_7445 0x7445
+#define PCI_DEVICE_ID_AMD_8111_LPC 0x7468
+#define PCI_DEVICE_ID_AMD_8111_IDE 0x7469
+#define PCI_DEVICE_ID_AMD_8111_SMBUS2 0x746a
+#define PCI_DEVICE_ID_AMD_8111_SMBUS 0x746b
+#define PCI_DEVICE_ID_AMD_8111_AUDIO 0x746d
+#define PCI_DEVICE_ID_AMD_8151_0 0x7454
+#define PCI_DEVICE_ID_AMD_8131_BRIDGE 0x7450
+#define PCI_DEVICE_ID_AMD_8131_APIC 0x7451
+#define PCI_DEVICE_ID_AMD_8132_BRIDGE 0x7458
+#define PCI_DEVICE_ID_AMD_CS5536_ISA 0x2090
+#define PCI_DEVICE_ID_AMD_CS5536_FLASH 0x2091
+#define PCI_DEVICE_ID_AMD_CS5536_AUDIO 0x2093
+#define PCI_DEVICE_ID_AMD_CS5536_OHC 0x2094
+#define PCI_DEVICE_ID_AMD_CS5536_EHC 0x2095
+#define PCI_DEVICE_ID_AMD_CS5536_UDC 0x2096
+#define PCI_DEVICE_ID_AMD_CS5536_UOC 0x2097
+#define PCI_DEVICE_ID_AMD_CS5536_IDE 0x209A
+
+#define PCI_DEVICE_ID_AMD_LX_VIDEO 0x2081
+#define PCI_DEVICE_ID_AMD_LX_AES 0x2082
+
+#define PCI_VENDOR_ID_TRIDENT 0x1023
+#define PCI_DEVICE_ID_TRIDENT_4DWAVE_DX 0x2000
+#define PCI_DEVICE_ID_TRIDENT_4DWAVE_NX 0x2001
+#define PCI_DEVICE_ID_TRIDENT_9320 0x9320
+#define PCI_DEVICE_ID_TRIDENT_9388 0x9388
+#define PCI_DEVICE_ID_TRIDENT_9397 0x9397
+#define PCI_DEVICE_ID_TRIDENT_939A 0x939A
+#define PCI_DEVICE_ID_TRIDENT_9520 0x9520
+#define PCI_DEVICE_ID_TRIDENT_9525 0x9525
+#define PCI_DEVICE_ID_TRIDENT_9420 0x9420
+#define PCI_DEVICE_ID_TRIDENT_9440 0x9440
+#define PCI_DEVICE_ID_TRIDENT_9660 0x9660
+#define PCI_DEVICE_ID_TRIDENT_9750 0x9750
+#define PCI_DEVICE_ID_TRIDENT_9850 0x9850
+#define PCI_DEVICE_ID_TRIDENT_9880 0x9880
+#define PCI_DEVICE_ID_TRIDENT_8400 0x8400
+#define PCI_DEVICE_ID_TRIDENT_8420 0x8420
+#define PCI_DEVICE_ID_TRIDENT_8500 0x8500
+
+#define PCI_VENDOR_ID_AI 0x1025
+#define PCI_DEVICE_ID_AI_M1435 0x1435
+
+#define PCI_VENDOR_ID_DELL 0x1028
+#define PCI_DEVICE_ID_DELL_RACIII 0x0008
+#define PCI_DEVICE_ID_DELL_RAC4 0x0012
+#define PCI_DEVICE_ID_DELL_PERC5 0x0015
+
+#define PCI_VENDOR_ID_MATROX 0x102B
+#define PCI_DEVICE_ID_MATROX_MGA_2 0x0518
+#define PCI_DEVICE_ID_MATROX_MIL 0x0519
+#define PCI_DEVICE_ID_MATROX_MYS 0x051A
+#define PCI_DEVICE_ID_MATROX_MIL_2 0x051b
+#define PCI_DEVICE_ID_MATROX_MYS_AGP 0x051e
+#define PCI_DEVICE_ID_MATROX_MIL_2_AGP 0x051f
+#define PCI_DEVICE_ID_MATROX_MGA_IMP 0x0d10
+#define PCI_DEVICE_ID_MATROX_G100_MM 0x1000
+#define PCI_DEVICE_ID_MATROX_G100_AGP 0x1001
+#define PCI_DEVICE_ID_MATROX_G200_PCI 0x0520
+#define PCI_DEVICE_ID_MATROX_G200_AGP 0x0521
+#define PCI_DEVICE_ID_MATROX_G400 0x0525
+#define PCI_DEVICE_ID_MATROX_G550 0x2527
+#define PCI_DEVICE_ID_MATROX_VIA 0x4536
+
+#define PCI_VENDOR_ID_CT 0x102c
+#define PCI_DEVICE_ID_CT_69000 0x00c0
+#define PCI_DEVICE_ID_CT_65545 0x00d8
+#define PCI_DEVICE_ID_CT_65548 0x00dc
+#define PCI_DEVICE_ID_CT_65550 0x00e0
+#define PCI_DEVICE_ID_CT_65554 0x00e4
+#define PCI_DEVICE_ID_CT_65555 0x00e5
+
+#define PCI_VENDOR_ID_MIRO 0x1031
+#define PCI_DEVICE_ID_MIRO_36050 0x5601
+#define PCI_DEVICE_ID_MIRO_DC10PLUS 0x7efe
+#define PCI_DEVICE_ID_MIRO_DC30PLUS 0xd801
+
+#define PCI_VENDOR_ID_NEC 0x1033
+#define PCI_DEVICE_ID_NEC_CBUS_1 0x0001 /* PCI-Cbus Bridge */
+#define PCI_DEVICE_ID_NEC_LOCAL 0x0002 /* Local Bridge */
+#define PCI_DEVICE_ID_NEC_ATM 0x0003 /* ATM LAN Controller */
+#define PCI_DEVICE_ID_NEC_R4000 0x0004 /* R4000 Bridge */
+#define PCI_DEVICE_ID_NEC_486 0x0005 /* 486 Like Peripheral Bus Bridge */
+#define PCI_DEVICE_ID_NEC_ACCEL_1 0x0006 /* Graphic Accelerator */
+#define PCI_DEVICE_ID_NEC_UXBUS 0x0007 /* UX-Bus Bridge */
+#define PCI_DEVICE_ID_NEC_ACCEL_2 0x0008 /* Graphic Accelerator */
+#define PCI_DEVICE_ID_NEC_GRAPH 0x0009 /* PCI-CoreGraph Bridge */
+#define PCI_DEVICE_ID_NEC_VL 0x0016 /* PCI-VL Bridge */
+#define PCI_DEVICE_ID_NEC_STARALPHA2 0x002c /* STAR ALPHA2 */
+#define PCI_DEVICE_ID_NEC_CBUS_2 0x002d /* PCI-Cbus Bridge */
+#define PCI_DEVICE_ID_NEC_USB 0x0035 /* PCI-USB Host */
+#define PCI_DEVICE_ID_NEC_CBUS_3 0x003b
+#define PCI_DEVICE_ID_NEC_NAPCCARD 0x003e
+#define PCI_DEVICE_ID_NEC_PCX2 0x0046 /* PowerVR */
+#define PCI_DEVICE_ID_NEC_NILE4 0x005a
+#define PCI_DEVICE_ID_NEC_VRC5476 0x009b
+#define PCI_DEVICE_ID_NEC_VRC4173 0x00a5
+#define PCI_DEVICE_ID_NEC_VRC5477_AC97 0x00a6
+#define PCI_DEVICE_ID_NEC_PC9821CS01 0x800c /* PC-9821-CS01 */
+#define PCI_DEVICE_ID_NEC_PC9821NRB06 0x800d /* PC-9821NR-B06 */
+
+#define PCI_VENDOR_ID_FD 0x1036
+#define PCI_DEVICE_ID_FD_36C70 0x0000
+
+#define PCI_VENDOR_ID_SI 0x1039
+#define PCI_DEVICE_ID_SI_5591_AGP 0x0001
+#define PCI_DEVICE_ID_SI_6202 0x0002
+#define PCI_DEVICE_ID_SI_503 0x0008
+#define PCI_DEVICE_ID_SI_ACPI 0x0009
+#define PCI_DEVICE_ID_SI_SMBUS 0x0016
+#define PCI_DEVICE_ID_SI_LPC 0x0018
+#define PCI_DEVICE_ID_SI_5597_VGA 0x0200
+#define PCI_DEVICE_ID_SI_6205 0x0205
+#define PCI_DEVICE_ID_SI_501 0x0406
+#define PCI_DEVICE_ID_SI_496 0x0496
+#define PCI_DEVICE_ID_SI_300 0x0300
+#define PCI_DEVICE_ID_SI_315H 0x0310
+#define PCI_DEVICE_ID_SI_315 0x0315
+#define PCI_DEVICE_ID_SI_315PRO 0x0325
+#define PCI_DEVICE_ID_SI_530 0x0530
+#define PCI_DEVICE_ID_SI_540 0x0540
+#define PCI_DEVICE_ID_SI_550 0x0550
+#define PCI_DEVICE_ID_SI_540_VGA 0x5300
+#define PCI_DEVICE_ID_SI_550_VGA 0x5315
+#define PCI_DEVICE_ID_SI_620 0x0620
+#define PCI_DEVICE_ID_SI_630 0x0630
+#define PCI_DEVICE_ID_SI_633 0x0633
+#define PCI_DEVICE_ID_SI_635 0x0635
+#define PCI_DEVICE_ID_SI_640 0x0640
+#define PCI_DEVICE_ID_SI_645 0x0645
+#define PCI_DEVICE_ID_SI_646 0x0646
+#define PCI_DEVICE_ID_SI_648 0x0648
+#define PCI_DEVICE_ID_SI_650 0x0650
+#define PCI_DEVICE_ID_SI_651 0x0651
+#define PCI_DEVICE_ID_SI_655 0x0655
+#define PCI_DEVICE_ID_SI_661 0x0661
+#define PCI_DEVICE_ID_SI_730 0x0730
+#define PCI_DEVICE_ID_SI_733 0x0733
+#define PCI_DEVICE_ID_SI_630_VGA 0x6300
+#define PCI_DEVICE_ID_SI_735 0x0735
+#define PCI_DEVICE_ID_SI_740 0x0740
+#define PCI_DEVICE_ID_SI_741 0x0741
+#define PCI_DEVICE_ID_SI_745 0x0745
+#define PCI_DEVICE_ID_SI_746 0x0746
+#define PCI_DEVICE_ID_SI_755 0x0755
+#define PCI_DEVICE_ID_SI_760 0x0760
+#define PCI_DEVICE_ID_SI_900 0x0900
+#define PCI_DEVICE_ID_SI_961 0x0961
+#define PCI_DEVICE_ID_SI_962 0x0962
+#define PCI_DEVICE_ID_SI_963 0x0963
+#define PCI_DEVICE_ID_SI_965 0x0965
+#define PCI_DEVICE_ID_SI_966 0x0966
+#define PCI_DEVICE_ID_SI_968 0x0968
+#define PCI_DEVICE_ID_SI_5511 0x5511
+#define PCI_DEVICE_ID_SI_5513 0x5513
+#define PCI_DEVICE_ID_SI_5517 0x5517
+#define PCI_DEVICE_ID_SI_5518 0x5518
+#define PCI_DEVICE_ID_SI_5571 0x5571
+#define PCI_DEVICE_ID_SI_5581 0x5581
+#define PCI_DEVICE_ID_SI_5582 0x5582
+#define PCI_DEVICE_ID_SI_5591 0x5591
+#define PCI_DEVICE_ID_SI_5596 0x5596
+#define PCI_DEVICE_ID_SI_5597 0x5597
+#define PCI_DEVICE_ID_SI_5598 0x5598
+#define PCI_DEVICE_ID_SI_5600 0x5600
+#define PCI_DEVICE_ID_SI_7012 0x7012
+#define PCI_DEVICE_ID_SI_7013 0x7013
+#define PCI_DEVICE_ID_SI_7016 0x7016
+#define PCI_DEVICE_ID_SI_7018 0x7018
+
+#define PCI_VENDOR_ID_HP 0x103c
+#define PCI_DEVICE_ID_HP_VISUALIZE_EG 0x1005
+#define PCI_DEVICE_ID_HP_VISUALIZE_FX6 0x1006
+#define PCI_DEVICE_ID_HP_VISUALIZE_FX4 0x1008
+#define PCI_DEVICE_ID_HP_VISUALIZE_FX2 0x100a
+#define PCI_DEVICE_ID_HP_TACHYON 0x1028
+#define PCI_DEVICE_ID_HP_TACHLITE 0x1029
+#define PCI_DEVICE_ID_HP_J2585A 0x1030
+#define PCI_DEVICE_ID_HP_J2585B 0x1031
+#define PCI_DEVICE_ID_HP_J2973A 0x1040
+#define PCI_DEVICE_ID_HP_J2970A 0x1042
+#define PCI_DEVICE_ID_HP_DIVA 0x1048
+#define PCI_DEVICE_ID_HP_DIVA_TOSCA1 0x1049
+#define PCI_DEVICE_ID_HP_DIVA_TOSCA2 0x104A
+#define PCI_DEVICE_ID_HP_DIVA_MAESTRO 0x104B
+#define PCI_DEVICE_ID_HP_REO_IOC 0x10f1
+#define PCI_DEVICE_ID_HP_VISUALIZE_FXE 0x108b
+#define PCI_DEVICE_ID_HP_DIVA_HALFDOME 0x1223
+#define PCI_DEVICE_ID_HP_DIVA_KEYSTONE 0x1226
+#define PCI_DEVICE_ID_HP_DIVA_POWERBAR 0x1227
+#define PCI_DEVICE_ID_HP_ZX1_IOC 0x122a
+#define PCI_DEVICE_ID_HP_PCIX_LBA 0x122e
+#define PCI_DEVICE_ID_HP_SX1000_IOC 0x127c
+#define PCI_DEVICE_ID_HP_DIVA_EVEREST 0x1282
+#define PCI_DEVICE_ID_HP_DIVA_AUX 0x1290
+#define PCI_DEVICE_ID_HP_DIVA_RMP3 0x1301
+#define PCI_DEVICE_ID_HP_DIVA_HURRICANE 0x132a
+#define PCI_DEVICE_ID_HP_CISSA 0x3220
+#define PCI_DEVICE_ID_HP_CISSC 0x3230
+#define PCI_DEVICE_ID_HP_CISSD 0x3238
+#define PCI_DEVICE_ID_HP_ZX2_IOC 0x4031
+
+#define PCI_VENDOR_ID_PCTECH 0x1042
+#define PCI_DEVICE_ID_PCTECH_RZ1000 0x1000
+#define PCI_DEVICE_ID_PCTECH_RZ1001 0x1001
+#define PCI_DEVICE_ID_PCTECH_SAMURAI_IDE 0x3020
+
+#define PCI_VENDOR_ID_ASUSTEK 0x1043
+#define PCI_DEVICE_ID_ASUSTEK_0675 0x0675
+
+#define PCI_VENDOR_ID_DPT 0x1044
+#define PCI_DEVICE_ID_DPT 0xa400
+
+#define PCI_VENDOR_ID_OPTI 0x1045
+#define PCI_DEVICE_ID_OPTI_82C558 0xc558
+#define PCI_DEVICE_ID_OPTI_82C621 0xc621
+#define PCI_DEVICE_ID_OPTI_82C700 0xc700
+#define PCI_DEVICE_ID_OPTI_82C825 0xd568
+
+#define PCI_VENDOR_ID_ELSA 0x1048
+#define PCI_DEVICE_ID_ELSA_MICROLINK 0x1000
+#define PCI_DEVICE_ID_ELSA_QS3000 0x3000
+
+
+#define PCI_VENDOR_ID_BUSLOGIC 0x104B
+#define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC 0x0140
+#define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER 0x1040
+#define PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT 0x8130
+
+#define PCI_VENDOR_ID_TI 0x104c
+#define PCI_DEVICE_ID_TI_TVP4020 0x3d07
+#define PCI_DEVICE_ID_TI_4450 0x8011
+#define PCI_DEVICE_ID_TI_XX21_XX11 0x8031
+#define PCI_DEVICE_ID_TI_XX21_XX11_SD 0x8034
+#define PCI_DEVICE_ID_TI_X515 0x8036
+#define PCI_DEVICE_ID_TI_XX12 0x8039
+#define PCI_DEVICE_ID_TI_1130 0xac12
+#define PCI_DEVICE_ID_TI_1031 0xac13
+#define PCI_DEVICE_ID_TI_1131 0xac15
+#define PCI_DEVICE_ID_TI_1250 0xac16
+#define PCI_DEVICE_ID_TI_1220 0xac17
+#define PCI_DEVICE_ID_TI_1221 0xac19
+#define PCI_DEVICE_ID_TI_1210 0xac1a
+#define PCI_DEVICE_ID_TI_1450 0xac1b
+#define PCI_DEVICE_ID_TI_1225 0xac1c
+#define PCI_DEVICE_ID_TI_1251A 0xac1d
+#define PCI_DEVICE_ID_TI_1211 0xac1e
+#define PCI_DEVICE_ID_TI_1251B 0xac1f
+#define PCI_DEVICE_ID_TI_4410 0xac41
+#define PCI_DEVICE_ID_TI_4451 0xac42
+#define PCI_DEVICE_ID_TI_4510 0xac44
+#define PCI_DEVICE_ID_TI_4520 0xac46
+#define PCI_DEVICE_ID_TI_7510 0xac47
+#define PCI_DEVICE_ID_TI_7610 0xac48
+#define PCI_DEVICE_ID_TI_7410 0xac49
+#define PCI_DEVICE_ID_TI_1410 0xac50
+#define PCI_DEVICE_ID_TI_1420 0xac51
+#define PCI_DEVICE_ID_TI_1451A 0xac52
+#define PCI_DEVICE_ID_TI_1620 0xac54
+#define PCI_DEVICE_ID_TI_1520 0xac55
+#define PCI_DEVICE_ID_TI_1510 0xac56
+#define PCI_DEVICE_ID_TI_X620 0xac8d
+#define PCI_DEVICE_ID_TI_X420 0xac8e
+
+#define PCI_VENDOR_ID_SONY 0x104d
+
+
+/* Winbond have two vendor IDs! See 0x10ad as well */
+#define PCI_VENDOR_ID_WINBOND2 0x1050
+#define PCI_DEVICE_ID_WINBOND2_89C940F 0x5a5a
+#define PCI_DEVICE_ID_WINBOND2_6692 0x6692
+
+#define PCI_VENDOR_ID_ANIGMA 0x1051
+#define PCI_DEVICE_ID_ANIGMA_MC145575 0x0100
+
+#define PCI_VENDOR_ID_EFAR 0x1055
+#define PCI_DEVICE_ID_EFAR_SLC90E66_1 0x9130
+#define PCI_DEVICE_ID_EFAR_SLC90E66_3 0x9463
+
+#define PCI_VENDOR_ID_MOTOROLA 0x1057
+#define PCI_DEVICE_ID_MOTOROLA_MPC105 0x0001
+#define PCI_DEVICE_ID_MOTOROLA_MPC106 0x0002
+#define PCI_DEVICE_ID_MOTOROLA_MPC107 0x0004
+#define PCI_DEVICE_ID_MOTOROLA_RAVEN 0x4801
+#define PCI_DEVICE_ID_MOTOROLA_FALCON 0x4802
+#define PCI_DEVICE_ID_MOTOROLA_HAWK 0x4803
+#define PCI_DEVICE_ID_MOTOROLA_HARRIER 0x480b
+#define PCI_DEVICE_ID_MOTOROLA_MPC5200 0x5803
+#define PCI_DEVICE_ID_MOTOROLA_MPC5200B 0x5809
+
+#define PCI_VENDOR_ID_PROMISE 0x105a
+#define PCI_DEVICE_ID_PROMISE_20265 0x0d30
+#define PCI_DEVICE_ID_PROMISE_20267 0x4d30
+#define PCI_DEVICE_ID_PROMISE_20246 0x4d33
+#define PCI_DEVICE_ID_PROMISE_20262 0x4d38
+#define PCI_DEVICE_ID_PROMISE_20263 0x0D38
+#define PCI_DEVICE_ID_PROMISE_20268 0x4d68
+#define PCI_DEVICE_ID_PROMISE_20269 0x4d69
+#define PCI_DEVICE_ID_PROMISE_20270 0x6268
+#define PCI_DEVICE_ID_PROMISE_20271 0x6269
+#define PCI_DEVICE_ID_PROMISE_20275 0x1275
+#define PCI_DEVICE_ID_PROMISE_20276 0x5275
+#define PCI_DEVICE_ID_PROMISE_20277 0x7275
+
+
+#define PCI_VENDOR_ID_UMC 0x1060
+#define PCI_DEVICE_ID_UMC_UM8673F 0x0101
+#define PCI_DEVICE_ID_UMC_UM8886BF 0x673a
+#define PCI_DEVICE_ID_UMC_UM8886A 0x886a
+
+
+#define PCI_VENDOR_ID_MYLEX 0x1069
+#define PCI_DEVICE_ID_MYLEX_DAC960_P 0x0001
+#define PCI_DEVICE_ID_MYLEX_DAC960_PD 0x0002
+#define PCI_DEVICE_ID_MYLEX_DAC960_PG 0x0010
+#define PCI_DEVICE_ID_MYLEX_DAC960_LA 0x0020
+#define PCI_DEVICE_ID_MYLEX_DAC960_LP 0x0050
+#define PCI_DEVICE_ID_MYLEX_DAC960_BA 0xBA56
+#define PCI_DEVICE_ID_MYLEX_DAC960_GEM 0xB166
+
+
+#define PCI_VENDOR_ID_APPLE 0x106b
+#define PCI_DEVICE_ID_APPLE_BANDIT 0x0001
+#define PCI_DEVICE_ID_APPLE_HYDRA 0x000e
+#define PCI_DEVICE_ID_APPLE_UNI_N_FW 0x0018
+#define PCI_DEVICE_ID_APPLE_UNI_N_AGP 0x0020
+#define PCI_DEVICE_ID_APPLE_UNI_N_GMAC 0x0021
+#define PCI_DEVICE_ID_APPLE_UNI_N_GMACP 0x0024
+#define PCI_DEVICE_ID_APPLE_UNI_N_AGP_P 0x0027
+#define PCI_DEVICE_ID_APPLE_UNI_N_AGP15 0x002d
+#define PCI_DEVICE_ID_APPLE_UNI_N_PCI15 0x002e
+#define PCI_DEVICE_ID_APPLE_UNI_N_GMAC2 0x0032
+#define PCI_DEVICE_ID_APPLE_UNI_N_ATA 0x0033
+#define PCI_DEVICE_ID_APPLE_UNI_N_AGP2 0x0034
+#define PCI_DEVICE_ID_APPLE_IPID_ATA100 0x003b
+#define PCI_DEVICE_ID_APPLE_K2_ATA100 0x0043
+#define PCI_DEVICE_ID_APPLE_U3_AGP 0x004b
+#define PCI_DEVICE_ID_APPLE_K2_GMAC 0x004c
+#define PCI_DEVICE_ID_APPLE_SH_ATA 0x0050
+#define PCI_DEVICE_ID_APPLE_SH_SUNGEM 0x0051
+#define PCI_DEVICE_ID_APPLE_U3L_AGP 0x0058
+#define PCI_DEVICE_ID_APPLE_U3H_AGP 0x0059
+#define PCI_DEVICE_ID_APPLE_IPID2_AGP 0x0066
+#define PCI_DEVICE_ID_APPLE_IPID2_ATA 0x0069
+#define PCI_DEVICE_ID_APPLE_IPID2_FW 0x006a
+#define PCI_DEVICE_ID_APPLE_IPID2_GMAC 0x006b
+#define PCI_DEVICE_ID_APPLE_TIGON3 0x1645
+
+#define PCI_VENDOR_ID_YAMAHA 0x1073
+#define PCI_DEVICE_ID_YAMAHA_724 0x0004
+#define PCI_DEVICE_ID_YAMAHA_724F 0x000d
+#define PCI_DEVICE_ID_YAMAHA_740 0x000a
+#define PCI_DEVICE_ID_YAMAHA_740C 0x000c
+#define PCI_DEVICE_ID_YAMAHA_744 0x0010
+#define PCI_DEVICE_ID_YAMAHA_754 0x0012
+
+
+#define PCI_VENDOR_ID_QLOGIC 0x1077
+#define PCI_DEVICE_ID_QLOGIC_ISP10160 0x1016
+#define PCI_DEVICE_ID_QLOGIC_ISP1020 0x1020
+#define PCI_DEVICE_ID_QLOGIC_ISP1080 0x1080
+#define PCI_DEVICE_ID_QLOGIC_ISP12160 0x1216
+#define PCI_DEVICE_ID_QLOGIC_ISP1240 0x1240
+#define PCI_DEVICE_ID_QLOGIC_ISP1280 0x1280
+#define PCI_DEVICE_ID_QLOGIC_ISP2100 0x2100
+#define PCI_DEVICE_ID_QLOGIC_ISP2200 0x2200
+#define PCI_DEVICE_ID_QLOGIC_ISP2300 0x2300
+#define PCI_DEVICE_ID_QLOGIC_ISP2312 0x2312
+#define PCI_DEVICE_ID_QLOGIC_ISP2322 0x2322
+#define PCI_DEVICE_ID_QLOGIC_ISP6312 0x6312
+#define PCI_DEVICE_ID_QLOGIC_ISP6322 0x6322
+#define PCI_DEVICE_ID_QLOGIC_ISP2422 0x2422
+#define PCI_DEVICE_ID_QLOGIC_ISP2432 0x2432
+#define PCI_DEVICE_ID_QLOGIC_ISP2512 0x2512
+#define PCI_DEVICE_ID_QLOGIC_ISP2522 0x2522
+#define PCI_DEVICE_ID_QLOGIC_ISP5422 0x5422
+#define PCI_DEVICE_ID_QLOGIC_ISP5432 0x5432
+
+#define PCI_VENDOR_ID_CYRIX 0x1078
+#define PCI_DEVICE_ID_CYRIX_5510 0x0000
+#define PCI_DEVICE_ID_CYRIX_PCI_MASTER 0x0001
+#define PCI_DEVICE_ID_CYRIX_5520 0x0002
+#define PCI_DEVICE_ID_CYRIX_5530_LEGACY 0x0100
+#define PCI_DEVICE_ID_CYRIX_5530_IDE 0x0102
+#define PCI_DEVICE_ID_CYRIX_5530_AUDIO 0x0103
+#define PCI_DEVICE_ID_CYRIX_5530_VIDEO 0x0104
+
+
+
+#define PCI_VENDOR_ID_CONTAQ 0x1080
+#define PCI_DEVICE_ID_CONTAQ_82C693 0xc693
+
+
+#define PCI_VENDOR_ID_OLICOM 0x108d
+#define PCI_DEVICE_ID_OLICOM_OC2325 0x0012
+#define PCI_DEVICE_ID_OLICOM_OC2183 0x0013
+#define PCI_DEVICE_ID_OLICOM_OC2326 0x0014
+
+#define PCI_VENDOR_ID_SUN 0x108e
+#define PCI_DEVICE_ID_SUN_EBUS 0x1000
+#define PCI_DEVICE_ID_SUN_HAPPYMEAL 0x1001
+#define PCI_DEVICE_ID_SUN_RIO_EBUS 0x1100
+#define PCI_DEVICE_ID_SUN_RIO_GEM 0x1101
+#define PCI_DEVICE_ID_SUN_RIO_1394 0x1102
+#define PCI_DEVICE_ID_SUN_RIO_USB 0x1103
+#define PCI_DEVICE_ID_SUN_GEM 0x2bad
+#define PCI_DEVICE_ID_SUN_SIMBA 0x5000
+#define PCI_DEVICE_ID_SUN_PBM 0x8000
+#define PCI_DEVICE_ID_SUN_SCHIZO 0x8001
+#define PCI_DEVICE_ID_SUN_SABRE 0xa000
+#define PCI_DEVICE_ID_SUN_HUMMINGBIRD 0xa001
+#define PCI_DEVICE_ID_SUN_TOMATILLO 0xa801
+#define PCI_DEVICE_ID_SUN_CASSINI 0xabba
+
+#define PCI_VENDOR_ID_CMD 0x1095
+#define PCI_DEVICE_ID_CMD_643 0x0643
+#define PCI_DEVICE_ID_CMD_646 0x0646
+#define PCI_DEVICE_ID_CMD_648 0x0648
+#define PCI_DEVICE_ID_CMD_649 0x0649
+
+#define PCI_DEVICE_ID_SII_680 0x0680
+#define PCI_DEVICE_ID_SII_3112 0x3112
+#define PCI_DEVICE_ID_SII_1210SA 0x0240
+
+
+#define PCI_VENDOR_ID_BROOKTREE 0x109e
+#define PCI_DEVICE_ID_BROOKTREE_878 0x0878
+#define PCI_DEVICE_ID_BROOKTREE_879 0x0879
+
+
+#define PCI_VENDOR_ID_SGI 0x10a9
+#define PCI_DEVICE_ID_SGI_IOC3 0x0003
+#define PCI_DEVICE_ID_SGI_IOC4 0x100a
+#define PCI_VENDOR_ID_SGI_LITHIUM 0x1002
+
+
+#define PCI_VENDOR_ID_WINBOND 0x10ad
+#define PCI_DEVICE_ID_WINBOND_82C105 0x0105
+#define PCI_DEVICE_ID_WINBOND_83C553 0x0565
+
+
+#define PCI_VENDOR_ID_PLX 0x10b5
+#define PCI_DEVICE_ID_PLX_R685 0x1030
+#define PCI_DEVICE_ID_PLX_ROMULUS 0x106a
+#define PCI_DEVICE_ID_PLX_SPCOM800 0x1076
+#define PCI_DEVICE_ID_PLX_1077 0x1077
+#define PCI_DEVICE_ID_PLX_SPCOM200 0x1103
+#define PCI_DEVICE_ID_PLX_DJINN_ITOO 0x1151
+#define PCI_DEVICE_ID_PLX_R753 0x1152
+#define PCI_DEVICE_ID_PLX_OLITEC 0x1187
+#define PCI_DEVICE_ID_PLX_PCI200SYN 0x3196
+#define PCI_DEVICE_ID_PLX_9050 0x9050
+#define PCI_DEVICE_ID_PLX_9080 0x9080
+#define PCI_DEVICE_ID_PLX_GTEK_SERIAL2 0xa001
+
+#define PCI_VENDOR_ID_MADGE 0x10b6
+#define PCI_DEVICE_ID_MADGE_MK2 0x0002
+
+#define PCI_VENDOR_ID_3COM 0x10b7
+#define PCI_DEVICE_ID_3COM_3C985 0x0001
+#define PCI_DEVICE_ID_3COM_3C940 0x1700
+#define PCI_DEVICE_ID_3COM_3C339 0x3390
+#define PCI_DEVICE_ID_3COM_3C359 0x3590
+#define PCI_DEVICE_ID_3COM_3C940B 0x80eb
+#define PCI_DEVICE_ID_3COM_3CR990 0x9900
+#define PCI_DEVICE_ID_3COM_3CR990_TX_95 0x9902
+#define PCI_DEVICE_ID_3COM_3CR990_TX_97 0x9903
+#define PCI_DEVICE_ID_3COM_3CR990B 0x9904
+#define PCI_DEVICE_ID_3COM_3CR990_FX 0x9905
+#define PCI_DEVICE_ID_3COM_3CR990SVR95 0x9908
+#define PCI_DEVICE_ID_3COM_3CR990SVR97 0x9909
+#define PCI_DEVICE_ID_3COM_3CR990SVR 0x990a
+
+
+#define PCI_VENDOR_ID_AL 0x10b9
+#define PCI_DEVICE_ID_AL_M1533 0x1533
+#define PCI_DEVICE_ID_AL_M1535 0x1535
+#define PCI_DEVICE_ID_AL_M1541 0x1541
+#define PCI_DEVICE_ID_AL_M1563 0x1563
+#define PCI_DEVICE_ID_AL_M1621 0x1621
+#define PCI_DEVICE_ID_AL_M1631 0x1631
+#define PCI_DEVICE_ID_AL_M1632 0x1632
+#define PCI_DEVICE_ID_AL_M1641 0x1641
+#define PCI_DEVICE_ID_AL_M1644 0x1644
+#define PCI_DEVICE_ID_AL_M1647 0x1647
+#define PCI_DEVICE_ID_AL_M1651 0x1651
+#define PCI_DEVICE_ID_AL_M1671 0x1671
+#define PCI_DEVICE_ID_AL_M1681 0x1681
+#define PCI_DEVICE_ID_AL_M1683 0x1683
+#define PCI_DEVICE_ID_AL_M1689 0x1689
+#define PCI_DEVICE_ID_AL_M5219 0x5219
+#define PCI_DEVICE_ID_AL_M5228 0x5228
+#define PCI_DEVICE_ID_AL_M5229 0x5229
+#define PCI_DEVICE_ID_AL_M5451 0x5451
+#define PCI_DEVICE_ID_AL_M7101 0x7101
+
+
+
+#define PCI_VENDOR_ID_NEOMAGIC 0x10c8
+#define PCI_DEVICE_ID_NEOMAGIC_NM256AV_AUDIO 0x8005
+#define PCI_DEVICE_ID_NEOMAGIC_NM256ZX_AUDIO 0x8006
+#define PCI_DEVICE_ID_NEOMAGIC_NM256XL_PLUS_AUDIO 0x8016
+
+
+#define PCI_VENDOR_ID_TCONRAD 0x10da
+#define PCI_DEVICE_ID_TCONRAD_TOKENRING 0x0508
+
+
+#define PCI_VENDOR_ID_NVIDIA 0x10de
+#define PCI_DEVICE_ID_NVIDIA_TNT 0x0020
+#define PCI_DEVICE_ID_NVIDIA_TNT2 0x0028
+#define PCI_DEVICE_ID_NVIDIA_UTNT2 0x0029
+#define PCI_DEVICE_ID_NVIDIA_TNT_UNKNOWN 0x002a
+#define PCI_DEVICE_ID_NVIDIA_VTNT2 0x002C
+#define PCI_DEVICE_ID_NVIDIA_UVTNT2 0x002D
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SMBUS 0x0034
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE 0x0035
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA 0x0036
+#define PCI_DEVICE_ID_NVIDIA_NVENET_10 0x0037
+#define PCI_DEVICE_ID_NVIDIA_NVENET_11 0x0038
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2 0x003e
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800_ULTRA 0x0040
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800 0x0041
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800_LE 0x0042
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800_GT 0x0045
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_4000 0x004E
+#define PCI_DEVICE_ID_NVIDIA_NFORCE4_SMBUS 0x0052
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE 0x0053
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA 0x0054
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2 0x0055
+#define PCI_DEVICE_ID_NVIDIA_NVENET_8 0x0056
+#define PCI_DEVICE_ID_NVIDIA_NVENET_9 0x0057
+#define PCI_DEVICE_ID_NVIDIA_CK804_AUDIO 0x0059
+#define PCI_DEVICE_ID_NVIDIA_CK804_PCIE 0x005d
+#define PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS 0x0064
+#define PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE 0x0065
+#define PCI_DEVICE_ID_NVIDIA_NVENET_2 0x0066
+#define PCI_DEVICE_ID_NVIDIA_MCP2_MODEM 0x0069
+#define PCI_DEVICE_ID_NVIDIA_MCP2_AUDIO 0x006a
+#define PCI_DEVICE_ID_NVIDIA_NFORCE2S_SMBUS 0x0084
+#define PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE 0x0085
+#define PCI_DEVICE_ID_NVIDIA_NVENET_4 0x0086
+#define PCI_DEVICE_ID_NVIDIA_MCP2S_MODEM 0x0089
+#define PCI_DEVICE_ID_NVIDIA_CK8_AUDIO 0x008a
+#define PCI_DEVICE_ID_NVIDIA_NVENET_5 0x008c
+#define PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA 0x008e
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_7800_GT 0x0090
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_7800_GTX 0x0091
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_7800 0x0098
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_7800_GTX 0x0099
+#define PCI_DEVICE_ID_NVIDIA_ITNT2 0x00A0
+#define PCI_DEVICE_ID_GEFORCE_6800A 0x00c1
+#define PCI_DEVICE_ID_GEFORCE_6800A_LE 0x00c2
+#define PCI_DEVICE_ID_GEFORCE_GO_6800 0x00c8
+#define PCI_DEVICE_ID_GEFORCE_GO_6800_ULTRA 0x00c9
+#define PCI_DEVICE_ID_QUADRO_FX_GO1400 0x00cc
+#define PCI_DEVICE_ID_QUADRO_FX_1400 0x00ce
+#define PCI_DEVICE_ID_NVIDIA_NFORCE3 0x00d1
+#define PCI_DEVICE_ID_NVIDIA_NFORCE3_SMBUS 0x00d4
+#define PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE 0x00d5
+#define PCI_DEVICE_ID_NVIDIA_NVENET_3 0x00d6
+#define PCI_DEVICE_ID_NVIDIA_MCP3_MODEM 0x00d9
+#define PCI_DEVICE_ID_NVIDIA_MCP3_AUDIO 0x00da
+#define PCI_DEVICE_ID_NVIDIA_NVENET_7 0x00df
+#define PCI_DEVICE_ID_NVIDIA_NFORCE3S 0x00e1
+#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA 0x00e3
+#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SMBUS 0x00e4
+#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE 0x00e5
+#define PCI_DEVICE_ID_NVIDIA_NVENET_6 0x00e6
+#define PCI_DEVICE_ID_NVIDIA_CK8S_AUDIO 0x00ea
+#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2 0x00ee
+#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_ALT1 0x00f0
+#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6600_ALT1 0x00f1
+#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6600_ALT2 0x00f2
+#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6200_ALT1 0x00f3
+#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_GT 0x00f9
+#define PCIE_DEVICE_ID_NVIDIA_QUADRO_NVS280 0x00fd
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_SDR 0x0100
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_DDR 0x0101
+#define PCI_DEVICE_ID_NVIDIA_QUADRO 0x0103
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_MX 0x0110
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_MX2 0x0111
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_GO 0x0112
+#define PCI_DEVICE_ID_NVIDIA_QUADRO2_MXR 0x0113
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6600_GT 0x0140
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6600 0x0141
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6610_XL 0x0145
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_540 0x014E
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6200 0x014F
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_GTS 0x0150
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_GTS2 0x0151
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_ULTRA 0x0152
+#define PCI_DEVICE_ID_NVIDIA_QUADRO2_PRO 0x0153
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6200_TURBOCACHE 0x0161
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6200 0x0164
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6250 0x0166
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6200_1 0x0167
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6250_1 0x0168
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_460 0x0170
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440 0x0171
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_420 0x0172
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440_SE 0x0173
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_440_GO 0x0174
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_420_GO 0x0175
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_420_GO_M32 0x0176
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_460_GO 0x0177
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_500XGL 0x0178
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_440_GO_M64 0x0179
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_200 0x017A
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_550XGL 0x017B
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_500_GOGL 0x017C
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_410_GO_M16 0x017D
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440_8X 0x0181
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440SE_8X 0x0182
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_420_8X 0x0183
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_4000 0x0185
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_448_GO 0x0186
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_488_GO 0x0187
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_580_XGL 0x0188
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_MAC 0x0189
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_280_NVS 0x018A
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_380_XGL 0x018B
+#define PCI_DEVICE_ID_NVIDIA_IGEFORCE2 0x01a0
+#define PCI_DEVICE_ID_NVIDIA_NFORCE 0x01a4
+#define PCI_DEVICE_ID_NVIDIA_MCP1_AUDIO 0x01b1
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_SMBUS 0x01b4
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_IDE 0x01bc
+#define PCI_DEVICE_ID_NVIDIA_MCP1_MODEM 0x01c1
+#define PCI_DEVICE_ID_NVIDIA_NVENET_1 0x01c3
+#define PCI_DEVICE_ID_NVIDIA_NFORCE2 0x01e0
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE3 0x0200
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE3_1 0x0201
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE3_2 0x0202
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_DDC 0x0203
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800B 0x0211
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800B_LE 0x0212
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800B_GT 0x0215
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4600 0x0250
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4400 0x0251
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4200 0x0253
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_900XGL 0x0258
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_750XGL 0x0259
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_700XGL 0x025B
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS 0x0264
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE 0x0265
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA 0x0266
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2 0x0267
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS 0x0368
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE 0x036E
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA 0x037E
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2 0x037F
+#define PCI_DEVICE_ID_NVIDIA_NVENET_12 0x0268
+#define PCI_DEVICE_ID_NVIDIA_NVENET_13 0x0269
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800 0x0280
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800_8X 0x0281
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800SE 0x0282
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_4200_GO 0x0286
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_980_XGL 0x0288
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_780_XGL 0x0289
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_700_GOGL 0x028C
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5800_ULTRA 0x0301
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5800 0x0302
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_2000 0x0308
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1000 0x0309
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5600_ULTRA 0x0311
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5600 0x0312
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5600SE 0x0314
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5600 0x031A
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5650 0x031B
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_GO700 0x031C
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200 0x0320
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200_ULTRA 0x0321
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200_1 0x0322
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200SE 0x0323
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5200 0x0324
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5250 0x0325
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5500 0x0326
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5100 0x0327
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5250_32 0x0328
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO_5200 0x0329
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_NVS_280_PCI 0x032A
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_500 0x032B
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5300 0x032C
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5100 0x032D
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900_ULTRA 0x0330
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900 0x0331
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900XT 0x0332
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5950_ULTRA 0x0333
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900ZT 0x0334
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_3000 0x0338
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_700 0x033F
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700_ULTRA 0x0341
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700 0x0342
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700LE 0x0343
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700VE 0x0344
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5700_1 0x0347
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5700_2 0x0348
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_GO1000 0x034C
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1100 0x034E
+#define PCI_DEVICE_ID_NVIDIA_NVENET_14 0x0372
+#define PCI_DEVICE_ID_NVIDIA_NVENET_15 0x0373
+#define PCI_DEVICE_ID_NVIDIA_NVENET_16 0x03E5
+#define PCI_DEVICE_ID_NVIDIA_NVENET_17 0x03E6
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA 0x03E7
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE 0x03EC
+#define PCI_DEVICE_ID_NVIDIA_NVENET_18 0x03EE
+#define PCI_DEVICE_ID_NVIDIA_NVENET_19 0x03EF
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2 0x03F6
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3 0x03F7
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE 0x0448
+#define PCI_DEVICE_ID_NVIDIA_NVENET_20 0x0450
+#define PCI_DEVICE_ID_NVIDIA_NVENET_21 0x0451
+#define PCI_DEVICE_ID_NVIDIA_NVENET_22 0x0452
+#define PCI_DEVICE_ID_NVIDIA_NVENET_23 0x0453
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE 0x0560
+
+#define PCI_VENDOR_ID_IMS 0x10e0
+#define PCI_DEVICE_ID_IMS_TT128 0x9128
+#define PCI_DEVICE_ID_IMS_TT3D 0x9135
+
+
+
+
+#define PCI_VENDOR_ID_INTERG 0x10ea
+#define PCI_DEVICE_ID_INTERG_1682 0x1682
+#define PCI_DEVICE_ID_INTERG_2000 0x2000
+#define PCI_DEVICE_ID_INTERG_2010 0x2010
+#define PCI_DEVICE_ID_INTERG_5000 0x5000
+#define PCI_DEVICE_ID_INTERG_5050 0x5050
+
+#define PCI_VENDOR_ID_REALTEK 0x10ec
+#define PCI_DEVICE_ID_REALTEK_8139 0x8139
+
+#define PCI_VENDOR_ID_XILINX 0x10ee
+#define PCI_DEVICE_ID_RME_DIGI96 0x3fc0
+#define PCI_DEVICE_ID_RME_DIGI96_8 0x3fc1
+#define PCI_DEVICE_ID_RME_DIGI96_8_PRO 0x3fc2
+#define PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST 0x3fc3
+#define PCI_DEVICE_ID_XILINX_HAMMERFALL_DSP 0x3fc5
+#define PCI_DEVICE_ID_XILINX_HAMMERFALL_DSP_MADI 0x3fc6
+
+
+#define PCI_VENDOR_ID_INIT 0x1101
+
+#define PCI_VENDOR_ID_CREATIVE 0x1102 /* duplicate: ECTIVA */
+#define PCI_DEVICE_ID_CREATIVE_EMU10K1 0x0002
+
+#define PCI_VENDOR_ID_ECTIVA 0x1102 /* duplicate: CREATIVE */
+#define PCI_DEVICE_ID_ECTIVA_EV1938 0x8938
+
+#define PCI_VENDOR_ID_TTI 0x1103
+#define PCI_DEVICE_ID_TTI_HPT343 0x0003
+#define PCI_DEVICE_ID_TTI_HPT366 0x0004
+#define PCI_DEVICE_ID_TTI_HPT372 0x0005
+#define PCI_DEVICE_ID_TTI_HPT302 0x0006
+#define PCI_DEVICE_ID_TTI_HPT371 0x0007
+#define PCI_DEVICE_ID_TTI_HPT374 0x0008
+#define PCI_DEVICE_ID_TTI_HPT372N 0x0009 /* apparently a 372N variant? */
+
+#define PCI_VENDOR_ID_VIA 0x1106
+#define PCI_DEVICE_ID_VIA_8763_0 0x0198
+#define PCI_DEVICE_ID_VIA_8380_0 0x0204
+#define PCI_DEVICE_ID_VIA_3238_0 0x0238
+#define PCI_DEVICE_ID_VIA_PT880 0x0258
+#define PCI_DEVICE_ID_VIA_PT880ULTRA 0x0308
+#define PCI_DEVICE_ID_VIA_PX8X0_0 0x0259
+#define PCI_DEVICE_ID_VIA_3269_0 0x0269
+#define PCI_DEVICE_ID_VIA_K8T800PRO_0 0x0282
+#define PCI_DEVICE_ID_VIA_3296_0 0x0296
+#define PCI_DEVICE_ID_VIA_8363_0 0x0305
+#define PCI_DEVICE_ID_VIA_P4M800CE 0x0314
+#define PCI_DEVICE_ID_VIA_8371_0 0x0391
+#define PCI_DEVICE_ID_VIA_8501_0 0x0501
+#define PCI_DEVICE_ID_VIA_82C561 0x0561
+#define PCI_DEVICE_ID_VIA_82C586_1 0x0571
+#define PCI_DEVICE_ID_VIA_82C576 0x0576
+#define PCI_DEVICE_ID_VIA_SATA_EIDE 0x0581
+#define PCI_DEVICE_ID_VIA_82C586_0 0x0586
+#define PCI_DEVICE_ID_VIA_82C596 0x0596
+#define PCI_DEVICE_ID_VIA_82C597_0 0x0597
+#define PCI_DEVICE_ID_VIA_82C598_0 0x0598
+#define PCI_DEVICE_ID_VIA_8601_0 0x0601
+#define PCI_DEVICE_ID_VIA_8605_0 0x0605
+#define PCI_DEVICE_ID_VIA_82C686 0x0686
+#define PCI_DEVICE_ID_VIA_82C691_0 0x0691
+#define PCI_DEVICE_ID_VIA_82C576_1 0x1571
+#define PCI_DEVICE_ID_VIA_82C586_2 0x3038
+#define PCI_DEVICE_ID_VIA_82C586_3 0x3040
+#define PCI_DEVICE_ID_VIA_82C596_3 0x3050
+#define PCI_DEVICE_ID_VIA_82C596B_3 0x3051
+#define PCI_DEVICE_ID_VIA_82C686_4 0x3057
+#define PCI_DEVICE_ID_VIA_82C686_5 0x3058
+#define PCI_DEVICE_ID_VIA_8233_5 0x3059
+#define PCI_DEVICE_ID_VIA_8233_0 0x3074
+#define PCI_DEVICE_ID_VIA_8633_0 0x3091
+#define PCI_DEVICE_ID_VIA_8367_0 0x3099
+#define PCI_DEVICE_ID_VIA_8653_0 0x3101
+#define PCI_DEVICE_ID_VIA_8622 0x3102
+#define PCI_DEVICE_ID_VIA_8235_USB_2 0x3104
+#define PCI_DEVICE_ID_VIA_8233C_0 0x3109
+#define PCI_DEVICE_ID_VIA_8361 0x3112
+#define PCI_DEVICE_ID_VIA_XM266 0x3116
+#define PCI_DEVICE_ID_VIA_612X 0x3119
+#define PCI_DEVICE_ID_VIA_862X_0 0x3123
+#define PCI_DEVICE_ID_VIA_8753_0 0x3128
+#define PCI_DEVICE_ID_VIA_8233A 0x3147
+#define PCI_DEVICE_ID_VIA_8703_51_0 0x3148
+#define PCI_DEVICE_ID_VIA_8237_SATA 0x3149
+#define PCI_DEVICE_ID_VIA_XN266 0x3156
+#define PCI_DEVICE_ID_VIA_6410 0x3164
+#define PCI_DEVICE_ID_VIA_8754C_0 0x3168
+#define PCI_DEVICE_ID_VIA_8235 0x3177
+#define PCI_DEVICE_ID_VIA_8385_0 0x3188
+#define PCI_DEVICE_ID_VIA_8377_0 0x3189
+#define PCI_DEVICE_ID_VIA_8378_0 0x3205
+#define PCI_DEVICE_ID_VIA_8783_0 0x3208
+#define PCI_DEVICE_ID_VIA_8237 0x3227
+#define PCI_DEVICE_ID_VIA_8251 0x3287
+#define PCI_DEVICE_ID_VIA_8237A 0x3337
+#define PCI_DEVICE_ID_VIA_8231 0x8231
+#define PCI_DEVICE_ID_VIA_8231_4 0x8235
+#define PCI_DEVICE_ID_VIA_8365_1 0x8305
+#define PCI_DEVICE_ID_VIA_CX700 0x8324
+#define PCI_DEVICE_ID_VIA_8371_1 0x8391
+#define PCI_DEVICE_ID_VIA_82C598_1 0x8598
+#define PCI_DEVICE_ID_VIA_838X_1 0xB188
+#define PCI_DEVICE_ID_VIA_83_87XX_1 0xB198
+
+#define PCI_VENDOR_ID_SIEMENS 0x110A
+#define PCI_DEVICE_ID_SIEMENS_DSCC4 0x2102
+
+
+#define PCI_VENDOR_ID_VORTEX 0x1119
+#define PCI_DEVICE_ID_VORTEX_GDT60x0 0x0000
+#define PCI_DEVICE_ID_VORTEX_GDT6000B 0x0001
+#define PCI_DEVICE_ID_VORTEX_GDT6x10 0x0002
+#define PCI_DEVICE_ID_VORTEX_GDT6x20 0x0003
+#define PCI_DEVICE_ID_VORTEX_GDT6530 0x0004
+#define PCI_DEVICE_ID_VORTEX_GDT6550 0x0005
+#define PCI_DEVICE_ID_VORTEX_GDT6x17 0x0006
+#define PCI_DEVICE_ID_VORTEX_GDT6x27 0x0007
+#define PCI_DEVICE_ID_VORTEX_GDT6537 0x0008
+#define PCI_DEVICE_ID_VORTEX_GDT6557 0x0009
+#define PCI_DEVICE_ID_VORTEX_GDT6x15 0x000a
+#define PCI_DEVICE_ID_VORTEX_GDT6x25 0x000b
+#define PCI_DEVICE_ID_VORTEX_GDT6535 0x000c
+#define PCI_DEVICE_ID_VORTEX_GDT6555 0x000d
+#define PCI_DEVICE_ID_VORTEX_GDT6x17RP 0x0100
+#define PCI_DEVICE_ID_VORTEX_GDT6x27RP 0x0101
+#define PCI_DEVICE_ID_VORTEX_GDT6537RP 0x0102
+#define PCI_DEVICE_ID_VORTEX_GDT6557RP 0x0103
+#define PCI_DEVICE_ID_VORTEX_GDT6x11RP 0x0104
+#define PCI_DEVICE_ID_VORTEX_GDT6x21RP 0x0105
+
+#define PCI_VENDOR_ID_EF 0x111a
+#define PCI_DEVICE_ID_EF_ATM_FPGA 0x0000
+#define PCI_DEVICE_ID_EF_ATM_ASIC 0x0002
+#define PCI_VENDOR_ID_EF_ATM_LANAI2 0x0003
+#define PCI_VENDOR_ID_EF_ATM_LANAIHB 0x0005
+
+#define PCI_VENDOR_ID_IDT 0x111d
+#define PCI_DEVICE_ID_IDT_IDT77201 0x0001
+
+#define PCI_VENDOR_ID_FORE 0x1127
+#define PCI_DEVICE_ID_FORE_PCA200E 0x0300
+
+
+#define PCI_VENDOR_ID_PHILIPS 0x1131
+#define PCI_DEVICE_ID_PHILIPS_SAA7146 0x7146
+#define PCI_DEVICE_ID_PHILIPS_SAA9730 0x9730
+
+#define PCI_VENDOR_ID_EICON 0x1133
+#define PCI_DEVICE_ID_EICON_DIVA20 0xe002
+#define PCI_DEVICE_ID_EICON_DIVA20_U 0xe004
+#define PCI_DEVICE_ID_EICON_DIVA201 0xe005
+#define PCI_DEVICE_ID_EICON_DIVA202 0xe00b
+#define PCI_DEVICE_ID_EICON_MAESTRA 0xe010
+#define PCI_DEVICE_ID_EICON_MAESTRAQ 0xe012
+#define PCI_DEVICE_ID_EICON_MAESTRAQ_U 0xe013
+#define PCI_DEVICE_ID_EICON_MAESTRAP 0xe014
+
+#define PCI_VENDOR_ID_ZIATECH 0x1138
+#define PCI_DEVICE_ID_ZIATECH_5550_HC 0x5550
+
+
+
+#define PCI_VENDOR_ID_SYSKONNECT 0x1148
+#define PCI_DEVICE_ID_SYSKONNECT_TR 0x4200
+#define PCI_DEVICE_ID_SYSKONNECT_GE 0x4300
+#define PCI_DEVICE_ID_SYSKONNECT_YU 0x4320
+#define PCI_DEVICE_ID_SYSKONNECT_9DXX 0x4400
+#define PCI_DEVICE_ID_SYSKONNECT_9MXX 0x4500
+
+
+#define PCI_VENDOR_ID_DIGI 0x114f
+#define PCI_DEVICE_ID_DIGI_DF_M_IOM2_E 0x0070
+#define PCI_DEVICE_ID_DIGI_DF_M_E 0x0071
+#define PCI_DEVICE_ID_DIGI_DF_M_IOM2_A 0x0072
+#define PCI_DEVICE_ID_DIGI_DF_M_A 0x0073
+#define PCI_DEVICE_ID_NEO_2DB9 0x00C8
+#define PCI_DEVICE_ID_NEO_2DB9PRI 0x00C9
+#define PCI_DEVICE_ID_NEO_2RJ45 0x00CA
+#define PCI_DEVICE_ID_NEO_2RJ45PRI 0x00CB
+
+
+#define PCI_VENDOR_ID_XIRCOM 0x115d
+#define PCI_DEVICE_ID_XIRCOM_RBM56G 0x0101
+#define PCI_DEVICE_ID_XIRCOM_X3201_MDM 0x0103
+
+
+#define PCI_VENDOR_ID_SERVERWORKS 0x1166
+#define PCI_DEVICE_ID_SERVERWORKS_HE 0x0008
+#define PCI_DEVICE_ID_SERVERWORKS_LE 0x0009
+#define PCI_DEVICE_ID_SERVERWORKS_GCNB_LE 0x0017
+#define PCI_DEVICE_ID_SERVERWORKS_EPB 0x0103
+#define PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE 0x0132
+#define PCI_DEVICE_ID_SERVERWORKS_OSB4 0x0200
+#define PCI_DEVICE_ID_SERVERWORKS_CSB5 0x0201
+#define PCI_DEVICE_ID_SERVERWORKS_CSB6 0x0203
+#define PCI_DEVICE_ID_SERVERWORKS_HT1000SB 0x0205
+#define PCI_DEVICE_ID_SERVERWORKS_OSB4IDE 0x0211
+#define PCI_DEVICE_ID_SERVERWORKS_CSB5IDE 0x0212
+#define PCI_DEVICE_ID_SERVERWORKS_CSB6IDE 0x0213
+#define PCI_DEVICE_ID_SERVERWORKS_HT1000IDE 0x0214
+#define PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2 0x0217
+#define PCI_DEVICE_ID_SERVERWORKS_CSB6LPC 0x0227
+
+#define PCI_VENDOR_ID_SBE 0x1176
+#define PCI_DEVICE_ID_SBE_WANXL100 0x0301
+#define PCI_DEVICE_ID_SBE_WANXL200 0x0302
+#define PCI_DEVICE_ID_SBE_WANXL400 0x0104
+
+#define PCI_VENDOR_ID_TOSHIBA 0x1179
+#define PCI_DEVICE_ID_TOSHIBA_PICCOLO 0x0102
+#define PCI_DEVICE_ID_TOSHIBA_PICCOLO_1 0x0103
+#define PCI_DEVICE_ID_TOSHIBA_PICCOLO_2 0x0105
+#define PCI_DEVICE_ID_TOSHIBA_TOPIC95 0x060a
+#define PCI_DEVICE_ID_TOSHIBA_TOPIC97 0x060f
+#define PCI_DEVICE_ID_TOSHIBA_TOPIC100 0x0617
+
+#define PCI_VENDOR_ID_TOSHIBA_2 0x102f
+#define PCI_DEVICE_ID_TOSHIBA_TC35815CF 0x0030
+#define PCI_DEVICE_ID_TOSHIBA_TC86C001_MISC 0x0108
+#define PCI_DEVICE_ID_TOSHIBA_SPIDER_NET 0x01b3
+
+#define PCI_VENDOR_ID_RICOH 0x1180
+#define PCI_DEVICE_ID_RICOH_RL5C465 0x0465
+#define PCI_DEVICE_ID_RICOH_RL5C466 0x0466
+#define PCI_DEVICE_ID_RICOH_RL5C475 0x0475
+#define PCI_DEVICE_ID_RICOH_RL5C476 0x0476
+#define PCI_DEVICE_ID_RICOH_RL5C478 0x0478
+#define PCI_DEVICE_ID_RICOH_R5C822 0x0822
+
+#define PCI_VENDOR_ID_DLINK 0x1186
+#define PCI_DEVICE_ID_DLINK_DGE510T 0x4c00
+
+#define PCI_VENDOR_ID_ARTOP 0x1191
+#define PCI_DEVICE_ID_ARTOP_ATP850UF 0x0005
+#define PCI_DEVICE_ID_ARTOP_ATP860 0x0006
+#define PCI_DEVICE_ID_ARTOP_ATP860R 0x0007
+#define PCI_DEVICE_ID_ARTOP_ATP865 0x0008
+#define PCI_DEVICE_ID_ARTOP_ATP865R 0x0009
+#define PCI_DEVICE_ID_ARTOP_AEC7610 0x8002
+#define PCI_DEVICE_ID_ARTOP_AEC7612UW 0x8010
+#define PCI_DEVICE_ID_ARTOP_AEC7612U 0x8020
+#define PCI_DEVICE_ID_ARTOP_AEC7612S 0x8030
+#define PCI_DEVICE_ID_ARTOP_AEC7612D 0x8040
+#define PCI_DEVICE_ID_ARTOP_AEC7612SUW 0x8050
+#define PCI_DEVICE_ID_ARTOP_8060 0x8060
+
+#define PCI_VENDOR_ID_ZEITNET 0x1193
+#define PCI_DEVICE_ID_ZEITNET_1221 0x0001
+#define PCI_DEVICE_ID_ZEITNET_1225 0x0002
+
+
+#define PCI_VENDOR_ID_FUJITSU_ME 0x119e
+#define PCI_DEVICE_ID_FUJITSU_FS155 0x0001
+#define PCI_DEVICE_ID_FUJITSU_FS50 0x0003
+
+#define PCI_SUBVENDOR_ID_KEYSPAN 0x11a9
+#define PCI_SUBDEVICE_ID_KEYSPAN_SX2 0x5334
+
+#define PCI_VENDOR_ID_MARVELL 0x11ab
+#define PCI_DEVICE_ID_MARVELL_GT64111 0x4146
+#define PCI_DEVICE_ID_MARVELL_GT64260 0x6430
+#define PCI_DEVICE_ID_MARVELL_MV64360 0x6460
+#define PCI_DEVICE_ID_MARVELL_MV64460 0x6480
+
+#define PCI_VENDOR_ID_V3 0x11b0
+#define PCI_DEVICE_ID_V3_V960 0x0001
+#define PCI_DEVICE_ID_V3_V351 0x0002
+
+
+#define PCI_VENDOR_ID_ATT 0x11c1
+#define PCI_DEVICE_ID_ATT_VENUS_MODEM 0x480
+
+
+#define PCI_VENDOR_ID_SPECIALIX 0x11cb
+#define PCI_DEVICE_ID_SPECIALIX_IO8 0x2000
+#define PCI_DEVICE_ID_SPECIALIX_RIO 0x8000
+#define PCI_SUBDEVICE_ID_SPECIALIX_SPEED4 0xa004
+
+
+#define PCI_VENDOR_ID_ANALOG_DEVICES 0x11d4
+#define PCI_DEVICE_ID_AD1889JS 0x1889
+
+
+#define PCI_DEVICE_ID_SEGA_BBA 0x1234
+
+#define PCI_VENDOR_ID_ZORAN 0x11de
+#define PCI_DEVICE_ID_ZORAN_36057 0x6057
+#define PCI_DEVICE_ID_ZORAN_36120 0x6120
+
+
+#define PCI_VENDOR_ID_COMPEX 0x11f6
+#define PCI_DEVICE_ID_COMPEX_ENET100VG4 0x0112
+
+#define PCI_VENDOR_ID_RP 0x11fe
+#define PCI_DEVICE_ID_RP32INTF 0x0001
+#define PCI_DEVICE_ID_RP8INTF 0x0002
+#define PCI_DEVICE_ID_RP16INTF 0x0003
+#define PCI_DEVICE_ID_RP4QUAD 0x0004
+#define PCI_DEVICE_ID_RP8OCTA 0x0005
+#define PCI_DEVICE_ID_RP8J 0x0006
+#define PCI_DEVICE_ID_RP4J 0x0007
+#define PCI_DEVICE_ID_RP8SNI 0x0008
+#define PCI_DEVICE_ID_RP16SNI 0x0009
+#define PCI_DEVICE_ID_RPP4 0x000A
+#define PCI_DEVICE_ID_RPP8 0x000B
+#define PCI_DEVICE_ID_RP4M 0x000D
+#define PCI_DEVICE_ID_RP2_232 0x000E
+#define PCI_DEVICE_ID_RP2_422 0x000F
+#define PCI_DEVICE_ID_URP32INTF 0x0801
+#define PCI_DEVICE_ID_URP8INTF 0x0802
+#define PCI_DEVICE_ID_URP16INTF 0x0803
+#define PCI_DEVICE_ID_URP8OCTA 0x0805
+#define PCI_DEVICE_ID_UPCI_RM3_8PORT 0x080C
+#define PCI_DEVICE_ID_UPCI_RM3_4PORT 0x080D
+#define PCI_DEVICE_ID_CRP16INTF 0x0903
+
+#define PCI_VENDOR_ID_CYCLADES 0x120e
+#define PCI_DEVICE_ID_CYCLOM_Y_Lo 0x0100
+#define PCI_DEVICE_ID_CYCLOM_Y_Hi 0x0101
+#define PCI_DEVICE_ID_CYCLOM_4Y_Lo 0x0102
+#define PCI_DEVICE_ID_CYCLOM_4Y_Hi 0x0103
+#define PCI_DEVICE_ID_CYCLOM_8Y_Lo 0x0104
+#define PCI_DEVICE_ID_CYCLOM_8Y_Hi 0x0105
+#define PCI_DEVICE_ID_CYCLOM_Z_Lo 0x0200
+#define PCI_DEVICE_ID_CYCLOM_Z_Hi 0x0201
+#define PCI_DEVICE_ID_PC300_RX_2 0x0300
+#define PCI_DEVICE_ID_PC300_RX_1 0x0301
+#define PCI_DEVICE_ID_PC300_TE_2 0x0310
+#define PCI_DEVICE_ID_PC300_TE_1 0x0311
+#define PCI_DEVICE_ID_PC300_TE_M_2 0x0320
+#define PCI_DEVICE_ID_PC300_TE_M_1 0x0321
+
+#define PCI_VENDOR_ID_ESSENTIAL 0x120f
+#define PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER 0x0001
+
+#define PCI_VENDOR_ID_O2 0x1217
+#define PCI_DEVICE_ID_O2_6729 0x6729
+#define PCI_DEVICE_ID_O2_6730 0x673a
+#define PCI_DEVICE_ID_O2_6832 0x6832
+#define PCI_DEVICE_ID_O2_6836 0x6836
+
+#define PCI_VENDOR_ID_3DFX 0x121a
+#define PCI_DEVICE_ID_3DFX_VOODOO 0x0001
+#define PCI_DEVICE_ID_3DFX_VOODOO2 0x0002
+#define PCI_DEVICE_ID_3DFX_BANSHEE 0x0003
+#define PCI_DEVICE_ID_3DFX_VOODOO3 0x0005
+#define PCI_DEVICE_ID_3DFX_VOODOO5 0x0009
+
+
+
+#define PCI_VENDOR_ID_AVM 0x1244
+#define PCI_DEVICE_ID_AVM_B1 0x0700
+#define PCI_DEVICE_ID_AVM_C4 0x0800
+#define PCI_DEVICE_ID_AVM_A1 0x0a00
+#define PCI_DEVICE_ID_AVM_A1_V2 0x0e00
+#define PCI_DEVICE_ID_AVM_C2 0x1100
+#define PCI_DEVICE_ID_AVM_T1 0x1200
+
+
+#define PCI_VENDOR_ID_STALLION 0x124d
+
+/* Allied Telesyn */
+#define PCI_VENDOR_ID_AT 0x1259
+#define PCI_SUBDEVICE_ID_AT_2700FX 0x2701
+#define PCI_SUBDEVICE_ID_AT_2701FX 0x2703
+
+#define PCI_VENDOR_ID_ESS 0x125d
+#define PCI_DEVICE_ID_ESS_ESS1968 0x1968
+#define PCI_DEVICE_ID_ESS_ESS1978 0x1978
+#define PCI_DEVICE_ID_ESS_ALLEGRO_1 0x1988
+#define PCI_DEVICE_ID_ESS_ALLEGRO 0x1989
+#define PCI_DEVICE_ID_ESS_CANYON3D_2LE 0x1990
+#define PCI_DEVICE_ID_ESS_CANYON3D_2 0x1992
+#define PCI_DEVICE_ID_ESS_MAESTRO3 0x1998
+#define PCI_DEVICE_ID_ESS_MAESTRO3_1 0x1999
+#define PCI_DEVICE_ID_ESS_MAESTRO3_HW 0x199a
+#define PCI_DEVICE_ID_ESS_MAESTRO3_2 0x199b
+
+#define PCI_VENDOR_ID_SATSAGEM 0x1267
+#define PCI_DEVICE_ID_SATSAGEM_NICCY 0x1016
+
+
+#define PCI_VENDOR_ID_ENSONIQ 0x1274
+#define PCI_DEVICE_ID_ENSONIQ_CT5880 0x5880
+#define PCI_DEVICE_ID_ENSONIQ_ES1370 0x5000
+#define PCI_DEVICE_ID_ENSONIQ_ES1371 0x1371
+
+#define PCI_VENDOR_ID_TRANSMETA 0x1279
+#define PCI_DEVICE_ID_EFFICEON 0x0060
+
+#define PCI_VENDOR_ID_ROCKWELL 0x127A
+
+#define PCI_VENDOR_ID_ITE 0x1283
+#define PCI_DEVICE_ID_ITE_8211 0x8211
+#define PCI_DEVICE_ID_ITE_8212 0x8212
+#define PCI_DEVICE_ID_ITE_8872 0x8872
+#define PCI_DEVICE_ID_ITE_IT8330G_0 0xe886
+
+/* formerly Platform Tech */
+#define PCI_DEVICE_ID_ESS_ESS0100 0x0100
+
+#define PCI_VENDOR_ID_ALTEON 0x12ae
+
+
+#define PCI_SUBVENDOR_ID_CONNECT_TECH 0x12c4
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_232 0x0001
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_232 0x0002
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_232 0x0003
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485 0x0004
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485_4_4 0x0005
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_485 0x0006
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_485_2_2 0x0007
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_485 0x0008
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485_2_6 0x0009
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH081101V1 0x000A
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH041101V1 0x000B
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_20MHZ 0x000C
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_PTM 0x000D
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_NT960PCI 0x0100
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_TITAN_2 0x0201
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_TITAN_4 0x0202
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_232 0x0300
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_232 0x0301
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_232 0x0302
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_1_1 0x0310
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_2 0x0311
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_4 0x0312
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2 0x0320
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4 0x0321
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8 0x0322
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_485 0x0330
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_485 0x0331
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_485 0x0332
+
+
+#define PCI_VENDOR_ID_NVIDIA_SGS 0x12d2
+#define PCI_DEVICE_ID_NVIDIA_SGS_RIVA128 0x0018
+
+#define PCI_SUBVENDOR_ID_CHASE_PCIFAST 0x12E0
+#define PCI_SUBDEVICE_ID_CHASE_PCIFAST4 0x0031
+#define PCI_SUBDEVICE_ID_CHASE_PCIFAST8 0x0021
+#define PCI_SUBDEVICE_ID_CHASE_PCIFAST16 0x0011
+#define PCI_SUBDEVICE_ID_CHASE_PCIFAST16FMC 0x0041
+#define PCI_SUBVENDOR_ID_CHASE_PCIRAS 0x124D
+#define PCI_SUBDEVICE_ID_CHASE_PCIRAS4 0xF001
+#define PCI_SUBDEVICE_ID_CHASE_PCIRAS8 0xF010
+
+#define PCI_VENDOR_ID_AUREAL 0x12eb
+#define PCI_DEVICE_ID_AUREAL_VORTEX_1 0x0001
+#define PCI_DEVICE_ID_AUREAL_VORTEX_2 0x0002
+#define PCI_DEVICE_ID_AUREAL_ADVANTAGE 0x0003
+
+#define PCI_VENDOR_ID_ELECTRONICDESIGNGMBH 0x12f8
+#define PCI_DEVICE_ID_LML_33R10 0x8a02
+
+
+#define PCI_VENDOR_ID_SIIG 0x131f
+#define PCI_SUBVENDOR_ID_SIIG 0x131f
+#define PCI_DEVICE_ID_SIIG_1S_10x_550 0x1000
+#define PCI_DEVICE_ID_SIIG_1S_10x_650 0x1001
+#define PCI_DEVICE_ID_SIIG_1S_10x_850 0x1002
+#define PCI_DEVICE_ID_SIIG_1S1P_10x_550 0x1010
+#define PCI_DEVICE_ID_SIIG_1S1P_10x_650 0x1011
+#define PCI_DEVICE_ID_SIIG_1S1P_10x_850 0x1012
+#define PCI_DEVICE_ID_SIIG_1P_10x 0x1020
+#define PCI_DEVICE_ID_SIIG_2P_10x 0x1021
+#define PCI_DEVICE_ID_SIIG_2S_10x_550 0x1030
+#define PCI_DEVICE_ID_SIIG_2S_10x_650 0x1031
+#define PCI_DEVICE_ID_SIIG_2S_10x_850 0x1032
+#define PCI_DEVICE_ID_SIIG_2S1P_10x_550 0x1034
+#define PCI_DEVICE_ID_SIIG_2S1P_10x_650 0x1035
+#define PCI_DEVICE_ID_SIIG_2S1P_10x_850 0x1036
+#define PCI_DEVICE_ID_SIIG_4S_10x_550 0x1050
+#define PCI_DEVICE_ID_SIIG_4S_10x_650 0x1051
+#define PCI_DEVICE_ID_SIIG_4S_10x_850 0x1052
+#define PCI_DEVICE_ID_SIIG_1S_20x_550 0x2000
+#define PCI_DEVICE_ID_SIIG_1S_20x_650 0x2001
+#define PCI_DEVICE_ID_SIIG_1S_20x_850 0x2002
+#define PCI_DEVICE_ID_SIIG_1P_20x 0x2020
+#define PCI_DEVICE_ID_SIIG_2P_20x 0x2021
+#define PCI_DEVICE_ID_SIIG_2S_20x_550 0x2030
+#define PCI_DEVICE_ID_SIIG_2S_20x_650 0x2031
+#define PCI_DEVICE_ID_SIIG_2S_20x_850 0x2032
+#define PCI_DEVICE_ID_SIIG_2P1S_20x_550 0x2040
+#define PCI_DEVICE_ID_SIIG_2P1S_20x_650 0x2041
+#define PCI_DEVICE_ID_SIIG_2P1S_20x_850 0x2042
+#define PCI_DEVICE_ID_SIIG_1S1P_20x_550 0x2010
+#define PCI_DEVICE_ID_SIIG_1S1P_20x_650 0x2011
+#define PCI_DEVICE_ID_SIIG_1S1P_20x_850 0x2012
+#define PCI_DEVICE_ID_SIIG_4S_20x_550 0x2050
+#define PCI_DEVICE_ID_SIIG_4S_20x_650 0x2051
+#define PCI_DEVICE_ID_SIIG_4S_20x_850 0x2052
+#define PCI_DEVICE_ID_SIIG_2S1P_20x_550 0x2060
+#define PCI_DEVICE_ID_SIIG_2S1P_20x_650 0x2061
+#define PCI_DEVICE_ID_SIIG_2S1P_20x_850 0x2062
+#define PCI_DEVICE_ID_SIIG_8S_20x_550 0x2080
+#define PCI_DEVICE_ID_SIIG_8S_20x_650 0x2081
+#define PCI_DEVICE_ID_SIIG_8S_20x_850 0x2082
+#define PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL 0x2050
+
+#define PCI_VENDOR_ID_RADISYS 0x1331
+
+#define PCI_VENDOR_ID_DOMEX 0x134a
+#define PCI_DEVICE_ID_DOMEX_DMX3191D 0x0001
+
+#define PCI_VENDOR_ID_INTASHIELD 0x135a
+#define PCI_DEVICE_ID_INTASHIELD_IS200 0x0d80
+
+#define PCI_VENDOR_ID_QUATECH 0x135C
+#define PCI_DEVICE_ID_QUATECH_QSC100 0x0010
+#define PCI_DEVICE_ID_QUATECH_DSC100 0x0020
+#define PCI_DEVICE_ID_QUATECH_ESC100D 0x0050
+#define PCI_DEVICE_ID_QUATECH_ESC100M 0x0060
+
+#define PCI_VENDOR_ID_SEALEVEL 0x135e
+#define PCI_DEVICE_ID_SEALEVEL_U530 0x7101
+#define PCI_DEVICE_ID_SEALEVEL_UCOMM2 0x7201
+#define PCI_DEVICE_ID_SEALEVEL_UCOMM422 0x7402
+#define PCI_DEVICE_ID_SEALEVEL_UCOMM232 0x7202
+#define PCI_DEVICE_ID_SEALEVEL_COMM4 0x7401
+#define PCI_DEVICE_ID_SEALEVEL_COMM8 0x7801
+#define PCI_DEVICE_ID_SEALEVEL_UCOMM8 0x7804
+
+#define PCI_VENDOR_ID_HYPERCOPE 0x1365
+#define PCI_DEVICE_ID_HYPERCOPE_PLX 0x9050
+#define PCI_SUBDEVICE_ID_HYPERCOPE_OLD_ERGO 0x0104
+#define PCI_SUBDEVICE_ID_HYPERCOPE_ERGO 0x0106
+#define PCI_SUBDEVICE_ID_HYPERCOPE_METRO 0x0107
+#define PCI_SUBDEVICE_ID_HYPERCOPE_CHAMP2 0x0108
+
+#define PCI_VENDOR_ID_KAWASAKI 0x136b
+#define PCI_DEVICE_ID_MCHIP_KL5A72002 0xff01
+
+#define PCI_VENDOR_ID_CNET 0x1371
+#define PCI_DEVICE_ID_CNET_GIGACARD 0x434e
+
+#define PCI_VENDOR_ID_LMC 0x1376
+#define PCI_DEVICE_ID_LMC_HSSI 0x0003
+#define PCI_DEVICE_ID_LMC_DS3 0x0004
+#define PCI_DEVICE_ID_LMC_SSI 0x0005
+#define PCI_DEVICE_ID_LMC_T1 0x0006
+
+
+#define PCI_VENDOR_ID_NETGEAR 0x1385
+#define PCI_DEVICE_ID_NETGEAR_GA620 0x620a
+
+#define PCI_VENDOR_ID_APPLICOM 0x1389
+#define PCI_DEVICE_ID_APPLICOM_PCIGENERIC 0x0001
+#define PCI_DEVICE_ID_APPLICOM_PCI2000IBS_CAN 0x0002
+#define PCI_DEVICE_ID_APPLICOM_PCI2000PFB 0x0003
+
+#define PCI_VENDOR_ID_MOXA 0x1393
+#define PCI_DEVICE_ID_MOXA_RC7000 0x0001
+#define PCI_DEVICE_ID_MOXA_CP102 0x1020
+#define PCI_DEVICE_ID_MOXA_CP102UL 0x1021
+#define PCI_DEVICE_ID_MOXA_CP102U 0x1022
+#define PCI_DEVICE_ID_MOXA_C104 0x1040
+#define PCI_DEVICE_ID_MOXA_CP104U 0x1041
+#define PCI_DEVICE_ID_MOXA_CP104JU 0x1042
+#define PCI_DEVICE_ID_MOXA_CT114 0x1140
+#define PCI_DEVICE_ID_MOXA_CP114 0x1141
+#define PCI_DEVICE_ID_MOXA_CP118U 0x1180
+#define PCI_DEVICE_ID_MOXA_CP132 0x1320
+#define PCI_DEVICE_ID_MOXA_CP132U 0x1321
+#define PCI_DEVICE_ID_MOXA_CP134U 0x1340
+#define PCI_DEVICE_ID_MOXA_C168 0x1680
+#define PCI_DEVICE_ID_MOXA_CP168U 0x1681
+
+#define PCI_VENDOR_ID_CCD 0x1397
+#define PCI_DEVICE_ID_CCD_2BD0 0x2bd0
+#define PCI_DEVICE_ID_CCD_B000 0xb000
+#define PCI_DEVICE_ID_CCD_B006 0xb006
+#define PCI_DEVICE_ID_CCD_B007 0xb007
+#define PCI_DEVICE_ID_CCD_B008 0xb008
+#define PCI_DEVICE_ID_CCD_B009 0xb009
+#define PCI_DEVICE_ID_CCD_B00A 0xb00a
+#define PCI_DEVICE_ID_CCD_B00B 0xb00b
+#define PCI_DEVICE_ID_CCD_B00C 0xb00c
+#define PCI_DEVICE_ID_CCD_B100 0xb100
+#define PCI_DEVICE_ID_CCD_B700 0xb700
+#define PCI_DEVICE_ID_CCD_B701 0xb701
+
+#define PCI_VENDOR_ID_EXAR 0x13a8
+#define PCI_DEVICE_ID_EXAR_XR17C152 0x0152
+#define PCI_DEVICE_ID_EXAR_XR17C154 0x0154
+#define PCI_DEVICE_ID_EXAR_XR17C158 0x0158
+
+#define PCI_VENDOR_ID_MICROGATE 0x13c0
+#define PCI_DEVICE_ID_MICROGATE_USC 0x0010
+#define PCI_DEVICE_ID_MICROGATE_SCA 0x0030
+
+#define PCI_VENDOR_ID_3WARE 0x13C1
+#define PCI_DEVICE_ID_3WARE_1000 0x1000
+#define PCI_DEVICE_ID_3WARE_7000 0x1001
+#define PCI_DEVICE_ID_3WARE_9000 0x1002
+
+#define PCI_VENDOR_ID_IOMEGA 0x13ca
+#define PCI_DEVICE_ID_IOMEGA_BUZ 0x4231
+
+#define PCI_VENDOR_ID_ABOCOM 0x13D1
+#define PCI_DEVICE_ID_ABOCOM_2BD1 0x2BD1
+
+#define PCI_VENDOR_ID_CMEDIA 0x13f6
+#define PCI_DEVICE_ID_CMEDIA_CM8338A 0x0100
+#define PCI_DEVICE_ID_CMEDIA_CM8338B 0x0101
+#define PCI_DEVICE_ID_CMEDIA_CM8738 0x0111
+#define PCI_DEVICE_ID_CMEDIA_CM8738B 0x0112
+
+#define PCI_VENDOR_ID_LAVA 0x1407
+#define PCI_DEVICE_ID_LAVA_DSERIAL 0x0100 /* 2x 16550 */
+#define PCI_DEVICE_ID_LAVA_QUATRO_A 0x0101 /* 2x 16550, half of 4 port */
+#define PCI_DEVICE_ID_LAVA_QUATRO_B 0x0102 /* 2x 16550, half of 4 port */
+#define PCI_DEVICE_ID_LAVA_OCTO_A 0x0180 /* 4x 16550A, half of 8 port */
+#define PCI_DEVICE_ID_LAVA_OCTO_B 0x0181 /* 4x 16550A, half of 8 port */
+#define PCI_DEVICE_ID_LAVA_PORT_PLUS 0x0200 /* 2x 16650 */
+#define PCI_DEVICE_ID_LAVA_QUAD_A 0x0201 /* 2x 16650, half of 4 port */
+#define PCI_DEVICE_ID_LAVA_QUAD_B 0x0202 /* 2x 16650, half of 4 port */
+#define PCI_DEVICE_ID_LAVA_SSERIAL 0x0500 /* 1x 16550 */
+#define PCI_DEVICE_ID_LAVA_PORT_650 0x0600 /* 1x 16650 */
+#define PCI_DEVICE_ID_LAVA_PARALLEL 0x8000
+#define PCI_DEVICE_ID_LAVA_DUAL_PAR_A 0x8002 /* The Lava Dual Parallel is */
+#define PCI_DEVICE_ID_LAVA_DUAL_PAR_B 0x8003 /* two PCI devices on a card */
+#define PCI_DEVICE_ID_LAVA_BOCA_IOPPAR 0x8800
+
+#define PCI_VENDOR_ID_TIMEDIA 0x1409
+#define PCI_DEVICE_ID_TIMEDIA_1889 0x7168
+
+#define PCI_VENDOR_ID_ICE 0x1412
+#define PCI_DEVICE_ID_ICE_1712 0x1712
+#define PCI_DEVICE_ID_VT1724 0x1724
+
+#define PCI_VENDOR_ID_OXSEMI 0x1415
+#define PCI_DEVICE_ID_OXSEMI_12PCI840 0x8403
+#define PCI_DEVICE_ID_OXSEMI_16PCI954 0x9501
+#define PCI_DEVICE_ID_OXSEMI_16PCI95N 0x9511
+#define PCI_DEVICE_ID_OXSEMI_16PCI954PP 0x9513
+#define PCI_DEVICE_ID_OXSEMI_16PCI952 0x9521
+
+#define PCI_VENDOR_ID_SAMSUNG 0x144d
+
+#define PCI_VENDOR_ID_MYRICOM 0x14c1
+
+#define PCI_VENDOR_ID_TITAN 0x14D2
+#define PCI_DEVICE_ID_TITAN_010L 0x8001
+#define PCI_DEVICE_ID_TITAN_100L 0x8010
+#define PCI_DEVICE_ID_TITAN_110L 0x8011
+#define PCI_DEVICE_ID_TITAN_200L 0x8020
+#define PCI_DEVICE_ID_TITAN_210L 0x8021
+#define PCI_DEVICE_ID_TITAN_400L 0x8040
+#define PCI_DEVICE_ID_TITAN_800L 0x8080
+#define PCI_DEVICE_ID_TITAN_100 0xA001
+#define PCI_DEVICE_ID_TITAN_200 0xA005
+#define PCI_DEVICE_ID_TITAN_400 0xA003
+#define PCI_DEVICE_ID_TITAN_800B 0xA004
+
+#define PCI_VENDOR_ID_PANACOM 0x14d4
+#define PCI_DEVICE_ID_PANACOM_QUADMODEM 0x0400
+#define PCI_DEVICE_ID_PANACOM_DUALMODEM 0x0402
+
+#define PCI_VENDOR_ID_SIPACKETS 0x14d9
+#define PCI_DEVICE_ID_SP1011 0x0010
+
+#define PCI_VENDOR_ID_AFAVLAB 0x14db
+#define PCI_DEVICE_ID_AFAVLAB_P028 0x2180
+#define PCI_DEVICE_ID_AFAVLAB_P030 0x2182
+#define PCI_SUBDEVICE_ID_AFAVLAB_P061 0x2150
+
+#define PCI_VENDOR_ID_BROADCOM 0x14e4
+#define PCI_DEVICE_ID_TIGON3_5752 0x1600
+#define PCI_DEVICE_ID_TIGON3_5752M 0x1601
+#define PCI_DEVICE_ID_TIGON3_5700 0x1644
+#define PCI_DEVICE_ID_TIGON3_5701 0x1645
+#define PCI_DEVICE_ID_TIGON3_5702 0x1646
+#define PCI_DEVICE_ID_TIGON3_5703 0x1647
+#define PCI_DEVICE_ID_TIGON3_5704 0x1648
+#define PCI_DEVICE_ID_TIGON3_5704S_2 0x1649
+#define PCI_DEVICE_ID_NX2_5706 0x164a
+#define PCI_DEVICE_ID_NX2_5708 0x164c
+#define PCI_DEVICE_ID_TIGON3_5702FE 0x164d
+#define PCI_DEVICE_ID_TIGON3_5705 0x1653
+#define PCI_DEVICE_ID_TIGON3_5705_2 0x1654
+#define PCI_DEVICE_ID_TIGON3_5720 0x1658
+#define PCI_DEVICE_ID_TIGON3_5721 0x1659
+#define PCI_DEVICE_ID_TIGON3_5722 0x165a
+#define PCI_DEVICE_ID_TIGON3_5705M 0x165d
+#define PCI_DEVICE_ID_TIGON3_5705M_2 0x165e
+#define PCI_DEVICE_ID_TIGON3_5714 0x1668
+#define PCI_DEVICE_ID_TIGON3_5714S 0x1669
+#define PCI_DEVICE_ID_TIGON3_5780 0x166a
+#define PCI_DEVICE_ID_TIGON3_5780S 0x166b
+#define PCI_DEVICE_ID_TIGON3_5705F 0x166e
+#define PCI_DEVICE_ID_TIGON3_5754M 0x1672
+#define PCI_DEVICE_ID_TIGON3_5755M 0x1673
+#define PCI_DEVICE_ID_TIGON3_5756 0x1674
+#define PCI_DEVICE_ID_TIGON3_5750 0x1676
+#define PCI_DEVICE_ID_TIGON3_5751 0x1677
+#define PCI_DEVICE_ID_TIGON3_5715 0x1678
+#define PCI_DEVICE_ID_TIGON3_5715S 0x1679
+#define PCI_DEVICE_ID_TIGON3_5754 0x167a
+#define PCI_DEVICE_ID_TIGON3_5755 0x167b
+#define PCI_DEVICE_ID_TIGON3_5750M 0x167c
+#define PCI_DEVICE_ID_TIGON3_5751M 0x167d
+#define PCI_DEVICE_ID_TIGON3_5751F 0x167e
+#define PCI_DEVICE_ID_TIGON3_5787M 0x1693
+#define PCI_DEVICE_ID_TIGON3_5782 0x1696
+#define PCI_DEVICE_ID_TIGON3_5786 0x169a
+#define PCI_DEVICE_ID_TIGON3_5787 0x169b
+#define PCI_DEVICE_ID_TIGON3_5788 0x169c
+#define PCI_DEVICE_ID_TIGON3_5789 0x169d
+#define PCI_DEVICE_ID_TIGON3_5702X 0x16a6
+#define PCI_DEVICE_ID_TIGON3_5703X 0x16a7
+#define PCI_DEVICE_ID_TIGON3_5704S 0x16a8
+#define PCI_DEVICE_ID_NX2_5706S 0x16aa
+#define PCI_DEVICE_ID_NX2_5708S 0x16ac
+#define PCI_DEVICE_ID_TIGON3_5702A3 0x16c6
+#define PCI_DEVICE_ID_TIGON3_5703A3 0x16c7
+#define PCI_DEVICE_ID_TIGON3_5781 0x16dd
+#define PCI_DEVICE_ID_TIGON3_5753 0x16f7
+#define PCI_DEVICE_ID_TIGON3_5753M 0x16fd
+#define PCI_DEVICE_ID_TIGON3_5753F 0x16fe
+#define PCI_DEVICE_ID_TIGON3_5901 0x170d
+#define PCI_DEVICE_ID_BCM4401B1 0x170c
+#define PCI_DEVICE_ID_TIGON3_5901_2 0x170e
+#define PCI_DEVICE_ID_TIGON3_5906 0x1712
+#define PCI_DEVICE_ID_TIGON3_5906M 0x1713
+#define PCI_DEVICE_ID_BCM4401 0x4401
+#define PCI_DEVICE_ID_BCM4401B0 0x4402
+
+#define PCI_VENDOR_ID_TOPIC 0x151f
+#define PCI_DEVICE_ID_TOPIC_TP560 0x0000
+
+#define PCI_VENDOR_ID_ENE 0x1524
+#define PCI_DEVICE_ID_ENE_1211 0x1211
+#define PCI_DEVICE_ID_ENE_1225 0x1225
+#define PCI_DEVICE_ID_ENE_1410 0x1410
+#define PCI_DEVICE_ID_ENE_710 0x1411
+#define PCI_DEVICE_ID_ENE_712 0x1412
+#define PCI_DEVICE_ID_ENE_1420 0x1420
+#define PCI_DEVICE_ID_ENE_720 0x1421
+#define PCI_DEVICE_ID_ENE_722 0x1422
+
+#define PCI_VENDOR_ID_CHELSIO 0x1425
+
+
+#define PCI_VENDOR_ID_SYBA 0x1592
+#define PCI_DEVICE_ID_SYBA_2P_EPP 0x0782
+#define PCI_DEVICE_ID_SYBA_1P_ECP 0x0783
+
+#define PCI_VENDOR_ID_MORETON 0x15aa
+#define PCI_DEVICE_ID_RASTEL_2PORT 0x2000
+
+#define PCI_VENDOR_ID_ZOLTRIX 0x15b0
+#define PCI_DEVICE_ID_ZOLTRIX_2BD0 0x2bd0
+
+#define PCI_VENDOR_ID_MELLANOX 0x15b3
+#define PCI_DEVICE_ID_MELLANOX_TAVOR 0x5a44
+#define PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE 0x5a46
+#define PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT 0x6278
+#define PCI_DEVICE_ID_MELLANOX_ARBEL 0x6282
+#define PCI_DEVICE_ID_MELLANOX_SINAI_OLD 0x5e8c
+#define PCI_DEVICE_ID_MELLANOX_SINAI 0x6274
+
+#define PCI_VENDOR_ID_PDC 0x15e9
+
+
+#define PCI_VENDOR_ID_FARSITE 0x1619
+#define PCI_DEVICE_ID_FARSITE_T2P 0x0400
+#define PCI_DEVICE_ID_FARSITE_T4P 0x0440
+#define PCI_DEVICE_ID_FARSITE_T1U 0x0610
+#define PCI_DEVICE_ID_FARSITE_T2U 0x0620
+#define PCI_DEVICE_ID_FARSITE_T4U 0x0640
+#define PCI_DEVICE_ID_FARSITE_TE1 0x1610
+#define PCI_DEVICE_ID_FARSITE_TE1C 0x1612
+
+#define PCI_VENDOR_ID_SIBYTE 0x166d
+#define PCI_DEVICE_ID_BCM1250_PCI 0x0001
+#define PCI_DEVICE_ID_BCM1250_HT 0x0002
+
+#define PCI_VENDOR_ID_NETCELL 0x169c
+#define PCI_DEVICE_ID_REVOLUTION 0x0044
+
+#define PCI_VENDOR_ID_VITESSE 0x1725
+#define PCI_DEVICE_ID_VITESSE_VSC7174 0x7174
+
+#define PCI_VENDOR_ID_LINKSYS 0x1737
+#define PCI_DEVICE_ID_LINKSYS_EG1064 0x1064
+
+#define PCI_VENDOR_ID_ALTIMA 0x173b
+#define PCI_DEVICE_ID_ALTIMA_AC1000 0x03e8
+#define PCI_DEVICE_ID_ALTIMA_AC1001 0x03e9
+#define PCI_DEVICE_ID_ALTIMA_AC9100 0x03ea
+#define PCI_DEVICE_ID_ALTIMA_AC1003 0x03eb
+
+#define PCI_VENDOR_ID_ARECA 0x17d3
+#define PCI_DEVICE_ID_ARECA_1110 0x1110
+#define PCI_DEVICE_ID_ARECA_1120 0x1120
+#define PCI_DEVICE_ID_ARECA_1130 0x1130
+#define PCI_DEVICE_ID_ARECA_1160 0x1160
+#define PCI_DEVICE_ID_ARECA_1170 0x1170
+#define PCI_DEVICE_ID_ARECA_1210 0x1210
+#define PCI_DEVICE_ID_ARECA_1220 0x1220
+#define PCI_DEVICE_ID_ARECA_1230 0x1230
+#define PCI_DEVICE_ID_ARECA_1260 0x1260
+#define PCI_DEVICE_ID_ARECA_1270 0x1270
+#define PCI_DEVICE_ID_ARECA_1280 0x1280
+#define PCI_DEVICE_ID_ARECA_1380 0x1380
+#define PCI_DEVICE_ID_ARECA_1381 0x1381
+#define PCI_DEVICE_ID_ARECA_1680 0x1680
+#define PCI_DEVICE_ID_ARECA_1681 0x1681
+
+#define PCI_VENDOR_ID_S2IO 0x17d5
+#define PCI_DEVICE_ID_S2IO_WIN 0x5731
+#define PCI_DEVICE_ID_S2IO_UNI 0x5831
+#define PCI_DEVICE_ID_HERC_WIN 0x5732
+#define PCI_DEVICE_ID_HERC_UNI 0x5832
+
+
+#define PCI_VENDOR_ID_SITECOM 0x182d
+#define PCI_DEVICE_ID_SITECOM_DC105V2 0x3069
+
+#define PCI_VENDOR_ID_TOPSPIN 0x1867
+
+#define PCI_VENDOR_ID_TDI 0x192E
+#define PCI_DEVICE_ID_TDI_EHCI 0x0101
+
+#define PCI_VENDOR_ID_JMICRON 0x197B
+#define PCI_DEVICE_ID_JMICRON_JMB360 0x2360
+#define PCI_DEVICE_ID_JMICRON_JMB361 0x2361
+#define PCI_DEVICE_ID_JMICRON_JMB363 0x2363
+#define PCI_DEVICE_ID_JMICRON_JMB365 0x2365
+#define PCI_DEVICE_ID_JMICRON_JMB366 0x2366
+#define PCI_DEVICE_ID_JMICRON_JMB368 0x2368
+
+#define PCI_VENDOR_ID_TEKRAM 0x1de1
+#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29
+
+#define PCI_VENDOR_ID_HINT 0x3388
+#define PCI_DEVICE_ID_HINT_VXPROII_IDE 0x8013
+
+#define PCI_VENDOR_ID_3DLABS 0x3d3d
+#define PCI_DEVICE_ID_3DLABS_PERMEDIA2 0x0007
+#define PCI_DEVICE_ID_3DLABS_PERMEDIA2V 0x0009
+
+
+#define PCI_VENDOR_ID_AKS 0x416c
+#define PCI_DEVICE_ID_AKS_ALADDINCARD 0x0100
+
+
+
+#define PCI_VENDOR_ID_S3 0x5333
+#define PCI_DEVICE_ID_S3_TRIO 0x8811
+#define PCI_DEVICE_ID_S3_868 0x8880
+#define PCI_DEVICE_ID_S3_968 0x88f0
+#define PCI_DEVICE_ID_S3_SAVAGE4 0x8a25
+#define PCI_DEVICE_ID_S3_PROSAVAGE8 0x8d04
+#define PCI_DEVICE_ID_S3_SONICVIBES 0xca00
+
+#define PCI_VENDOR_ID_DUNORD 0x5544
+#define PCI_DEVICE_ID_DUNORD_I3000 0x0001
+
+
+#define PCI_VENDOR_ID_DCI 0x6666
+#define PCI_DEVICE_ID_DCI_PCCOM4 0x0001
+#define PCI_DEVICE_ID_DCI_PCCOM8 0x0002
+#define PCI_DEVICE_ID_DCI_PCCOM2 0x0004
+
+#define PCI_VENDOR_ID_INTEL 0x8086
+#define PCI_DEVICE_ID_INTEL_EESSC 0x0008
+#define PCI_DEVICE_ID_INTEL_PXHD_0 0x0320
+#define PCI_DEVICE_ID_INTEL_PXHD_1 0x0321
+#define PCI_DEVICE_ID_INTEL_PXH_0 0x0329
+#define PCI_DEVICE_ID_INTEL_PXH_1 0x032A
+#define PCI_DEVICE_ID_INTEL_PXHV 0x032C
+#define PCI_DEVICE_ID_INTEL_82375 0x0482
+#define PCI_DEVICE_ID_INTEL_82424 0x0483
+#define PCI_DEVICE_ID_INTEL_82378 0x0484
+#define PCI_DEVICE_ID_INTEL_I960 0x0960
+#define PCI_DEVICE_ID_INTEL_I960RM 0x0962
+#define PCI_DEVICE_ID_INTEL_82815_MC 0x1130
+#define PCI_DEVICE_ID_INTEL_82815_CGC 0x1132
+#define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221
+#define PCI_DEVICE_ID_INTEL_7505_0 0x2550
+#define PCI_DEVICE_ID_INTEL_7205_0 0x255d
+#define PCI_DEVICE_ID_INTEL_82437 0x122d
+#define PCI_DEVICE_ID_INTEL_82371FB_0 0x122e
+#define PCI_DEVICE_ID_INTEL_82371FB_1 0x1230
+#define PCI_DEVICE_ID_INTEL_82371MX 0x1234
+#define PCI_DEVICE_ID_INTEL_82441 0x1237
+#define PCI_DEVICE_ID_INTEL_82380FB 0x124b
+#define PCI_DEVICE_ID_INTEL_82439 0x1250
+#define PCI_DEVICE_ID_INTEL_80960_RP 0x1960
+#define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21
+#define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30
+#define PCI_DEVICE_ID_INTEL_IOAT 0x1a38
+#define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410
+#define PCI_DEVICE_ID_INTEL_82801AA_1 0x2411
+#define PCI_DEVICE_ID_INTEL_82801AA_3 0x2413
+#define PCI_DEVICE_ID_INTEL_82801AA_5 0x2415
+#define PCI_DEVICE_ID_INTEL_82801AA_6 0x2416
+#define PCI_DEVICE_ID_INTEL_82801AA_8 0x2418
+#define PCI_DEVICE_ID_INTEL_82801AB_0 0x2420
+#define PCI_DEVICE_ID_INTEL_82801AB_1 0x2421
+#define PCI_DEVICE_ID_INTEL_82801AB_3 0x2423
+#define PCI_DEVICE_ID_INTEL_82801AB_5 0x2425
+#define PCI_DEVICE_ID_INTEL_82801AB_6 0x2426
+#define PCI_DEVICE_ID_INTEL_82801AB_8 0x2428
+#define PCI_DEVICE_ID_INTEL_82801BA_0 0x2440
+#define PCI_DEVICE_ID_INTEL_82801BA_2 0x2443
+#define PCI_DEVICE_ID_INTEL_82801BA_4 0x2445
+#define PCI_DEVICE_ID_INTEL_82801BA_6 0x2448
+#define PCI_DEVICE_ID_INTEL_82801BA_8 0x244a
+#define PCI_DEVICE_ID_INTEL_82801BA_9 0x244b
+#define PCI_DEVICE_ID_INTEL_82801BA_10 0x244c
+#define PCI_DEVICE_ID_INTEL_82801BA_11 0x244e
+#define PCI_DEVICE_ID_INTEL_82801E_0 0x2450
+#define PCI_DEVICE_ID_INTEL_82801E_11 0x245b
+#define PCI_DEVICE_ID_INTEL_82801CA_0 0x2480
+#define PCI_DEVICE_ID_INTEL_82801CA_3 0x2483
+#define PCI_DEVICE_ID_INTEL_82801CA_5 0x2485
+#define PCI_DEVICE_ID_INTEL_82801CA_6 0x2486
+#define PCI_DEVICE_ID_INTEL_82801CA_10 0x248a
+#define PCI_DEVICE_ID_INTEL_82801CA_11 0x248b
+#define PCI_DEVICE_ID_INTEL_82801CA_12 0x248c
+#define PCI_DEVICE_ID_INTEL_82801DB_0 0x24c0
+#define PCI_DEVICE_ID_INTEL_82801DB_1 0x24c1
+#define PCI_DEVICE_ID_INTEL_82801DB_3 0x24c3
+#define PCI_DEVICE_ID_INTEL_82801DB_5 0x24c5
+#define PCI_DEVICE_ID_INTEL_82801DB_6 0x24c6
+#define PCI_DEVICE_ID_INTEL_82801DB_9 0x24c9
+#define PCI_DEVICE_ID_INTEL_82801DB_10 0x24ca
+#define PCI_DEVICE_ID_INTEL_82801DB_11 0x24cb
+#define PCI_DEVICE_ID_INTEL_82801DB_12 0x24cc
+#define PCI_DEVICE_ID_INTEL_82801EB_0 0x24d0
+#define PCI_DEVICE_ID_INTEL_82801EB_1 0x24d1
+#define PCI_DEVICE_ID_INTEL_82801EB_3 0x24d3
+#define PCI_DEVICE_ID_INTEL_82801EB_5 0x24d5
+#define PCI_DEVICE_ID_INTEL_82801EB_6 0x24d6
+#define PCI_DEVICE_ID_INTEL_82801EB_11 0x24db
+#define PCI_DEVICE_ID_INTEL_82801EB_13 0x24dd
+#define PCI_DEVICE_ID_INTEL_ESB_1 0x25a1
+#define PCI_DEVICE_ID_INTEL_ESB_2 0x25a2
+#define PCI_DEVICE_ID_INTEL_ESB_4 0x25a4
+#define PCI_DEVICE_ID_INTEL_ESB_5 0x25a6
+#define PCI_DEVICE_ID_INTEL_ESB_9 0x25ab
+#define PCI_DEVICE_ID_INTEL_82820_HB 0x2500
+#define PCI_DEVICE_ID_INTEL_82820_UP_HB 0x2501
+#define PCI_DEVICE_ID_INTEL_82850_HB 0x2530
+#define PCI_DEVICE_ID_INTEL_82860_HB 0x2531
+#define PCI_DEVICE_ID_INTEL_E7501_MCH 0x254c
+#define PCI_DEVICE_ID_INTEL_82845G_HB 0x2560
+#define PCI_DEVICE_ID_INTEL_82845G_IG 0x2562
+#define PCI_DEVICE_ID_INTEL_82865_HB 0x2570
+#define PCI_DEVICE_ID_INTEL_82865_IG 0x2572
+#define PCI_DEVICE_ID_INTEL_82875_HB 0x2578
+#define PCI_DEVICE_ID_INTEL_82915G_HB 0x2580
+#define PCI_DEVICE_ID_INTEL_82915G_IG 0x2582
+#define PCI_DEVICE_ID_INTEL_82915GM_HB 0x2590
+#define PCI_DEVICE_ID_INTEL_82915GM_IG 0x2592
+#define PCI_DEVICE_ID_INTEL_82945G_HB 0x2770
+#define PCI_DEVICE_ID_INTEL_82945G_IG 0x2772
+#define PCI_DEVICE_ID_INTEL_82945GM_HB 0x27A0
+#define PCI_DEVICE_ID_INTEL_82945GM_IG 0x27A2
+#define PCI_DEVICE_ID_INTEL_ICH6_0 0x2640
+#define PCI_DEVICE_ID_INTEL_ICH6_1 0x2641
+#define PCI_DEVICE_ID_INTEL_ICH6_2 0x2642
+#define PCI_DEVICE_ID_INTEL_ICH6_16 0x266a
+#define PCI_DEVICE_ID_INTEL_ICH6_17 0x266d
+#define PCI_DEVICE_ID_INTEL_ICH6_18 0x266e
+#define PCI_DEVICE_ID_INTEL_ICH6_19 0x266f
+#define PCI_DEVICE_ID_INTEL_ESB2_0 0x2670
+#define PCI_DEVICE_ID_INTEL_ESB2_14 0x2698
+#define PCI_DEVICE_ID_INTEL_ESB2_17 0x269b
+#define PCI_DEVICE_ID_INTEL_ESB2_18 0x269e
+#define PCI_DEVICE_ID_INTEL_ICH7_0 0x27b8
+#define PCI_DEVICE_ID_INTEL_ICH7_1 0x27b9
+#define PCI_DEVICE_ID_INTEL_ICH7_30 0x27b0
+#define PCI_DEVICE_ID_INTEL_ICH7_31 0x27bd
+#define PCI_DEVICE_ID_INTEL_ICH7_17 0x27da
+#define PCI_DEVICE_ID_INTEL_ICH7_19 0x27dd
+#define PCI_DEVICE_ID_INTEL_ICH7_20 0x27de
+#define PCI_DEVICE_ID_INTEL_ICH7_21 0x27df
+#define PCI_DEVICE_ID_INTEL_ICH8_0 0x2810
+#define PCI_DEVICE_ID_INTEL_ICH8_1 0x2811
+#define PCI_DEVICE_ID_INTEL_ICH8_2 0x2812
+#define PCI_DEVICE_ID_INTEL_ICH8_3 0x2814
+#define PCI_DEVICE_ID_INTEL_ICH8_4 0x2815
+#define PCI_DEVICE_ID_INTEL_ICH8_5 0x283e
+#define PCI_DEVICE_ID_INTEL_ICH8_6 0x2850
+#define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340
+#define PCI_DEVICE_ID_INTEL_82830_HB 0x3575
+#define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577
+#define PCI_DEVICE_ID_INTEL_82855GM_HB 0x3580
+#define PCI_DEVICE_ID_INTEL_82855GM_IG 0x3582
+#define PCI_DEVICE_ID_INTEL_E7520_MCH 0x3590
+#define PCI_DEVICE_ID_INTEL_E7320_MCH 0x3592
+#define PCI_DEVICE_ID_INTEL_MCH_PA 0x3595
+#define PCI_DEVICE_ID_INTEL_MCH_PA1 0x3596
+#define PCI_DEVICE_ID_INTEL_MCH_PB 0x3597
+#define PCI_DEVICE_ID_INTEL_MCH_PB1 0x3598
+#define PCI_DEVICE_ID_INTEL_MCH_PC 0x3599
+#define PCI_DEVICE_ID_INTEL_MCH_PC1 0x359a
+#define PCI_DEVICE_ID_INTEL_E7525_MCH 0x359e
+#define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000
+#define PCI_DEVICE_ID_INTEL_82371SB_1 0x7010
+#define PCI_DEVICE_ID_INTEL_82371SB_2 0x7020
+#define PCI_DEVICE_ID_INTEL_82437VX 0x7030
+#define PCI_DEVICE_ID_INTEL_82439TX 0x7100
+#define PCI_DEVICE_ID_INTEL_82371AB_0 0x7110
+#define PCI_DEVICE_ID_INTEL_82371AB 0x7111
+#define PCI_DEVICE_ID_INTEL_82371AB_2 0x7112
+#define PCI_DEVICE_ID_INTEL_82371AB_3 0x7113
+#define PCI_DEVICE_ID_INTEL_82810_MC1 0x7120
+#define PCI_DEVICE_ID_INTEL_82810_IG1 0x7121
+#define PCI_DEVICE_ID_INTEL_82810_MC3 0x7122
+#define PCI_DEVICE_ID_INTEL_82810_IG3 0x7123
+#define PCI_DEVICE_ID_INTEL_82810E_MC 0x7124
+#define PCI_DEVICE_ID_INTEL_82810E_IG 0x7125
+#define PCI_DEVICE_ID_INTEL_82443LX_0 0x7180
+#define PCI_DEVICE_ID_INTEL_82443LX_1 0x7181
+#define PCI_DEVICE_ID_INTEL_82443BX_0 0x7190
+#define PCI_DEVICE_ID_INTEL_82443BX_1 0x7191
+#define PCI_DEVICE_ID_INTEL_82443BX_2 0x7192
+#define PCI_DEVICE_ID_INTEL_440MX 0x7195
+#define PCI_DEVICE_ID_INTEL_440MX_6 0x7196
+#define PCI_DEVICE_ID_INTEL_82443MX_0 0x7198
+#define PCI_DEVICE_ID_INTEL_82443MX_1 0x7199
+#define PCI_DEVICE_ID_INTEL_82443MX_3 0x719b
+#define PCI_DEVICE_ID_INTEL_82443GX_0 0x71a0
+#define PCI_DEVICE_ID_INTEL_82443GX_2 0x71a2
+#define PCI_DEVICE_ID_INTEL_82372FB_1 0x7601
+#define PCI_DEVICE_ID_INTEL_82454GX 0x84c4
+#define PCI_DEVICE_ID_INTEL_82450GX 0x84c5
+#define PCI_DEVICE_ID_INTEL_82451NX 0x84ca
+#define PCI_DEVICE_ID_INTEL_82454NX 0x84cb
+#define PCI_DEVICE_ID_INTEL_84460GX 0x84ea
+#define PCI_DEVICE_ID_INTEL_IXP4XX 0x8500
+#define PCI_DEVICE_ID_INTEL_IXP2800 0x9004
+#define PCI_DEVICE_ID_INTEL_S21152BB 0xb152
+
+#define PCI_VENDOR_ID_SCALEMP 0x8686
+#define PCI_DEVICE_ID_SCALEMP_VSMP_CTL 0x1010
+
+#define PCI_VENDOR_ID_COMPUTONE 0x8e0e
+#define PCI_DEVICE_ID_COMPUTONE_IP2EX 0x0291
+#define PCI_DEVICE_ID_COMPUTONE_PG 0x0302
+#define PCI_SUBVENDOR_ID_COMPUTONE 0x8e0e
+#define PCI_SUBDEVICE_ID_COMPUTONE_PG4 0x0001
+#define PCI_SUBDEVICE_ID_COMPUTONE_PG8 0x0002
+#define PCI_SUBDEVICE_ID_COMPUTONE_PG6 0x0003
+
+#define PCI_VENDOR_ID_KTI 0x8e2e
+
+#define PCI_VENDOR_ID_ADAPTEC 0x9004
+#define PCI_DEVICE_ID_ADAPTEC_7810 0x1078
+#define PCI_DEVICE_ID_ADAPTEC_7821 0x2178
+#define PCI_DEVICE_ID_ADAPTEC_38602 0x3860
+#define PCI_DEVICE_ID_ADAPTEC_7850 0x5078
+#define PCI_DEVICE_ID_ADAPTEC_7855 0x5578
+#define PCI_DEVICE_ID_ADAPTEC_3860 0x6038
+#define PCI_DEVICE_ID_ADAPTEC_1480A 0x6075
+#define PCI_DEVICE_ID_ADAPTEC_7860 0x6078
+#define PCI_DEVICE_ID_ADAPTEC_7861 0x6178
+#define PCI_DEVICE_ID_ADAPTEC_7870 0x7078
+#define PCI_DEVICE_ID_ADAPTEC_7871 0x7178
+#define PCI_DEVICE_ID_ADAPTEC_7872 0x7278
+#define PCI_DEVICE_ID_ADAPTEC_7873 0x7378
+#define PCI_DEVICE_ID_ADAPTEC_7874 0x7478
+#define PCI_DEVICE_ID_ADAPTEC_7895 0x7895
+#define PCI_DEVICE_ID_ADAPTEC_7880 0x8078
+#define PCI_DEVICE_ID_ADAPTEC_7881 0x8178
+#define PCI_DEVICE_ID_ADAPTEC_7882 0x8278
+#define PCI_DEVICE_ID_ADAPTEC_7883 0x8378
+#define PCI_DEVICE_ID_ADAPTEC_7884 0x8478
+#define PCI_DEVICE_ID_ADAPTEC_7885 0x8578
+#define PCI_DEVICE_ID_ADAPTEC_7886 0x8678
+#define PCI_DEVICE_ID_ADAPTEC_7887 0x8778
+#define PCI_DEVICE_ID_ADAPTEC_7888 0x8878
+
+#define PCI_VENDOR_ID_ADAPTEC2 0x9005
+#define PCI_DEVICE_ID_ADAPTEC2_2940U2 0x0010
+#define PCI_DEVICE_ID_ADAPTEC2_2930U2 0x0011
+#define PCI_DEVICE_ID_ADAPTEC2_7890B 0x0013
+#define PCI_DEVICE_ID_ADAPTEC2_7890 0x001f
+#define PCI_DEVICE_ID_ADAPTEC2_3940U2 0x0050
+#define PCI_DEVICE_ID_ADAPTEC2_3950U2D 0x0051
+#define PCI_DEVICE_ID_ADAPTEC2_7896 0x005f
+#define PCI_DEVICE_ID_ADAPTEC2_7892A 0x0080
+#define PCI_DEVICE_ID_ADAPTEC2_7892B 0x0081
+#define PCI_DEVICE_ID_ADAPTEC2_7892D 0x0083
+#define PCI_DEVICE_ID_ADAPTEC2_7892P 0x008f
+#define PCI_DEVICE_ID_ADAPTEC2_7899A 0x00c0
+#define PCI_DEVICE_ID_ADAPTEC2_7899B 0x00c1
+#define PCI_DEVICE_ID_ADAPTEC2_7899D 0x00c3
+#define PCI_DEVICE_ID_ADAPTEC2_7899P 0x00cf
+#define PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN 0x0500
+#define PCI_DEVICE_ID_ADAPTEC2_SCAMP 0x0503
+
+
+#define PCI_VENDOR_ID_HOLTEK 0x9412
+#define PCI_DEVICE_ID_HOLTEK_6565 0x6565
+
+#define PCI_VENDOR_ID_NETMOS 0x9710
+#define PCI_DEVICE_ID_NETMOS_9705 0x9705
+#define PCI_DEVICE_ID_NETMOS_9715 0x9715
+#define PCI_DEVICE_ID_NETMOS_9735 0x9735
+#define PCI_DEVICE_ID_NETMOS_9745 0x9745
+#define PCI_DEVICE_ID_NETMOS_9755 0x9755
+#define PCI_DEVICE_ID_NETMOS_9805 0x9805
+#define PCI_DEVICE_ID_NETMOS_9815 0x9815
+#define PCI_DEVICE_ID_NETMOS_9835 0x9835
+#define PCI_DEVICE_ID_NETMOS_9845 0x9845
+#define PCI_DEVICE_ID_NETMOS_9855 0x9855
+
+#define PCI_SUBVENDOR_ID_EXSYS 0xd84d
+#define PCI_SUBDEVICE_ID_EXSYS_4014 0x4014
+#define PCI_SUBDEVICE_ID_EXSYS_4055 0x4055
+
+#define PCI_VENDOR_ID_TIGERJET 0xe159
+#define PCI_DEVICE_ID_TIGERJET_300 0x0001
+#define PCI_DEVICE_ID_TIGERJET_100 0x0002
+
+#define PCI_VENDOR_ID_TTTECH 0x0357
+#define PCI_DEVICE_ID_TTTECH_MC322 0x000A
+
+#define PCI_VENDOR_ID_XILINX_RME 0xea60
+#define PCI_DEVICE_ID_RME_DIGI32 0x9896
+#define PCI_DEVICE_ID_RME_DIGI32_PRO 0x9897
+#define PCI_DEVICE_ID_RME_DIGI32_8 0x9898
+
+#define PCI_VENDOR_ID_QUICKNET 0x15E2
+#define PCI_DEVICE_ID_QUICKNET_XJ 0x0500
diff --git a/xen/include/asm-ia64/linux/pci_regs.h b/xen/include/asm-ia64/linux/pci_regs.h
new file mode 100644
index 0000000000..c321316f1b
--- /dev/null
+++ b/xen/include/asm-ia64/linux/pci_regs.h
@@ -0,0 +1,488 @@
+/*
+ * pci_regs.h
+ *
+ * PCI standard defines
+ * Copyright 1994, Drew Eckhardt
+ * Copyright 1997--1999 Martin Mares <mj@ucw.cz>
+ *
+ * For more information, please consult the following manuals (look at
+ * http://www.pcisig.com/ for how to get them):
+ *
+ * PCI BIOS Specification
+ * PCI Local Bus Specification
+ * PCI to PCI Bridge Specification
+ * PCI System Design Guide
+ *
+ * For hypertransport information, please consult the following manuals
+ * from http://www.hypertransport.org
+ *
+ * The Hypertransport I/O Link Specification
+ */
+
+#ifndef LINUX_PCI_REGS_H
+#define LINUX_PCI_REGS_H
+
+/*
+ * Under PCI, each device has 256 bytes of configuration address space,
+ * of which the first 64 bytes are standardized as follows:
+ */
+#define PCI_VENDOR_ID 0x00 /* 16 bits */
+#define PCI_DEVICE_ID 0x02 /* 16 bits */
+#define PCI_COMMAND 0x04 /* 16 bits */
+#define PCI_COMMAND_IO 0x1 /* Enable response in I/O space */
+#define PCI_COMMAND_MEMORY 0x2 /* Enable response in Memory space */
+#define PCI_COMMAND_MASTER 0x4 /* Enable bus mastering */
+#define PCI_COMMAND_SPECIAL 0x8 /* Enable response to special cycles */
+#define PCI_COMMAND_INVALIDATE 0x10 /* Use memory write and invalidate */
+#define PCI_COMMAND_VGA_PALETTE 0x20 /* Enable palette snooping */
+#define PCI_COMMAND_PARITY 0x40 /* Enable parity checking */
+#define PCI_COMMAND_WAIT 0x80 /* Enable address/data stepping */
+#define PCI_COMMAND_SERR 0x100 /* Enable SERR */
+#define PCI_COMMAND_FAST_BACK 0x200 /* Enable back-to-back writes */
+#define PCI_COMMAND_INTX_DISABLE 0x400 /* INTx Emulation Disable */
+
+#define PCI_STATUS 0x06 /* 16 bits */
+#define PCI_STATUS_CAP_LIST 0x10 /* Support Capability List */
+#define PCI_STATUS_66MHZ	0x20	/* Support 66 MHz PCI 2.1 bus */
+#define PCI_STATUS_UDF 0x40 /* Support User Definable Features [obsolete] */
+#define PCI_STATUS_FAST_BACK 0x80 /* Accept fast-back to back */
+#define PCI_STATUS_PARITY 0x100 /* Detected parity error */
+#define PCI_STATUS_DEVSEL_MASK 0x600 /* DEVSEL timing */
+#define PCI_STATUS_DEVSEL_FAST 0x000
+#define PCI_STATUS_DEVSEL_MEDIUM 0x200
+#define PCI_STATUS_DEVSEL_SLOW 0x400
+#define PCI_STATUS_SIG_TARGET_ABORT 0x800 /* Set on target abort */
+#define PCI_STATUS_REC_TARGET_ABORT 0x1000 /* Master ack of " */
+#define PCI_STATUS_REC_MASTER_ABORT 0x2000 /* Set on master abort */
+#define PCI_STATUS_SIG_SYSTEM_ERROR 0x4000 /* Set when we drive SERR */
+#define PCI_STATUS_DETECTED_PARITY 0x8000 /* Set on parity error */
+
+#define PCI_CLASS_REVISION 0x08 /* High 24 bits are class, low 8 revision */
+#define PCI_REVISION_ID 0x08 /* Revision ID */
+#define PCI_CLASS_PROG 0x09 /* Reg. Level Programming Interface */
+#define PCI_CLASS_DEVICE 0x0a /* Device class */
+
+#define PCI_CACHE_LINE_SIZE 0x0c /* 8 bits */
+#define PCI_LATENCY_TIMER 0x0d /* 8 bits */
+#define PCI_HEADER_TYPE 0x0e /* 8 bits */
+#define PCI_HEADER_TYPE_NORMAL 0
+#define PCI_HEADER_TYPE_BRIDGE 1
+#define PCI_HEADER_TYPE_CARDBUS 2
+
+#define PCI_BIST 0x0f /* 8 bits */
+#define PCI_BIST_CODE_MASK 0x0f /* Return result */
+#define PCI_BIST_START 0x40 /* 1 to start BIST, 2 secs or less */
+#define PCI_BIST_CAPABLE 0x80 /* 1 if BIST capable */
+
+/*
+ * Base addresses specify locations in memory or I/O space.
+ * Decoded size can be determined by writing a value of
+ * 0xffffffff to the register, and reading it back. Only
+ * 1 bits are decoded.
+ */
+#define PCI_BASE_ADDRESS_0 0x10 /* 32 bits */
+#define PCI_BASE_ADDRESS_1 0x14 /* 32 bits [htype 0,1 only] */
+#define PCI_BASE_ADDRESS_2 0x18 /* 32 bits [htype 0 only] */
+#define PCI_BASE_ADDRESS_3 0x1c /* 32 bits */
+#define PCI_BASE_ADDRESS_4 0x20 /* 32 bits */
+#define PCI_BASE_ADDRESS_5 0x24 /* 32 bits */
+#define PCI_BASE_ADDRESS_SPACE 0x01 /* 0 = memory, 1 = I/O */
+#define PCI_BASE_ADDRESS_SPACE_IO 0x01
+#define PCI_BASE_ADDRESS_SPACE_MEMORY 0x00
+#define PCI_BASE_ADDRESS_MEM_TYPE_MASK 0x06
+#define PCI_BASE_ADDRESS_MEM_TYPE_32 0x00 /* 32 bit address */
+#define PCI_BASE_ADDRESS_MEM_TYPE_1M 0x02 /* Below 1M [obsolete] */
+#define PCI_BASE_ADDRESS_MEM_TYPE_64 0x04 /* 64 bit address */
+#define PCI_BASE_ADDRESS_MEM_PREFETCH 0x08 /* prefetchable? */
+#define PCI_BASE_ADDRESS_MEM_MASK (~0x0fUL)
+#define PCI_BASE_ADDRESS_IO_MASK (~0x03UL)
+/* bit 1 is reserved if address_space = 1 */
+
+/* Header type 0 (normal devices) */
+#define PCI_CARDBUS_CIS 0x28
+#define PCI_SUBSYSTEM_VENDOR_ID 0x2c
+#define PCI_SUBSYSTEM_ID 0x2e
+#define PCI_ROM_ADDRESS 0x30 /* Bits 31..11 are address, 10..1 reserved */
+#define PCI_ROM_ADDRESS_ENABLE 0x01
+#define PCI_ROM_ADDRESS_MASK (~0x7ffUL)
+
+#define PCI_CAPABILITY_LIST 0x34 /* Offset of first capability list entry */
+
+/* 0x35-0x3b are reserved */
+#define PCI_INTERRUPT_LINE 0x3c /* 8 bits */
+#define PCI_INTERRUPT_PIN 0x3d /* 8 bits */
+#define PCI_MIN_GNT 0x3e /* 8 bits */
+#define PCI_MAX_LAT 0x3f /* 8 bits */
+
+/* Header type 1 (PCI-to-PCI bridges) */
+#define PCI_PRIMARY_BUS 0x18 /* Primary bus number */
+#define PCI_SECONDARY_BUS 0x19 /* Secondary bus number */
+#define PCI_SUBORDINATE_BUS 0x1a /* Highest bus number behind the bridge */
+#define PCI_SEC_LATENCY_TIMER 0x1b /* Latency timer for secondary interface */
+#define PCI_IO_BASE 0x1c /* I/O range behind the bridge */
+#define PCI_IO_LIMIT 0x1d
+#define PCI_IO_RANGE_TYPE_MASK 0x0fUL /* I/O bridging type */
+#define PCI_IO_RANGE_TYPE_16 0x00
+#define PCI_IO_RANGE_TYPE_32 0x01
+#define PCI_IO_RANGE_MASK (~0x0fUL)
+#define PCI_SEC_STATUS 0x1e /* Secondary status register, only bit 14 used */
+#define PCI_MEMORY_BASE 0x20 /* Memory range behind */
+#define PCI_MEMORY_LIMIT 0x22
+#define PCI_MEMORY_RANGE_TYPE_MASK 0x0fUL
+#define PCI_MEMORY_RANGE_MASK (~0x0fUL)
+#define PCI_PREF_MEMORY_BASE 0x24 /* Prefetchable memory range behind */
+#define PCI_PREF_MEMORY_LIMIT 0x26
+#define PCI_PREF_RANGE_TYPE_MASK 0x0fUL
+#define PCI_PREF_RANGE_TYPE_32 0x00
+#define PCI_PREF_RANGE_TYPE_64 0x01
+#define PCI_PREF_RANGE_MASK (~0x0fUL)
+#define PCI_PREF_BASE_UPPER32 0x28 /* Upper half of prefetchable memory range */
+#define PCI_PREF_LIMIT_UPPER32 0x2c
+#define PCI_IO_BASE_UPPER16 0x30 /* Upper half of I/O addresses */
+#define PCI_IO_LIMIT_UPPER16 0x32
+/* 0x34 same as for htype 0 */
+/* 0x35-0x3b is reserved */
+#define PCI_ROM_ADDRESS1 0x38 /* Same as PCI_ROM_ADDRESS, but for htype 1 */
+/* 0x3c-0x3d are same as for htype 0 */
+#define PCI_BRIDGE_CONTROL 0x3e
+#define PCI_BRIDGE_CTL_PARITY 0x01 /* Enable parity detection on secondary interface */
+#define PCI_BRIDGE_CTL_SERR 0x02 /* The same for SERR forwarding */
+#define PCI_BRIDGE_CTL_NO_ISA 0x04 /* Disable bridging of ISA ports */
+#define PCI_BRIDGE_CTL_VGA 0x08 /* Forward VGA addresses */
+#define PCI_BRIDGE_CTL_MASTER_ABORT 0x20 /* Report master aborts */
+#define PCI_BRIDGE_CTL_BUS_RESET 0x40 /* Secondary bus reset */
+#define PCI_BRIDGE_CTL_FAST_BACK 0x80 /* Fast Back2Back enabled on secondary interface */
+
+/* Header type 2 (CardBus bridges) */
+#define PCI_CB_CAPABILITY_LIST 0x14
+/* 0x15 reserved */
+#define PCI_CB_SEC_STATUS 0x16 /* Secondary status */
+#define PCI_CB_PRIMARY_BUS 0x18 /* PCI bus number */
+#define PCI_CB_CARD_BUS 0x19 /* CardBus bus number */
+#define PCI_CB_SUBORDINATE_BUS 0x1a /* Subordinate bus number */
+#define PCI_CB_LATENCY_TIMER 0x1b /* CardBus latency timer */
+#define PCI_CB_MEMORY_BASE_0 0x1c
+#define PCI_CB_MEMORY_LIMIT_0 0x20
+#define PCI_CB_MEMORY_BASE_1 0x24
+#define PCI_CB_MEMORY_LIMIT_1 0x28
+#define PCI_CB_IO_BASE_0 0x2c
+#define PCI_CB_IO_BASE_0_HI 0x2e
+#define PCI_CB_IO_LIMIT_0 0x30
+#define PCI_CB_IO_LIMIT_0_HI 0x32
+#define PCI_CB_IO_BASE_1 0x34
+#define PCI_CB_IO_BASE_1_HI 0x36
+#define PCI_CB_IO_LIMIT_1 0x38
+#define PCI_CB_IO_LIMIT_1_HI 0x3a
+#define PCI_CB_IO_RANGE_MASK (~0x03UL)
+/* 0x3c-0x3d are same as for htype 0 */
+#define PCI_CB_BRIDGE_CONTROL 0x3e
+#define PCI_CB_BRIDGE_CTL_PARITY 0x01 /* Similar to standard bridge control register */
+#define PCI_CB_BRIDGE_CTL_SERR 0x02
+#define PCI_CB_BRIDGE_CTL_ISA 0x04
+#define PCI_CB_BRIDGE_CTL_VGA 0x08
+#define PCI_CB_BRIDGE_CTL_MASTER_ABORT 0x20
+#define PCI_CB_BRIDGE_CTL_CB_RESET 0x40 /* CardBus reset */
+#define PCI_CB_BRIDGE_CTL_16BIT_INT 0x80 /* Enable interrupt for 16-bit cards */
+#define PCI_CB_BRIDGE_CTL_PREFETCH_MEM0 0x100 /* Prefetch enable for both memory regions */
+#define PCI_CB_BRIDGE_CTL_PREFETCH_MEM1 0x200
+#define PCI_CB_BRIDGE_CTL_POST_WRITES 0x400
+#define PCI_CB_SUBSYSTEM_VENDOR_ID 0x40
+#define PCI_CB_SUBSYSTEM_ID 0x42
+#define PCI_CB_LEGACY_MODE_BASE 0x44 /* 16-bit PC Card legacy mode base address (ExCa) */
+/* 0x48-0x7f reserved */
+
+/* Capability lists */
+
+#define PCI_CAP_LIST_ID 0 /* Capability ID */
+#define PCI_CAP_ID_PM 0x01 /* Power Management */
+#define PCI_CAP_ID_AGP 0x02 /* Accelerated Graphics Port */
+#define PCI_CAP_ID_VPD 0x03 /* Vital Product Data */
+#define PCI_CAP_ID_SLOTID 0x04 /* Slot Identification */
+#define PCI_CAP_ID_MSI 0x05 /* Message Signalled Interrupts */
+#define PCI_CAP_ID_CHSWP 0x06 /* CompactPCI HotSwap */
+#define PCI_CAP_ID_PCIX 0x07 /* PCI-X */
+#define PCI_CAP_ID_HT 0x08 /* HyperTransport */
+#define PCI_CAP_ID_VNDR 0x09 /* Vendor specific capability */
+#define PCI_CAP_ID_SHPC 0x0C /* PCI Standard Hot-Plug Controller */
+#define PCI_CAP_ID_EXP 0x10 /* PCI Express */
+#define PCI_CAP_ID_MSIX 0x11 /* MSI-X */
+#define PCI_CAP_LIST_NEXT 1 /* Next capability in the list */
+#define PCI_CAP_FLAGS 2 /* Capability defined flags (16 bits) */
+#define PCI_CAP_SIZEOF 4
+
+/* Power Management Registers */
+
+#define PCI_PM_PMC 2 /* PM Capabilities Register */
+#define PCI_PM_CAP_VER_MASK 0x0007 /* Version */
+#define PCI_PM_CAP_PME_CLOCK 0x0008 /* PME clock required */
+#define PCI_PM_CAP_RESERVED 0x0010 /* Reserved field */
+#define PCI_PM_CAP_DSI 0x0020 /* Device specific initialization */
+#define PCI_PM_CAP_AUX_POWER	0x01C0	/* Auxiliary power support mask */
+#define PCI_PM_CAP_D1 0x0200 /* D1 power state support */
+#define PCI_PM_CAP_D2 0x0400 /* D2 power state support */
+#define PCI_PM_CAP_PME 0x0800 /* PME pin supported */
+#define PCI_PM_CAP_PME_MASK 0xF800 /* PME Mask of all supported states */
+#define PCI_PM_CAP_PME_D0 0x0800 /* PME# from D0 */
+#define PCI_PM_CAP_PME_D1 0x1000 /* PME# from D1 */
+#define PCI_PM_CAP_PME_D2 0x2000 /* PME# from D2 */
+#define PCI_PM_CAP_PME_D3 0x4000 /* PME# from D3 (hot) */
+#define PCI_PM_CAP_PME_D3cold 0x8000 /* PME# from D3 (cold) */
+#define PCI_PM_CTRL 4 /* PM control and status register */
+#define PCI_PM_CTRL_STATE_MASK 0x0003 /* Current power state (D0 to D3) */
+#define PCI_PM_CTRL_NO_SOFT_RESET 0x0004 /* No reset for D3hot->D0 */
+#define PCI_PM_CTRL_PME_ENABLE 0x0100 /* PME pin enable */
+#define PCI_PM_CTRL_DATA_SEL_MASK 0x1e00 /* Data select (??) */
+#define PCI_PM_CTRL_DATA_SCALE_MASK 0x6000 /* Data scale (??) */
+#define PCI_PM_CTRL_PME_STATUS 0x8000 /* PME pin status */
+#define PCI_PM_PPB_EXTENSIONS 6 /* PPB support extensions (??) */
+#define PCI_PM_PPB_B2_B3 0x40 /* Stop clock when in D3hot (??) */
+#define PCI_PM_BPCC_ENABLE 0x80 /* Bus power/clock control enable (??) */
+#define PCI_PM_DATA_REGISTER 7 /* (??) */
+#define PCI_PM_SIZEOF 8
+
+/* AGP registers */
+
+#define PCI_AGP_VERSION 2 /* BCD version number */
+#define PCI_AGP_RFU 3 /* Rest of capability flags */
+#define PCI_AGP_STATUS 4 /* Status register */
+#define PCI_AGP_STATUS_RQ_MASK 0xff000000 /* Maximum number of requests - 1 */
+#define PCI_AGP_STATUS_SBA 0x0200 /* Sideband addressing supported */
+#define PCI_AGP_STATUS_64BIT 0x0020 /* 64-bit addressing supported */
+#define PCI_AGP_STATUS_FW 0x0010 /* FW transfers supported */
+#define PCI_AGP_STATUS_RATE4 0x0004 /* 4x transfer rate supported */
+#define PCI_AGP_STATUS_RATE2 0x0002 /* 2x transfer rate supported */
+#define PCI_AGP_STATUS_RATE1 0x0001 /* 1x transfer rate supported */
+#define PCI_AGP_COMMAND 8 /* Control register */
+#define PCI_AGP_COMMAND_RQ_MASK 0xff000000 /* Master: Maximum number of requests */
+#define PCI_AGP_COMMAND_SBA 0x0200 /* Sideband addressing enabled */
+#define PCI_AGP_COMMAND_AGP 0x0100 /* Allow processing of AGP transactions */
+#define PCI_AGP_COMMAND_64BIT 0x0020 /* Allow processing of 64-bit addresses */
+#define PCI_AGP_COMMAND_FW 0x0010 /* Force FW transfers */
+#define PCI_AGP_COMMAND_RATE4 0x0004 /* Use 4x rate */
+#define PCI_AGP_COMMAND_RATE2 0x0002 /* Use 2x rate */
+#define PCI_AGP_COMMAND_RATE1 0x0001 /* Use 1x rate */
+#define PCI_AGP_SIZEOF 12
+
+/* Vital Product Data */
+
+#define PCI_VPD_ADDR 2 /* Address to access (15 bits!) */
+#define PCI_VPD_ADDR_MASK 0x7fff /* Address mask */
+#define PCI_VPD_ADDR_F 0x8000 /* Write 0, 1 indicates completion */
+#define PCI_VPD_DATA 4 /* 32-bits of data returned here */
+
+/* Slot Identification */
+
+#define PCI_SID_ESR 2 /* Expansion Slot Register */
+#define PCI_SID_ESR_NSLOTS 0x1f /* Number of expansion slots available */
+#define PCI_SID_ESR_FIC 0x20 /* First In Chassis Flag */
+#define PCI_SID_CHASSIS_NR 3 /* Chassis Number */
+
+/* Message Signalled Interrupts registers */
+
+#define PCI_MSI_FLAGS 2 /* Various flags */
+#define PCI_MSI_FLAGS_64BIT 0x80 /* 64-bit addresses allowed */
+#define PCI_MSI_FLAGS_QSIZE 0x70 /* Message queue size configured */
+#define PCI_MSI_FLAGS_QMASK 0x0e /* Maximum queue size available */
+#define PCI_MSI_FLAGS_ENABLE 0x01 /* MSI feature enabled */
+#define PCI_MSI_FLAGS_MASKBIT 0x100 /* 64-bit mask bits allowed */
+#define PCI_MSI_RFU 3 /* Rest of capability flags */
+#define PCI_MSI_ADDRESS_LO 4 /* Lower 32 bits */
+#define PCI_MSI_ADDRESS_HI 8 /* Upper 32 bits (if PCI_MSI_FLAGS_64BIT set) */
+#define PCI_MSI_DATA_32 8 /* 16 bits of data for 32-bit devices */
+#define PCI_MSI_DATA_64 12 /* 16 bits of data for 64-bit devices */
+#define PCI_MSI_MASK_BIT 16 /* Mask bits register */
+
+/* CompactPCI Hotswap Register */
+
+#define PCI_CHSWP_CSR 2 /* Control and Status Register */
+#define PCI_CHSWP_DHA 0x01 /* Device Hiding Arm */
+#define PCI_CHSWP_EIM 0x02 /* ENUM# Signal Mask */
+#define PCI_CHSWP_PIE 0x04 /* Pending Insert or Extract */
+#define PCI_CHSWP_LOO 0x08 /* LED On / Off */
+#define PCI_CHSWP_PI 0x30 /* Programming Interface */
+#define PCI_CHSWP_EXT 0x40 /* ENUM# status - extraction */
+#define PCI_CHSWP_INS 0x80 /* ENUM# status - insertion */
+
+/* PCI-X registers */
+
+#define PCI_X_CMD 2 /* Modes & Features */
+#define PCI_X_CMD_DPERR_E 0x0001 /* Data Parity Error Recovery Enable */
+#define PCI_X_CMD_ERO 0x0002 /* Enable Relaxed Ordering */
+#define PCI_X_CMD_MAX_READ 0x000c /* Max Memory Read Byte Count */
+#define PCI_X_CMD_MAX_SPLIT 0x0070 /* Max Outstanding Split Transactions */
+#define PCI_X_CMD_VERSION(x) (((x) >> 12) & 3) /* Version */
+#define PCI_X_STATUS 4 /* PCI-X capabilities */
+#define PCI_X_STATUS_DEVFN 0x000000ff /* A copy of devfn */
+#define PCI_X_STATUS_BUS 0x0000ff00 /* A copy of bus nr */
+#define PCI_X_STATUS_64BIT 0x00010000 /* 64-bit device */
+#define PCI_X_STATUS_133MHZ 0x00020000 /* 133 MHz capable */
+#define PCI_X_STATUS_SPL_DISC 0x00040000 /* Split Completion Discarded */
+#define PCI_X_STATUS_UNX_SPL 0x00080000 /* Unexpected Split Completion */
+#define PCI_X_STATUS_COMPLEX 0x00100000 /* Device Complexity */
+#define PCI_X_STATUS_MAX_READ 0x00600000 /* Designed Max Memory Read Count */
+#define PCI_X_STATUS_MAX_SPLIT 0x03800000 /* Designed Max Outstanding Split Transactions */
+#define PCI_X_STATUS_MAX_CUM 0x1c000000 /* Designed Max Cumulative Read Size */
+#define PCI_X_STATUS_SPL_ERR 0x20000000 /* Rcvd Split Completion Error Msg */
+#define PCI_X_STATUS_266MHZ 0x40000000 /* 266 MHz capable */
+#define PCI_X_STATUS_533MHZ 0x80000000 /* 533 MHz capable */
+
+/* PCI Express capability registers */
+
+#define PCI_EXP_FLAGS 2 /* Capabilities register */
+#define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */
+#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */
+#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */
+#define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */
+#define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */
+#define PCI_EXP_TYPE_UPSTREAM 0x5 /* Upstream Port */
+#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */
+#define PCI_EXP_TYPE_PCI_BRIDGE 0x7 /* PCI/PCI-X Bridge */
+#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */
+#define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */
+#define PCI_EXP_DEVCAP 4 /* Device capabilities */
+#define PCI_EXP_DEVCAP_PAYLOAD 0x07 /* Max_Payload_Size */
+#define PCI_EXP_DEVCAP_PHANTOM 0x18 /* Phantom functions */
+#define PCI_EXP_DEVCAP_EXT_TAG 0x20 /* Extended tags */
+#define PCI_EXP_DEVCAP_L0S 0x1c0 /* L0s Acceptable Latency */
+#define PCI_EXP_DEVCAP_L1 0xe00 /* L1 Acceptable Latency */
+#define PCI_EXP_DEVCAP_ATN_BUT 0x1000 /* Attention Button Present */
+#define PCI_EXP_DEVCAP_ATN_IND 0x2000 /* Attention Indicator Present */
+#define PCI_EXP_DEVCAP_PWR_IND 0x4000 /* Power Indicator Present */
+#define PCI_EXP_DEVCAP_PWR_VAL 0x3fc0000 /* Slot Power Limit Value */
+#define PCI_EXP_DEVCAP_PWR_SCL 0xc000000 /* Slot Power Limit Scale */
+#define PCI_EXP_DEVCTL 8 /* Device Control */
+#define PCI_EXP_DEVCTL_CERE 0x0001 /* Correctable Error Reporting En. */
+#define PCI_EXP_DEVCTL_NFERE 0x0002 /* Non-Fatal Error Reporting Enable */
+#define PCI_EXP_DEVCTL_FERE 0x0004 /* Fatal Error Reporting Enable */
+#define PCI_EXP_DEVCTL_URRE 0x0008 /* Unsupported Request Reporting En. */
+#define PCI_EXP_DEVCTL_RELAX_EN 0x0010 /* Enable relaxed ordering */
+#define PCI_EXP_DEVCTL_PAYLOAD 0x00e0 /* Max_Payload_Size */
+#define PCI_EXP_DEVCTL_EXT_TAG 0x0100 /* Extended Tag Field Enable */
+#define PCI_EXP_DEVCTL_PHANTOM 0x0200 /* Phantom Functions Enable */
+#define PCI_EXP_DEVCTL_AUX_PME 0x0400 /* Auxiliary Power PM Enable */
+#define PCI_EXP_DEVCTL_NOSNOOP_EN 0x0800 /* Enable No Snoop */
+#define PCI_EXP_DEVCTL_READRQ 0x7000 /* Max_Read_Request_Size */
+#define PCI_EXP_DEVSTA 10 /* Device Status */
+#define PCI_EXP_DEVSTA_CED 0x01 /* Correctable Error Detected */
+#define PCI_EXP_DEVSTA_NFED 0x02 /* Non-Fatal Error Detected */
+#define PCI_EXP_DEVSTA_FED 0x04 /* Fatal Error Detected */
+#define PCI_EXP_DEVSTA_URD 0x08 /* Unsupported Request Detected */
+#define PCI_EXP_DEVSTA_AUXPD 0x10 /* AUX Power Detected */
+#define PCI_EXP_DEVSTA_TRPND 0x20 /* Transactions Pending */
+#define PCI_EXP_LNKCAP 12 /* Link Capabilities */
+#define PCI_EXP_LNKCTL 16 /* Link Control */
+#define PCI_EXP_LNKCTL_CLKREQ_EN 0x100 /* Enable clkreq */
+#define PCI_EXP_LNKSTA 18 /* Link Status */
+#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */
+#define PCI_EXP_SLTCTL 24 /* Slot Control */
+#define PCI_EXP_SLTSTA 26 /* Slot Status */
+#define PCI_EXP_RTCTL 28 /* Root Control */
+#define PCI_EXP_RTCTL_SECEE 0x01 /* System Error on Correctable Error */
+#define PCI_EXP_RTCTL_SENFEE 0x02 /* System Error on Non-Fatal Error */
+#define PCI_EXP_RTCTL_SEFEE 0x04 /* System Error on Fatal Error */
+#define PCI_EXP_RTCTL_PMEIE 0x08 /* PME Interrupt Enable */
+#define PCI_EXP_RTCTL_CRSSVE 0x10 /* CRS Software Visibility Enable */
+#define PCI_EXP_RTCAP 30 /* Root Capabilities */
+#define PCI_EXP_RTSTA 32 /* Root Status */
+
+/* Extended Capabilities (PCI-X 2.0 and Express) */
+#define PCI_EXT_CAP_ID(header) (header & 0x0000ffff)
+#define PCI_EXT_CAP_VER(header) ((header >> 16) & 0xf)
+#define PCI_EXT_CAP_NEXT(header) ((header >> 20) & 0xffc)
+
+#define PCI_EXT_CAP_ID_ERR 1
+#define PCI_EXT_CAP_ID_VC 2
+#define PCI_EXT_CAP_ID_DSN 3
+#define PCI_EXT_CAP_ID_PWR 4
+
+/* Advanced Error Reporting */
+#define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */
+#define PCI_ERR_UNC_TRAIN 0x00000001 /* Training */
+#define PCI_ERR_UNC_DLP 0x00000010 /* Data Link Protocol */
+#define PCI_ERR_UNC_POISON_TLP 0x00001000 /* Poisoned TLP */
+#define PCI_ERR_UNC_FCP 0x00002000 /* Flow Control Protocol */
+#define PCI_ERR_UNC_COMP_TIME 0x00004000 /* Completion Timeout */
+#define PCI_ERR_UNC_COMP_ABORT 0x00008000 /* Completer Abort */
+#define PCI_ERR_UNC_UNX_COMP 0x00010000 /* Unexpected Completion */
+#define PCI_ERR_UNC_RX_OVER 0x00020000 /* Receiver Overflow */
+#define PCI_ERR_UNC_MALF_TLP 0x00040000 /* Malformed TLP */
+#define PCI_ERR_UNC_ECRC 0x00080000 /* ECRC Error Status */
+#define PCI_ERR_UNC_UNSUP 0x00100000 /* Unsupported Request */
+#define PCI_ERR_UNCOR_MASK 8 /* Uncorrectable Error Mask */
+ /* Same bits as above */
+#define PCI_ERR_UNCOR_SEVER 12 /* Uncorrectable Error Severity */
+ /* Same bits as above */
+#define PCI_ERR_COR_STATUS 16 /* Correctable Error Status */
+#define PCI_ERR_COR_RCVR 0x00000001 /* Receiver Error Status */
+#define PCI_ERR_COR_BAD_TLP 0x00000040 /* Bad TLP Status */
+#define PCI_ERR_COR_BAD_DLLP 0x00000080 /* Bad DLLP Status */
+#define PCI_ERR_COR_REP_ROLL 0x00000100 /* REPLAY_NUM Rollover */
+#define PCI_ERR_COR_REP_TIMER 0x00001000 /* Replay Timer Timeout */
+#define PCI_ERR_COR_MASK 20 /* Correctable Error Mask */
+ /* Same bits as above */
+#define PCI_ERR_CAP 24 /* Advanced Error Capabilities */
+#define PCI_ERR_CAP_FEP(x) ((x) & 31) /* First Error Pointer */
+#define PCI_ERR_CAP_ECRC_GENC 0x00000020 /* ECRC Generation Capable */
+#define PCI_ERR_CAP_ECRC_GENE 0x00000040 /* ECRC Generation Enable */
+#define PCI_ERR_CAP_ECRC_CHKC 0x00000080 /* ECRC Check Capable */
+#define PCI_ERR_CAP_ECRC_CHKE 0x00000100 /* ECRC Check Enable */
+#define PCI_ERR_HEADER_LOG 28 /* Header Log Register (16 bytes) */
+#define PCI_ERR_ROOT_COMMAND 44 /* Root Error Command */
+/* Correctable Err Reporting Enable */
+#define PCI_ERR_ROOT_CMD_COR_EN 0x00000001
+/* Non-fatal Err Reporting Enable */
+#define PCI_ERR_ROOT_CMD_NONFATAL_EN 0x00000002
+/* Fatal Err Reporting Enable */
+#define PCI_ERR_ROOT_CMD_FATAL_EN 0x00000004
+#define PCI_ERR_ROOT_STATUS 48
+#define PCI_ERR_ROOT_COR_RCV 0x00000001 /* ERR_COR Received */
+/* Multi ERR_COR Received */
+#define PCI_ERR_ROOT_MULTI_COR_RCV 0x00000002
+/* ERR_FATAL/NONFATAL Received */
+#define PCI_ERR_ROOT_UNCOR_RCV 0x00000004
+/* Multi ERR_FATAL/NONFATAL Received */
+#define PCI_ERR_ROOT_MULTI_UNCOR_RCV 0x00000008
+#define PCI_ERR_ROOT_FIRST_FATAL 0x00000010 /* First Fatal */
+#define PCI_ERR_ROOT_NONFATAL_RCV 0x00000020 /* Non-Fatal Received */
+#define PCI_ERR_ROOT_FATAL_RCV 0x00000040 /* Fatal Received */
+#define PCI_ERR_ROOT_COR_SRC 52
+#define PCI_ERR_ROOT_SRC 54
+
+/* Virtual Channel */
+#define PCI_VC_PORT_REG1 4
+#define PCI_VC_PORT_REG2 8
+#define PCI_VC_PORT_CTRL 12
+#define PCI_VC_PORT_STATUS 14
+#define PCI_VC_RES_CAP 16
+#define PCI_VC_RES_CTRL 20
+#define PCI_VC_RES_STATUS 26
+
+/* Power Budgeting */
+#define PCI_PWR_DSR 4 /* Data Select Register */
+#define PCI_PWR_DATA 8 /* Data Register */
+#define PCI_PWR_DATA_BASE(x) ((x) & 0xff) /* Base Power */
+#define PCI_PWR_DATA_SCALE(x) (((x) >> 8) & 3) /* Data Scale */
+#define PCI_PWR_DATA_PM_SUB(x) (((x) >> 10) & 7) /* PM Sub State */
+#define PCI_PWR_DATA_PM_STATE(x) (((x) >> 13) & 3) /* PM State */
+#define PCI_PWR_DATA_TYPE(x) (((x) >> 15) & 7) /* Type */
+#define PCI_PWR_DATA_RAIL(x) (((x) >> 18) & 7) /* Power Rail */
+#define PCI_PWR_CAP 12 /* Capability */
+#define PCI_PWR_CAP_BUDGET(x) ((x) & 1) /* Included in system budget */
+
+/* Hypertransport sub capability types */
+#define HT_CAPTYPE_SLAVE 0x00 /* Slave/Primary link configuration */
+#define HT_CAPTYPE_HOST 0x20 /* Host/Secondary link configuration */
+#define HT_CAPTYPE_IRQ 0x80 /* IRQ Configuration */
+#define HT_CAPTYPE_REMAPPING_40 0xA0 /* 40 bit address remapping */
+#define HT_CAPTYPE_REMAPPING_64 0xA2 /* 64 bit address remapping */
+#define HT_CAPTYPE_UNITID_CLUMP 0x90 /* Unit ID clumping */
+#define HT_CAPTYPE_EXTCONF 0x98 /* Extended Configuration Space Access */
+#define HT_CAPTYPE_MSI_MAPPING 0xA8 /* MSI Mapping Capability */
+#define HT_CAPTYPE_DIRECT_ROUTE 0xB0 /* Direct routing configuration */
+#define HT_CAPTYPE_VCSET 0xB8 /* Virtual Channel configuration */
+#define HT_CAPTYPE_ERROR_RETRY 0xC0 /* Retry on error configuration */
+#define HT_CAPTYPE_GEN3 0xD0 /* Generation 3 hypertransport configuration */
+#define HT_CAPTYPE_PM		0xE0	/* Hypertransport power management configuration */
+
+
+#endif /* LINUX_PCI_REGS_H */
diff --git a/xen/include/asm-ia64/linux/pm.h b/xen/include/asm-ia64/linux/pm.h
new file mode 100644
index 0000000000..070394e846
--- /dev/null
+++ b/xen/include/asm-ia64/linux/pm.h
@@ -0,0 +1,279 @@
+/*
+ * pm.h - Power management interface
+ *
+ * Copyright (C) 2000 Andrew Henroid
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _LINUX_PM_H
+#define _LINUX_PM_H
+
+#ifdef __KERNEL__
+
+#include <linux/list.h>
+#include <asm/atomic.h>
+
+/*
+ * Power management requests... these are passed to pm_send_all() and friends.
+ *
+ * these functions are old and deprecated, see below.
+ */
+typedef int __bitwise pm_request_t;
+
+#define PM_SUSPEND ((__force pm_request_t) 1) /* enter D1-D3 */
+#define PM_RESUME ((__force pm_request_t) 2) /* enter D0 */
+
+
+/*
+ * Device types... these are passed to pm_register
+ */
+typedef int __bitwise pm_dev_t;
+
+#define PM_UNKNOWN_DEV ((__force pm_dev_t) 0) /* generic */
+#define PM_SYS_DEV ((__force pm_dev_t) 1) /* system device (fan, KB controller, ...) */
+#define PM_PCI_DEV ((__force pm_dev_t) 2) /* PCI device */
+#define PM_USB_DEV ((__force pm_dev_t) 3) /* USB device */
+#define PM_SCSI_DEV ((__force pm_dev_t) 4) /* SCSI device */
+#define PM_ISA_DEV ((__force pm_dev_t) 5) /* ISA device */
+#define PM_MTD_DEV ((__force pm_dev_t) 6) /* Memory Technology Device */
+
+/*
+ * System device hardware ID (PnP) values
+ */
+enum
+{
+ PM_SYS_UNKNOWN = 0x00000000, /* generic */
+ PM_SYS_KBC = 0x41d00303, /* keyboard controller */
+ PM_SYS_COM = 0x41d00500, /* serial port */
+ PM_SYS_IRDA = 0x41d00510, /* IRDA controller */
+ PM_SYS_FDC = 0x41d00700, /* floppy controller */
+ PM_SYS_VGA = 0x41d00900, /* VGA controller */
+ PM_SYS_PCMCIA = 0x41d00e00, /* PCMCIA controller */
+};
+
+/*
+ * Device identifier
+ */
+#define PM_PCI_ID(dev) ((dev)->bus->number << 16 | (dev)->devfn)
+
+/*
+ * Request handler callback
+ */
+struct pm_dev;
+
+typedef int (*pm_callback)(struct pm_dev *dev, pm_request_t rqst, void *data);
+
+/*
+ * Dynamic device information
+ */
+struct pm_dev
+{
+ pm_dev_t type;
+ unsigned long id;
+ pm_callback callback;
+ void *data;
+
+ unsigned long flags;
+ unsigned long state;
+ unsigned long prev_state;
+
+ struct list_head entry;
+};
+
+/* Functions above this comment are list-based old-style power
+ * management. Please avoid using them. */
+
+/*
+ * Callbacks for platform drivers to implement.
+ */
+extern void (*pm_idle)(void);
+extern void (*pm_power_off)(void);
+
+typedef int __bitwise suspend_state_t;
+
+#define PM_SUSPEND_ON ((__force suspend_state_t) 0)
+#define PM_SUSPEND_STANDBY ((__force suspend_state_t) 1)
+#define PM_SUSPEND_MEM ((__force suspend_state_t) 3)
+#define PM_SUSPEND_DISK ((__force suspend_state_t) 4)
+#define PM_SUSPEND_MAX ((__force suspend_state_t) 5)
+
+typedef int __bitwise suspend_disk_method_t;
+
+#define PM_DISK_FIRMWARE ((__force suspend_disk_method_t) 1)
+#define PM_DISK_PLATFORM ((__force suspend_disk_method_t) 2)
+#define PM_DISK_SHUTDOWN ((__force suspend_disk_method_t) 3)
+#define PM_DISK_REBOOT ((__force suspend_disk_method_t) 4)
+#define PM_DISK_TEST ((__force suspend_disk_method_t) 5)
+#define PM_DISK_TESTPROC ((__force suspend_disk_method_t) 6)
+#define PM_DISK_MAX ((__force suspend_disk_method_t) 7)
+
+struct pm_ops {
+ suspend_disk_method_t pm_disk_mode;
+ int (*valid)(suspend_state_t state);
+ int (*prepare)(suspend_state_t state);
+ int (*enter)(suspend_state_t state);
+ int (*finish)(suspend_state_t state);
+};
+
+extern void pm_set_ops(struct pm_ops *);
+extern struct pm_ops *pm_ops;
+extern int pm_suspend(suspend_state_t state);
+
+
+/*
+ * Device power management
+ */
+
+struct device;
+
+typedef struct pm_message {
+ int event;
+} pm_message_t;
+
+/*
+ * Several driver power state transitions are externally visible, affecting
+ * the state of pending I/O queues and (for drivers that touch hardware)
+ * interrupts, wakeups, DMA, and other hardware state. There may also be
+ * internal transitions to various low power modes, which are transparent
+ * to the rest of the driver stack (such as a driver that's ON gating off
+ * clocks which are not in active use).
+ *
+ * One transition is triggered by resume(), after a suspend() call; the
+ * message is implicit:
+ *
+ * ON Driver starts working again, responding to hardware events
+ * and software requests. The hardware may have gone through
+ * a power-off reset, or it may have maintained state from the
+ * previous suspend() which the driver will rely on while
+ * resuming. On most platforms, there are no restrictions on
+ * availability of resources like clocks during resume().
+ *
+ * Other transitions are triggered by messages sent using suspend(). All
+ * these transitions quiesce the driver, so that I/O queues are inactive.
+ * That commonly entails turning off IRQs and DMA; there may be rules
+ * about how to quiesce that are specific to the bus or the device's type.
+ * (For example, network drivers mark the link state.) Other details may
+ * differ according to the message:
+ *
+ * SUSPEND Quiesce, enter a low power device state appropriate for
+ * the upcoming system state (such as PCI_D3hot), and enable
+ * wakeup events as appropriate.
+ *
+ * FREEZE Quiesce operations so that a consistent image can be saved;
+ * but do NOT otherwise enter a low power device state, and do
+ * NOT emit system wakeup events.
+ *
+ * PRETHAW Quiesce as if for FREEZE; additionally, prepare for restoring
+ * the system from a snapshot taken after an earlier FREEZE.
+ * Some drivers will need to reset their hardware state instead
+ * of preserving it, to ensure that it's never mistaken for the
+ * state which that earlier snapshot had set up.
+ *
+ * A minimally power-aware driver treats all messages as SUSPEND, fully
+ * reinitializes its device during resume() -- whether or not it was reset
+ * during the suspend/resume cycle -- and can't issue wakeup events.
+ *
+ * More power-aware drivers may also use low power states at runtime as
+ * well as during system sleep states like PM_SUSPEND_STANDBY. They may
+ * be able to use wakeup events to exit from runtime low-power states,
+ * or from system low-power states such as standby or suspend-to-RAM.
+ */
+
+#define PM_EVENT_ON 0
+#define PM_EVENT_FREEZE 1
+#define PM_EVENT_SUSPEND 2
+#define PM_EVENT_PRETHAW 3
+
+#define PMSG_FREEZE ((struct pm_message){ .event = PM_EVENT_FREEZE, })
+#define PMSG_PRETHAW ((struct pm_message){ .event = PM_EVENT_PRETHAW, })
+#define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, })
+#define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, })
+
+struct dev_pm_info {
+ pm_message_t power_state;
+ unsigned can_wakeup:1;
+#ifdef CONFIG_PM
+ unsigned should_wakeup:1;
+ pm_message_t prev_state;
+ void * saved_state;
+ struct device * pm_parent;
+ struct list_head entry;
+#endif
+};
+
+extern void device_pm_set_parent(struct device * dev, struct device * parent);
+
+extern int device_power_down(pm_message_t state);
+extern void device_power_up(void);
+extern void device_resume(void);
+
+#ifdef CONFIG_PM
+extern suspend_disk_method_t pm_disk_mode;
+
+extern int device_suspend(pm_message_t state);
+extern int device_prepare_suspend(pm_message_t state);
+
+#define device_set_wakeup_enable(dev,val) \
+ ((dev)->power.should_wakeup = !!(val))
+#define device_may_wakeup(dev) \
+ (device_can_wakeup(dev) && (dev)->power.should_wakeup)
+
+extern int dpm_runtime_suspend(struct device *, pm_message_t);
+extern void dpm_runtime_resume(struct device *);
+extern void __suspend_report_result(const char *function, void *fn, int ret);
+
+#define suspend_report_result(fn, ret) \
+ do { \
+ __suspend_report_result(__FUNCTION__, fn, ret); \
+ } while (0)
+
+#else /* !CONFIG_PM */
+
+static inline int device_suspend(pm_message_t state)
+{
+ return 0;
+}
+
+#define device_set_wakeup_enable(dev,val) do{}while(0)
+#define device_may_wakeup(dev) (0)
+
+static inline int dpm_runtime_suspend(struct device * dev, pm_message_t state)
+{
+ return 0;
+}
+
+static inline void dpm_runtime_resume(struct device * dev)
+{
+}
+
+#define suspend_report_result(fn, ret) do { } while (0)
+
+#endif
+
+/* changes to device_may_wakeup take effect on the next pm state change.
+ * by default, devices should wakeup if they can.
+ */
+#define device_can_wakeup(dev) \
+ ((dev)->power.can_wakeup)
+#define device_init_wakeup(dev,val) \
+ do { \
+ device_can_wakeup(dev) = !!(val); \
+ device_set_wakeup_enable(dev,val); \
+ } while(0)
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_PM_H */
diff --git a/xen/include/asm-ia64/linux/sysfs.h b/xen/include/asm-ia64/linux/sysfs.h
new file mode 100644
index 0000000000..6d5c43d31d
--- /dev/null
+++ b/xen/include/asm-ia64/linux/sysfs.h
@@ -0,0 +1,206 @@
+/*
+ * sysfs.h - definitions for the device driver filesystem
+ *
+ * Copyright (c) 2001,2002 Patrick Mochel
+ * Copyright (c) 2004 Silicon Graphics, Inc.
+ *
+ * Please see Documentation/filesystems/sysfs.txt for more information.
+ */
+
+#ifndef _SYSFS_H_
+#define _SYSFS_H_
+
+#include <linux/compiler.h>
+#include <asm/atomic.h>
+
+struct kobject;
+struct module;
+
+struct attribute {
+ const char * name;
+ struct module * owner;
+ mode_t mode;
+};
+
+struct attribute_group {
+ const char * name;
+ struct attribute ** attrs;
+};
+
+
+
+/**
+ * Use these macros to make defining attributes easier. See include/linux/device.h
+ * for examples..
+ */
+
+#define __ATTR(_name,_mode,_show,_store) { \
+ .attr = {.name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE }, \
+ .show = _show, \
+ .store = _store, \
+}
+
+#define __ATTR_RO(_name) { \
+ .attr = { .name = __stringify(_name), .mode = 0444, .owner = THIS_MODULE }, \
+ .show = _name##_show, \
+}
+
+#define __ATTR_NULL { .attr = { .name = NULL } }
+
+#define attr_name(_attr) (_attr).attr.name
+
+struct vm_area_struct;
+
+struct bin_attribute {
+ struct attribute attr;
+ size_t size;
+ void *private;
+ ssize_t (*read)(struct kobject *, char *, loff_t, size_t);
+ ssize_t (*write)(struct kobject *, char *, loff_t, size_t);
+ int (*mmap)(struct kobject *, struct bin_attribute *attr,
+ struct vm_area_struct *vma);
+};
+
+struct sysfs_ops {
+ ssize_t (*show)(struct kobject *, struct attribute *,char *);
+ ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
+};
+
+struct sysfs_dirent {
+ atomic_t s_count;
+ struct list_head s_sibling;
+ struct list_head s_children;
+ void * s_element;
+ int s_type;
+ umode_t s_mode;
+ struct dentry * s_dentry;
+ struct iattr * s_iattr;
+ atomic_t s_event;
+};
+
+#define SYSFS_ROOT 0x0001
+#define SYSFS_DIR 0x0002
+#define SYSFS_KOBJ_ATTR 0x0004
+#define SYSFS_KOBJ_BIN_ATTR 0x0008
+#define SYSFS_KOBJ_LINK 0x0020
+#define SYSFS_NOT_PINNED (SYSFS_KOBJ_ATTR | SYSFS_KOBJ_BIN_ATTR | SYSFS_KOBJ_LINK)
+
+#ifdef CONFIG_SYSFS
+
+extern int __must_check
+sysfs_create_dir(struct kobject *);
+
+extern void
+sysfs_remove_dir(struct kobject *);
+
+extern int __must_check
+sysfs_rename_dir(struct kobject *, const char *new_name);
+
+extern int __must_check
+sysfs_create_file(struct kobject *, const struct attribute *);
+
+extern int __must_check
+sysfs_update_file(struct kobject *, const struct attribute *);
+
+extern int __must_check
+sysfs_chmod_file(struct kobject *kobj, struct attribute *attr, mode_t mode);
+
+extern void
+sysfs_remove_file(struct kobject *, const struct attribute *);
+
+extern int __must_check
+sysfs_create_link(struct kobject * kobj, struct kobject * target, const char * name);
+
+extern void
+sysfs_remove_link(struct kobject *, const char * name);
+
+int __must_check sysfs_create_bin_file(struct kobject *kobj,
+ struct bin_attribute *attr);
+void sysfs_remove_bin_file(struct kobject *kobj, struct bin_attribute *attr);
+
+int __must_check sysfs_create_group(struct kobject *,
+ const struct attribute_group *);
+void sysfs_remove_group(struct kobject *, const struct attribute_group *);
+void sysfs_notify(struct kobject * k, char *dir, char *attr);
+
+extern int __must_check sysfs_init(void);
+
+#else /* CONFIG_SYSFS */
+
+static inline int sysfs_create_dir(struct kobject * k)
+{
+ return 0;
+}
+
+static inline void sysfs_remove_dir(struct kobject * k)
+{
+ ;
+}
+
+static inline int sysfs_rename_dir(struct kobject * k, const char *new_name)
+{
+ return 0;
+}
+
+static inline int sysfs_create_file(struct kobject * k, const struct attribute * a)
+{
+ return 0;
+}
+
+static inline int sysfs_update_file(struct kobject * k, const struct attribute * a)
+{
+ return 0;
+}
+static inline int sysfs_chmod_file(struct kobject *kobj, struct attribute *attr, mode_t mode)
+{
+ return 0;
+}
+
+static inline void sysfs_remove_file(struct kobject * k, const struct attribute * a)
+{
+ ;
+}
+
+static inline int sysfs_create_link(struct kobject * k, struct kobject * t, const char * n)
+{
+ return 0;
+}
+
+static inline void sysfs_remove_link(struct kobject * k, const char * name)
+{
+ ;
+}
+
+
+static inline int sysfs_create_bin_file(struct kobject * k, struct bin_attribute * a)
+{
+ return 0;
+}
+
+static inline int sysfs_remove_bin_file(struct kobject * k, struct bin_attribute * a)
+{
+ return 0;
+}
+
+static inline int sysfs_create_group(struct kobject * k, const struct attribute_group *g)
+{
+ return 0;
+}
+
+static inline void sysfs_remove_group(struct kobject * k, const struct attribute_group * g)
+{
+ ;
+}
+
+static inline void sysfs_notify(struct kobject * k, char *dir, char *attr)
+{
+}
+
+static inline int __must_check sysfs_init(void)
+{
+ return 0;
+}
+
+#endif /* CONFIG_SYSFS */
+
+#endif /* _SYSFS_H_ */
diff --git a/xen/include/asm-ia64/multicall.h b/xen/include/asm-ia64/multicall.h
index 15b825bbf0..5002382359 100644
--- a/xen/include/asm-ia64/multicall.h
+++ b/xen/include/asm-ia64/multicall.h
@@ -4,26 +4,26 @@
#include <public/xen.h>
#include <xen/errno.h>
-typedef unsigned long (*hypercall_t)(
+extern unsigned long ia64_do_multicall_call(
unsigned long arg0,
unsigned long arg1,
unsigned long arg2,
unsigned long arg3,
unsigned long arg4,
- unsigned long arg5);
-
-extern const hypercall_t ia64_hypercall_table[];
+ unsigned long arg5,
+ unsigned long op);
static inline void do_multicall_call(multicall_entry_t *call)
{
if (call->op < NR_hypercalls)
- call->result = (*ia64_hypercall_table[call->op])(
+ call->result = ia64_do_multicall_call(
call->args[0],
call->args[1],
call->args[2],
call->args[3],
call->args[4],
- call->args[5]);
+ call->args[5],
+ call->op);
else
call->result = -ENOSYS;
}
diff --git a/xen/include/asm-ia64/tlbflush.h b/xen/include/asm-ia64/tlbflush.h
index 49ff463b11..a659f6a0e4 100644
--- a/xen/include/asm-ia64/tlbflush.h
+++ b/xen/include/asm-ia64/tlbflush.h
@@ -17,7 +17,7 @@ void vcpu_flush_vtlb_all(struct vcpu *v);
void vcpu_flush_tlb_vhpt_range (u64 vadr, u64 log_range);
/* Global all flush of vTLB */
-void domain_flush_vtlb_all (void);
+void domain_flush_vtlb_all(struct domain *d);
/* Global range-flush of vTLB. */
void domain_flush_vtlb_range (struct domain *d, u64 vadr, u64 addr_range);
diff --git a/xen/include/asm-ia64/vcpu.h b/xen/include/asm-ia64/vcpu.h
index b61915e3b2..1f0684263a 100644
--- a/xen/include/asm-ia64/vcpu.h
+++ b/xen/include/asm-ia64/vcpu.h
@@ -75,6 +75,7 @@ extern IA64FAULT vcpu_get_ipsr(VCPU * vcpu, u64 * pval);
extern IA64FAULT vcpu_get_isr(VCPU * vcpu, u64 * pval);
extern IA64FAULT vcpu_get_iip(VCPU * vcpu, u64 * pval);
extern IA64FAULT vcpu_increment_iip(VCPU * vcpu);
+extern IA64FAULT vcpu_decrement_iip(VCPU * vcpu);
extern IA64FAULT vcpu_get_ifa(VCPU * vcpu, u64 * pval);
extern IA64FAULT vcpu_get_itir(VCPU * vcpu, u64 * pval);
extern unsigned long vcpu_get_itir_on_fault(VCPU * vcpu, u64 ifa);
diff --git a/xen/include/asm-ia64/vmx_vcpu.h b/xen/include/asm-ia64/vmx_vcpu.h
index 0defc0d4f4..24b3b60578 100644
--- a/xen/include/asm-ia64/vmx_vcpu.h
+++ b/xen/include/asm-ia64/vmx_vcpu.h
@@ -115,8 +115,6 @@ extern void memwrite_v(VCPU * vcpu, thash_data_t * vtlb, u64 * src, u64 * dest,
size_t s);
extern void memwrite_p(VCPU * vcpu, u64 * src, u64 * dest, size_t s);
extern void vcpu_load_kernel_regs(VCPU * vcpu);
-extern IA64FAULT vmx_vcpu_increment_iip(VCPU * vcpu);
-extern IA64FAULT vmx_vcpu_decrement_iip(VCPU * vcpu);
extern void vmx_switch_rr7(unsigned long, shared_info_t *, void *, void *,
void *);
diff --git a/xen/include/asm-ia64/xensystem.h b/xen/include/asm-ia64/xensystem.h
index d966d1917b..95a1991e1a 100644
--- a/xen/include/asm-ia64/xensystem.h
+++ b/xen/include/asm-ia64/xensystem.h
@@ -11,7 +11,6 @@
*
*/
#include <asm/config.h>
-#include <linux/kernel.h>
/* Define HV space hierarchy.
VMM memory space is protected by CPL for paravirtualized domains and
diff --git a/xen/include/asm-ia64/xentypes.h b/xen/include/asm-ia64/xentypes.h
index 83ff65890e..d8bc8c0925 100644
--- a/xen/include/asm-ia64/xentypes.h
+++ b/xen/include/asm-ia64/xentypes.h
@@ -5,25 +5,6 @@
typedef unsigned long ssize_t;
typedef unsigned long size_t;
typedef long long loff_t;
-
-#ifdef __KERNEL__
-/* these lines taken from linux/types.h. they belong in xen/types.h */
-#ifdef __CHECKER__
-#define __bitwise __attribute__((bitwise))
-#else
-#define __bitwise
-#endif
-
-typedef __u16 __bitwise __le16;
-typedef __u16 __bitwise __be16;
-typedef __u32 __bitwise __le32;
-typedef __u32 __bitwise __be32;
-#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
-typedef __u64 __bitwise __le64;
-typedef __u64 __bitwise __be64;
-#endif
-
-# endif /* __KERNEL__ */
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_IA64_XENTYPES_H */
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index 63159504ae..35cc998585 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -27,6 +27,20 @@
#include <asm/hvm/io.h>
#include <public/hvm/params.h>
+typedef void SaveStateHandler(hvm_domain_context_t *h, void *opaque);
+typedef int LoadStateHandler(hvm_domain_context_t *h, void *opaque, int version_id);
+
+#define HVM_SE_IDSTR_LEN 32
+typedef struct HVMStateEntry {
+ char idstr[HVM_SE_IDSTR_LEN];
+ int instance_id;
+ int version_id;
+ SaveStateHandler *save_state;
+ LoadStateHandler *load_state;
+ void *opaque;
+ struct HVMStateEntry *next;
+} HVMStateEntry;
+
struct hvm_domain {
unsigned long shared_page_va;
unsigned long buffered_io_va;
@@ -44,6 +58,9 @@ struct hvm_domain {
spinlock_t pbuf_lock;
uint64_t params[HVM_NR_PARAMS];
+
+ struct hvm_domain_context *hvm_ctxt;
+ HVMStateEntry *first_se;
};
#endif /* __ASM_X86_HVM_DOMAIN_H__ */
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 5051ab276b..95a1cfbe9f 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -79,6 +79,13 @@ struct hvm_function_table {
struct vcpu *v, struct cpu_user_regs *r, unsigned long *crs);
void (*load_cpu_guest_regs)(
struct vcpu *v, struct cpu_user_regs *r);
+
+ /* save and load hvm guest cpu context for save/restore */
+ void (*save_cpu_ctxt)(
+ struct vcpu *v, struct hvmcpu_context *ctxt);
+ void (*load_cpu_ctxt)(
+ struct vcpu *v, struct hvmcpu_context *ctxt);
+
/*
* Examine specifics of the guest state:
* 1) determine whether paging is enabled,
@@ -157,6 +164,35 @@ hvm_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *r)
hvm_funcs.load_cpu_guest_regs(v, r);
}
+void hvm_set_guest_time(struct vcpu *v, u64 gtime);
+u64 hvm_get_guest_time(struct vcpu *v);
+
+static inline void
+hvm_save_cpu_context(
+ struct vcpu *v, struct hvmcpu_context *ctxt)
+{
+ hvm_funcs.save_cpu_ctxt(v, ctxt);
+
+ /* save guest time */
+ ctxt->gtime = hvm_get_guest_time(v);
+
+ /* set valid flag to recover whole vmcs when restore */
+ ctxt->valid = 0x55885588;
+}
+
+static inline void
+hvm_load_cpu_context(
+ struct vcpu *v, struct hvmcpu_context *ctxt)
+{
+ if ( ctxt->valid != 0x55885588)
+ return;
+
+ hvm_funcs.load_cpu_ctxt(v, ctxt);
+
+ /* restore guest time*/
+ hvm_set_guest_time(v, ctxt->gtime);
+}
+
static inline int
hvm_paging_enabled(struct vcpu *v)
{
@@ -222,8 +258,6 @@ hvm_get_segment_register(struct vcpu *v, enum x86_segment seg,
void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx);
void hvm_stts(struct vcpu *v);
-void hvm_set_guest_time(struct vcpu *v, u64 gtime);
-u64 hvm_get_guest_time(struct vcpu *v);
void hvm_migrate_timers(struct vcpu *v);
void hvm_do_resume(struct vcpu *v);
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index 721492412a..2274578aa1 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -121,6 +121,133 @@ extern unsigned int opt_hvm_debug_level;
#define TRACE_VMEXIT(index, value) \
current->arch.hvm_vcpu.hvm_trace_values[index] = (value)
+/* save/restore support */
+
+//#define HVM_DEBUG_SUSPEND
+
+extern int hvm_register_savevm(struct domain *d,
+ const char *idstr,
+ int instance_id,
+ int version_id,
+ SaveStateHandler *save_state,
+ LoadStateHandler *load_state,
+ void *opaque);
+
+static inline void hvm_ctxt_seek(hvm_domain_context_t *h, unsigned int pos)
+{
+ h->cur = pos;
+}
+
+static inline uint32_t hvm_ctxt_tell(hvm_domain_context_t *h)
+{
+ return h->cur;
+}
+
+static inline int hvm_ctxt_end(hvm_domain_context_t *h)
+{
+ return (h->cur >= h->size || h->cur >= HVM_CTXT_SIZE);
+}
+
+static inline void hvm_put_byte(hvm_domain_context_t *h, unsigned int i)
+{
+ if (h->cur >= HVM_CTXT_SIZE) {
+ h->cur++;
+ return;
+ }
+ h->data[h->cur++] = (char)i;
+}
+
+static inline void hvm_put_8u(hvm_domain_context_t *h, uint8_t b)
+{
+ hvm_put_byte(h, b);
+}
+
+static inline void hvm_put_16u(hvm_domain_context_t *h, uint16_t b)
+{
+ hvm_put_8u(h, b >> 8);
+ hvm_put_8u(h, b);
+}
+
+static inline void hvm_put_32u(hvm_domain_context_t *h, uint32_t b)
+{
+ hvm_put_16u(h, b >> 16);
+ hvm_put_16u(h, b);
+}
+
+static inline void hvm_put_64u(hvm_domain_context_t *h, uint64_t b)
+{
+ hvm_put_32u(h, b >> 32);
+ hvm_put_32u(h, b);
+}
+
+static inline void hvm_put_buffer(hvm_domain_context_t *h, const char *buf, int len)
+{
+ memcpy(&h->data[h->cur], buf, len);
+ h->cur += len;
+}
+
+
+static inline char hvm_get_byte(hvm_domain_context_t *h)
+{
+ if (h->cur >= HVM_CTXT_SIZE) {
+ printk("hvm_get_byte overflow.\n");
+ return -1;
+ }
+
+ if (h->cur >= h->size) {
+ printk("hvm_get_byte exceed data area.\n");
+ return -1;
+ }
+
+ return h->data[h->cur++];
+}
+
+static inline uint8_t hvm_get_8u(hvm_domain_context_t *h)
+{
+ return hvm_get_byte(h);
+}
+
+static inline uint16_t hvm_get_16u(hvm_domain_context_t *h)
+{
+ uint16_t v;
+ v = hvm_get_8u(h) << 8;
+ v |= hvm_get_8u(h);
+
+ return v;
+}
+
+static inline uint32_t hvm_get_32u(hvm_domain_context_t *h)
+{
+ uint32_t v;
+ v = hvm_get_16u(h) << 16;
+ v |= hvm_get_16u(h);
+
+ return v;
+}
+
+static inline uint64_t hvm_get_64u(hvm_domain_context_t *h)
+{
+ uint64_t v;
+ v = (uint64_t)hvm_get_32u(h) << 32;
+ v |= hvm_get_32u(h);
+
+ return v;
+}
+
+static inline void hvm_get_buffer(hvm_domain_context_t *h, char *buf, int len)
+{
+ memcpy(buf, &h->data[h->cur], len);
+ h->cur += len;
+}
+
+extern int hvm_save(struct vcpu*, hvm_domain_context_t *h);
+extern int hvm_load(struct vcpu*, hvm_domain_context_t *h);
+
+extern int arch_sethvm_ctxt(struct vcpu *v, struct hvm_domain_context *c);
+extern int arch_gethvm_ctxt(struct vcpu *v, struct hvm_domain_context *c);
+
+extern void shpage_init(struct domain *d, shared_iopage_t *sp);
+
extern int hvm_enabled;
int hvm_copy_to_guest_phys(paddr_t paddr, void *buf, int size);
diff --git a/xen/include/asm-x86/hvm/vpt.h b/xen/include/asm-x86/hvm/vpt.h
index d9ad3f70df..37809f9b34 100644
--- a/xen/include/asm-x86/hvm/vpt.h
+++ b/xen/include/asm-x86/hvm/vpt.h
@@ -152,7 +152,7 @@ void pt_update_irq(struct vcpu *v);
struct periodic_time *is_pt_irq(struct vcpu *v, int vector, int type);
void pt_intr_post(struct vcpu *v, int vector, int type);
void pt_reset(struct vcpu *v);
-void create_periodic_time(struct periodic_time *pt, uint64_t period,
+void create_periodic_time(struct vcpu *v, struct periodic_time *pt, uint64_t period,
uint8_t irq, char one_shot, time_cb *cb, void *data);
void destroy_periodic_time(struct periodic_time *pt);
diff --git a/xen/include/public/arch-ia64.h b/xen/include/public/arch-ia64.h
index 65cc233af3..ac8292d2a0 100644
--- a/xen/include/public/arch-ia64.h
+++ b/xen/include/public/arch-ia64.h
@@ -62,6 +62,10 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
#define VIRQ_MCA_CMC VIRQ_ARCH_1 /* MCA cmc interrupt */
#define VIRQ_MCA_CPE VIRQ_ARCH_2 /* MCA cpe interrupt */
+/* Arch specific callback irq definition */
+/* using Requester-ID(RID) as callback irq */
+#define IA64_CALLBACK_IRQ_RID (1 << 31)
+
/* Maximum number of virtual CPUs in multi-processor guests. */
/* WARNING: before changing this, check that shared_info fits on a page */
#define MAX_VIRT_CPUS 64
@@ -283,10 +287,9 @@ struct mapped_regs {
*/
unsigned char *interrupt_mask_addr;
int pending_interruption;
- int incomplete_regframe; // see SDM vol2 6.8
unsigned char vpsr_pp;
- unsigned char reserved5_2[7];
- unsigned long reserved5_1[3];
+ unsigned char reserved5_2[3];
+ unsigned long reserved5_1[4];
int metaphysical_mode; // 1 = use metaphys mapping, 0 = use virtual
int banknum; // 0 or 1, which virtual register bank is active
unsigned long rrs[8]; // region registers
@@ -430,35 +433,36 @@ struct xen_ia64_boot_param {
#define XMAPPEDREGS_OFS XSI_SIZE
/* Hyperprivops. */
-#define HYPERPRIVOP_RFI 0x1
-#define HYPERPRIVOP_RSM_DT 0x2
-#define HYPERPRIVOP_SSM_DT 0x3
-#define HYPERPRIVOP_COVER 0x4
-#define HYPERPRIVOP_ITC_D 0x5
-#define HYPERPRIVOP_ITC_I 0x6
-#define HYPERPRIVOP_SSM_I 0x7
-#define HYPERPRIVOP_GET_IVR 0x8
-#define HYPERPRIVOP_GET_TPR 0x9
-#define HYPERPRIVOP_SET_TPR 0xa
-#define HYPERPRIVOP_EOI 0xb
-#define HYPERPRIVOP_SET_ITM 0xc
-#define HYPERPRIVOP_THASH 0xd
-#define HYPERPRIVOP_PTC_GA 0xe
-#define HYPERPRIVOP_ITR_D 0xf
-#define HYPERPRIVOP_GET_RR 0x10
-#define HYPERPRIVOP_SET_RR 0x11
-#define HYPERPRIVOP_SET_KR 0x12
-#define HYPERPRIVOP_FC 0x13
-#define HYPERPRIVOP_GET_CPUID 0x14
-#define HYPERPRIVOP_GET_PMD 0x15
-#define HYPERPRIVOP_GET_EFLAG 0x16
-#define HYPERPRIVOP_SET_EFLAG 0x17
-#define HYPERPRIVOP_RSM_BE 0x18
-#define HYPERPRIVOP_GET_PSR 0x19
-#define HYPERPRIVOP_MAX 0x19
+#define HYPERPRIVOP_START 0x1
+#define HYPERPRIVOP_RFI (HYPERPRIVOP_START + 0x0)
+#define HYPERPRIVOP_RSM_DT (HYPERPRIVOP_START + 0x1)
+#define HYPERPRIVOP_SSM_DT (HYPERPRIVOP_START + 0x2)
+#define HYPERPRIVOP_COVER (HYPERPRIVOP_START + 0x3)
+#define HYPERPRIVOP_ITC_D (HYPERPRIVOP_START + 0x4)
+#define HYPERPRIVOP_ITC_I (HYPERPRIVOP_START + 0x5)
+#define HYPERPRIVOP_SSM_I (HYPERPRIVOP_START + 0x6)
+#define HYPERPRIVOP_GET_IVR (HYPERPRIVOP_START + 0x7)
+#define HYPERPRIVOP_GET_TPR (HYPERPRIVOP_START + 0x8)
+#define HYPERPRIVOP_SET_TPR (HYPERPRIVOP_START + 0x9)
+#define HYPERPRIVOP_EOI (HYPERPRIVOP_START + 0xa)
+#define HYPERPRIVOP_SET_ITM (HYPERPRIVOP_START + 0xb)
+#define HYPERPRIVOP_THASH (HYPERPRIVOP_START + 0xc)
+#define HYPERPRIVOP_PTC_GA (HYPERPRIVOP_START + 0xd)
+#define HYPERPRIVOP_ITR_D (HYPERPRIVOP_START + 0xe)
+#define HYPERPRIVOP_GET_RR (HYPERPRIVOP_START + 0xf)
+#define HYPERPRIVOP_SET_RR (HYPERPRIVOP_START + 0x10)
+#define HYPERPRIVOP_SET_KR (HYPERPRIVOP_START + 0x11)
+#define HYPERPRIVOP_FC (HYPERPRIVOP_START + 0x12)
+#define HYPERPRIVOP_GET_CPUID (HYPERPRIVOP_START + 0x13)
+#define HYPERPRIVOP_GET_PMD (HYPERPRIVOP_START + 0x14)
+#define HYPERPRIVOP_GET_EFLAG (HYPERPRIVOP_START + 0x15)
+#define HYPERPRIVOP_SET_EFLAG (HYPERPRIVOP_START + 0x16)
+#define HYPERPRIVOP_RSM_BE (HYPERPRIVOP_START + 0x17)
+#define HYPERPRIVOP_GET_PSR (HYPERPRIVOP_START + 0x18)
+#define HYPERPRIVOP_MAX (0x19)
/* Fast and light hypercalls. */
-#define __HYPERVISOR_ia64_fast_eoi 0x0200
+#define __HYPERVISOR_ia64_fast_eoi __HYPERVISOR_arch_1
/* Xencomm macros. */
#define XENCOMM_INLINE_MASK 0xf800000000000000UL
diff --git a/xen/include/public/arch-x86/xen.h b/xen/include/public/arch-x86/xen.h
index 34cbdb7c59..3e3a4ffdfe 100644
--- a/xen/include/public/arch-x86/xen.h
+++ b/xen/include/public/arch-x86/xen.h
@@ -109,6 +109,70 @@ DEFINE_XEN_GUEST_HANDLE(trap_info_t);
typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
/*
+ * World vmcs state
+ */
+struct vmcs_data {
+ uint64_t eip; /* execution pointer */
+ uint64_t esp; /* stack pointer */
+ uint64_t eflags; /* flags register */
+ uint64_t cr0;
+ uint64_t cr3; /* page table directory */
+ uint64_t cr4;
+ uint32_t idtr_limit; /* idt */
+ uint64_t idtr_base;
+ uint32_t gdtr_limit; /* gdt */
+ uint64_t gdtr_base;
+ uint32_t cs_sel; /* cs selector */
+ uint32_t cs_limit;
+ uint64_t cs_base;
+ uint32_t cs_arbytes;
+ uint32_t ds_sel; /* ds selector */
+ uint32_t ds_limit;
+ uint64_t ds_base;
+ uint32_t ds_arbytes;
+ uint32_t es_sel; /* es selector */
+ uint32_t es_limit;
+ uint64_t es_base;
+ uint32_t es_arbytes;
+ uint32_t ss_sel; /* ss selector */
+ uint32_t ss_limit;
+ uint64_t ss_base;
+ uint32_t ss_arbytes;
+ uint32_t fs_sel; /* fs selector */
+ uint32_t fs_limit;
+ uint64_t fs_base;
+ uint32_t fs_arbytes;
+ uint32_t gs_sel; /* gs selector */
+ uint32_t gs_limit;
+ uint64_t gs_base;
+ uint32_t gs_arbytes;
+ uint32_t tr_sel; /* task selector */
+ uint32_t tr_limit;
+ uint64_t tr_base;
+ uint32_t tr_arbytes;
+ uint32_t ldtr_sel; /* ldtr selector */
+ uint32_t ldtr_limit;
+ uint64_t ldtr_base;
+ uint32_t ldtr_arbytes;
+ uint32_t sysenter_cs;
+ uint64_t sysenter_esp;
+ uint64_t sysenter_eip;
+ /* msr for em64t */
+ uint64_t shadow_gs;
+ uint64_t flags;
+ /* same size as VMX_MSR_COUNT */
+ uint64_t msr_items[6];
+ uint64_t vmxassist_enabled;
+};
+typedef struct vmcs_data vmcs_data_t;
+
+struct hvmcpu_context {
+ uint32_t valid;
+ struct vmcs_data data;
+ uint64_t gtime;
+};
+
+/*
* The following is all CPU context. Note that the fpu_ctxt block is filled
* in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
*/
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index d9b23d5b75..0a75ef7a20 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -386,6 +386,21 @@ struct xen_domctl_settimeoffset {
};
typedef struct xen_domctl_settimeoffset xen_domctl_settimeoffset_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_settimeoffset_t);
+
+#define HVM_CTXT_SIZE 6144
+typedef struct hvm_domain_context {
+ uint32_t cur;
+ uint32_t size;
+ uint8_t data[HVM_CTXT_SIZE];
+} hvm_domain_context_t;
+DEFINE_XEN_GUEST_HANDLE(hvm_domain_context_t);
+
+#define XEN_DOMCTL_gethvmcontext 33
+#define XEN_DOMCTL_sethvmcontext 34
+typedef struct xen_domctl_hvmcontext {
+ XEN_GUEST_HANDLE(hvm_domain_context_t) ctxt; /* IN/OUT */
+} xen_domctl_hvmcontext_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_t);
#define XEN_DOMCTL_real_mode_area 26
struct xen_domctl_real_mode_area {
@@ -423,6 +438,7 @@ struct xen_domctl {
struct xen_domctl_arch_setup arch_setup;
struct xen_domctl_settimeoffset settimeoffset;
struct xen_domctl_real_mode_area real_mode_area;
+ struct xen_domctl_hvmcontext hvmcontext;
uint8_t pad[128];
} u;
};
diff --git a/xen/include/xlat.lst b/xen/include/xlat.lst
index 950f60b2f4..84f1a43fe2 100644
--- a/xen/include/xlat.lst
+++ b/xen/include/xlat.lst
@@ -8,6 +8,8 @@
? vcpu_time_info xen.h
! cpu_user_regs arch-x86/xen-@arch@.h
! trap_info arch-x86/xen.h
+! hvmcpu_context arch-x86/xen.h
+! vmcs_data arch-x86/xen.h
! vcpu_guest_context arch-x86/xen.h
? acm_getdecision acm_ops.h
! ctl_cpumap domctl.h